/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/event-loop.h"
#include "gdbsupport/event-pipe.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
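
/* Minimal usage sketch (hypothetical helper, not part of the target
   code below): on no-MMU targets the offsets above index the traced
   process's user area and are read with PTRACE_PEEKUSER.  Error
   handling is elided.  */

#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR)
static int
example_read_text_data_addrs (int pid, long *text, long *data)
{
  errno = 0;
  *text = ptrace (PTRACE_PEEKUSER, pid,
		  (PTRACE_TYPE_ARG3) (uintptr_t) PT_TEXT_ADDR, 0);
  *data = ptrace (PTRACE_PEEKUSER, pid,
		  (PTRACE_TYPE_ARG3) (uintptr_t) PT_DATA_ADDR, 0);
  return errno == 0 ? 0 : -1;
}
#endif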

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
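
/* Sketch of how definitions like the above are typically consumed
   (hypothetical helper; gdbserver's real auxv access goes through the
   target's read_auxv method): /proc/PID/auxv is an array of entries
   terminated by an AT_NULL (zero) entry.  This assumes the inferior's
   entries match Elf64_auxv_t, i.e. a 64-bit inferior.  */

static uint64_t
example_read_auxv_value (int pid, uint64_t type)
{
  char filename[64];
  Elf64_auxv_t entry;
  uint64_t value = 0;

  xsnprintf (filename, sizeof (filename), "/proc/%d/auxv", pid);
  int fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;
  while (read (fd, &entry, sizeof (entry)) == sizeof (entry)
	 && entry.a_type != 0 /* AT_NULL terminates the array.  */)
    if (entry.a_type == type)
      {
	value = entry.a_un.a_val;
	break;
      }
  close (fd);
  return value;
}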

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
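
/* Usage sketch for the list above (hypothetical helper; the real
   producer and consumer are the wait machinery and
   handle_extended_wait): a stop collected for a not-yet-known PID is
   parked, then later claimed instead of calling waitpid a second
   time.  */

static void
example_park_and_claim_stop (int pid, int wstat)
{
  /* Park the status for PID...  */
  add_to_pid_list (&stopped_pids, pid, wstat);

  /* ...and later, whoever learns about PID claims it.  */
  int status;
  if (pull_pid_from_list (&stopped_pids, pid, &status))
    gdb_assert (status == wstat);
}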

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
				    siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The event pipe registered as a waitable file in the event loop.  */
static event_pipe linux_event_pipe;

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe.is_open ())

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit one, and -1 if it is not an ELF file at all.  In either case,
   store the ELF machine number in *MACHINE (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
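
/* Usage sketch (hypothetical caller): pick a register layout based on
   the inferior's executable.  A result of -1 means /proc/PID/exe was
   not readable and the answer is unknown; treat that as "not 64-bit"
   here.  */

static bool
example_inferior_is_elf64 (int pid)
{
  unsigned int machine;

  return linux_pid_exe_is_elf_64_file (pid, &machine) > 0;
}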

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  threads_debug_printf ("deleting %ld", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid);

	  threads_debug_printf ("Got fork event from LWP %ld, "
				"new child is %d",
				ptid_of (event_thr).lwp (),
				ptid.pid ());

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      threads_debug_printf ("leaving child suspended");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      threads_debug_printf
	("Got clone event from LWP %ld, new child is LWP %ld",
	 lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      threads_debug_printf ("Got exec event from LWP %ld",
			    lwpid_of (event_thr));

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
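
/* Condensed sketch of the kernel-side dance handled above
   (hypothetical helper): with the PTRACE_O_TRACEFORK/VFORK/CLONE
   options set, a fork/vfork/clone stop carries the new child's LWP id
   in the event message, and the child itself starts out in a SIGSTOP
   that must be collected separately.  */

static unsigned long
example_collect_new_child (int parent_lwpid)
{
  unsigned long new_pid;
  int status;

  ptrace (PTRACE_GETEVENTMSG, parent_lwpid, (PTRACE_TYPE_ARG3) 0, &new_pid);

  if (my_waitpid (new_pid, &status, __WALL) != (int) new_pid
      || !WIFSTOPPED (status))
    warning ("unexpected initial stop for new LWP %lu", new_pid);

  return new_pid;
}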

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  threads_debug_printf ("pc is 0x%lx", (long) pc);

  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      threads_debug_printf
	("%s stopped by software breakpoint",
	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    threads_debug_printf
      ("%s stopped by hardware breakpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    threads_debug_printf
      ("%s stopped by hardware watchpoint",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    threads_debug_printf
      ("%s stopped by trace",
       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());

  lwp->stop_pc = pc;
  return true;
}
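
/* Reference sketch for the si_code checks above (hypothetical helper;
   the GDB_ARCH_IS_TRAP_* macros abstract per-architecture quirks): on
   Linux, a SIGTRAP's si_code encodes the trap source.  */

#if USE_SIGTRAP_SIGINFO
static const char *
example_describe_sigtrap (const siginfo_t *info)
{
  if (info->si_signo != SIGTRAP)
    return "not a SIGTRAP";

  switch (info->si_code)
    {
    case TRAP_BRKPT:
      return "software breakpoint";
    case TRAP_TRACE:
      return "single-step";
#ifdef TRAP_HWBKPT
    case TRAP_HWBKPT:
      return "hardware breakpoint/watchpoint";
#endif
    default:
      return "other trap";
    }
}
#endif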

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
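
/* Sketch of the protocol around the callback above (hypothetical;
   fork_inferior adds more on top, e.g. environment and terminal
   handling): the callback runs in the child between fork and exec,
   and the parent then collects the SIGTRAP raised by the traced
   exec.  */

static pid_t
example_spawn_traced (const char *program, char *const argv[])
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      linux_ptrace_fun ();	/* PTRACE_TRACEME + stdio setup.  */
      execv (program, argv);
      _exit (127);		/* Only reached if exec failed.  */
    }

  int status;
  waitpid (pid, &status, 0);	/* The initial exec SIGTRAP stop.  */
  return pid;
}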

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      threads_debug_printf ("Attached to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
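
/* Condensed sketch of the attach handshake described above
   (hypothetical helper; attach_lwp intentionally defers the wait):
   PTRACE_ATTACH queues a SIGSTOP, which the tracer then collects.  */

static int
example_attach_and_wait (int lwpid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  if (my_waitpid (lwpid, &status, __WALL) != lwpid
      || !WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
    return ESRCH;

  return 0;
}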

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      threads_debug_printf ("Found new lwp %d", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	threads_debug_printf
	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
	   lwpid, err, safe_strerror (err));
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
			    target_pid_to_str (ptid_of (thr)).c_str (),
			    save_errno ? safe_strerror (save_errno) : "OK");
    }
}
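
/* For reference, a sketch of the usual way to target one thread with
   a signal, as kill_lwp (declared above) does: prefer the tkill
   syscall, which takes a thread id rather than a process id.  The
   fallback here is hypothetical.  */

static int
example_kill_one_thread (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  errno = 0;
  int ret = syscall (__NR_tkill, lwpid, signo);
  if (ret == 0 || errno != ENOSYS)
    return ret;
#endif
  /* Very old kernels without tkill: fall back to kill, which targets
     the whole thread group.  */
  return kill (lwpid, signo);
}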

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      threads_debug_printf ("is last of process %s",
			    target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    threads_debug_printf ("cannot find lwp for pid: %d", pid);
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      threads_debug_printf ("lwp %s had stopped with extended "
			    "status: no pending signal",
			    target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      threads_debug_printf ("lwp %s had signal %s, "
			    "but we don't know if we should pass it.  "
			    "Default to not.",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
			    target_pid_to_str (ptid_of (thread)).c_str (),
			    gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
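
/* Reference sketch for the decoding above: a raw waitpid status packs
   several mutually exclusive cases, unpacked with the W* macros
   (hypothetical helper, debug output only).  */

static void
example_describe_wait_status (int status)
{
  if (WIFEXITED (status))
    threads_debug_printf ("exited normally, code %d", WEXITSTATUS (status));
  else if (WIFSIGNALED (status))
    threads_debug_printf ("killed by signal %d", WTERMSIG (status));
  else if (WIFSTOPPED (status))
    threads_debug_printf ("stopped by signal %d", WSTOPSIG (status));
}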

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      threads_debug_printf ("Sending SIGCONT to %s",
			    target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else
    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
			  target_pid_to_str (ptid_of (thread)).c_str (),
			  strsignal (sig));

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS if waitpid actually filled it in.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  threads_debug_printf ("PC of %ld changed",
				lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  threads_debug_printf ("previous SW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  threads_debug_printf ("previous HW breakpoint of %ld gone",
				lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  threads_debug_printf ("discarding pending breakpoint status");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
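
/* Usage sketch (hypothetical helper): find the first stopped LWP of
   process PID, passing a lambda as the filter callback.  */

static struct lwp_info *
example_first_stopped_lwp (int pid)
{
  return iterate_over_lwps (ptid_t (pid), [] (struct lwp_info *lwp)
    {
      return lwp->stopped != 0;
    });
}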

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
			  "num_lwps=%d, zombie=%d",
			  leader_pid, leader_lp != NULL, num_lwps (leader_pid),
			  linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     its tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	threads_debug_printf ("Thread group leader %d zombie "
			      "(it exited, or another thread execd).",
			      leader_pid);

	delete_lwp (leader_lp);
      }
  });
}

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (lwp->suspended > 4)
    threads_debug_printf
      ("LWP %ld has a suspiciously high suspend count, suspended=%d",
       lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */
1826
1827 static int
1828 handle_tracepoints (struct lwp_info *lwp)
1829 {
1830 struct thread_info *tinfo = get_lwp_thread (lwp);
1831 int tpoint_related_event = 0;
1832
1833 gdb_assert (lwp->suspended == 0);
1834
1835 /* If this tracepoint hit causes a tracing stop, we'll immediately
1836 uninsert tracepoints. To do this, we temporarily pause all
1837 threads, unpatch away, and then unpause threads. We need to make
1838 sure the unpausing doesn't resume LWP too. */
1839 lwp_suspended_inc (lwp);
1840
1841 /* And we need to be sure that any all-threads-stopping doesn't try
1842 to move threads out of the jump pads, as it could deadlock the
1843 inferior (LWP could be in the jump pad, maybe even holding the
1844 lock.) */
1845
1846 /* Do any necessary step collect actions. */
1847 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1848
1849 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1850
1851 /* See if we just hit a tracepoint and do its main collect
1852 actions. */
1853 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1854
1855 lwp_suspended_decr (lwp);
1856
1857 gdb_assert (lwp->suspended == 0);
1858 gdb_assert (!stabilizing_threads
1859 || (lwp->collecting_fast_tracepoint
1860 != fast_tpoint_collect_result::not_collecting));
1861
1862 if (tpoint_related_event)
1863 {
1864 threads_debug_printf ("got a tracepoint event");
1865 return 1;
1866 }
1867
1868 return 0;
1869 }
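
/* wait_1 below stores this function's result in its TRACE_EVENT
   local; a nonzero value suppresses reporting the SIGTRAP to GDB (see
   the REPORT_TO_GDB computation there), since the trap was consumed
   by tracepoint handling.  */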
1870
1871 fast_tpoint_collect_result
1872 linux_process_target::linux_fast_tracepoint_collecting
1873 (lwp_info *lwp, fast_tpoint_collect_status *status)
1874 {
1875 CORE_ADDR thread_area;
1876 struct thread_info *thread = get_lwp_thread (lwp);
1877
1878 /* Get the thread area address. This is used to recognize which
1879 thread is which when tracing with the in-process agent library.
1880 We don't read anything from the address, and treat it as opaque;
1881 it's the address itself that we assume is unique per-thread. */
1882 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1883 return fast_tpoint_collect_result::not_collecting;
1884
1885 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1886 }
1887
1888 int
1889 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1890 {
1891 return -1;
1892 }
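
/* The base implementation above just reports failure; architectures
   that support fast tracepoints (x86, for example) override
   low_get_thread_area, and linux_fast_tracepoint_collecting treats
   the -1 result as "not collecting".  */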
1893
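/* Check whether LWP is stopped midway through a fast tracepoint
   collection in the jump pad and, if so, whether it must finish the
   collection and leave the pad before the status in *WSTAT may be
   reported to GDB.  Returns true in that case (possibly after
   planting an exit-jump-pad breakpoint).  A rough sketch of one
   caller, move_out_of_jump_pad below:

       if (maybe_move_out_of_jump_pad (lwp, wstat))
	 {
	   enqueue_one_deferred_signal (lwp, wstat);
	   resume_one_lwp (lwp, 0, 0, NULL);
	 }

   so the deferred signal is replayed once the LWP is out.  */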
1894 bool
1895 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1896 {
1897 scoped_restore_current_thread restore_thread;
1898 switch_to_thread (get_lwp_thread (lwp));
1899
1900 if ((wstat == NULL
1901 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1902 && supports_fast_tracepoints ()
1903 && agent_loaded_p ())
1904 {
1905 struct fast_tpoint_collect_status status;
1906
1907 threads_debug_printf
1908 ("Checking whether LWP %ld needs to move out of the jump pad.",
1909 lwpid_of (current_thread));
1910
1911 fast_tpoint_collect_result r
1912 = linux_fast_tracepoint_collecting (lwp, &status);
1913
1914 if (wstat == NULL
1915 || (WSTOPSIG (*wstat) != SIGILL
1916 && WSTOPSIG (*wstat) != SIGFPE
1917 && WSTOPSIG (*wstat) != SIGSEGV
1918 && WSTOPSIG (*wstat) != SIGBUS))
1919 {
1920 lwp->collecting_fast_tracepoint = r;
1921
1922 if (r != fast_tpoint_collect_result::not_collecting)
1923 {
1924 if (r == fast_tpoint_collect_result::before_insn
1925 && lwp->exit_jump_pad_bkpt == NULL)
1926 {
1927 /* Haven't executed the original instruction yet.
1928 Set breakpoint there, and wait till it's hit,
1929 then single-step until exiting the jump pad. */
1930 lwp->exit_jump_pad_bkpt
1931 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1932 }
1933
1934 threads_debug_printf
1935 ("Checking whether LWP %ld needs to move out of the jump pad..."
1936 " it does", lwpid_of (current_thread));
1937
1938 return true;
1939 }
1940 }
1941 else
1942 {
1943 /* If we get a synchronous signal while collecting, *and*
1944 while executing the (relocated) original instruction,
1945 reset the PC to point at the tpoint address, before
1946 reporting to GDB. Otherwise, it's an IPA lib bug: just
1947 report the signal to GDB, and pray for the best. */
1948
1949 lwp->collecting_fast_tracepoint
1950 = fast_tpoint_collect_result::not_collecting;
1951
1952 if (r != fast_tpoint_collect_result::not_collecting
1953 && (status.adjusted_insn_addr <= lwp->stop_pc
1954 && lwp->stop_pc < status.adjusted_insn_addr_end))
1955 {
1956 siginfo_t info;
1957 struct regcache *regcache;
1958
1959 /* The si_addr on a few signals references the address
1960 of the faulting instruction. Adjust that as
1961 well. */
1962 if ((WSTOPSIG (*wstat) == SIGILL
1963 || WSTOPSIG (*wstat) == SIGFPE
1964 || WSTOPSIG (*wstat) == SIGBUS
1965 || WSTOPSIG (*wstat) == SIGSEGV)
1966 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1967 (PTRACE_TYPE_ARG3) 0, &info) == 0
1968 /* Final check just to make sure we don't clobber
1969 the siginfo of non-kernel-sent signals. */
1970 && (uintptr_t) info.si_addr == lwp->stop_pc)
1971 {
1972 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1973 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1974 (PTRACE_TYPE_ARG3) 0, &info);
1975 }
1976
1977 regcache = get_thread_regcache (current_thread, 1);
1978 low_set_pc (regcache, status.tpoint_addr);
1979 lwp->stop_pc = status.tpoint_addr;
1980
1981 /* Cancel any fast tracepoint lock this thread was
1982 holding. */
1983 force_unlock_trace_buffer ();
1984 }
1985
1986 if (lwp->exit_jump_pad_bkpt != NULL)
1987 {
1988 threads_debug_printf
1989	    ("Cancelling fast exit-jump-pad: removing bkpt; "
1990	     "stopping all threads momentarily.");
1991
1992 stop_all_lwps (1, lwp);
1993
1994 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1995 lwp->exit_jump_pad_bkpt = NULL;
1996
1997 unstop_all_lwps (1, lwp);
1998
1999 gdb_assert (lwp->suspended >= 0);
2000 }
2001 }
2002 }
2003
2004 threads_debug_printf
2005 ("Checking whether LWP %ld needs to move out of the jump pad... no",
2006 lwpid_of (current_thread));
2007
2008 return false;
2009 }
2010
2011 /* Enqueue one signal in the "signals to report later when out of the
2012 jump pad" list. */
2013
2014 static void
2015 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2016 {
2017 struct thread_info *thread = get_lwp_thread (lwp);
2018
2019 threads_debug_printf ("Deferring signal %d for LWP %ld.",
2020 WSTOPSIG (*wstat), lwpid_of (thread));
2021
2022 if (debug_threads)
2023 {
2024 for (const auto &sig : lwp->pending_signals_to_report)
2025 threads_debug_printf (" Already queued %d", sig.signal);
2026
2027 threads_debug_printf (" (no more currently queued signals)");
2028 }
2029
2030 /* Don't enqueue non-RT signals if they are already in the deferred
2031	     queue.  (SIGSTOP is the easiest signal to see ending up here
2032	     twice.)  */
2033 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2034 {
2035 for (const auto &sig : lwp->pending_signals_to_report)
2036 {
2037 if (sig.signal == WSTOPSIG (*wstat))
2038 {
2039 threads_debug_printf
2040 ("Not requeuing already queued non-RT signal %d for LWP %ld",
2041 sig.signal, lwpid_of (thread));
2042 return;
2043 }
2044 }
2045 }
2046
2047 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2048
2049 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2050 &lwp->pending_signals_to_report.back ().info);
2051 }
2052
2053 /* Dequeue one signal from the "signals to report later when out of
2054 the jump pad" list. */
2055
2056 static int
2057 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2058 {
2059 struct thread_info *thread = get_lwp_thread (lwp);
2060
2061 if (!lwp->pending_signals_to_report.empty ())
2062 {
2063 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2064
2065 *wstat = W_STOPCODE (p_sig.signal);
2066 if (p_sig.info.si_signo != 0)
2067 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2068 &p_sig.info);
2069
2070 lwp->pending_signals_to_report.pop_front ();
2071
2072 threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2073 WSTOPSIG (*wstat), lwpid_of (thread));
2074
2075 if (debug_threads)
2076 {
2077 for (const auto &sig : lwp->pending_signals_to_report)
2078 threads_debug_printf (" Still queued %d", sig.signal);
2079
2080 threads_debug_printf (" (no more queued signals)");
2081 }
2082
2083 return 1;
2084 }
2085
2086 return 0;
2087 }
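
/* The enqueue/dequeue pair above is used by wait_1 below, roughly:

       if (maybe_move_out_of_jump_pad (event_child, &w))
	 enqueue_one_deferred_signal (event_child, &w);
       ...
       if (dequeue_one_deferred_signal (event_child, &w))
	 ... report W to GDB ...

   so a signal that arrived mid-collection is replayed to GDB only
   once the LWP has left the jump pad.  */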
2088
2089 bool
2090 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2091 {
2092 scoped_restore_current_thread restore_thread;
2093 switch_to_thread (get_lwp_thread (child));
2094
2095 if (low_stopped_by_watchpoint ())
2096 {
2097 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2098 child->stopped_data_address = low_stopped_data_address ();
2099 }
2100
2101 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2102 }
2103
2104 bool
2105 linux_process_target::low_stopped_by_watchpoint ()
2106 {
2107 return false;
2108 }
2109
2110 CORE_ADDR
2111 linux_process_target::low_stopped_data_address ()
2112 {
2113 return 0;
2114 }
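
/* These two defaults are what targets without hardware watchpoint
   support get; check_stopped_by_watchpoint above then never marks a
   stop as TARGET_STOPPED_BY_WATCHPOINT.  Targets with the feature
   override both together.  */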
2115
2116 /* Return the ptrace options that we want to try to enable. */
2117
2118 static int
2119 linux_low_ptrace_options (int attached)
2120 {
2121 client_state &cs = get_client_state ();
2122 int options = 0;
2123
2124 if (!attached)
2125 options |= PTRACE_O_EXITKILL;
2126
2127 if (cs.report_fork_events)
2128 options |= PTRACE_O_TRACEFORK;
2129
2130 if (cs.report_vfork_events)
2131 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2132
2133 if (cs.report_exec_events)
2134 options |= PTRACE_O_TRACEEXEC;
2135
2136 options |= PTRACE_O_TRACESYSGOOD;
2137
2138 return options;
2139 }
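
/* For illustration: for a process gdbserver launched itself (not
   attached), with fork, vfork and exec reporting all enabled by the
   client, the mask computed above would be

       PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
       | PTRACE_O_TRACEVFORKDONE | PTRACE_O_TRACEEXEC
       | PTRACE_O_TRACESYSGOOD

   and is later handed to linux_enable_event_reporting in
   filter_event.  */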
2140
2141 void
2142 linux_process_target::filter_event (int lwpid, int wstat)
2143 {
2144 client_state &cs = get_client_state ();
2145 struct lwp_info *child;
2146 struct thread_info *thread;
2147 int have_stop_pc = 0;
2148
2149 child = find_lwp_pid (ptid_t (lwpid));
2150
2151 /* Check for stop events reported by a process we didn't already
2152 know about - anything not already in our LWP list.
2153
2154 If we're expecting to receive stopped processes after
2155 fork, vfork, and clone events, then we'll just add the
2156 new one to our list and go back to waiting for the event
2157 to be reported - the stopped process might be returned
2158 from waitpid before or after the event is.
2159
2160	     But note the case of a non-leader thread exec'ing after the
2161	     leader has exited and gone from our lists (because
2162 check_zombie_leaders deleted it). The non-leader thread
2163 changes its tid to the tgid. */
2164
2165 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2166 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2167 {
2168 ptid_t child_ptid;
2169
2170 /* A multi-thread exec after we had seen the leader exiting. */
2171 threads_debug_printf ("Re-adding thread group leader LWP %d after exec.",
2172 lwpid);
2173
2174 child_ptid = ptid_t (lwpid, lwpid);
2175 child = add_lwp (child_ptid);
2176 child->stopped = 1;
2177 switch_to_thread (child->thread);
2178 }
2179
2180 /* If we didn't find a process, one of two things presumably happened:
2181 - A process we started and then detached from has exited. Ignore it.
2182 - A process we are controlling has forked and the new child's stop
2183 was reported to us by the kernel. Save its PID. */
2184 if (child == NULL && WIFSTOPPED (wstat))
2185 {
2186 add_to_pid_list (&stopped_pids, lwpid, wstat);
2187 return;
2188 }
2189 else if (child == NULL)
2190 return;
2191
2192 thread = get_lwp_thread (child);
2193
2194 child->stopped = 1;
2195
2196 child->last_status = wstat;
2197
2198 /* Check if the thread has exited. */
2199 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2200 {
2201 threads_debug_printf ("%d exited", lwpid);
2202
2203 if (finish_step_over (child))
2204 {
2205 /* Unsuspend all other LWPs, and set them back running again. */
2206 unsuspend_all_lwps (child);
2207 }
2208
2209 /* If there is at least one more LWP, then the exit signal was
2210 not the end of the debugged application and should be
2211 ignored, unless GDB wants to hear about thread exits. */
2212 if (cs.report_thread_events
2213 || last_thread_of_process_p (pid_of (thread)))
2214 {
2215	  /* Events are serialized to the GDB core, so we can't
2216	     report this one right now.  Leave the status pending for
2217 the next time we're able to report it. */
2218 mark_lwp_dead (child, wstat);
2219 return;
2220 }
2221 else
2222 {
2223 delete_lwp (child);
2224 return;
2225 }
2226 }
2227
2228 gdb_assert (WIFSTOPPED (wstat));
2229
2230 if (WIFSTOPPED (wstat))
2231 {
2232 struct process_info *proc;
2233
2234 /* Architecture-specific setup after inferior is running. */
2235 proc = find_process_pid (pid_of (thread));
2236 if (proc->tdesc == NULL)
2237 {
2238 if (proc->attached)
2239 {
2240 /* This needs to happen after we have attached to the
2241 inferior and it is stopped for the first time, but
2242 before we access any inferior registers. */
2243 arch_setup_thread (thread);
2244 }
2245 else
2246 {
2247 /* The process is started, but GDBserver will do
2248 architecture-specific setup after the program stops at
2249 the first instruction. */
2250 child->status_pending_p = 1;
2251 child->status_pending = wstat;
2252 return;
2253 }
2254 }
2255 }
2256
2257 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2258 {
2259 struct process_info *proc = find_process_pid (pid_of (thread));
2260 int options = linux_low_ptrace_options (proc->attached);
2261
2262 linux_enable_event_reporting (lwpid, options);
2263 child->must_set_ptrace_flags = 0;
2264 }
2265
2266 /* Always update syscall_state, even if it will be filtered later. */
2267 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2268 {
2269 child->syscall_state
2270 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2271 ? TARGET_WAITKIND_SYSCALL_RETURN
2272 : TARGET_WAITKIND_SYSCALL_ENTRY);
2273 }
2274 else
2275 {
2276 /* Almost all other ptrace-stops are known to be outside of system
2277 calls, with further exceptions in handle_extended_wait. */
2278 child->syscall_state = TARGET_WAITKIND_IGNORE;
2279 }
2280
2281 /* Be careful to not overwrite stop_pc until save_stop_reason is
2282 called. */
2283 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2284 && linux_is_extended_waitstatus (wstat))
2285 {
2286 child->stop_pc = get_pc (child);
2287 if (handle_extended_wait (&child, wstat))
2288 {
2289 /* The event has been handled, so just return without
2290 reporting it. */
2291 return;
2292 }
2293 }
2294
2295 if (linux_wstatus_maybe_breakpoint (wstat))
2296 {
2297 if (save_stop_reason (child))
2298 have_stop_pc = 1;
2299 }
2300
2301 if (!have_stop_pc)
2302 child->stop_pc = get_pc (child);
2303
2304 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2305 && child->stop_expected)
2306 {
2307 threads_debug_printf ("Expected stop.");
2308
2309 child->stop_expected = 0;
2310
2311 if (thread->last_resume_kind == resume_stop)
2312 {
2313 /* We want to report the stop to the core. Treat the
2314 SIGSTOP as a normal event. */
2315 threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2316 target_pid_to_str (ptid_of (thread)).c_str ());
2317 }
2318 else if (stopping_threads != NOT_STOPPING_THREADS)
2319 {
2320 /* Stopping threads. We don't want this SIGSTOP to end up
2321 pending. */
2322 threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2323 target_pid_to_str (ptid_of (thread)).c_str ());
2324 return;
2325 }
2326 else
2327 {
2328 /* This is a delayed SIGSTOP. Filter out the event. */
2329 threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2330 child->stepping ? "step" : "continue",
2331 target_pid_to_str (ptid_of (thread)).c_str ());
2332
2333 resume_one_lwp (child, child->stepping, 0, NULL);
2334 return;
2335 }
2336 }
2337
2338 child->status_pending_p = 1;
2339 child->status_pending = wstat;
2340 return;
2341 }
2342
2343 bool
2344 linux_process_target::maybe_hw_step (thread_info *thread)
2345 {
2346 if (supports_hardware_single_step ())
2347 return true;
2348 else
2349 {
2350	      /* GDBserver must insert a single-step breakpoint for software
2351	         single step.  */
2352 gdb_assert (has_single_step_breakpoints (thread));
2353 return false;
2354 }
2355 }
2356
2357 void
2358 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2359 {
2360 struct lwp_info *lp = get_thread_lwp (thread);
2361
2362 if (lp->stopped
2363 && !lp->suspended
2364 && !lp->status_pending_p
2365 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2366 {
2367 int step = 0;
2368
2369 if (thread->last_resume_kind == resume_step)
2370 step = maybe_hw_step (thread);
2371
2372 threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2373 target_pid_to_str (ptid_of (thread)).c_str (),
2374 paddress (lp->stop_pc), step);
2375
2376 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2377 }
2378 }
2379
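/* Wait for an event from any child matching WAIT_PTID, but only
   accept events matching FILTER_PTID; other events are left pending
   on their LWPs (wait_for_sigstop relies on this, passing null_ptid
   as FILTER_PTID to leave every event pending).  Store the wait
   status in *WSTATP and return the LWP id of the event child; return
   -1 if no unwaited-for children are left, or 0 if WNOHANG is set in
   OPTIONS and no event is pending.  */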
2380 int
2381 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2382 ptid_t filter_ptid,
2383 int *wstatp, int options)
2384 {
2385 struct thread_info *event_thread;
2386 struct lwp_info *event_child, *requested_child;
2387 sigset_t block_mask, prev_mask;
2388
2389 retry:
2390 /* N.B. event_thread points to the thread_info struct that contains
2391 event_child. Keep them in sync. */
2392 event_thread = NULL;
2393 event_child = NULL;
2394 requested_child = NULL;
2395
2396 /* Check for a lwp with a pending status. */
2397
2398 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2399 {
2400 event_thread = find_thread_in_random ([&] (thread_info *thread)
2401 {
2402 return status_pending_p_callback (thread, filter_ptid);
2403 });
2404
2405 if (event_thread != NULL)
2406 {
2407 event_child = get_thread_lwp (event_thread);
2408 threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2409 }
2410 }
2411 else if (filter_ptid != null_ptid)
2412 {
2413 requested_child = find_lwp_pid (filter_ptid);
2414
2415 if (stopping_threads == NOT_STOPPING_THREADS
2416 && requested_child->status_pending_p
2417 && (requested_child->collecting_fast_tracepoint
2418 != fast_tpoint_collect_result::not_collecting))
2419 {
2420 enqueue_one_deferred_signal (requested_child,
2421 &requested_child->status_pending);
2422 requested_child->status_pending_p = 0;
2423 requested_child->status_pending = 0;
2424 resume_one_lwp (requested_child, 0, 0, NULL);
2425 }
2426
2427 if (requested_child->suspended
2428 && requested_child->status_pending_p)
2429 {
2430 internal_error (__FILE__, __LINE__,
2431 "requesting an event out of a"
2432 " suspended child?");
2433 }
2434
2435 if (requested_child->status_pending_p)
2436 {
2437 event_child = requested_child;
2438 event_thread = get_lwp_thread (event_child);
2439 }
2440 }
2441
2442 if (event_child != NULL)
2443 {
2444 threads_debug_printf ("Got an event from pending child %ld (%04x)",
2445 lwpid_of (event_thread),
2446 event_child->status_pending);
2447
2448 *wstatp = event_child->status_pending;
2449 event_child->status_pending_p = 0;
2450 event_child->status_pending = 0;
2451 switch_to_thread (event_thread);
2452 return lwpid_of (event_thread);
2453 }
2454
2455 /* But if we don't find a pending event, we'll have to wait.
2456
2457 We only enter this loop if no process has a pending wait status.
2458 Thus any action taken in response to a wait status inside this
2459 loop is responding as soon as we detect the status, not after any
2460 pending events. */
2461
2462 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2463 all signals while here. */
2464 sigfillset (&block_mask);
2465 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2466
2467 /* Always pull all events out of the kernel. We'll randomly select
2468 an event LWP out of all that have events, to prevent
2469 starvation. */
2470 while (event_child == NULL)
2471 {
2472 pid_t ret = 0;
2473
2474	      /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2475 quirks:
2476
2477 - If the thread group leader exits while other threads in the
2478 thread group still exist, waitpid(TGID, ...) hangs. That
2479 waitpid won't return an exit status until the other threads
2480 in the group are reaped.
2481
2482 - When a non-leader thread execs, that thread just vanishes
2483 without reporting an exit (so we'd hang if we waited for it
2484 explicitly in that case). The exec event is reported to
2485 the TGID pid. */
2486 errno = 0;
2487 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2488
2489 threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2490 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2491
2492 if (ret > 0)
2493 {
2494 threads_debug_printf ("waitpid %ld received %s",
2495 (long) ret, status_to_str (*wstatp).c_str ());
2496
2497 /* Filter all events. IOW, leave all events pending. We'll
2498 randomly select an event LWP out of all that have events
2499 below. */
2500 filter_event (ret, *wstatp);
2501 /* Retry until nothing comes out of waitpid. A single
2502 SIGCHLD can indicate more than one child stopped. */
2503 continue;
2504 }
2505
2506 /* Now that we've pulled all events out of the kernel, resume
2507 LWPs that don't have an interesting event to report. */
2508 if (stopping_threads == NOT_STOPPING_THREADS)
2509 for_each_thread ([this] (thread_info *thread)
2510 {
2511 resume_stopped_resumed_lwps (thread);
2512 });
2513
2514 /* ... and find an LWP with a status to report to the core, if
2515 any. */
2516 event_thread = find_thread_in_random ([&] (thread_info *thread)
2517 {
2518 return status_pending_p_callback (thread, filter_ptid);
2519 });
2520
2521 if (event_thread != NULL)
2522 {
2523 event_child = get_thread_lwp (event_thread);
2524 *wstatp = event_child->status_pending;
2525 event_child->status_pending_p = 0;
2526 event_child->status_pending = 0;
2527 break;
2528 }
2529
2530 /* Check for zombie thread group leaders. Those can't be reaped
2531 until all other threads in the thread group are. */
2532 check_zombie_leaders ();
2533
2534 auto not_stopped = [&] (thread_info *thread)
2535 {
2536 return not_stopped_callback (thread, wait_ptid);
2537 };
2538
2539 /* If there are no resumed children left in the set of LWPs we
2540 want to wait for, bail. We can't just block in
2541 waitpid/sigsuspend, because lwps might have been left stopped
2542 in trace-stop state, and we'd be stuck forever waiting for
2543 their status to change (which would only happen if we resumed
2544 them). Even if WNOHANG is set, this return code is preferred
2545 over 0 (below), as it is more detailed. */
2546 if (find_thread (not_stopped) == NULL)
2547 {
2548 threads_debug_printf ("exit (no unwaited-for LWP)");
2549
2550 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2551 return -1;
2552 }
2553
2554 /* No interesting event to report to the caller. */
2555 if ((options & WNOHANG))
2556 {
2557 threads_debug_printf ("WNOHANG set, no event found");
2558
2559 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2560 return 0;
2561 }
2562
2563 /* Block until we get an event reported with SIGCHLD. */
2564 threads_debug_printf ("sigsuspend'ing");
2565
2566 sigsuspend (&prev_mask);
2567 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2568 goto retry;
2569 }
2570
2571 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2572
2573 switch_to_thread (event_thread);
2574
2575 return lwpid_of (event_thread);
2576 }
2577
2578 int
2579 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2580 {
2581 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2582 }
2583
2584 /* Select one LWP out of those that have events pending. */
2585
2586 static void
2587 select_event_lwp (struct lwp_info **orig_lp)
2588 {
2589 struct thread_info *event_thread = NULL;
2590
2591 /* In all-stop, give preference to the LWP that is being
2592 single-stepped. There will be at most one, and it's the LWP that
2593 the core is most interested in. If we didn't do this, then we'd
2594 have to handle pending step SIGTRAPs somehow in case the core
2595 later continues the previously-stepped thread, otherwise we'd
2596 report the pending SIGTRAP, and the core, not having stepped the
2597 thread, wouldn't understand what the trap was for, and therefore
2598 would report it to the user as a random signal. */
2599 if (!non_stop)
2600 {
2601 event_thread = find_thread ([] (thread_info *thread)
2602 {
2603 lwp_info *lp = get_thread_lwp (thread);
2604
2605 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2606 && thread->last_resume_kind == resume_step
2607 && lp->status_pending_p);
2608 });
2609
2610 if (event_thread != NULL)
2611 threads_debug_printf
2612 ("Select single-step %s",
2613 target_pid_to_str (ptid_of (event_thread)).c_str ());
2614 }
2615 if (event_thread == NULL)
2616 {
2617 /* No single-stepping LWP. Select one at random, out of those
2618 which have had events. */
2619
2620 event_thread = find_thread_in_random ([&] (thread_info *thread)
2621 {
2622 lwp_info *lp = get_thread_lwp (thread);
2623
2624 /* Only resumed LWPs that have an event pending. */
2625 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2626 && lp->status_pending_p);
2627 });
2628 }
2629
2630 if (event_thread != NULL)
2631 {
2632 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2633
2634 /* Switch the event LWP. */
2635 *orig_lp = event_lp;
2636 }
2637 }
2638
2639	/* Decrement the suspend count of all LWPs, except EXCEPT, if
2640	   non-NULL.  */
2641
2642 static void
2643 unsuspend_all_lwps (struct lwp_info *except)
2644 {
2645 for_each_thread ([&] (thread_info *thread)
2646 {
2647 lwp_info *lwp = get_thread_lwp (thread);
2648
2649 if (lwp != except)
2650 lwp_suspended_decr (lwp);
2651 });
2652 }
2653
2654 static bool lwp_running (thread_info *thread);
2655
2656 /* Stabilize threads (move out of jump pads).
2657
2658 If a thread is midway collecting a fast tracepoint, we need to
2659 finish the collection and move it out of the jump pad before
2660 reporting the signal.
2661
2662 This avoids recursion while collecting (when a signal arrives
2663 midway, and the signal handler itself collects), which would trash
2664 the trace buffer. In case the user set a breakpoint in a signal
2665	   handler, this avoids the backtrace showing the jump pad, etc.
2666 Most importantly, there are certain things we can't do safely if
2667	   threads are stopped in a jump pad (or in their callees).  For
2668 example:
2669
2670	   - starting a new trace run.  A thread still collecting the
2671	   previous run could trash the trace buffer when resumed.  The trace
2672	   buffer control structures would have been reset but the thread had
2673	   no way to tell.  The thread could even be midway through memcpy'ing
2674	   to the buffer, which would mean that when resumed, it would clobber
2675	   the trace buffer that had been set up for a new run.
2676
2677 - we can't rewrite/reuse the jump pads for new tracepoints
2678	   safely.  Say you do tstart while a thread is stopped midway through
2679 collecting. When the thread is later resumed, it finishes the
2680 collection, and returns to the jump pad, to execute the original
2681 instruction that was under the tracepoint jump at the time the
2682 older run had been started. If the jump pad had been rewritten
2683 since for something else in the new run, the thread would now
2684 execute the wrong / random instructions. */
2685
2686 void
2687 linux_process_target::stabilize_threads ()
2688 {
2689 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2690 {
2691 return stuck_in_jump_pad (thread);
2692 });
2693
2694 if (thread_stuck != NULL)
2695 {
2696 threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2697 lwpid_of (thread_stuck));
2698 return;
2699 }
2700
2701 scoped_restore_current_thread restore_thread;
2702
2703 stabilizing_threads = 1;
2704
2705 /* Kick 'em all. */
2706 for_each_thread ([this] (thread_info *thread)
2707 {
2708 move_out_of_jump_pad (thread);
2709 });
2710
2711 /* Loop until all are stopped out of the jump pads. */
2712 while (find_thread (lwp_running) != NULL)
2713 {
2714 struct target_waitstatus ourstatus;
2715 struct lwp_info *lwp;
2716 int wstat;
2717
2718	      /* Note that we go through the full wait event loop.  While
2719	         moving threads out of the jump pad, we need to be able to step
2720 over internal breakpoints and such. */
2721 wait_1 (minus_one_ptid, &ourstatus, 0);
2722
2723 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2724 {
2725 lwp = get_thread_lwp (current_thread);
2726
2727 /* Lock it. */
2728 lwp_suspended_inc (lwp);
2729
2730 if (ourstatus.sig () != GDB_SIGNAL_0
2731 || current_thread->last_resume_kind == resume_stop)
2732 {
2733 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2734 enqueue_one_deferred_signal (lwp, &wstat);
2735 }
2736 }
2737 }
2738
2739 unsuspend_all_lwps (NULL);
2740
2741 stabilizing_threads = 0;
2742
2743 if (debug_threads)
2744 {
2745 thread_stuck = find_thread ([this] (thread_info *thread)
2746 {
2747 return stuck_in_jump_pad (thread);
2748 });
2749
2750 if (thread_stuck != NULL)
2751 threads_debug_printf
2752 ("couldn't stabilize, LWP %ld got stuck in jump pad",
2753 lwpid_of (thread_stuck));
2754 }
2755 }
2756
2757 /* Convenience function that is called when the kernel reports an
2758 event that is not passed out to GDB. */
2759
2760 static ptid_t
2761 ignore_event (struct target_waitstatus *ourstatus)
2762 {
2763 /* If we got an event, there may still be others, as a single
2764 SIGCHLD can indicate more than one child stopped. This forces
2765 another target_wait call. */
2766 async_file_mark ();
2767
2768 ourstatus->set_ignore ();
2769 return null_ptid;
2770 }
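
/* Callers resume the inferior themselves first, then return the
   result of ignore_event from wait_1, e.g.:

       resume_one_lwp (event_child, event_child->stepping, 0, NULL);
       return ignore_event (ourstatus);

   which guarantees target_wait gets called again to pick up any
   sibling events.  */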
2771
2772 ptid_t
2773 linux_process_target::filter_exit_event (lwp_info *event_child,
2774 target_waitstatus *ourstatus)
2775 {
2776 client_state &cs = get_client_state ();
2777 struct thread_info *thread = get_lwp_thread (event_child);
2778 ptid_t ptid = ptid_of (thread);
2779
2780 if (!last_thread_of_process_p (pid_of (thread)))
2781 {
2782 if (cs.report_thread_events)
2783 ourstatus->set_thread_exited (0);
2784 else
2785 ourstatus->set_ignore ();
2786
2787 delete_lwp (event_child);
2788 }
2789 return ptid;
2790 }
2791
2792	/* Returns nonzero if GDB is interested in any of EVENT_CHILD's syscalls.  */
2793
2794 static int
2795 gdb_catching_syscalls_p (struct lwp_info *event_child)
2796 {
2797 struct thread_info *thread = get_lwp_thread (event_child);
2798 struct process_info *proc = get_thread_process (thread);
2799
2800 return !proc->syscalls_to_catch.empty ();
2801 }
2802
2803 bool
2804 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2805 {
2806 int sysno;
2807 struct thread_info *thread = get_lwp_thread (event_child);
2808 struct process_info *proc = get_thread_process (thread);
2809
2810 if (proc->syscalls_to_catch.empty ())
2811 return false;
2812
2813 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2814 return true;
2815
2816 get_syscall_trapinfo (event_child, &sysno);
2817
2818 for (int iter : proc->syscalls_to_catch)
2819 if (iter == sysno)
2820 return true;
2821
2822 return false;
2823 }
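
/* For example (assuming the usual QCatchSyscalls handling in
   server.cc), a "catch syscall" with no argument list ends up as a
   single ANY_SYSCALL entry in syscalls_to_catch, so the check above
   accepts every syscall number without fetching the trap info.  */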
2824
2825 ptid_t
2826 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2827 target_wait_flags target_options)
2828 {
2829 THREADS_SCOPED_DEBUG_ENTER_EXIT;
2830
2831 client_state &cs = get_client_state ();
2832 int w;
2833 struct lwp_info *event_child;
2834 int options;
2835 int pid;
2836 int step_over_finished;
2837 int bp_explains_trap;
2838 int maybe_internal_trap;
2839 int report_to_gdb;
2840 int trace_event;
2841 int in_step_range;
2842 int any_resumed;
2843
2844 threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2845
2846 /* Translate generic target options into linux options. */
2847 options = __WALL;
2848 if (target_options & TARGET_WNOHANG)
2849 options |= WNOHANG;
2850
2851 bp_explains_trap = 0;
2852 trace_event = 0;
2853 in_step_range = 0;
2854 ourstatus->set_ignore ();
2855
2856 auto status_pending_p_any = [&] (thread_info *thread)
2857 {
2858 return status_pending_p_callback (thread, minus_one_ptid);
2859 };
2860
2861 auto not_stopped = [&] (thread_info *thread)
2862 {
2863 return not_stopped_callback (thread, minus_one_ptid);
2864 };
2865
2866 /* Find a resumed LWP, if any. */
2867 if (find_thread (status_pending_p_any) != NULL)
2868 any_resumed = 1;
2869 else if (find_thread (not_stopped) != NULL)
2870 any_resumed = 1;
2871 else
2872 any_resumed = 0;
2873
2874 if (step_over_bkpt == null_ptid)
2875 pid = wait_for_event (ptid, &w, options);
2876 else
2877 {
2878 threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2879 target_pid_to_str (step_over_bkpt).c_str ());
2880 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2881 }
2882
2883 if (pid == 0 || (pid == -1 && !any_resumed))
2884 {
2885 gdb_assert (target_options & TARGET_WNOHANG);
2886
2887 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
2888
2889 ourstatus->set_ignore ();
2890 return null_ptid;
2891 }
2892 else if (pid == -1)
2893 {
2894 threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
2895
2896 ourstatus->set_no_resumed ();
2897 return null_ptid;
2898 }
2899
2900 event_child = get_thread_lwp (current_thread);
2901
2902 /* wait_for_event only returns an exit status for the last
2903 child of a process. Report it. */
2904 if (WIFEXITED (w) || WIFSIGNALED (w))
2905 {
2906 if (WIFEXITED (w))
2907 {
2908 ourstatus->set_exited (WEXITSTATUS (w));
2909
2910 threads_debug_printf
2911 ("ret = %s, exited with retcode %d",
2912 target_pid_to_str (ptid_of (current_thread)).c_str (),
2913 WEXITSTATUS (w));
2914 }
2915 else
2916 {
2917 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
2918
2919 threads_debug_printf
2920 ("ret = %s, terminated with signal %d",
2921 target_pid_to_str (ptid_of (current_thread)).c_str (),
2922 WTERMSIG (w));
2923 }
2924
2925 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
2926 return filter_exit_event (event_child, ourstatus);
2927
2928 return ptid_of (current_thread);
2929 }
2930
2931	  /* If a step-over executes a breakpoint instruction, then in the case
2932	     of a hardware single step it means a gdb/gdbserver breakpoint had
2933	     been planted on top of a permanent breakpoint; in the case of a
2934	     software single step it may just mean that gdbserver hit the
2935	     reinsert breakpoint.  The PC has been adjusted by save_stop_reason
2936	     to point at the breakpoint address.
2937	     So in the hardware single step case, advance the PC manually past
2938	     the breakpoint; in the software single step case, advance only if
2939	     we are not hitting the single_step_breakpoint.
2940	     This avoids the program trapping a permanent breakpoint
2941	     forever.  */
2942 if (step_over_bkpt != null_ptid
2943 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2944 && (event_child->stepping
2945 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
2946 {
2947 int increment_pc = 0;
2948 int breakpoint_kind = 0;
2949 CORE_ADDR stop_pc = event_child->stop_pc;
2950
2951 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
2952 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
2953
2954 threads_debug_printf
2955 ("step-over for %s executed software breakpoint",
2956 target_pid_to_str (ptid_of (current_thread)).c_str ());
2957
2958 if (increment_pc != 0)
2959 {
2960 struct regcache *regcache
2961 = get_thread_regcache (current_thread, 1);
2962
2963 event_child->stop_pc += increment_pc;
2964 low_set_pc (regcache, event_child->stop_pc);
2965
2966 if (!low_breakpoint_at (event_child->stop_pc))
2967 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2968 }
2969 }
2970
2971 /* If this event was not handled before, and is not a SIGTRAP, we
2972 report it. SIGILL and SIGSEGV are also treated as traps in case
2973 a breakpoint is inserted at the current PC. If this target does
2974 not support internal breakpoints at all, we also report the
2975 SIGTRAP without further processing; it's of no concern to us. */
2976 maybe_internal_trap
2977 = (low_supports_breakpoints ()
2978 && (WSTOPSIG (w) == SIGTRAP
2979 || ((WSTOPSIG (w) == SIGILL
2980 || WSTOPSIG (w) == SIGSEGV)
2981 && low_breakpoint_at (event_child->stop_pc))));
2982
2983 if (maybe_internal_trap)
2984 {
2985 /* Handle anything that requires bookkeeping before deciding to
2986 report the event or continue waiting. */
2987
2988 /* First check if we can explain the SIGTRAP with an internal
2989 breakpoint, or if we should possibly report the event to GDB.
2990 Do this before anything that may remove or insert a
2991 breakpoint. */
2992 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2993
2994 /* We have a SIGTRAP, possibly a step-over dance has just
2995 finished. If so, tweak the state machine accordingly,
2996 reinsert breakpoints and delete any single-step
2997 breakpoints. */
2998 step_over_finished = finish_step_over (event_child);
2999
3000 /* Now invoke the callbacks of any internal breakpoints there. */
3001 check_breakpoints (event_child->stop_pc);
3002
3003 /* Handle tracepoint data collecting. This may overflow the
3004 trace buffer, and cause a tracing stop, removing
3005 breakpoints. */
3006 trace_event = handle_tracepoints (event_child);
3007
3008 if (bp_explains_trap)
3009 threads_debug_printf ("Hit a gdbserver breakpoint.");
3010 }
3011 else
3012 {
3013 /* We have some other signal, possibly a step-over dance was in
3014 progress, and it should be cancelled too. */
3015 step_over_finished = finish_step_over (event_child);
3016 }
3017
3018 /* We have all the data we need. Either report the event to GDB, or
3019 resume threads and keep waiting for more. */
3020
3021 /* If we're collecting a fast tracepoint, finish the collection and
3022 move out of the jump pad before delivering a signal. See
3023 linux_stabilize_threads. */
3024
3025 if (WIFSTOPPED (w)
3026 && WSTOPSIG (w) != SIGTRAP
3027 && supports_fast_tracepoints ()
3028 && agent_loaded_p ())
3029 {
3030 threads_debug_printf ("Got signal %d for LWP %ld. Check if we need "
3031 "to defer or adjust it.",
3032 WSTOPSIG (w), lwpid_of (current_thread));
3033
3034 /* Allow debugging the jump pad itself. */
3035 if (current_thread->last_resume_kind != resume_step
3036 && maybe_move_out_of_jump_pad (event_child, &w))
3037 {
3038 enqueue_one_deferred_signal (event_child, &w);
3039
3040 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3041 WSTOPSIG (w), lwpid_of (current_thread));
3042
3043 resume_one_lwp (event_child, 0, 0, NULL);
3044
3045 return ignore_event (ourstatus);
3046 }
3047 }
3048
3049 if (event_child->collecting_fast_tracepoint
3050 != fast_tpoint_collect_result::not_collecting)
3051 {
3052 threads_debug_printf
3053 ("LWP %ld was trying to move out of the jump pad (%d). "
3054 "Check if we're already there.",
3055 lwpid_of (current_thread),
3056 (int) event_child->collecting_fast_tracepoint);
3057
3058 trace_event = 1;
3059
3060 event_child->collecting_fast_tracepoint
3061 = linux_fast_tracepoint_collecting (event_child, NULL);
3062
3063 if (event_child->collecting_fast_tracepoint
3064 != fast_tpoint_collect_result::before_insn)
3065 {
3066 /* No longer need this breakpoint. */
3067 if (event_child->exit_jump_pad_bkpt != NULL)
3068 {
3069 threads_debug_printf
3070		("No longer need exit-jump-pad bkpt; removing it; "
3071		 "stopping all threads momentarily.");
3072
3073 /* Other running threads could hit this breakpoint.
3074 We don't handle moribund locations like GDB does,
3075 instead we always pause all threads when removing
3076 breakpoints, so that any step-over or
3077 decr_pc_after_break adjustment is always taken
3078 care of while the breakpoint is still
3079 inserted. */
3080 stop_all_lwps (1, event_child);
3081
3082 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3083 event_child->exit_jump_pad_bkpt = NULL;
3084
3085 unstop_all_lwps (1, event_child);
3086
3087 gdb_assert (event_child->suspended >= 0);
3088 }
3089 }
3090
3091 if (event_child->collecting_fast_tracepoint
3092 == fast_tpoint_collect_result::not_collecting)
3093 {
3094 threads_debug_printf
3095 ("fast tracepoint finished collecting successfully.");
3096
3097 /* We may have a deferred signal to report. */
3098 if (dequeue_one_deferred_signal (event_child, &w))
3099 threads_debug_printf ("dequeued one signal.");
3100 else
3101 {
3102 threads_debug_printf ("no deferred signals.");
3103
3104 if (stabilizing_threads)
3105 {
3106 ourstatus->set_stopped (GDB_SIGNAL_0);
3107
3108 threads_debug_printf
3109 ("ret = %s, stopped while stabilizing threads",
3110 target_pid_to_str (ptid_of (current_thread)).c_str ());
3111
3112 return ptid_of (current_thread);
3113 }
3114 }
3115 }
3116 }
3117
3118 /* Check whether GDB would be interested in this event. */
3119
3120 /* Check if GDB is interested in this syscall. */
3121 if (WIFSTOPPED (w)
3122 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3123 && !gdb_catch_this_syscall (event_child))
3124 {
3125 threads_debug_printf ("Ignored syscall for LWP %ld.",
3126 lwpid_of (current_thread));
3127
3128 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3129
3130 return ignore_event (ourstatus);
3131 }
3132
3133 /* If GDB is not interested in this signal, don't stop other
3134 threads, and don't report it to GDB. Just resume the inferior
3135 right away. We do this for threading-related signals as well as
3136 any that GDB specifically requested we ignore. But never ignore
3137 SIGSTOP if we sent it ourselves, and do not ignore signals when
3138 stepping - they may require special handling to skip the signal
3139 handler. Also never ignore signals that could be caused by a
3140 breakpoint. */
3141 if (WIFSTOPPED (w)
3142 && current_thread->last_resume_kind != resume_step
3143 && (
3144 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3145 (current_process ()->priv->thread_db != NULL
3146 && (WSTOPSIG (w) == __SIGRTMIN
3147 || WSTOPSIG (w) == __SIGRTMIN + 1))
3148 ||
3149 #endif
3150 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3151 && !(WSTOPSIG (w) == SIGSTOP
3152 && current_thread->last_resume_kind == resume_stop)
3153 && !linux_wstatus_maybe_breakpoint (w))))
3154 {
3155 siginfo_t info, *info_p;
3156
3157 threads_debug_printf ("Ignored signal %d for LWP %ld.",
3158 WSTOPSIG (w), lwpid_of (current_thread));
3159
3160 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3161 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3162 info_p = &info;
3163 else
3164 info_p = NULL;
3165
3166 if (step_over_finished)
3167 {
3168 /* We cancelled this thread's step-over above. We still
3169 need to unsuspend all other LWPs, and set them back
3170 running again while the signal handler runs. */
3171 unsuspend_all_lwps (event_child);
3172
3173 /* Enqueue the pending signal info so that proceed_all_lwps
3174 doesn't lose it. */
3175 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3176
3177 proceed_all_lwps ();
3178 }
3179 else
3180 {
3181 resume_one_lwp (event_child, event_child->stepping,
3182 WSTOPSIG (w), info_p);
3183 }
3184
3185 return ignore_event (ourstatus);
3186 }
3187
3188 /* Note that all addresses are always "out of the step range" when
3189 there's no range to begin with. */
3190 in_step_range = lwp_in_step_range (event_child);
3191
3192 /* If GDB wanted this thread to single step, and the thread is out
3193 of the step range, we always want to report the SIGTRAP, and let
3194 GDB handle it. Watchpoints should always be reported. So should
3195 signals we can't explain. A SIGTRAP we can't explain could be a
3196	     GDB breakpoint --- we may or may not support Z0 breakpoints.  If we
3197	     do, we'll be able to handle GDB breakpoints on top of internal
3198 breakpoints, by handling the internal breakpoint and still
3199 reporting the event to GDB. If we don't, we're out of luck, GDB
3200 won't see the breakpoint hit. If we see a single-step event but
3201 the thread should be continuing, don't pass the trap to gdb.
3202 That indicates that we had previously finished a single-step but
3203 left the single-step pending -- see
3204 complete_ongoing_step_over. */
3205 report_to_gdb = (!maybe_internal_trap
3206 || (current_thread->last_resume_kind == resume_step
3207 && !in_step_range)
3208 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3209 || (!in_step_range
3210 && !bp_explains_trap
3211 && !trace_event
3212 && !step_over_finished
3213 && !(current_thread->last_resume_kind == resume_continue
3214 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3215 || (gdb_breakpoint_here (event_child->stop_pc)
3216 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3217 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3218 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3219
3220 run_breakpoint_commands (event_child->stop_pc);
3221
3222 /* We found no reason GDB would want us to stop. We either hit one
3223 of our own breakpoints, or finished an internal step GDB
3224 shouldn't know about. */
3225 if (!report_to_gdb)
3226 {
3227 if (bp_explains_trap)
3228 threads_debug_printf ("Hit a gdbserver breakpoint.");
3229
3230 if (step_over_finished)
3231 threads_debug_printf ("Step-over finished.");
3232
3233 if (trace_event)
3234 threads_debug_printf ("Tracepoint event.");
3235
3236 if (lwp_in_step_range (event_child))
3237 threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3238 paddress (event_child->stop_pc),
3239 paddress (event_child->step_range_start),
3240 paddress (event_child->step_range_end));
3241
3242 /* We're not reporting this breakpoint to GDB, so apply the
3243 decr_pc_after_break adjustment to the inferior's regcache
3244 ourselves. */
3245
3246 if (low_supports_breakpoints ())
3247 {
3248 struct regcache *regcache
3249 = get_thread_regcache (current_thread, 1);
3250 low_set_pc (regcache, event_child->stop_pc);
3251 }
3252
3253 if (step_over_finished)
3254 {
3255 /* If we have finished stepping over a breakpoint, we've
3256 stopped and suspended all LWPs momentarily except the
3257 stepping one. This is where we resume them all again.
3258 We're going to keep waiting, so use proceed, which
3259 handles stepping over the next breakpoint. */
3260 unsuspend_all_lwps (event_child);
3261 }
3262 else
3263 {
3264	      /* Remove the single-step breakpoints if any.  Note that
3265	         there are no single-step breakpoints if we finished stepping
3266	         over.  */
3267 if (supports_software_single_step ()
3268 && has_single_step_breakpoints (current_thread))
3269 {
3270 stop_all_lwps (0, event_child);
3271 delete_single_step_breakpoints (current_thread);
3272 unstop_all_lwps (0, event_child);
3273 }
3274 }
3275
3276 threads_debug_printf ("proceeding all threads.");
3277
3278 proceed_all_lwps ();
3279
3280 return ignore_event (ourstatus);
3281 }
3282
3283 if (debug_threads)
3284 {
3285 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3286 threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3287 lwpid_of (get_lwp_thread (event_child)),
3288 event_child->waitstatus.to_string ().c_str ());
3289
3290 if (current_thread->last_resume_kind == resume_step)
3291 {
3292 if (event_child->step_range_start == event_child->step_range_end)
3293 threads_debug_printf
3294 ("GDB wanted to single-step, reporting event.");
3295 else if (!lwp_in_step_range (event_child))
3296 threads_debug_printf ("Out of step range, reporting event.");
3297 }
3298
3299 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3300 threads_debug_printf ("Stopped by watchpoint.");
3301 else if (gdb_breakpoint_here (event_child->stop_pc))
3302 threads_debug_printf ("Stopped by GDB breakpoint.");
3303 }
3304
3305 threads_debug_printf ("Hit a non-gdbserver trap event.");
3306
3307 /* Alright, we're going to report a stop. */
3308
3309 /* Remove single-step breakpoints. */
3310 if (supports_software_single_step ())
3311 {
3312	      /* Whether to remove single-step breakpoints.  If true, stop all
3313	         LWPs first, so that other threads won't hit a breakpoint left
3314	         in stale memory.  */
3315 int remove_single_step_breakpoints_p = 0;
3316
3317 if (non_stop)
3318 {
3319 remove_single_step_breakpoints_p
3320 = has_single_step_breakpoints (current_thread);
3321 }
3322 else
3323 {
3324 /* In all-stop, a stop reply cancels all previous resume
3325 requests. Delete all single-step breakpoints. */
3326
3327 find_thread ([&] (thread_info *thread) {
3328 if (has_single_step_breakpoints (thread))
3329 {
3330 remove_single_step_breakpoints_p = 1;
3331 return true;
3332 }
3333
3334 return false;
3335 });
3336 }
3337
3338 if (remove_single_step_breakpoints_p)
3339 {
3340 /* If we remove single-step breakpoints from memory, stop all lwps,
3341	     so that other threads won't hit the breakpoint in the stale
3342	     memory.  */
3343 stop_all_lwps (0, event_child);
3344
3345 if (non_stop)
3346 {
3347 gdb_assert (has_single_step_breakpoints (current_thread));
3348 delete_single_step_breakpoints (current_thread);
3349 }
3350 else
3351 {
3352 for_each_thread ([] (thread_info *thread){
3353 if (has_single_step_breakpoints (thread))
3354 delete_single_step_breakpoints (thread);
3355 });
3356 }
3357
3358 unstop_all_lwps (0, event_child);
3359 }
3360 }
3361
3362 if (!stabilizing_threads)
3363 {
3364 /* In all-stop, stop all threads. */
3365 if (!non_stop)
3366 stop_all_lwps (0, NULL);
3367
3368 if (step_over_finished)
3369 {
3370 if (!non_stop)
3371 {
3372 /* If we were doing a step-over, all other threads but
3373 the stepping one had been paused in start_step_over,
3374 with their suspend counts incremented. We don't want
3375 to do a full unstop/unpause, because we're in
3376 all-stop mode (so we want threads stopped), but we
3377 still need to unsuspend the other threads, to
3378 decrement their `suspended' count back. */
3379 unsuspend_all_lwps (event_child);
3380 }
3381 else
3382 {
3383 /* If we just finished a step-over, then all threads had
3384 been momentarily paused. In all-stop, that's fine,
3385 we want threads stopped by now anyway. In non-stop,
3386 we need to re-resume threads that GDB wanted to be
3387 running. */
3388 unstop_all_lwps (1, event_child);
3389 }
3390 }
3391
3392 /* If we're not waiting for a specific LWP, choose an event LWP
3393 from among those that have had events. Giving equal priority
3394 to all LWPs that have had events helps prevent
3395 starvation. */
3396 if (ptid == minus_one_ptid)
3397 {
3398 event_child->status_pending_p = 1;
3399 event_child->status_pending = w;
3400
3401 select_event_lwp (&event_child);
3402
3403 /* current_thread and event_child must stay in sync. */
3404 switch_to_thread (get_lwp_thread (event_child));
3405
3406 event_child->status_pending_p = 0;
3407 w = event_child->status_pending;
3408 }
3409
3410
3411 /* Stabilize threads (move out of jump pads). */
3412 if (!non_stop)
3413 target_stabilize_threads ();
3414 }
3415 else
3416 {
3417 /* If we just finished a step-over, then all threads had been
3418 momentarily paused. In all-stop, that's fine, we want
3419 threads stopped by now anyway. In non-stop, we need to
3420 re-resume threads that GDB wanted to be running. */
3421 if (step_over_finished)
3422 unstop_all_lwps (1, event_child);
3423 }
3424
3425 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3426 {
3427 /* If the reported event is an exit, fork, vfork or exec, let
3428 GDB know. */
3429
3430 /* Break the unreported fork relationship chain. */
3431 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3432 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3433 {
3434 event_child->fork_relative->fork_relative = NULL;
3435 event_child->fork_relative = NULL;
3436 }
3437
3438 *ourstatus = event_child->waitstatus;
3439 /* Clear the event lwp's waitstatus since we handled it already. */
3440 event_child->waitstatus.set_ignore ();
3441 }
3442 else
3443 {
3444 /* The actual stop signal is overwritten below. */
3445 ourstatus->set_stopped (GDB_SIGNAL_0);
3446 }
3447
3448 /* Now that we've selected our final event LWP, un-adjust its PC if
3449 it was a software breakpoint, and the client doesn't know we can
3450 adjust the breakpoint ourselves. */
3451 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3452 && !cs.swbreak_feature)
3453 {
3454 int decr_pc = low_decr_pc_after_break ();
3455
3456 if (decr_pc != 0)
3457 {
3458 struct regcache *regcache
3459 = get_thread_regcache (current_thread, 1);
3460 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3461 }
3462 }
3463
3464 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3465 {
3466 int syscall_number;
3467
3468 get_syscall_trapinfo (event_child, &syscall_number);
3469 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3470 ourstatus->set_syscall_entry (syscall_number);
3471 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3472 ourstatus->set_syscall_return (syscall_number);
3473 else
3474 gdb_assert_not_reached ("unexpected syscall state");
3475 }
3476 else if (current_thread->last_resume_kind == resume_stop
3477 && WSTOPSIG (w) == SIGSTOP)
3478 {
3479	      /* A thread that GDB requested to stop with vCont;t stopped
3480	         cleanly; report it as SIG0.  The use of
3481 SIGSTOP is an implementation detail. */
3482 ourstatus->set_stopped (GDB_SIGNAL_0);
3483 }
3484 else if (current_thread->last_resume_kind == resume_stop
3485 && WSTOPSIG (w) != SIGSTOP)
3486 {
3487	      /* A thread that GDB requested to stop with vCont;t, but that
3488	         stopped for some other reason.  */
3489 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3490 }
3491 else if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
3492 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3493
3494 gdb_assert (step_over_bkpt == null_ptid);
3495
3496 threads_debug_printf ("ret = %s, %d, %d",
3497 target_pid_to_str (ptid_of (current_thread)).c_str (),
3498 ourstatus->kind (), ourstatus->sig ());
3499
3500 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3501 return filter_exit_event (event_child, ourstatus);
3502
3503 return ptid_of (current_thread);
3504 }
3505
3506 /* Get rid of any pending event in the pipe. */
3507 static void
3508 async_file_flush (void)
3509 {
3510 linux_event_pipe.flush ();
3511 }
3512
3513 /* Put something in the pipe, so the event loop wakes up. */
3514 static void
3515 async_file_mark (void)
3516 {
3517 linux_event_pipe.mark ();
3518 }
3519
3520 ptid_t
3521 linux_process_target::wait (ptid_t ptid,
3522 target_waitstatus *ourstatus,
3523 target_wait_flags target_options)
3524 {
3525 ptid_t event_ptid;
3526
3527 /* Flush the async file first. */
3528 if (target_is_async_p ())
3529 async_file_flush ();
3530
3531 do
3532 {
3533 event_ptid = wait_1 (ptid, ourstatus, target_options);
3534 }
3535 while ((target_options & TARGET_WNOHANG) == 0
3536 && event_ptid == null_ptid
3537 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3538
3539 /* If at least one stop was reported, there may be more. A single
3540 SIGCHLD can signal more than one child stop. */
3541 if (target_is_async_p ()
3542 && (target_options & TARGET_WNOHANG) != 0
3543 && event_ptid != null_ptid)
3544 async_file_mark ();
3545
3546 return event_ptid;
3547 }
3548
3549 /* Send a signal to an LWP. */
3550
3551 static int
3552 kill_lwp (unsigned long lwpid, int signo)
3553 {
3554 int ret;
3555
3556 errno = 0;
3557 ret = syscall (__NR_tkill, lwpid, signo);
3558 if (errno == ENOSYS)
3559 {
3560 /* If tkill fails, then we are not using nptl threads, a
3561 configuration we no longer support. */
3562 perror_with_name (("tkill"));
3563 }
3564 return ret;
3565 }
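
/* tkill is used rather than kill so the signal is delivered to the
   one LWP instead of to the whole thread group, where any thread
   could dequeue it; e.g. send_sigstop below relies on this to stop
   exactly the LWP it targets.  */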
3566
3567 void
3568 linux_stop_lwp (struct lwp_info *lwp)
3569 {
3570 send_sigstop (lwp);
3571 }
3572
3573 static void
3574 send_sigstop (struct lwp_info *lwp)
3575 {
3576 int pid;
3577
3578 pid = lwpid_of (get_lwp_thread (lwp));
3579
3580 /* If we already have a pending stop signal for this process, don't
3581 send another. */
3582 if (lwp->stop_expected)
3583 {
3584 threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3585
3586 return;
3587 }
3588
3589 threads_debug_printf ("Sending sigstop to lwp %d", pid);
3590
3591 lwp->stop_expected = 1;
3592 kill_lwp (pid, SIGSTOP);
3593 }
3594
3595 static void
3596 send_sigstop (thread_info *thread, lwp_info *except)
3597 {
3598 struct lwp_info *lwp = get_thread_lwp (thread);
3599
3600 /* Ignore EXCEPT. */
3601 if (lwp == except)
3602 return;
3603
3604 if (lwp->stopped)
3605 return;
3606
3607 send_sigstop (lwp);
3608 }
3609
3610 /* Increment the suspend count of an LWP, and stop it, if not stopped
3611 yet. */
3612 static void
3613 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3614 {
3615 struct lwp_info *lwp = get_thread_lwp (thread);
3616
3617 /* Ignore EXCEPT. */
3618 if (lwp == except)
3619 return;
3620
3621 lwp_suspended_inc (lwp);
3622
3623 send_sigstop (thread, except);
3624 }
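
/* This is the worker for stop_all_lwps in the suspend case; the
   matching decrement happens later via unsuspend_all_lwps, once the
   reason for the stop (e.g. a step-over) has finished.  */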
3625
3626 static void
3627 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3628 {
3629 /* Store the exit status for later. */
3630 lwp->status_pending_p = 1;
3631 lwp->status_pending = wstat;
3632
3633 /* Store in waitstatus as well, as there's nothing else to process
3634 for this event. */
3635 if (WIFEXITED (wstat))
3636 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3637 else if (WIFSIGNALED (wstat))
3638 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3639
3640 /* Prevent trying to stop it. */
3641 lwp->stopped = 1;
3642
3643 /* No further stops are expected from a dead lwp. */
3644 lwp->stop_expected = 0;
3645 }
3646
3647 /* Return true if LWP has exited already, and has a pending exit event
3648 to report to GDB. */
3649
3650 static int
3651 lwp_is_marked_dead (struct lwp_info *lwp)
3652 {
3653 return (lwp->status_pending_p
3654 && (WIFEXITED (lwp->status_pending)
3655 || WIFSIGNALED (lwp->status_pending)));
3656 }
3657
3658 void
3659 linux_process_target::wait_for_sigstop ()
3660 {
3661 struct thread_info *saved_thread;
3662 ptid_t saved_tid;
3663 int wstat;
3664 int ret;
3665
3666 saved_thread = current_thread;
3667 if (saved_thread != NULL)
3668 saved_tid = saved_thread->id;
3669 else
3670 saved_tid = null_ptid; /* avoid bogus unused warning */
3671
3672 scoped_restore_current_thread restore_thread;
3673
3674 threads_debug_printf ("pulling events");
3675
3676 /* Passing NULL_PTID as filter indicates we want all events to be
3677 left pending. Eventually this returns when there are no
3678 unwaited-for children left. */
3679 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3680 gdb_assert (ret == -1);
3681
3682 if (saved_thread == NULL || mythread_alive (saved_tid))
3683 return;
3684 else
3685 {
3686 threads_debug_printf ("Previously current thread died.");
3687
3688 /* We can't change the current inferior behind GDB's back,
3689 otherwise, a subsequent command may apply to the wrong
3690 process. */
3691 restore_thread.dont_restore ();
3692 switch_to_thread (nullptr);
3693 }
3694 }
3695
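/* Return true if THREAD's LWP is stopped in the middle of a fast
tracepoint collect and the stop is one GDB wants to see (a GDB
breakpoint at the PC, a watchpoint hit, or a requested step), so
the LWP should not simply be moved out of the jump pad. */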
3696 bool
3697 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3698 {
3699 struct lwp_info *lwp = get_thread_lwp (thread);
3700
3701 if (lwp->suspended != 0)
3702 {
3703 internal_error (__FILE__, __LINE__,
3704 "LWP %ld is suspended, suspended=%d\n",
3705 lwpid_of (thread), lwp->suspended);
3706 }
3707 gdb_assert (lwp->stopped);
3708
3709 /* Allow debugging the jump pad, gdb_collect, etc. */
3710 return (supports_fast_tracepoints ()
3711 && agent_loaded_p ()
3712 && (gdb_breakpoint_here (lwp->stop_pc)
3713 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3714 || thread->last_resume_kind == resume_step)
3715 && (linux_fast_tracepoint_collecting (lwp, NULL)
3716 != fast_tpoint_collect_result::not_collecting));
3717 }
3718
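/* If THREAD's LWP stopped inside a fast tracepoint jump pad and GDB
need not see that stop, defer any pending stop signal and resume the
LWP so it can move out of the jump pad; otherwise leave it
suspended. */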
3719 void
3720 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3721 {
3722 struct lwp_info *lwp = get_thread_lwp (thread);
3723 int *wstat;
3724
3725 if (lwp->suspended != 0)
3726 {
3727 internal_error (__FILE__, __LINE__,
3728 "LWP %ld is suspended, suspended=%d\n",
3729 lwpid_of (thread), lwp->suspended);
3730 }
3731 gdb_assert (lwp->stopped);
3732
3733 /* For gdb_breakpoint_here. */
3734 scoped_restore_current_thread restore_thread;
3735 switch_to_thread (thread);
3736
3737 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3738
3739 /* Allow debugging the jump pad, gdb_collect, etc. */
3740 if (!gdb_breakpoint_here (lwp->stop_pc)
3741 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3742 && thread->last_resume_kind != resume_step
3743 && maybe_move_out_of_jump_pad (lwp, wstat))
3744 {
3745 threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3746 lwpid_of (thread));
3747
3748 if (wstat)
3749 {
3750 lwp->status_pending_p = 0;
3751 enqueue_one_deferred_signal (lwp, wstat);
3752
3753 threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3754 WSTOPSIG (*wstat), lwpid_of (thread));
3755 }
3756
3757 resume_one_lwp (lwp, 0, 0, NULL);
3758 }
3759 else
3760 lwp_suspended_inc (lwp);
3761 }
3762
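/* Return true if THREAD's LWP is alive and currently running. */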
3763 static bool
3764 lwp_running (thread_info *thread)
3765 {
3766 struct lwp_info *lwp = get_thread_lwp (thread);
3767
3768 if (lwp_is_marked_dead (lwp))
3769 return false;
3770
3771 return !lwp->stopped;
3772 }
3773
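/* Stop all LWPs, except EXCEPT if not NULL. If SUSPEND is nonzero,
also increment each stopped LWP's suspend count. Returns when all
LWPs have reported a stop. */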
3774 void
3775 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3776 {
3777 /* Should not be called recursively. */
3778 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3779
3780 THREADS_SCOPED_DEBUG_ENTER_EXIT;
3781
3782 threads_debug_printf
3783 ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3784 (except != NULL
3785 ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3786 : "none"));
3787
3788 stopping_threads = (suspend
3789 ? STOPPING_AND_SUSPENDING_THREADS
3790 : STOPPING_THREADS);
3791
3792 if (suspend)
3793 for_each_thread ([&] (thread_info *thread)
3794 {
3795 suspend_and_send_sigstop (thread, except);
3796 });
3797 else
3798 for_each_thread ([&] (thread_info *thread)
3799 {
3800 send_sigstop (thread, except);
3801 });
3802
3803 wait_for_sigstop ();
3804 stopping_threads = NOT_STOPPING_THREADS;
3805
3806 threads_debug_printf ("setting stopping_threads back to !stopping");
3807 }
3808
3809 /* Enqueue one signal in the chain of signals which need to be
3810 delivered to this process on next resume. */
3811
3812 static void
3813 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3814 {
3815 lwp->pending_signals.emplace_back (signal);
3816 if (info == nullptr)
3817 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3818 else
3819 lwp->pending_signals.back ().info = *info;
3820 }
3821
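/* Install single-step breakpoints at LWP's possible next PCs, to
implement software single-step. */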
3822 void
3823 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3824 {
3825 struct thread_info *thread = get_lwp_thread (lwp);
3826 struct regcache *regcache = get_thread_regcache (thread, 1);
3827
3828 scoped_restore_current_thread restore_thread;
3829
3830 switch_to_thread (thread);
3831 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3832
3833 for (CORE_ADDR pc : next_pcs)
3834 set_single_step_breakpoint (pc, current_ptid);
3835 }
3836
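/* Prepare to single-step LWP. Return 1 if hardware single-step can
be used; otherwise install software single-step breakpoints, if
supported, and return 0. */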
3837 int
3838 linux_process_target::single_step (lwp_info* lwp)
3839 {
3840 int step = 0;
3841
3842 if (supports_hardware_single_step ())
3843 {
3844 step = 1;
3845 }
3846 else if (supports_software_single_step ())
3847 {
3848 install_software_single_step_breakpoints (lwp);
3849 step = 0;
3850 }
3851 else
3852 threads_debug_printf ("stepping is not implemented on this target");
3853
3854 return step;
3855 }
3856
3857 /* The signal can be delivered to the inferior if we are not trying to
3858 finish a fast tracepoint collect. Since the signal can be delivered
3859 during the step-over, the program may go to the signal handler and trap
3860 again after returning from the signal handler. We can live with the
3861 spurious double traps. */
3862
3863 static int
3864 lwp_signal_can_be_delivered (struct lwp_info *lwp)
3865 {
3866 return (lwp->collecting_fast_tracepoint
3867 == fast_tpoint_collect_result::not_collecting);
3868 }
3869
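/* Resume execution of LWP. If STEP is nonzero, single-step it. If
SIGNAL is nonzero, deliver that signal on resume, with INFO as its
siginfo if INFO is not NULL. Throws an error if ptrace fails. */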
3870 void
3871 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
3872 int signal, siginfo_t *info)
3873 {
3874 struct thread_info *thread = get_lwp_thread (lwp);
3875 int ptrace_request;
3876 struct process_info *proc = get_thread_process (thread);
3877
3878 /* Note that the target description may not be initialised
3879 (proc->tdesc == NULL) at this point, because the program hasn't
3880 stopped at the first instruction yet. It means GDBserver skips
3881 the extra traps from the wrapper program (see option --wrapper).
3882 Code in this function that requires register access should be
3883 guarded by a check that proc->tdesc is not NULL. */
3884
3885 if (lwp->stopped == 0)
3886 return;
3887
3888 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
3889
3890 fast_tpoint_collect_result fast_tp_collecting
3891 = lwp->collecting_fast_tracepoint;
3892
3893 gdb_assert (!stabilizing_threads
3894 || (fast_tp_collecting
3895 != fast_tpoint_collect_result::not_collecting));
3896
3897 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3898 user used the "jump" command, or "set $pc = foo"). */
3899 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3900 {
3901 /* Collecting 'while-stepping' actions doesn't make sense
3902 anymore. */
3903 release_while_stepping_state_list (thread);
3904 }
3905
3906 /* If we have pending signals or status, and a new signal, enqueue the
3907 signal. Also enqueue the signal if it can't be delivered to the
3908 inferior right now. */
3909 if (signal != 0
3910 && (lwp->status_pending_p
3911 || !lwp->pending_signals.empty ()
3912 || !lwp_signal_can_be_delivered (lwp)))
3913 {
3914 enqueue_pending_signal (lwp, signal, info);
3915
3916 /* Postpone any pending signal. It was enqueued above. */
3917 signal = 0;
3918 }
3919
3920 if (lwp->status_pending_p)
3921 {
3922 threads_debug_printf
3923 ("Not resuming lwp %ld (%s, stop %s); has pending status",
3924 lwpid_of (thread), step ? "step" : "continue",
3925 lwp->stop_expected ? "expected" : "not expected");
3926 return;
3927 }
3928
3929 scoped_restore_current_thread restore_thread;
3930 switch_to_thread (thread);
3931
3932 /* This bit needs some thinking about. If we get a signal that
3933 we must report while a single-step reinsert is still pending,
3934 we often end up resuming the thread. It might be better to
3935 (ew) allow a stack of pending events; then we could be sure that
3936 the reinsert happened right away and not lose any signals.
3937
3938 Making this stack would also shrink the window in which breakpoints are
3939 uninserted (see comment in linux_wait_for_lwp) but not enough for
3940 complete correctness, so it won't solve that problem. It may be
3941 worthwhile just to solve this one, however. */
3942 if (lwp->bp_reinsert != 0)
3943 {
3944 threads_debug_printf (" pending reinsert at 0x%s",
3945 paddress (lwp->bp_reinsert));
3946
3947 if (supports_hardware_single_step ())
3948 {
3949 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
3950 {
3951 if (step == 0)
3952 warning ("BAD - reinserting but not stepping.");
3953 if (lwp->suspended)
3954 warning ("BAD - reinserting and suspended(%d).",
3955 lwp->suspended);
3956 }
3957 }
3958
3959 step = maybe_hw_step (thread);
3960 }
3961
3962 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
3963 threads_debug_printf
3964 ("lwp %ld wants to get out of fast tracepoint jump pad "
3965 "(exit-jump-pad-bkpt)", lwpid_of (thread));
3966
3967 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
3968 {
3969 threads_debug_printf
3970 ("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
3971 lwpid_of (thread));
3972
3973 if (supports_hardware_single_step ())
3974 step = 1;
3975 else
3976 {
3977 internal_error (__FILE__, __LINE__,
3978 "moving out of jump pad single-stepping"
3979 " not implemented on this target");
3980 }
3981 }
3982
3983 /* If we have while-stepping actions in this thread set it stepping.
3984 If we have a signal to deliver, it may or may not be set to
3985 SIG_IGN, we don't know. Assume so, and allow collecting
3986 while-stepping into a signal handler. A possible smart thing to
3987 do would be to set an internal breakpoint at the signal return
3988 address, continue, and carry on catching this while-stepping
3989 action only when that breakpoint is hit. A future
3990 enhancement. */
3991 if (thread->while_stepping != NULL)
3992 {
3993 threads_debug_printf
3994 ("lwp %ld has a while-stepping action -> forcing step.",
3995 lwpid_of (thread));
3996
3997 step = single_step (lwp);
3998 }
3999
4000 if (proc->tdesc != NULL && low_supports_breakpoints ())
4001 {
4002 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4003
4004 lwp->stop_pc = low_get_pc (regcache);
4005
4006 threads_debug_printf (" %s from pc 0x%lx", step ? "step" : "continue",
4007 (long) lwp->stop_pc);
4008 }
4009
4010 /* If we have pending signals, consume one if it can be delivered to
4011 the inferior. */
4012 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4013 {
4014 const pending_signal &p_sig = lwp->pending_signals.front ();
4015
4016 signal = p_sig.signal;
4017 if (p_sig.info.si_signo != 0)
4018 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4019 &p_sig.info);
4020
4021 lwp->pending_signals.pop_front ();
4022 }
4023
4024 threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4025 lwpid_of (thread), step ? "step" : "continue", signal,
4026 lwp->stop_expected ? "expected" : "not expected");
4027
4028 low_prepare_to_resume (lwp);
4029
4030 regcache_invalidate_thread (thread);
4031 errno = 0;
4032 lwp->stepping = step;
4033 if (step)
4034 ptrace_request = PTRACE_SINGLESTEP;
4035 else if (gdb_catching_syscalls_p (lwp))
4036 ptrace_request = PTRACE_SYSCALL;
4037 else
4038 ptrace_request = PTRACE_CONT;
4039 ptrace (ptrace_request,
4040 lwpid_of (thread),
4041 (PTRACE_TYPE_ARG3) 0,
4042 /* Coerce to a uintptr_t first to avoid potential gcc warning
4043 of coercing an 8 byte integer to a 4 byte pointer. */
4044 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4045
4046 if (errno)
4047 perror_with_name ("resuming thread");
4048
4049 /* Successfully resumed. Clear state that no longer makes sense,
4050 and mark the LWP as running. Must not do this before resuming,
4051 otherwise if that fails other code will be confused. E.g., we'd
4052 later try to stop the LWP and hang forever waiting for a stop
4053 status. Note that we must not throw after this is cleared,
4054 otherwise handle_zombie_lwp_error would get confused. */
4055 lwp->stopped = 0;
4056 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4057 }
4058
4059 void
4060 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4061 {
4062 /* Nop. */
4063 }
4064
4065 /* Called when we try to resume a stopped LWP and that errors out. If
4066 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4067 or about to become), discard the error, clear any pending status
4068 the LWP may have, and return true (we'll collect the exit status
4069 soon enough). Otherwise, return false. */
4070
4071 static int
4072 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4073 {
4074 struct thread_info *thread = get_lwp_thread (lp);
4075
4076 /* If we get an error after resuming the LWP successfully, we'd
4077 confuse !T state for the LWP being gone. */
4078 gdb_assert (lp->stopped);
4079
4080 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4081 because even if ptrace failed with ESRCH, the tracee may be "not
4082 yet fully dead", but already refusing ptrace requests. In that
4083 case the tracee has 'R (Running)' state for a little bit
4084 (observed in Linux 3.18). See also the note on ESRCH in the
4085 ptrace(2) man page. Instead, check whether the LWP has any state
4086 other than ptrace-stopped. */
4087
4088 /* Don't assume anything if /proc/PID/status can't be read. */
4089 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4090 {
4091 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4092 lp->status_pending_p = 0;
4093 return 1;
4094 }
4095 return 0;
4096 }
4097
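/* Like resume_one_lwp_throw, but swallow the error if the LWP turns
out to have vanished (see check_ptrace_stopped_lwp_gone). */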
4098 void
4099 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4100 siginfo_t *info)
4101 {
4102 try
4103 {
4104 resume_one_lwp_throw (lwp, step, signal, info);
4105 }
4106 catch (const gdb_exception_error &ex)
4107 {
4108 if (!check_ptrace_stopped_lwp_gone (lwp))
4109 throw;
4110 }
4111 }
4112
4113 /* This function is called once per thread via for_each_thread.
4114 We look up which resume request applies to THREAD and mark it with a
4115 pointer to the appropriate resume request.
4116
4117 This algorithm is O(threads * resume elements), but resume elements
4118 is small (and will remain small at least until GDB supports thread
4119 suspension). */
4120
4121 static void
4122 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4123 {
4124 struct lwp_info *lwp = get_thread_lwp (thread);
4125
4126 for (int ndx = 0; ndx < n; ndx++)
4127 {
4128 ptid_t ptid = resume[ndx].thread;
4129 if (ptid == minus_one_ptid
4130 || ptid == thread->id
4131 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4132 of PID'. */
4133 || (ptid.pid () == pid_of (thread)
4134 && (ptid.is_pid ()
4135 || ptid.lwp () == -1)))
4136 {
4137 if (resume[ndx].kind == resume_stop
4138 && thread->last_resume_kind == resume_stop)
4139 {
4140 threads_debug_printf
4141 ("already %s LWP %ld at GDB's request",
4142 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4143 ? "stopped" : "stopping"),
4144 lwpid_of (thread));
4145
4146 continue;
4147 }
4148
4149 /* Ignore (wildcard) resume requests for already-resumed
4150 threads. */
4151 if (resume[ndx].kind != resume_stop
4152 && thread->last_resume_kind != resume_stop)
4153 {
4154 threads_debug_printf
4155 ("already %s LWP %ld at GDB's request",
4156 (thread->last_resume_kind == resume_step
4157 ? "stepping" : "continuing"),
4158 lwpid_of (thread));
4159 continue;
4160 }
4161
4162 /* Don't let wildcard resumes resume fork children that GDB
4163 does not yet know are new fork children. */
4164 if (lwp->fork_relative != NULL)
4165 {
4166 struct lwp_info *rel = lwp->fork_relative;
4167
4168 if (rel->status_pending_p
4169 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4170 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4171 {
4172 threads_debug_printf
4173 ("not resuming LWP %ld: has queued stop reply",
4174 lwpid_of (thread));
4175 continue;
4176 }
4177 }
4178
4179 /* If the thread has a pending event that has already been
4180 reported to GDBserver core, but GDB has not pulled the
4181 event out of the vStopped queue yet, likewise, ignore the
4182 (wildcard) resume request. */
4183 if (in_queued_stop_replies (thread->id))
4184 {
4185 threads_debug_printf
4186 ("not resuming LWP %ld: has queued stop reply",
4187 lwpid_of (thread));
4188 continue;
4189 }
4190
4191 lwp->resume = &resume[ndx];
4192 thread->last_resume_kind = lwp->resume->kind;
4193
4194 lwp->step_range_start = lwp->resume->step_range_start;
4195 lwp->step_range_end = lwp->resume->step_range_end;
4196
4197 /* If we had a deferred signal to report, dequeue one now.
4198 This can happen if LWP gets more than one signal while
4199 trying to get out of a jump pad. */
4200 if (lwp->stopped
4201 && !lwp->status_pending_p
4202 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4203 {
4204 lwp->status_pending_p = 1;
4205
4206 threads_debug_printf
4207 ("Dequeueing deferred signal %d for LWP %ld, "
4208 "leaving status pending.",
4209 WSTOPSIG (lwp->status_pending),
4210 lwpid_of (thread));
4211 }
4212
4213 return;
4214 }
4215 }
4216
4217 /* No resume action for this thread. */
4218 lwp->resume = NULL;
4219 }
4220
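/* find_thread callback: return true if THREAD is going to be resumed
and still has a pending status to report. */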
4221 bool
4222 linux_process_target::resume_status_pending (thread_info *thread)
4223 {
4224 struct lwp_info *lwp = get_thread_lwp (thread);
4225
4226 /* LWPs which will not be resumed are not interesting, because
4227 we might not wait for them next time through linux_wait. */
4228 if (lwp->resume == NULL)
4229 return false;
4230
4231 return thread_still_has_status_pending (thread);
4232 }
4233
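/* find_thread callback: return true if THREAD is stopped at a
breakpoint that needs stepping over with all other threads
stopped. */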
4234 bool
4235 linux_process_target::thread_needs_step_over (thread_info *thread)
4236 {
4237 struct lwp_info *lwp = get_thread_lwp (thread);
4238 CORE_ADDR pc;
4239 struct process_info *proc = get_thread_process (thread);
4240
4241 /* GDBserver is skipping the extra traps from the wrapper program,
4242 don't have to do step over. */
4243 if (proc->tdesc == NULL)
4244 return false;
4245
4246 /* LWPs which will not be resumed are not interesting, because we
4247 might not wait for them next time through linux_wait. */
4248
4249 if (!lwp->stopped)
4250 {
4251 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4252 lwpid_of (thread));
4253 return false;
4254 }
4255
4256 if (thread->last_resume_kind == resume_stop)
4257 {
4258 threads_debug_printf
4259 ("Need step over [LWP %ld]? Ignoring, should remain stopped",
4260 lwpid_of (thread));
4261 return false;
4262 }
4263
4264 gdb_assert (lwp->suspended >= 0);
4265
4266 if (lwp->suspended)
4267 {
4268 threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4269 lwpid_of (thread));
4270 return false;
4271 }
4272
4273 if (lwp->status_pending_p)
4274 {
4275 threads_debug_printf
4276 ("Need step over [LWP %ld]? Ignoring, has pending status.",
4277 lwpid_of (thread));
4278 return false;
4279 }
4280
4281 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4282 or we have. */
4283 pc = get_pc (lwp);
4284
4285 /* If the PC has changed since we stopped, then don't do anything,
4286 and let the breakpoint/tracepoint be hit. This happens if, for
4287 instance, GDB handled the decr_pc_after_break subtraction itself,
4288 GDB is OOL stepping this thread, or the user has issued a "jump"
4289 command, or poked the thread's registers herself. */
4290 if (pc != lwp->stop_pc)
4291 {
4292 threads_debug_printf
4293 ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4294 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4295 paddress (lwp->stop_pc), paddress (pc));
4296 return false;
4297 }
4298
4299 /* On software single step target, resume the inferior with signal
4300 rather than stepping over. */
4301 if (supports_software_single_step ()
4302 && !lwp->pending_signals.empty ()
4303 && lwp_signal_can_be_delivered (lwp))
4304 {
4305 threads_debug_printf
4306 ("Need step over [LWP %ld]? Ignoring, has pending signals.",
4307 lwpid_of (thread));
4308
4309 return false;
4310 }
4311
4312 scoped_restore_current_thread restore_thread;
4313 switch_to_thread (thread);
4314
4315 /* We can only step over breakpoints we know about. */
4316 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4317 {
4318 /* Don't step over a breakpoint that GDB expects to hit
4319 though. If the condition is being evaluated on the target's side
4320 and it evaluates to false, step over this breakpoint as well. */
4321 if (gdb_breakpoint_here (pc)
4322 && gdb_condition_true_at_breakpoint (pc)
4323 && gdb_no_commands_at_breakpoint (pc))
4324 {
4325 threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4326 " GDB breakpoint at 0x%s; skipping step over",
4327 lwpid_of (thread), paddress (pc));
4328
4329 return false;
4330 }
4331 else
4332 {
4333 threads_debug_printf ("Need step over [LWP %ld]? yes, "
4334 "found breakpoint at 0x%s",
4335 lwpid_of (thread), paddress (pc));
4336
4337 /* We've found an lwp that needs stepping over --- return 1 so
4338 that find_thread stops looking. */
4339 return true;
4340 }
4341 }
4342
4343 threads_debug_printf
4344 ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4345 lwpid_of (thread), paddress (pc));
4346
4347 return false;
4348 }
4349
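/* Begin a step-over for LWP: stop (and suspend) all other LWPs,
uninsert the breakpoint and any fast tracepoint jump at LWP's PC,
and resume LWP for a single step. step_over_bkpt records the LWP
whose event we now expect. */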
4350 void
4351 linux_process_target::start_step_over (lwp_info *lwp)
4352 {
4353 struct thread_info *thread = get_lwp_thread (lwp);
4354 CORE_ADDR pc;
4355
4356 threads_debug_printf ("Starting step-over on LWP %ld. Stopping all threads",
4357 lwpid_of (thread));
4358
4359 stop_all_lwps (1, lwp);
4360
4361 if (lwp->suspended != 0)
4362 {
4363 internal_error (__FILE__, __LINE__,
4364 "LWP %ld suspended=%d\n", lwpid_of (thread),
4365 lwp->suspended);
4366 }
4367
4368 threads_debug_printf ("Done stopping all threads for step-over.");
4369
4370 /* Note, we should always reach here with an already adjusted PC,
4371 either by GDB (if we're resuming due to GDB's request), or by our
4372 caller, if we just finished handling an internal breakpoint GDB
4373 shouldn't care about. */
4374 pc = get_pc (lwp);
4375
4376 bool step = false;
4377 {
4378 scoped_restore_current_thread restore_thread;
4379 switch_to_thread (thread);
4380
4381 lwp->bp_reinsert = pc;
4382 uninsert_breakpoints_at (pc);
4383 uninsert_fast_tracepoint_jumps_at (pc);
4384
4385 step = single_step (lwp);
4386 }
4387
4388 resume_one_lwp (lwp, step, 0, NULL);
4389
4390 /* Require next event from this LWP. */
4391 step_over_bkpt = thread->id;
4392 }
4393
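/* Finish a step-over started by start_step_over, if one is pending
for LWP: reinsert what was uninserted, delete any software
single-step breakpoints, and clear the step-over state. Return
true if a step-over was indeed pending. */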
4394 bool
4395 linux_process_target::finish_step_over (lwp_info *lwp)
4396 {
4397 if (lwp->bp_reinsert != 0)
4398 {
4399 scoped_restore_current_thread restore_thread;
4400
4401 threads_debug_printf ("Finished step over.");
4402
4403 switch_to_thread (get_lwp_thread (lwp));
4404
4405 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4406 may be no breakpoint to reinsert there by now. */
4407 reinsert_breakpoints_at (lwp->bp_reinsert);
4408 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4409
4410 lwp->bp_reinsert = 0;
4411
4412 /* Delete any single-step breakpoints. No longer needed. We
4413 don't have to worry about other threads hitting this trap,
4414 and later not being able to explain it, because we were
4415 stepping over a breakpoint, and we hold all threads but
4416 LWP stopped while doing that. */
4417 if (!supports_hardware_single_step ())
4418 {
4419 gdb_assert (has_single_step_breakpoints (current_thread));
4420 delete_single_step_breakpoints (current_thread);
4421 }
4422
4423 step_over_bkpt = null_ptid;
4424 return true;
4425 }
4426 else
4427 return false;
4428 }
4429
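/* If a step-over is in progress, pull events until it finishes,
discarding the resulting SIGTRAP if GDB did not ask for a step.
Used, e.g., before detaching. */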
4430 void
4431 linux_process_target::complete_ongoing_step_over ()
4432 {
4433 if (step_over_bkpt != null_ptid)
4434 {
4435 struct lwp_info *lwp;
4436 int wstat;
4437 int ret;
4438
4439 threads_debug_printf ("detach: step over in progress, finish it first");
4440
4441 /* Passing NULL_PTID as filter indicates we want all events to
4442 be left pending. Eventually this returns when there are no
4443 unwaited-for children left. */
4444 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4445 __WALL);
4446 gdb_assert (ret == -1);
4447
4448 lwp = find_lwp_pid (step_over_bkpt);
4449 if (lwp != NULL)
4450 {
4451 finish_step_over (lwp);
4452
4453 /* If we got our step SIGTRAP, don't leave it pending,
4454 otherwise we would report it to GDB as a spurious
4455 SIGTRAP. */
4456 gdb_assert (lwp->status_pending_p);
4457 if (WIFSTOPPED (lwp->status_pending)
4458 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4459 {
4460 thread_info *thread = get_lwp_thread (lwp);
4461 if (thread->last_resume_kind != resume_step)
4462 {
4463 threads_debug_printf ("detach: discard step-over SIGTRAP");
4464
4465 lwp->status_pending_p = 0;
4466 lwp->status_pending = 0;
4467 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4468 }
4469 else
4470 threads_debug_printf
4471 ("detach: resume_step, not discarding step-over SIGTRAP");
4472 }
4473 }
4474 step_over_bkpt = null_ptid;
4475 unsuspend_all_lwps (lwp);
4476 }
4477 }
4478
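/* Act on the resume request, if any, that linux_set_resume_request
recorded for THREAD. Stop requests are handled here; other requests
are forwarded to proceed_one_lwp, unless LEAVE_ALL_STOPPED or the
LWP must stay stopped, in which case only the requested signal is
queued. */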
4479 void
4480 linux_process_target::resume_one_thread (thread_info *thread,
4481 bool leave_all_stopped)
4482 {
4483 struct lwp_info *lwp = get_thread_lwp (thread);
4484 int leave_pending;
4485
4486 if (lwp->resume == NULL)
4487 return;
4488
4489 if (lwp->resume->kind == resume_stop)
4490 {
4491 threads_debug_printf ("resume_stop request for LWP %ld",
4492 lwpid_of (thread));
4493
4494 if (!lwp->stopped)
4495 {
4496 threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4497
4498 /* Stop the thread, and wait for the event asynchronously,
4499 through the event loop. */
4500 send_sigstop (lwp);
4501 }
4502 else
4503 {
4504 threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4505
4506 /* The LWP may have been stopped in an internal event that
4507 was not meant to be notified back to GDB (e.g., gdbserver
4508 breakpoint), so we should be reporting a stop event in
4509 this case too. */
4510
4511 /* If the thread already has a pending SIGSTOP, this is a
4512 no-op. Otherwise, something later will presumably resume
4513 the thread and this will cause it to cancel any pending
4514 operation, due to last_resume_kind == resume_stop. If
4515 the thread already has a pending status to report, we
4516 will still report it the next time we wait - see
4517 status_pending_p_callback. */
4518
4519 /* If we already have a pending signal to report, then
4520 there's no need to queue a SIGSTOP, as this means we're
4521 midway through moving the LWP out of the jumppad, and we
4522 will report the pending signal as soon as that is
4523 finished. */
4524 if (lwp->pending_signals_to_report.empty ())
4525 send_sigstop (lwp);
4526 }
4527
4528 /* For stop requests, we're done. */
4529 lwp->resume = NULL;
4530 thread->last_status.set_ignore ();
4531 return;
4532 }
4533
4534 /* If this thread which is about to be resumed has a pending status,
4535 then don't resume it - we can just report the pending status.
4536 Likewise if it is suspended, because e.g., another thread is
4537 stepping past a breakpoint. Make sure to queue any signals that
4538 would otherwise be sent. In all-stop mode, we make this decision
4539 based on whether *any* thread has a pending status. If there's a
4540 thread that needs the step-over-breakpoint dance, then don't
4541 resume any other thread but that particular one. */
4542 leave_pending = (lwp->suspended
4543 || lwp->status_pending_p
4544 || leave_all_stopped);
4545
4546 /* If we have a new signal, enqueue the signal. */
4547 if (lwp->resume->sig != 0)
4548 {
4549 siginfo_t info, *info_p;
4550
4551 /* If this is the same signal we were previously stopped by,
4552 make sure to queue its siginfo. */
4553 if (WIFSTOPPED (lwp->last_status)
4554 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4555 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4556 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4557 info_p = &info;
4558 else
4559 info_p = NULL;
4560
4561 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4562 }
4563
4564 if (!leave_pending)
4565 {
4566 threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4567
4568 proceed_one_lwp (thread, NULL);
4569 }
4570 else
4571 threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4572
4573 thread->last_status.set_ignore ();
4574 lwp->resume = NULL;
4575 }
4576
4577 void
4578 linux_process_target::resume (thread_resume *resume_info, size_t n)
4579 {
4580 struct thread_info *need_step_over = NULL;
4581
4582 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4583
4584 for_each_thread ([&] (thread_info *thread)
4585 {
4586 linux_set_resume_request (thread, resume_info, n);
4587 });
4588
4589 /* If there is a thread which would otherwise be resumed, which has
4590 a pending status, then don't resume any threads - we can just
4591 report the pending status. Make sure to queue any signals that
4592 would otherwise be sent. In non-stop mode, we'll apply this
4593 logic to each thread individually. We consume all pending events
4594 before considering whether to start a step-over (in all-stop). */
4595 bool any_pending = false;
4596 if (!non_stop)
4597 any_pending = find_thread ([this] (thread_info *thread)
4598 {
4599 return resume_status_pending (thread);
4600 }) != nullptr;
4601
4602 /* If there is a thread which would otherwise be resumed, which is
4603 stopped at a breakpoint that needs stepping over, then don't
4604 resume any threads - have it step over the breakpoint with all
4605 other threads stopped, then resume all threads again. Make sure
4606 to queue any signals that would otherwise be delivered or
4607 queued. */
4608 if (!any_pending && low_supports_breakpoints ())
4609 need_step_over = find_thread ([this] (thread_info *thread)
4610 {
4611 return thread_needs_step_over (thread);
4612 });
4613
4614 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4615
4616 if (need_step_over != NULL)
4617 threads_debug_printf ("Not resuming all, need step over");
4618 else if (any_pending)
4619 threads_debug_printf ("Not resuming, all-stop and found "
4620 "an LWP with pending status");
4621 else
4622 threads_debug_printf ("Resuming, no pending status or step over needed");
4623
4624 /* Even if we're leaving threads stopped, queue all signals we'd
4625 otherwise deliver. */
4626 for_each_thread ([&] (thread_info *thread)
4627 {
4628 resume_one_thread (thread, leave_all_stopped);
4629 });
4630
4631 if (need_step_over)
4632 start_step_over (get_thread_lwp (need_step_over));
4633
4634 /* We may have events that were pending that can/should be sent to
4635 the client now. Trigger a linux_wait call. */
4636 if (target_is_async_p ())
4637 async_file_mark ();
4638 }
4639
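/* Proceed with THREAD's LWP, resuming it the way it last asked to be
resumed (step or continue), unless it is EXCEPT, already running,
suspended, or has a reason to remain stopped. */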
4640 void
4641 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4642 {
4643 struct lwp_info *lwp = get_thread_lwp (thread);
4644 int step;
4645
4646 if (lwp == except)
4647 return;
4648
4649 threads_debug_printf ("lwp %ld", lwpid_of (thread));
4650
4651 if (!lwp->stopped)
4652 {
4653 threads_debug_printf (" LWP %ld already running", lwpid_of (thread));
4654 return;
4655 }
4656
4657 if (thread->last_resume_kind == resume_stop
4658 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4659 {
4660 threads_debug_printf (" client wants LWP to remain %ld stopped",
4661 lwpid_of (thread));
4662 return;
4663 }
4664
4665 if (lwp->status_pending_p)
4666 {
4667 threads_debug_printf (" LWP %ld has pending status, leaving stopped",
4668 lwpid_of (thread));
4669 return;
4670 }
4671
4672 gdb_assert (lwp->suspended >= 0);
4673
4674 if (lwp->suspended)
4675 {
4676 threads_debug_printf (" LWP %ld is suspended", lwpid_of (thread));
4677 return;
4678 }
4679
4680 if (thread->last_resume_kind == resume_stop
4681 && lwp->pending_signals_to_report.empty ()
4682 && (lwp->collecting_fast_tracepoint
4683 == fast_tpoint_collect_result::not_collecting))
4684 {
4685 /* We haven't reported this LWP as stopped yet (otherwise, the
4686 last_status.kind check above would catch it, and we wouldn't
4687 reach here). This LWP may have been momentarily paused by a
4688 stop_all_lwps call while handling, for example, another LWP's
4689 step-over. In that case, the pending expected SIGSTOP signal
4690 that was queued at vCont;t handling time will have already
4691 been consumed by wait_for_sigstop, and so we need to requeue
4692 another one here. Note that if the LWP already has a SIGSTOP
4693 pending, this is a no-op. */
4694
4695 threads_debug_printf
4696 ("Client wants LWP %ld to stop. Making sure it has a SIGSTOP pending",
4697 lwpid_of (thread));
4698
4699 send_sigstop (lwp);
4700 }
4701
4702 if (thread->last_resume_kind == resume_step)
4703 {
4704 threads_debug_printf (" stepping LWP %ld, client wants it stepping",
4705 lwpid_of (thread));
4706
4707 /* If resume_step is requested by GDB, install single-step
4708 breakpoints when the thread is about to be actually resumed if
4709 the single-step breakpoints weren't removed. */
4710 if (supports_software_single_step ()
4711 && !has_single_step_breakpoints (thread))
4712 install_software_single_step_breakpoints (lwp);
4713
4714 step = maybe_hw_step (thread);
4715 }
4716 else if (lwp->bp_reinsert != 0)
4717 {
4718 threads_debug_printf (" stepping LWP %ld, reinsert set",
4719 lwpid_of (thread));
4720
4721 step = maybe_hw_step (thread);
4722 }
4723 else
4724 step = 0;
4725
4726 resume_one_lwp (lwp, step, 0, NULL);
4727 }
4728
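/* Decrement the suspend count of THREAD's LWP, then proceed with it
as proceed_one_lwp does, skipping EXCEPT. */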
4729 void
4730 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4731 lwp_info *except)
4732 {
4733 struct lwp_info *lwp = get_thread_lwp (thread);
4734
4735 if (lwp == except)
4736 return;
4737
4738 lwp_suspended_decr (lwp);
4739
4740 proceed_one_lwp (thread, except);
4741 }
4742
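/* Proceed with all LWPs; but if some thread is stopped at a
breakpoint that needs stepping over, start that step-over instead
of resuming everything. */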
4743 void
4744 linux_process_target::proceed_all_lwps ()
4745 {
4746 struct thread_info *need_step_over;
4747
4748 /* If there is a thread which would otherwise be resumed, which is
4749 stopped at a breakpoint that needs stepping over, then don't
4750 resume any threads - have it step over the breakpoint with all
4751 other threads stopped, then resume all threads again. */
4752
4753 if (low_supports_breakpoints ())
4754 {
4755 need_step_over = find_thread ([this] (thread_info *thread)
4756 {
4757 return thread_needs_step_over (thread);
4758 });
4759
4760 if (need_step_over != NULL)
4761 {
4762 threads_debug_printf ("found thread %ld needing a step-over",
4763 lwpid_of (need_step_over));
4764
4765 start_step_over (get_thread_lwp (need_step_over));
4766 return;
4767 }
4768 }
4769
4770 threads_debug_printf ("Proceeding, no step-over needed");
4771
4772 for_each_thread ([this] (thread_info *thread)
4773 {
4774 proceed_one_lwp (thread, NULL);
4775 });
4776 }
4777
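/* Undo stop_all_lwps: proceed with all LWPs except EXCEPT. If
UNSUSPEND is nonzero, also undo the matching suspension first. */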
4778 void
4779 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4780 {
4781 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4782
4783 if (except)
4784 threads_debug_printf ("except=(LWP %ld)",
4785 lwpid_of (get_lwp_thread (except)));
4786 else
4787 threads_debug_printf ("except=nullptr");
4788
4789 if (unsuspend)
4790 for_each_thread ([&] (thread_info *thread)
4791 {
4792 unsuspend_and_proceed_one_lwp (thread, except);
4793 });
4794 else
4795 for_each_thread ([&] (thread_info *thread)
4796 {
4797 proceed_one_lwp (thread, except);
4798 });
4799 }
4800
4801
4802 #ifdef HAVE_LINUX_REGSETS
4803
4804 #define use_linux_regsets 1
4805
4806 /* Returns true if REGSET has been disabled. */
4807
4808 static int
4809 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4810 {
4811 return (info->disabled_regsets != NULL
4812 && info->disabled_regsets[regset - info->regsets]);
4813 }
4814
4815 /* Disable REGSET. */
4816
4817 static void
4818 disable_regset (struct regsets_info *info, struct regset_info *regset)
4819 {
4820 int dr_offset;
4821
4822 dr_offset = regset - info->regsets;
4823 if (info->disabled_regsets == NULL)
4824 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4825 info->disabled_regsets[dr_offset] = 1;
4826 }
4827
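/* Fetch into REGCACHE all registers covered by the regsets in
REGSETS_INFO, using ptrace on the current thread. Return 0 if the
general registers were among the regsets fetched, 1 if the caller
still has to fetch them another way. */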
4828 static int
4829 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4830 struct regcache *regcache)
4831 {
4832 struct regset_info *regset;
4833 int saw_general_regs = 0;
4834 int pid;
4835 struct iovec iov;
4836
4837 pid = lwpid_of (current_thread);
4838 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4839 {
4840 void *buf, *data;
4841 int nt_type, res;
4842
4843 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4844 continue;
4845
4846 buf = xmalloc (regset->size);
4847
4848 nt_type = regset->nt_type;
4849 if (nt_type)
4850 {
4851 iov.iov_base = buf;
4852 iov.iov_len = regset->size;
4853 data = (void *) &iov;
4854 }
4855 else
4856 data = buf;
4857
4858 #ifndef __sparc__
4859 res = ptrace (regset->get_request, pid,
4860 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4861 #else
4862 res = ptrace (regset->get_request, pid, data, nt_type);
4863 #endif
4864 if (res < 0)
4865 {
4866 if (errno == EIO
4867 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4868 {
4869 /* If we get EIO on a regset, or an EINVAL and the regset is
4870 optional, do not try it again for this process mode. */
4871 disable_regset (regsets_info, regset);
4872 }
4873 else if (errno == ENODATA)
4874 {
4875 /* ENODATA may be returned if the regset is currently
4876 not "active". This can happen in normal operation,
4877 so suppress the warning in this case. */
4878 }
4879 else if (errno == ESRCH)
4880 {
4881 /* At this point, ESRCH should mean the process is
4882 already gone, in which case we simply ignore attempts
4883 to read its registers. */
4884 }
4885 else
4886 {
4887 char s[256];
4888 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4889 pid);
4890 perror (s);
4891 }
4892 }
4893 else
4894 {
4895 if (regset->type == GENERAL_REGS)
4896 saw_general_regs = 1;
4897 regset->store_function (regcache, buf);
4898 }
4899 free (buf);
4900 }
4901 if (saw_general_regs)
4902 return 0;
4903 else
4904 return 1;
4905 }
4906
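/* Store from REGCACHE all registers covered by the regsets in
REGSETS_INFO, reading each regset first so that kernel fields not
present in the regcache are preserved. Return 0 if the general
registers were among the regsets written, 1 otherwise. */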
4907 static int
4908 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4909 struct regcache *regcache)
4910 {
4911 struct regset_info *regset;
4912 int saw_general_regs = 0;
4913 int pid;
4914 struct iovec iov;
4915
4916 pid = lwpid_of (current_thread);
4917 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4918 {
4919 void *buf, *data;
4920 int nt_type, res;
4921
4922 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4923 || regset->fill_function == NULL)
4924 continue;
4925
4926 buf = xmalloc (regset->size);
4927
4928 /* First fill the buffer with the current register set contents,
4929 in case there are any items in the kernel's regset that are
4930 not in gdbserver's regcache. */
4931
4932 nt_type = regset->nt_type;
4933 if (nt_type)
4934 {
4935 iov.iov_base = buf;
4936 iov.iov_len = regset->size;
4937 data = (void *) &iov;
4938 }
4939 else
4940 data = buf;
4941
4942 #ifndef __sparc__
4943 res = ptrace (regset->get_request, pid,
4944 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4945 #else
4946 res = ptrace (regset->get_request, pid, data, nt_type);
4947 #endif
4948
4949 if (res == 0)
4950 {
4951 /* Then overlay our cached registers on that. */
4952 regset->fill_function (regcache, buf);
4953
4954 /* Only now do we write the register set. */
4955 #ifndef __sparc__
4956 res = ptrace (regset->set_request, pid,
4957 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4958 #else
4959 res = ptrace (regset->set_request, pid, data, nt_type);
4960 #endif
4961 }
4962
4963 if (res < 0)
4964 {
4965 if (errno == EIO
4966 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4967 {
4968 /* If we get EIO on a regset, or an EINVAL and the regset is
4969 optional, do not try it again for this process mode. */
4970 disable_regset (regsets_info, regset);
4971 }
4972 else if (errno == ESRCH)
4973 {
4974 /* At this point, ESRCH should mean the process is
4975 already gone, in which case we simply ignore attempts
4976 to change its registers. See also the related
4977 comment in resume_one_lwp. */
4978 free (buf);
4979 return 0;
4980 }
4981 else
4982 {
4983 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4984 }
4985 }
4986 else if (regset->type == GENERAL_REGS)
4987 saw_general_regs = 1;
4988 free (buf);
4989 }
4990 if (saw_general_regs)
4991 return 0;
4992 else
4993 return 1;
4994 }
4995
4996 #else /* !HAVE_LINUX_REGSETS */
4997
4998 #define use_linux_regsets 0
4999 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5000 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5001
5002 #endif
5003
5004 /* Return 1 if register REGNO is supported by one of the regset ptrace
5005 calls or 0 if it has to be transferred individually. */
5006
5007 static int
5008 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5009 {
5010 unsigned char mask = 1 << (regno % 8);
5011 size_t index = regno / 8;
5012
5013 return (use_linux_regsets
5014 && (regs_info->regset_bitmap == NULL
5015 || (regs_info->regset_bitmap[index] & mask) != 0));
5016 }
5017
5018 #ifdef HAVE_LINUX_USRREGS
5019
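/* Return the USER area offset of register REGNUM according to
USRREGS, for use with PTRACE_PEEKUSER/PTRACE_POKEUSER. Errors out
if REGNUM is invalid; the returned offset may be -1 for registers
not available via the USER area. */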
5020 static int
5021 register_addr (const struct usrregs_info *usrregs, int regnum)
5022 {
5023 int addr;
5024
5025 if (regnum < 0 || regnum >= usrregs->num_regs)
5026 error ("Invalid register number %d.", regnum);
5027
5028 addr = usrregs->regmap[regnum];
5029
5030 return addr;
5031 }
5032
5033
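/* Fetch register REGNO from the inferior's USER area into REGCACHE,
one PTRACE_XFER_TYPE word at a time. The register is marked
unavailable if ptrace fails. */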
5034 void
5035 linux_process_target::fetch_register (const usrregs_info *usrregs,
5036 regcache *regcache, int regno)
5037 {
5038 CORE_ADDR regaddr;
5039 int i, size;
5040 char *buf;
5041 int pid;
5042
5043 if (regno >= usrregs->num_regs)
5044 return;
5045 if (low_cannot_fetch_register (regno))
5046 return;
5047
5048 regaddr = register_addr (usrregs, regno);
5049 if (regaddr == -1)
5050 return;
5051
5052 size = ((register_size (regcache->tdesc, regno)
5053 + sizeof (PTRACE_XFER_TYPE) - 1)
5054 & -sizeof (PTRACE_XFER_TYPE));
5055 buf = (char *) alloca (size);
5056
5057 pid = lwpid_of (current_thread);
5058 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5059 {
5060 errno = 0;
5061 *(PTRACE_XFER_TYPE *) (buf + i) =
5062 ptrace (PTRACE_PEEKUSER, pid,
5063 /* Coerce to a uintptr_t first to avoid potential gcc warning
5064 of coercing an 8 byte integer to a 4 byte pointer. */
5065 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5066 regaddr += sizeof (PTRACE_XFER_TYPE);
5067 if (errno != 0)
5068 {
5069 /* Mark register REGNO unavailable. */
5070 supply_register (regcache, regno, NULL);
5071 return;
5072 }
5073 }
5074
5075 low_supply_ptrace_register (regcache, regno, buf);
5076 }
5077
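/* Store register REGNO from REGCACHE into the inferior's USER area,
one PTRACE_XFER_TYPE word at a time. */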
5078 void
5079 linux_process_target::store_register (const usrregs_info *usrregs,
5080 regcache *regcache, int regno)
5081 {
5082 CORE_ADDR regaddr;
5083 int i, size;
5084 char *buf;
5085 int pid;
5086
5087 if (regno >= usrregs->num_regs)
5088 return;
5089 if (low_cannot_store_register (regno))
5090 return;
5091
5092 regaddr = register_addr (usrregs, regno);
5093 if (regaddr == -1)
5094 return;
5095
5096 size = ((register_size (regcache->tdesc, regno)
5097 + sizeof (PTRACE_XFER_TYPE) - 1)
5098 & -sizeof (PTRACE_XFER_TYPE));
5099 buf = (char *) alloca (size);
5100 memset (buf, 0, size);
5101
5102 low_collect_ptrace_register (regcache, regno, buf);
5103
5104 pid = lwpid_of (current_thread);
5105 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5106 {
5107 errno = 0;
5108 ptrace (PTRACE_POKEUSER, pid,
5109 /* Coerce to a uintptr_t first to avoid potential gcc warning
5110 about coercing an 8 byte integer to a 4 byte pointer. */
5111 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5112 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5113 if (errno != 0)
5114 {
5115 /* At this point, ESRCH should mean the process is
5116 already gone, in which case we simply ignore attempts
5117 to change its registers. See also the related
5118 comment in resume_one_lwp. */
5119 if (errno == ESRCH)
5120 return;
5121
5122
5123 if (!low_cannot_store_register (regno))
5124 error ("writing register %d: %s", regno, safe_strerror (errno));
5125 }
5126 regaddr += sizeof (PTRACE_XFER_TYPE);
5127 }
5128 }
5129 #endif /* HAVE_LINUX_USRREGS */
5130
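/* Default implementations of the low_collect_ptrace_register and
low_supply_ptrace_register hooks: transfer the register between
REGCACHE and BUF unmodified. */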
5131 void
5132 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5133 int regno, char *buf)
5134 {
5135 collect_register (regcache, regno, buf);
5136 }
5137
5138 void
5139 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5140 int regno, const char *buf)
5141 {
5142 supply_register (regcache, regno, buf);
5143 }
5144
5145 void
5146 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5147 regcache *regcache,
5148 int regno, int all)
5149 {
5150 #ifdef HAVE_LINUX_USRREGS
5151 struct usrregs_info *usr = regs_info->usrregs;
5152
5153 if (regno == -1)
5154 {
5155 for (regno = 0; regno < usr->num_regs; regno++)
5156 if (all || !linux_register_in_regsets (regs_info, regno))
5157 fetch_register (usr, regcache, regno);
5158 }
5159 else
5160 fetch_register (usr, regcache, regno);
5161 #endif
5162 }
5163
5164 void
5165 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5166 regcache *regcache,
5167 int regno, int all)
5168 {
5169 #ifdef HAVE_LINUX_USRREGS
5170 struct usrregs_info *usr = regs_info->usrregs;
5171
5172 if (regno == -1)
5173 {
5174 for (regno = 0; regno < usr->num_regs; regno++)
5175 if (all || !linux_register_in_regsets (regs_info, regno))
5176 store_register (usr, regcache, regno);
5177 }
5178 else
5179 store_register (usr, regcache, regno);
5180 #endif
5181 }
5182
5183 void
5184 linux_process_target::fetch_registers (regcache *regcache, int regno)
5185 {
5186 int use_regsets;
5187 int all = 0;
5188 const regs_info *regs_info = get_regs_info ();
5189
5190 if (regno == -1)
5191 {
5192 if (regs_info->usrregs != NULL)
5193 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5194 low_fetch_register (regcache, regno);
5195
5196 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5197 if (regs_info->usrregs != NULL)
5198 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5199 }
5200 else
5201 {
5202 if (low_fetch_register (regcache, regno))
5203 return;
5204
5205 use_regsets = linux_register_in_regsets (regs_info, regno);
5206 if (use_regsets)
5207 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5208 regcache);
5209 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5210 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5211 }
5212 }
5213
5214 void
5215 linux_process_target::store_registers (regcache *regcache, int regno)
5216 {
5217 int use_regsets;
5218 int all = 0;
5219 const regs_info *regs_info = get_regs_info ();
5220
5221 if (regno == -1)
5222 {
5223 all = regsets_store_inferior_registers (regs_info->regsets_info,
5224 regcache);
5225 if (regs_info->usrregs != NULL)
5226 usr_store_inferior_registers (regs_info, regcache, regno, all);
5227 }
5228 else
5229 {
5230 use_regsets = linux_register_in_regsets (regs_info, regno);
5231 if (use_regsets)
5232 all = regsets_store_inferior_registers (regs_info->regsets_info,
5233 regcache);
5234 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5235 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5236 }
5237 }
5238
5239 bool
5240 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5241 {
5242 return false;
5243 }
5244
5245 /* A wrapper for the read_memory target op. */
5246
5247 static int
5248 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5249 {
5250 return the_target->read_memory (memaddr, myaddr, len);
5251 }
5252
5253 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5254 to debugger memory starting at MYADDR. */
5255
5256 int
5257 linux_process_target::read_memory (CORE_ADDR memaddr,
5258 unsigned char *myaddr, int len)
5259 {
5260 int pid = lwpid_of (current_thread);
5261 PTRACE_XFER_TYPE *buffer;
5262 CORE_ADDR addr;
5263 int count;
5264 char filename[64];
5265 int i;
5266 int ret;
5267 int fd;
5268
5269 /* Try using /proc. Don't bother for one word. */
5270 if (len >= 3 * sizeof (long))
5271 {
5272 int bytes;
5273
5274 /* We could keep this file open and cache it - possibly one per
5275 thread. That requires some juggling, but is even faster. */
5276 sprintf (filename, "/proc/%d/mem", pid);
5277 fd = open (filename, O_RDONLY | O_LARGEFILE);
5278 if (fd == -1)
5279 goto no_proc;
5280
5281 /* If pread64 is available, use it. It's faster if the kernel
5282 supports it (only one syscall), and it's 64-bit safe even on
5283 32-bit platforms (for instance, SPARC debugging a SPARC64
5284 application). */
5285 #ifdef HAVE_PREAD64
5286 bytes = pread64 (fd, myaddr, len, memaddr);
5287 #else
5288 bytes = -1;
5289 if (lseek (fd, memaddr, SEEK_SET) != -1)
5290 bytes = read (fd, myaddr, len);
5291 #endif
5292
5293 close (fd);
5294 if (bytes == len)
5295 return 0;
5296
5297 /* Some data was read; we'll try to get the rest with ptrace. */
5298 if (bytes > 0)
5299 {
5300 memaddr += bytes;
5301 myaddr += bytes;
5302 len -= bytes;
5303 }
5304 }
5305
5306 no_proc:
5307 /* Round starting address down to longword boundary. */
5308 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5309 /* Round ending address up; get number of longwords that makes. */
5310 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5311 / sizeof (PTRACE_XFER_TYPE));
5312 /* Allocate buffer of that many longwords. */
5313 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5314
5315 /* Read all the longwords. */
5316 errno = 0;
5317 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5318 {
5319 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5320 about coercing an 8 byte integer to a 4 byte pointer. */
5321 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5322 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5323 (PTRACE_TYPE_ARG4) 0);
5324 if (errno)
5325 break;
5326 }
5327 ret = errno;
5328
5329 /* Copy appropriate bytes out of the buffer. */
5330 if (i > 0)
5331 {
5332 i *= sizeof (PTRACE_XFER_TYPE);
5333 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5334 memcpy (myaddr,
5335 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5336 i < len ? i : len);
5337 }
5338
5339 return ret;
5340 }
5341
5342 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5343 memory at MEMADDR. On failure (cannot write to the inferior)
5344 returns the value of errno. Always succeeds if LEN is zero. */
5345
5346 int
5347 linux_process_target::write_memory (CORE_ADDR memaddr,
5348 const unsigned char *myaddr, int len)
5349 {
5350 int i;
5351 /* Round starting address down to longword boundary. */
5352 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5353 /* Round ending address up; get number of longwords that makes. */
5354 int count
5355 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5356 / sizeof (PTRACE_XFER_TYPE);
5357
5358 /* Allocate buffer of that many longwords. */
5359 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5360
5361 int pid = lwpid_of (current_thread);
5362
5363 if (len == 0)
5364 {
5365 /* Zero length write always succeeds. */
5366 return 0;
5367 }
5368
5369 if (debug_threads)
5370 {
5371 /* Dump up to four bytes. */
5372 char str[4 * 2 + 1];
5373 char *p = str;
5374 int dump = len < 4 ? len : 4;
5375
5376 for (i = 0; i < dump; i++)
5377 {
5378 sprintf (p, "%02x", myaddr[i]);
5379 p += 2;
5380 }
5381 *p = '\0';
5382
5383 threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5384 str, (long) memaddr, pid);
5385 }
5386
5387 /* Fill start and end extra bytes of buffer with existing memory data. */
5388
5389 errno = 0;
5390 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5391 about coercing an 8 byte integer to a 4 byte pointer. */
5392 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5393 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5394 (PTRACE_TYPE_ARG4) 0);
5395 if (errno)
5396 return errno;
5397
5398 if (count > 1)
5399 {
5400 errno = 0;
5401 buffer[count - 1]
5402 = ptrace (PTRACE_PEEKTEXT, pid,
5403 /* Coerce to a uintptr_t first to avoid potential gcc warning
5404 about coercing an 8 byte integer to a 4 byte pointer. */
5405 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5406 * sizeof (PTRACE_XFER_TYPE)),
5407 (PTRACE_TYPE_ARG4) 0);
5408 if (errno)
5409 return errno;
5410 }
5411
5412 /* Copy data to be written over corresponding part of buffer. */
5413
5414 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5415 myaddr, len);
5416
5417 /* Write the entire buffer. */
5418
5419 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5420 {
5421 errno = 0;
5422 ptrace (PTRACE_POKETEXT, pid,
5423 /* Coerce to a uintptr_t first to avoid potential gcc warning
5424 about coercing an 8 byte integer to a 4 byte pointer. */
5425 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5426 (PTRACE_TYPE_ARG4) buffer[i]);
5427 if (errno)
5428 return errno;
5429 }
5430
5431 return 0;
5432 }
5433
5434 void
5435 linux_process_target::look_up_symbols ()
5436 {
5437 #ifdef USE_THREAD_DB
5438 struct process_info *proc = current_process ();
5439
5440 if (proc->priv->thread_db != NULL)
5441 return;
5442
5443 thread_db_init ();
5444 #endif
5445 }
5446
5447 void
5448 linux_process_target::request_interrupt ()
5449 {
5450 /* Send a SIGINT to the process group. This acts just like the user
5451 typed a ^C on the controlling terminal. */
5452 ::kill (-signal_pid, SIGINT);
5453 }
5454
5455 bool
5456 linux_process_target::supports_read_auxv ()
5457 {
5458 return true;
5459 }
5460
5461 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5462 to debugger memory starting at MYADDR. */
5463
5464 int
5465 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5466 unsigned int len)
5467 {
5468 char filename[PATH_MAX];
5469 int fd, n;
5470 int pid = lwpid_of (current_thread);
5471
5472 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5473
5474 fd = open (filename, O_RDONLY);
5475 if (fd < 0)
5476 return -1;
5477
5478 if (offset != (CORE_ADDR) 0
5479 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5480 n = -1;
5481 else
5482 n = read (fd, myaddr, len);
5483
5484 close (fd);
5485
5486 return n;
5487 }
5488
5489 int
5490 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5491 int size, raw_breakpoint *bp)
5492 {
5493 if (type == raw_bkpt_type_sw)
5494 return insert_memory_breakpoint (bp);
5495 else
5496 return low_insert_point (type, addr, size, bp);
5497 }
5498
5499 int
5500 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5501 int size, raw_breakpoint *bp)
5502 {
5503 /* Unsupported (see target.h). */
5504 return 1;
5505 }
5506
5507 int
5508 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5509 int size, raw_breakpoint *bp)
5510 {
5511 if (type == raw_bkpt_type_sw)
5512 return remove_memory_breakpoint (bp);
5513 else
5514 return low_remove_point (type, addr, size, bp);
5515 }
5516
5517 int
5518 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5519 int size, raw_breakpoint *bp)
5520 {
5521 /* Unsupported (see target.h). */
5522 return 1;
5523 }
5524
5525 /* Implement the stopped_by_sw_breakpoint target_ops
5526 method. */
5527
5528 bool
5529 linux_process_target::stopped_by_sw_breakpoint ()
5530 {
5531 struct lwp_info *lwp = get_thread_lwp (current_thread);
5532
5533 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5534 }
5535
5536 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5537 method. */
5538
5539 bool
5540 linux_process_target::supports_stopped_by_sw_breakpoint ()
5541 {
5542 return USE_SIGTRAP_SIGINFO;
5543 }
5544
5545 /* Implement the stopped_by_hw_breakpoint target_ops
5546 method. */
5547
5548 bool
5549 linux_process_target::stopped_by_hw_breakpoint ()
5550 {
5551 struct lwp_info *lwp = get_thread_lwp (current_thread);
5552
5553 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5554 }
5555
5556 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5557 method. */
5558
5559 bool
5560 linux_process_target::supports_stopped_by_hw_breakpoint ()
5561 {
5562 return USE_SIGTRAP_SIGINFO;
5563 }
5564
5565 /* Implement the supports_hardware_single_step target_ops method. */
5566
5567 bool
5568 linux_process_target::supports_hardware_single_step ()
5569 {
5570 return true;
5571 }
5572
5573 bool
5574 linux_process_target::stopped_by_watchpoint ()
5575 {
5576 struct lwp_info *lwp = get_thread_lwp (current_thread);
5577
5578 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5579 }
5580
5581 CORE_ADDR
5582 linux_process_target::stopped_data_address ()
5583 {
5584 struct lwp_info *lwp = get_thread_lwp (current_thread);
5585
5586 return lwp->stopped_data_address;
5587 }
5588
5589 /* This is only used for targets that define PT_TEXT_ADDR,
5590 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, presumably
5591 the target has different ways of acquiring this information, like
5592 loadmaps. */
5593
5594 bool
5595 linux_process_target::supports_read_offsets ()
5596 {
5597 #ifdef SUPPORTS_READ_OFFSETS
5598 return true;
5599 #else
5600 return false;
5601 #endif
5602 }
5603
5604 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5605 to tell gdb about. */
5606
5607 int
5608 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5609 {
5610 #ifdef SUPPORTS_READ_OFFSETS
5611 unsigned long text, text_end, data;
5612 int pid = lwpid_of (current_thread);
5613
5614 errno = 0;
5615
5616 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5617 (PTRACE_TYPE_ARG4) 0);
5618 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5619 (PTRACE_TYPE_ARG4) 0);
5620 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5621 (PTRACE_TYPE_ARG4) 0);
5622
5623 if (errno == 0)
5624 {
5625 /* Both text and data offsets produced at compile-time (and so
5626 used by gdb) are relative to the beginning of the program,
5627 with the data segment immediately following the text segment.
5628 However, the actual runtime layout in memory may put the data
5629 somewhere else, so when we send gdb a data base-address, we
5630 use the real data base address and subtract the compile-time
5631 data base-address from it (which is just the length of the
5632 text segment). BSS immediately follows data in both
5633 cases. */
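/* For example (illustrative numbers only): with text = 0x10000,
   text_end = 0x18000 and data = 0x2000000, the text segment is
   0x8000 bytes long, so gdb is told the data base address
   0x2000000 - 0x8000 = 0x1ff8000. */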
5634 *text_p = text;
5635 *data_p = data - (text_end - text);
5636
5637 return 1;
5638 }
5639 return 0;
5640 #else
5641 gdb_assert_not_reached ("target op read_offsets not supported");
5642 #endif
5643 }
5644
5645 bool
5646 linux_process_target::supports_get_tls_address ()
5647 {
5648 #ifdef USE_THREAD_DB
5649 return true;
5650 #else
5651 return false;
5652 #endif
5653 }
5654
5655 int
5656 linux_process_target::get_tls_address (thread_info *thread,
5657 CORE_ADDR offset,
5658 CORE_ADDR load_module,
5659 CORE_ADDR *address)
5660 {
5661 #ifdef USE_THREAD_DB
5662 return thread_db_get_tls_address (thread, offset, load_module, address);
5663 #else
5664 return -1;
5665 #endif
5666 }
5667
5668 bool
5669 linux_process_target::supports_qxfer_osdata ()
5670 {
5671 return true;
5672 }
5673
5674 int
5675 linux_process_target::qxfer_osdata (const char *annex,
5676 unsigned char *readbuf,
5677 unsigned const char *writebuf,
5678 CORE_ADDR offset, int len)
5679 {
5680 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5681 }
5682
5683 void
5684 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5685 gdb_byte *inf_siginfo, int direction)
5686 {
5687 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5688
5689 /* If there was no callback, or the callback didn't do anything,
5690 then just do a straight memcpy. */
5691 if (!done)
5692 {
5693 if (direction == 1)
5694 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5695 else
5696 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5697 }
5698 }
5699
5700 bool
5701 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5702 int direction)
5703 {
5704 return false;
5705 }
5706
5707 bool
5708 linux_process_target::supports_qxfer_siginfo ()
5709 {
5710 return true;
5711 }
5712
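/* Implement the qxfer_siginfo target_ops method: read or write the
   siginfo_t of the current thread using PTRACE_GETSIGINFO and
   PTRACE_SETSIGINFO. */
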
5713 int
5714 linux_process_target::qxfer_siginfo (const char *annex,
5715 unsigned char *readbuf,
5716 unsigned const char *writebuf,
5717 CORE_ADDR offset, int len)
5718 {
5719 int pid;
5720 siginfo_t siginfo;
5721 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5722
5723 if (current_thread == NULL)
5724 return -1;
5725
5726 pid = lwpid_of (current_thread);
5727
5728 threads_debug_printf ("%s siginfo for lwp %d.",
5729 readbuf != NULL ? "Reading" : "Writing",
5730 pid);
5731
5732 if (offset >= sizeof (siginfo))
5733 return -1;
5734
5735 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5736 return -1;
5737
5738 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5739 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5740 inferior with a 64-bit GDBSERVER should look the same as debugging it
5741 with a 32-bit GDBSERVER, we need to convert it. */
5742 siginfo_fixup (&siginfo, inf_siginfo, 0);
5743
5744 if (offset + len > sizeof (siginfo))
5745 len = sizeof (siginfo) - offset;
5746
5747 if (readbuf != NULL)
5748 memcpy (readbuf, inf_siginfo + offset, len);
5749 else
5750 {
5751 memcpy (inf_siginfo + offset, writebuf, len);
5752
5753 /* Convert back to ptrace layout before flushing it out. */
5754 siginfo_fixup (&siginfo, inf_siginfo, 1);
5755
5756 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5757 return -1;
5758 }
5759
5760 return len;
5761 }
5762
5763 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5764 it lets us notice when children change state; and it acts as the
5765 handler for the sigsuspend in my_waitpid. */
5766
5767 static void
5768 sigchld_handler (int signo)
5769 {
5770 int old_errno = errno;
5771
5772 if (debug_threads)
5773 {
5774 do
5775 {
5776 /* Use the async signal safe debug function. */
5777 if (debug_write ("sigchld_handler\n",
5778 sizeof ("sigchld_handler\n") - 1) < 0)
5779 break; /* just ignore */
5780 } while (0);
5781 }
5782
5783 if (target_is_async_p ())
5784 async_file_mark (); /* trigger a linux_wait */
5785
5786 errno = old_errno;
5787 }
5788
5789 bool
5790 linux_process_target::supports_non_stop ()
5791 {
5792 return true;
5793 }
5794
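/* Implement the async target_ops method: enable or disable async mode
   by setting up or tearing down the SIGCHLD event pipe.  Returns the
   previous state. */
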
5795 bool
5796 linux_process_target::async (bool enable)
5797 {
5798 bool previous = target_is_async_p ();
5799
5800 threads_debug_printf ("async (%d), previous=%d",
5801 enable, previous);
5802
5803 if (previous != enable)
5804 {
5805 sigset_t mask;
5806 sigemptyset (&mask);
5807 sigaddset (&mask, SIGCHLD);
5808
5809 gdb_sigmask (SIG_BLOCK, &mask, NULL);
5810
5811 if (enable)
5812 {
5813 if (!linux_event_pipe.open ())
5814 {
5815 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5816
5817 warning ("creating event pipe failed.");
5818 return previous;
5819 }
5820
5821 /* Register the event loop handler. */
5822 add_file_handler (linux_event_pipe.event_fd (),
5823 handle_target_event, NULL,
5824 "linux-low");
5825
5826 /* Always trigger a linux_wait. */
5827 async_file_mark ();
5828 }
5829 else
5830 {
5831 delete_file_handler (linux_event_pipe.event_fd ());
5832
5833 linux_event_pipe.close ();
5834 }
5835
5836 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5837 }
5838
5839 return previous;
5840 }
5841
5842 int
5843 linux_process_target::start_non_stop (bool nonstop)
5844 {
5845 /* Register or unregister from event-loop accordingly. */
5846 target_async (nonstop);
5847
5848 if (target_is_async_p () != nonstop)
5849 return -1;
5850
5851 return 0;
5852 }
5853
5854 bool
5855 linux_process_target::supports_multi_process ()
5856 {
5857 return true;
5858 }
5859
5860 /* Check if fork events are supported. */
5861
5862 bool
5863 linux_process_target::supports_fork_events ()
5864 {
5865 return true;
5866 }
5867
5868 /* Check if vfork events are supported. */
5869
5870 bool
5871 linux_process_target::supports_vfork_events ()
5872 {
5873 return true;
5874 }
5875
5876 /* Check if exec events are supported. */
5877
5878 bool
5879 linux_process_target::supports_exec_events ()
5880 {
5881 return true;
5882 }
5883
5884 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5885 ptrace flags for all inferiors. This is in case the new GDB connection
5886 doesn't support the same set of events that the previous one did. */
5887
5888 void
5889 linux_process_target::handle_new_gdb_connection ()
5890 {
5891 /* Request that all the lwps reset their ptrace options. */
5892 for_each_thread ([] (thread_info *thread)
5893 {
5894 struct lwp_info *lwp = get_thread_lwp (thread);
5895
5896 if (!lwp->stopped)
5897 {
5898 /* Stop the lwp so we can modify its ptrace options. */
5899 lwp->must_set_ptrace_flags = 1;
5900 linux_stop_lwp (lwp);
5901 }
5902 else
5903 {
5904 /* Already stopped; go ahead and set the ptrace options. */
5905 struct process_info *proc = find_process_pid (pid_of (thread));
5906 int options = linux_low_ptrace_options (proc->attached);
5907
5908 linux_enable_event_reporting (lwpid_of (thread), options);
5909 lwp->must_set_ptrace_flags = 0;
5910 }
5911 });
5912 }
5913
5914 int
5915 linux_process_target::handle_monitor_command (char *mon)
5916 {
5917 #ifdef USE_THREAD_DB
5918 return thread_db_handle_monitor_command (mon);
5919 #else
5920 return 0;
5921 #endif
5922 }
5923
5924 int
5925 linux_process_target::core_of_thread (ptid_t ptid)
5926 {
5927 return linux_common_core_of_thread (ptid);
5928 }
5929
5930 bool
5931 linux_process_target::supports_disable_randomization ()
5932 {
5933 return true;
5934 }
5935
5936 bool
5937 linux_process_target::supports_agent ()
5938 {
5939 return true;
5940 }
5941
5942 bool
5943 linux_process_target::supports_range_stepping ()
5944 {
5945 if (supports_software_single_step ())
5946 return true;
5947
5948 return low_supports_range_stepping ();
5949 }
5950
5951 bool
5952 linux_process_target::low_supports_range_stepping ()
5953 {
5954 return false;
5955 }
5956
5957 bool
5958 linux_process_target::supports_pid_to_exec_file ()
5959 {
5960 return true;
5961 }
5962
5963 const char *
5964 linux_process_target::pid_to_exec_file (int pid)
5965 {
5966 return linux_proc_pid_to_exec_file (pid);
5967 }
5968
5969 bool
5970 linux_process_target::supports_multifs ()
5971 {
5972 return true;
5973 }
5974
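/* Implement the multifs_open target_ops method: open FILENAME as seen
   from PID's mount namespace. */
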
5975 int
5976 linux_process_target::multifs_open (int pid, const char *filename,
5977 int flags, mode_t mode)
5978 {
5979 return linux_mntns_open_cloexec (pid, filename, flags, mode);
5980 }
5981
5982 int
5983 linux_process_target::multifs_unlink (int pid, const char *filename)
5984 {
5985 return linux_mntns_unlink (pid, filename);
5986 }
5987
5988 ssize_t
5989 linux_process_target::multifs_readlink (int pid, const char *filename,
5990 char *buf, size_t bufsiz)
5991 {
5992 return linux_mntns_readlink (pid, filename, buf, bufsiz);
5993 }
5994
5995 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5996 struct target_loadseg
5997 {
5998 /* Core address to which the segment is mapped. */
5999 Elf32_Addr addr;
6000 /* VMA recorded in the program header. */
6001 Elf32_Addr p_vaddr;
6002 /* Size of this segment in memory. */
6003 Elf32_Word p_memsz;
6004 };
6005
6006 # if defined PT_GETDSBT
6007 struct target_loadmap
6008 {
6009 /* Protocol version number, must be zero. */
6010 Elf32_Word version;
6011 /* Pointer to the DSBT table, its size, and the DSBT index. */
6012 unsigned *dsbt_table;
6013 unsigned dsbt_size, dsbt_index;
6014 /* Number of segments in this map. */
6015 Elf32_Word nsegs;
6016 /* The actual memory map. */
6017 struct target_loadseg segs[/*nsegs*/];
6018 };
6019 # define LINUX_LOADMAP PT_GETDSBT
6020 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6021 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6022 # else
6023 struct target_loadmap
6024 {
6025 /* Protocol version number, must be zero. */
6026 Elf32_Half version;
6027 /* Number of segments in this map. */
6028 Elf32_Half nsegs;
6029 /* The actual memory map. */
6030 struct target_loadseg segs[/*nsegs*/];
6031 };
6032 # define LINUX_LOADMAP PTRACE_GETFDPIC
6033 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6034 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6035 # endif
6036
6037 bool
6038 linux_process_target::supports_read_loadmap ()
6039 {
6040 return true;
6041 }
6042
6043 int
6044 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6045 unsigned char *myaddr, unsigned int len)
6046 {
6047 int pid = lwpid_of (current_thread);
6048 int addr = -1;
6049 struct target_loadmap *data = NULL;
6050 unsigned int actual_length, copy_length;
6051
6052 if (strcmp (annex, "exec") == 0)
6053 addr = (int) LINUX_LOADMAP_EXEC;
6054 else if (strcmp (annex, "interp") == 0)
6055 addr = (int) LINUX_LOADMAP_INTERP;
6056 else
6057 return -1;
6058
6059 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6060 return -1;
6061
6062 if (data == NULL)
6063 return -1;
6064
6065 actual_length = sizeof (struct target_loadmap)
6066 + sizeof (struct target_loadseg) * data->nsegs;
6067
6068 if (offset < 0 || offset > actual_length)
6069 return -1;
6070
6071 copy_length = actual_length - offset < len ? actual_length - offset : len;
6072 memcpy (myaddr, (char *) data + offset, copy_length);
6073 return copy_length;
6074 }
6075 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6076
6077 bool
6078 linux_process_target::supports_catch_syscall ()
6079 {
6080 return low_supports_catch_syscall ();
6081 }
6082
6083 bool
6084 linux_process_target::low_supports_catch_syscall ()
6085 {
6086 return false;
6087 }
6088
6089 CORE_ADDR
6090 linux_process_target::read_pc (regcache *regcache)
6091 {
6092 if (!low_supports_breakpoints ())
6093 return 0;
6094
6095 return low_get_pc (regcache);
6096 }
6097
6098 void
6099 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6100 {
6101 gdb_assert (low_supports_breakpoints ());
6102
6103 low_set_pc (regcache, pc);
6104 }
6105
6106 bool
6107 linux_process_target::supports_thread_stopped ()
6108 {
6109 return true;
6110 }
6111
6112 bool
6113 linux_process_target::thread_stopped (thread_info *thread)
6114 {
6115 return get_thread_lwp (thread)->stopped;
6116 }
6117
6118 /* This exposes stop-all-threads functionality to other gdbserver modules. */
6119
6120 void
6121 linux_process_target::pause_all (bool freeze)
6122 {
6123 stop_all_lwps (freeze, NULL);
6124 }
6125
6126 /* This exposes unstop-all-threads functionality to other gdbserver
6127 modules. */
6128
6129 void
6130 linux_process_target::unpause_all (bool unfreeze)
6131 {
6132 unstop_all_lwps (unfreeze, NULL);
6133 }
6134
6135 int
6136 linux_process_target::prepare_to_access_memory ()
6137 {
6138 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6139 running LWP. */
6140 if (non_stop)
6141 target_pause_all (true);
6142 return 0;
6143 }
6144
6145 void
6146 linux_process_target::done_accessing_memory ()
6147 {
6148 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6149 running LWP. */
6150 if (non_stop)
6151 target_unpause_all (true);
6152 }
6153
6154 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6155
6156 static int
6157 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6158 CORE_ADDR *phdr_memaddr, int *num_phdr)
6159 {
6160 char filename[PATH_MAX];
6161 int fd;
6162 const int auxv_size = is_elf64
6163 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6164 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6165
6166 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6167
6168 fd = open (filename, O_RDONLY);
6169 if (fd < 0)
6170 return 1;
6171
6172 *phdr_memaddr = 0;
6173 *num_phdr = 0;
6174 while (read (fd, buf, auxv_size) == auxv_size
6175 && (*phdr_memaddr == 0 || *num_phdr == 0))
6176 {
6177 if (is_elf64)
6178 {
6179 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6180
6181 switch (aux->a_type)
6182 {
6183 case AT_PHDR:
6184 *phdr_memaddr = aux->a_un.a_val;
6185 break;
6186 case AT_PHNUM:
6187 *num_phdr = aux->a_un.a_val;
6188 break;
6189 }
6190 }
6191 else
6192 {
6193 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6194
6195 switch (aux->a_type)
6196 {
6197 case AT_PHDR:
6198 *phdr_memaddr = aux->a_un.a_val;
6199 break;
6200 case AT_PHNUM:
6201 *num_phdr = aux->a_un.a_val;
6202 break;
6203 }
6204 }
6205 }
6206
6207 close (fd);
6208
6209 if (*phdr_memaddr == 0 || *num_phdr == 0)
6210 {
6211 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6212 "phdr_memaddr = %ld, phdr_num = %d",
6213 (long) *phdr_memaddr, *num_phdr);
6214 return 2;
6215 }
6216
6217 return 0;
6218 }
6219
6220 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6221
6222 static CORE_ADDR
6223 get_dynamic (const int pid, const int is_elf64)
6224 {
6225 CORE_ADDR phdr_memaddr, relocation;
6226 int num_phdr, i;
6227 unsigned char *phdr_buf;
6228 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6229
6230 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6231 return 0;
6232
6233 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6234 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6235
6236 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6237 return 0;
6238
6239 /* Compute relocation: it is expected to be 0 for "regular" executables,
6240 non-zero for PIE ones. */
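/* For instance (illustrative addresses): a PIE whose PT_PHDR records
   p_vaddr 0x40, but whose program headers were actually read from
   0x555555554040, yields relocation 0x555555554000. */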
6241 relocation = -1;
6242 for (i = 0; relocation == -1 && i < num_phdr; i++)
6243 if (is_elf64)
6244 {
6245 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6246
6247 if (p->p_type == PT_PHDR)
6248 relocation = phdr_memaddr - p->p_vaddr;
6249 }
6250 else
6251 {
6252 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6253
6254 if (p->p_type == PT_PHDR)
6255 relocation = phdr_memaddr - p->p_vaddr;
6256 }
6257
6258 if (relocation == -1)
6259 {
6260 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6261 all real-world executables, including PIE executables, always have
6262 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6263 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6264 provides DT_DEBUG anyway (fpc binaries are statically linked).
6265 
6266 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6267 
6268 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6269
6270 return 0;
6271 }
6272
6273 for (i = 0; i < num_phdr; i++)
6274 {
6275 if (is_elf64)
6276 {
6277 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6278
6279 if (p->p_type == PT_DYNAMIC)
6280 return p->p_vaddr + relocation;
6281 }
6282 else
6283 {
6284 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6285
6286 if (p->p_type == PT_DYNAMIC)
6287 return p->p_vaddr + relocation;
6288 }
6289 }
6290
6291 return 0;
6292 }
6293
6294 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6295 can be 0 if the inferior does not yet have the library list initialized.
6296 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6297 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6298
6299 static CORE_ADDR
6300 get_r_debug (const int pid, const int is_elf64)
6301 {
6302 CORE_ADDR dynamic_memaddr;
6303 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6304 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6305 CORE_ADDR map = -1;
6306
6307 dynamic_memaddr = get_dynamic (pid, is_elf64);
6308 if (dynamic_memaddr == 0)
6309 return map;
6310
6311 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6312 {
6313 if (is_elf64)
6314 {
6315 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6316 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6317 union
6318 {
6319 Elf64_Xword map;
6320 unsigned char buf[sizeof (Elf64_Xword)];
6321 }
6322 rld_map;
6323 #endif
6324 #ifdef DT_MIPS_RLD_MAP
6325 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6326 {
6327 if (linux_read_memory (dyn->d_un.d_val,
6328 rld_map.buf, sizeof (rld_map.buf)) == 0)
6329 return rld_map.map;
6330 else
6331 break;
6332 }
6333 #endif /* DT_MIPS_RLD_MAP */
6334 #ifdef DT_MIPS_RLD_MAP_REL
6335 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6336 {
6337 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6338 rld_map.buf, sizeof (rld_map.buf)) == 0)
6339 return rld_map.map;
6340 else
6341 break;
6342 }
6343 #endif /* DT_MIPS_RLD_MAP_REL */
6344
6345 if (dyn->d_tag == DT_DEBUG && map == -1)
6346 map = dyn->d_un.d_val;
6347
6348 if (dyn->d_tag == DT_NULL)
6349 break;
6350 }
6351 else
6352 {
6353 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6354 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6355 union
6356 {
6357 Elf32_Word map;
6358 unsigned char buf[sizeof (Elf32_Word)];
6359 }
6360 rld_map;
6361 #endif
6362 #ifdef DT_MIPS_RLD_MAP
6363 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6364 {
6365 if (linux_read_memory (dyn->d_un.d_val,
6366 rld_map.buf, sizeof (rld_map.buf)) == 0)
6367 return rld_map.map;
6368 else
6369 break;
6370 }
6371 #endif /* DT_MIPS_RLD_MAP */
6372 #ifdef DT_MIPS_RLD_MAP_REL
6373 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6374 {
6375 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6376 rld_map.buf, sizeof (rld_map.buf)) == 0)
6377 return rld_map.map;
6378 else
6379 break;
6380 }
6381 #endif /* DT_MIPS_RLD_MAP_REL */
6382
6383 if (dyn->d_tag == DT_DEBUG && map == -1)
6384 map = dyn->d_un.d_val;
6385
6386 if (dyn->d_tag == DT_NULL)
6387 break;
6388 }
6389
6390 dynamic_memaddr += dyn_size;
6391 }
6392
6393 return map;
6394 }
6395
6396 /* Read one pointer from MEMADDR in the inferior. */
6397
6398 static int
6399 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6400 {
6401 int ret;
6402
6403 /* Go through a union so this works on either big or little endian
6404 hosts, when the inferior's pointer size is smaller than the size
6405 of CORE_ADDR. It is assumed the inferior's endianness is the
6406 same as the superior's. */
6407 union
6408 {
6409 CORE_ADDR core_addr;
6410 unsigned int ui;
6411 unsigned char uc;
6412 } addr;
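/* Every union member starts at offset zero, so the PTR_SIZE bytes
   read below land at the start of whichever member is used. */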
6413
6414 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6415 if (ret == 0)
6416 {
6417 if (ptr_size == sizeof (CORE_ADDR))
6418 *ptr = addr.core_addr;
6419 else if (ptr_size == sizeof (unsigned int))
6420 *ptr = addr.ui;
6421 else
6422 gdb_assert_not_reached ("unhandled pointer size");
6423 }
6424 return ret;
6425 }
6426
6427 bool
6428 linux_process_target::supports_qxfer_libraries_svr4 ()
6429 {
6430 return true;
6431 }
6432
6433 struct link_map_offsets
6434 {
6435 /* Offset and size of r_debug.r_version. */
6436 int r_version_offset;
6437
6438 /* Offset and size of r_debug.r_map. */
6439 int r_map_offset;
6440
6441 /* Offset to l_addr field in struct link_map. */
6442 int l_addr_offset;
6443
6444 /* Offset to l_name field in struct link_map. */
6445 int l_name_offset;
6446
6447 /* Offset to l_ld field in struct link_map. */
6448 int l_ld_offset;
6449
6450 /* Offset to l_next field in struct link_map. */
6451 int l_next_offset;
6452
6453 /* Offset to l_prev field in struct link_map. */
6454 int l_prev_offset;
6455 };
6456
6457 /* Construct qXfer:libraries-svr4:read reply. */
6458
6459 int
6460 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6461 unsigned char *readbuf,
6462 unsigned const char *writebuf,
6463 CORE_ADDR offset, int len)
6464 {
6465 struct process_info_private *const priv = current_process ()->priv;
6466 char filename[PATH_MAX];
6467 int pid, is_elf64;
6468
6469 static const struct link_map_offsets lmo_32bit_offsets =
6470 {
6471 0, /* r_version offset. */
6472 4, /* r_debug.r_map offset. */
6473 0, /* l_addr offset in link_map. */
6474 4, /* l_name offset in link_map. */
6475 8, /* l_ld offset in link_map. */
6476 12, /* l_next offset in link_map. */
6477 16 /* l_prev offset in link_map. */
6478 };
6479
6480 static const struct link_map_offsets lmo_64bit_offsets =
6481 {
6482 0, /* r_version offset. */
6483 8, /* r_debug.r_map offset. */
6484 0, /* l_addr offset in link_map. */
6485 8, /* l_name offset in link_map. */
6486 16, /* l_ld offset in link_map. */
6487 24, /* l_next offset in link_map. */
6488 32 /* l_prev offset in link_map. */
6489 };
6490 const struct link_map_offsets *lmo;
6491 unsigned int machine;
6492 int ptr_size;
6493 CORE_ADDR lm_addr = 0, lm_prev = 0;
6494 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6495 int header_done = 0;
6496
6497 if (writebuf != NULL)
6498 return -2;
6499 if (readbuf == NULL)
6500 return -1;
6501
6502 pid = lwpid_of (current_thread);
6503 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6504 is_elf64 = elf_64_file_p (filename, &machine);
6505 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6506 ptr_size = is_elf64 ? 8 : 4;
6507
6508 while (annex[0] != '\0')
6509 {
6510 const char *sep;
6511 CORE_ADDR *addrp;
6512 int name_len;
6513
6514 sep = strchr (annex, '=');
6515 if (sep == NULL)
6516 break;
6517
6518 name_len = sep - annex;
6519 if (name_len == 5 && startswith (annex, "start"))
6520 addrp = &lm_addr;
6521 else if (name_len == 4 && startswith (annex, "prev"))
6522 addrp = &lm_prev;
6523 else
6524 {
6525 annex = strchr (sep, ';');
6526 if (annex == NULL)
6527 break;
6528 annex++;
6529 continue;
6530 }
6531
6532 annex = decode_address_to_semicolon (addrp, sep + 1);
6533 }
6534
6535 if (lm_addr == 0)
6536 {
6537 int r_version = 0;
6538
6539 if (priv->r_debug == 0)
6540 priv->r_debug = get_r_debug (pid, is_elf64);
6541
6542 /* We failed to find DT_DEBUG. This situation will not change
6543 for this inferior, so do not retry it. Report it to GDB as
6544 E01; see GDB's solib-svr4.c for the reasons. */
6545 if (priv->r_debug == (CORE_ADDR) -1)
6546 return -1;
6547
6548 if (priv->r_debug != 0)
6549 {
6550 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6551 (unsigned char *) &r_version,
6552 sizeof (r_version)) != 0
6553 || r_version < 1)
6554 {
6555 warning ("unexpected r_debug version %d", r_version);
6556 }
6557 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6558 &lm_addr, ptr_size) != 0)
6559 {
6560 warning ("unable to read r_map from 0x%lx",
6561 (long) priv->r_debug + lmo->r_map_offset);
6562 }
6563 }
6564 }
6565
6566 std::string document = "<library-list-svr4 version=\"1.0\"";
6567
6568 while (lm_addr
6569 && read_one_ptr (lm_addr + lmo->l_name_offset,
6570 &l_name, ptr_size) == 0
6571 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6572 &l_addr, ptr_size) == 0
6573 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6574 &l_ld, ptr_size) == 0
6575 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6576 &l_prev, ptr_size) == 0
6577 && read_one_ptr (lm_addr + lmo->l_next_offset,
6578 &l_next, ptr_size) == 0)
6579 {
6580 unsigned char libname[PATH_MAX];
6581
6582 if (lm_prev != l_prev)
6583 {
6584 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6585 (long) lm_prev, (long) l_prev);
6586 break;
6587 }
6588
6589 /* Ignore the first entry even if it has a valid name, as it
6590 corresponds to the main executable. The first entry should not be
6591 skipped if the dynamic loader was loaded late by a static executable
6592 (see solib-svr4.c parameter ignore_first). But in that case the main
6593 executable has no PT_DYNAMIC present, and this function has already
6594 returned above because get_r_debug failed. */
6595 if (lm_prev == 0)
6596 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6597 else
6598 {
6599 /* Not checking for error because reading may stop before
6600 we've got PATH_MAX worth of characters. */
6601 libname[0] = '\0';
6602 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6603 libname[sizeof (libname) - 1] = '\0';
6604 if (libname[0] != '\0')
6605 {
6606 if (!header_done)
6607 {
6608 /* Terminate `<library-list-svr4'. */
6609 document += '>';
6610 header_done = 1;
6611 }
6612
6613 string_appendf (document, "<library name=\"");
6614 xml_escape_text_append (&document, (char *) libname);
6615 string_appendf (document, "\" lm=\"0x%lx\" "
6616 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6617 (unsigned long) lm_addr, (unsigned long) l_addr,
6618 (unsigned long) l_ld);
6619 }
6620 }
6621
6622 lm_prev = lm_addr;
6623 lm_addr = l_next;
6624 }
6625
6626 if (!header_done)
6627 {
6628 /* Empty list; terminate `<library-list-svr4'. */
6629 document += "/>";
6630 }
6631 else
6632 document += "</library-list-svr4>";
6633
6634 int document_len = document.length ();
6635 if (offset < document_len)
6636 document_len -= offset;
6637 else
6638 document_len = 0;
6639 if (len > document_len)
6640 len = document_len;
6641
6642 memcpy (readbuf, document.data () + offset, len);
6643
6644 return len;
6645 }
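
/* For illustration, a non-empty reply built above has this shape (all
   addresses are made up):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7fc2740"
       l_addr="0x7ffff7dc0000" l_ld="0x7ffff7fb9c40"/>
     </library-list-svr4>  */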
6646
6647 #ifdef HAVE_LINUX_BTRACE
6648
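/* See to_enable_btrace target method. */
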
6649 btrace_target_info *
6650 linux_process_target::enable_btrace (thread_info *tp,
6651 const btrace_config *conf)
6652 {
6653 return linux_enable_btrace (tp->id, conf);
6654 }
6655
6656 /* See to_disable_btrace target method. */
6657
6658 int
6659 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6660 {
6661 enum btrace_error err;
6662
6663 err = linux_disable_btrace (tinfo);
6664 return (err == BTRACE_ERR_NONE ? 0 : -1);
6665 }
6666
6667 /* Encode an Intel Processor Trace configuration. */
6668
6669 static void
6670 linux_low_encode_pt_config (struct buffer *buffer,
6671 const struct btrace_data_pt_config *config)
6672 {
6673 buffer_grow_str (buffer, "<pt-config>\n");
6674
6675 switch (config->cpu.vendor)
6676 {
6677 case CV_INTEL:
6678 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6679 "model=\"%u\" stepping=\"%u\"/>\n",
6680 config->cpu.family, config->cpu.model,
6681 config->cpu.stepping);
6682 break;
6683
6684 default:
6685 break;
6686 }
6687
6688 buffer_grow_str (buffer, "</pt-config>\n");
6689 }
6690
6691 /* Encode a raw buffer. */
6692
6693 static void
6694 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6695 unsigned int size)
6696 {
6697 if (size == 0)
6698 return;
6699
6700 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6701 buffer_grow_str (buffer, "<raw>\n");
6702
6703 while (size-- > 0)
6704 {
6705 char elem[2];
6706
6707 elem[0] = tohex ((*data >> 4) & 0xf);
6708 elem[1] = tohex (*data++ & 0xf);
6709
6710 buffer_grow (buffer, elem, 2);
6711 }
6712
6713 buffer_grow_str (buffer, "</raw>\n");
6714 }
6715
6716 /* See to_read_btrace target method. */
6717
6718 int
6719 linux_process_target::read_btrace (btrace_target_info *tinfo,
6720 buffer *buffer,
6721 enum btrace_read_type type)
6722 {
6723 struct btrace_data btrace;
6724 enum btrace_error err;
6725
6726 err = linux_read_btrace (&btrace, tinfo, type);
6727 if (err != BTRACE_ERR_NONE)
6728 {
6729 if (err == BTRACE_ERR_OVERFLOW)
6730 buffer_grow_str0 (buffer, "E.Overflow.");
6731 else
6732 buffer_grow_str0 (buffer, "E.Generic Error.");
6733
6734 return -1;
6735 }
6736
6737 switch (btrace.format)
6738 {
6739 case BTRACE_FORMAT_NONE:
6740 buffer_grow_str0 (buffer, "E.No Trace.");
6741 return -1;
6742
6743 case BTRACE_FORMAT_BTS:
6744 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6745 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6746
6747 for (const btrace_block &block : *btrace.variant.bts.blocks)
6748 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6749 paddress (block.begin), paddress (block.end));
6750
6751 buffer_grow_str0 (buffer, "</btrace>\n");
6752 break;
6753
6754 case BTRACE_FORMAT_PT:
6755 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6756 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6757 buffer_grow_str (buffer, "<pt>\n");
6758
6759 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6760
6761 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6762 btrace.variant.pt.size);
6763
6764 buffer_grow_str (buffer, "</pt>\n");
6765 buffer_grow_str0 (buffer, "</btrace>\n");
6766 break;
6767
6768 default:
6769 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6770 return -1;
6771 }
6772
6773 return 0;
6774 }
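
/* For illustration, a BTS reply produced above has this shape (the
   block addresses are made up):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400540" end="0x400563"/>
     </btrace>  */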
6775
6776 /* See to_btrace_conf target method. */
6777
6778 int
6779 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6780 buffer *buffer)
6781 {
6782 const struct btrace_config *conf;
6783
6784 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6785 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6786
6787 conf = linux_btrace_conf (tinfo);
6788 if (conf != NULL)
6789 {
6790 switch (conf->format)
6791 {
6792 case BTRACE_FORMAT_NONE:
6793 break;
6794
6795 case BTRACE_FORMAT_BTS:
6796 buffer_xml_printf (buffer, "<bts");
6797 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6798 buffer_xml_printf (buffer, " />\n");
6799 break;
6800
6801 case BTRACE_FORMAT_PT:
6802 buffer_xml_printf (buffer, "<pt");
6803 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6804 buffer_xml_printf (buffer, "/>\n");
6805 break;
6806 }
6807 }
6808
6809 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6810 return 0;
6811 }
6812 #endif /* HAVE_LINUX_BTRACE */
6813
6814 /* See nat/linux-nat.h. */
6815
6816 ptid_t
6817 current_lwp_ptid (void)
6818 {
6819 return ptid_of (current_thread);
6820 }
6821
6822 const char *
6823 linux_process_target::thread_name (ptid_t thread)
6824 {
6825 return linux_proc_tid_get_name (thread);
6826 }
6827
6828 #if USE_THREAD_DB
6829 bool
6830 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6831 int *handle_len)
6832 {
6833 return thread_db_thread_handle (ptid, handle, handle_len);
6834 }
6835 #endif
6836
6837 thread_info *
6838 linux_process_target::thread_pending_parent (thread_info *thread)
6839 {
6840 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6841
6842 if (parent == nullptr)
6843 return nullptr;
6844
6845 return get_lwp_thread (parent);
6846 }
6847
6848 thread_info *
6849 linux_process_target::thread_pending_child (thread_info *thread)
6850 {
6851 lwp_info *child = get_thread_lwp (thread)->pending_child ();
6852
6853 if (child == nullptr)
6854 return nullptr;
6855
6856 return get_lwp_thread (child);
6857 }
6858
6859 /* Default implementation of linux_target_ops method "set_pc" for a
6860 32-bit pc register which is literally named "pc". */
6861
6862 void
6863 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6864 {
6865 uint32_t newpc = pc;
6866
6867 supply_register_by_name (regcache, "pc", &newpc);
6868 }
6869
6870 /* Default implementation of linux_target_ops method "get_pc" for a
6871 32-bit pc register which is literally named "pc". */
6872
6873 CORE_ADDR
6874 linux_get_pc_32bit (struct regcache *regcache)
6875 {
6876 uint32_t pc;
6877
6878 collect_register_by_name (regcache, "pc", &pc);
6879 threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
6880 return pc;
6881 }
6882
6883 /* Default implementation of linux_target_ops method "set_pc" for a
6884 64-bit pc register which is literally named "pc". */
6885
6886 void
6887 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6888 {
6889 uint64_t newpc = pc;
6890
6891 supply_register_by_name (regcache, "pc", &newpc);
6892 }
6893
6894 /* Default implementation of linux_target_ops method "get_pc" for a
6895 64-bit pc register which is literally named "pc". */
6896
6897 CORE_ADDR
6898 linux_get_pc_64bit (struct regcache *regcache)
6899 {
6900 uint64_t pc;
6901
6902 collect_register_by_name (regcache, "pc", &pc);
6903 threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6904 return pc;
6905 }
6906
6907 /* See linux-low.h. */
6908
6909 int
6910 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
6911 {
6912 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6913 int offset = 0;
6914
6915 gdb_assert (wordsize == 4 || wordsize == 8);
6916
6917 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
6918 {
6919 if (wordsize == 4)
6920 {
6921 uint32_t *data_p = (uint32_t *) data;
6922 if (data_p[0] == match)
6923 {
6924 *valp = data_p[1];
6925 return 1;
6926 }
6927 }
6928 else
6929 {
6930 uint64_t *data_p = (uint64_t *) data;
6931 if (data_p[0] == match)
6932 {
6933 *valp = data_p[1];
6934 return 1;
6935 }
6936 }
6937
6938 offset += 2 * wordsize;
6939 }
6940
6941 return 0;
6942 }
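
/* For example (a sketch; the wordsize of 8 assumes a 64-bit inferior,
   and the surrounding caller is hypothetical), an architecture backend
   could fetch AT_PHDR with:

     CORE_ADDR phdr;
     if (linux_get_auxv (8, AT_PHDR, &phdr))
       ... use phdr ...  */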
6943
6944 /* See linux-low.h. */
6945
6946 CORE_ADDR
6947 linux_get_hwcap (int wordsize)
6948 {
6949 CORE_ADDR hwcap = 0;
6950 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
6951 return hwcap;
6952 }
6953
6954 /* See linux-low.h. */
6955
6956 CORE_ADDR
6957 linux_get_hwcap2 (int wordsize)
6958 {
6959 CORE_ADDR hwcap2 = 0;
6960 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
6961 return hwcap2;
6962 }
6963
6964 #ifdef HAVE_LINUX_REGSETS
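/* Count the regsets in INFO; the regsets array is expected to be
   terminated by an entry whose size is negative. */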
6965 void
6966 initialize_regsets_info (struct regsets_info *info)
6967 {
6968 for (info->num_regsets = 0;
6969 info->regsets[info->num_regsets].size >= 0;
6970 info->num_regsets++)
6971 ;
6972 }
6973 #endif
6974
6975 void
6976 initialize_low (void)
6977 {
6978 struct sigaction sigchld_action;
6979
6980 memset (&sigchld_action, 0, sizeof (sigchld_action));
6981 set_target_ops (the_linux_target);
6982
6983 linux_ptrace_init_warnings ();
6984 linux_proc_init_warnings ();
6985
6986 sigchld_action.sa_handler = sigchld_handler;
6987 sigemptyset (&sigchld_action.sa_mask);
6988 sigchld_action.sa_flags = SA_RESTART;
6989 sigaction (SIGCHLD, &sigchld_action, NULL);
6990
6991 initialize_low_arch ();
6992
6993 linux_check_ptrace_features ();
6994 }