/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
48 #include <inttypes.h>
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifndef O_LARGEFILE
64 #define O_LARGEFILE 0
65 #endif
66
67 #ifndef AT_HWCAP2
68 #define AT_HWCAP2 26
69 #endif
70
71 /* Some targets did not define these ptrace constants from the start,
72 so gdbserver defines them locally here. In the future, these may
73 be removed after they are added to asm/ptrace.h. */
74 #if !(defined(PT_TEXT_ADDR) \
75 || defined(PT_DATA_ADDR) \
76 || defined(PT_TEXT_END_ADDR))
77 #if defined(__mcoldfire__)
78 /* These are still undefined in 3.10 kernels. */
79 #define PT_TEXT_ADDR 49*4
80 #define PT_DATA_ADDR 50*4
81 #define PT_TEXT_END_ADDR 51*4
82 /* These are still undefined in 3.10 kernels. */
83 #elif defined(__TMS320C6X__)
84 #define PT_TEXT_ADDR (0x10000*4)
85 #define PT_DATA_ADDR (0x10004*4)
86 #define PT_TEXT_END_ADDR (0x10008*4)
87 #endif
88 #endif
89
90 #if (defined(__UCLIBC__) \
91 && defined(HAS_NOMMU) \
92 && defined(PT_TEXT_ADDR) \
93 && defined(PT_DATA_ADDR) \
94 && defined(PT_TEXT_END_ADDR))
95 #define SUPPORTS_READ_OFFSETS
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "gdbsupport/btrace-common.h"
101 #endif
102
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

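/* Default implementations of the low_* breakpoint/PC hooks below:
   low_supports_breakpoints reports no support, so low_get_pc and
   low_decr_pc_after_break simply return 0, while low_set_pc and
   low_get_next_pcs assert, as they must be overridden by any backend
   that does claim breakpoint support.  */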
bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  Store the machine
   from the header in *MACHINE (EM_NONE if not an ELF file).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, zero if it is not,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

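/* Remove LWP's thread from the thread list, let the backend release
   any architecture-specific data, and free the LWP itself.  */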
void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

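/* Add a process with PID to the process list, allocating its
   gdbserver-private data and giving the backend a chance to allocate
   architecture-specific data via low_new_process.  ATTACHED says
   whether we attached to an already-running process.  */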
process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  scoped_restore_current_thread restore_thread;
  switch_to_thread (thread);

  low_arch_setup ();
}

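/* Handle a GNU/Linux extended wait response: fork/vfork/clone, exec,
   and vfork-done events.  Returns 0 if the event should be reported
   to higher layers, or 1 if it was handled internally (e.g., a clone
   event, where the new LWP is simply added to our list).  */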
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.set_forked (ptid);
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.set_vforked (ptid);

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.set_thread_created ();
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      switch_to_thread (nullptr);

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
	(make_unique_xstrdup
	 (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

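/* Return the PC of LWP as read from its register cache, or 0 if the
   backend does not support breakpoints.  */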
CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  return pc;
}

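/* Fetch the number of the syscall LWP stopped at, storing it in
   *SYSNO, by way of the backend's low_get_syscall_trapinfo hook.  */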
void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct regcache *regcache;

  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

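/* Determine why LWP just stopped (software or hardware breakpoint,
   watchpoint, or single-step), record the reason in LWP->stop_reason,
   back the PC up over the breakpoint if necessary, and record the
   resulting stop PC.  Returns false if the backend does not support
   breakpoints, true otherwise.  */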
bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (get_lwp_thread (lwp));

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)).c_str ());
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)).c_str ());
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)).c_str ());
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)).c_str ());
	}
    }

  lwp->stop_pc = pc;
  return true;
}

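/* Allocate a new lwp_info for PTID, add a thread for it to the thread
   list, and give the backend a chance to attach architecture-specific
   data to it via low_new_thread.  */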
lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

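/* Attach to the LWP specified by PTID with PTRACE_ATTACH, and arrange
   for its initial SIGSTOP to be collected.  Returns 0 on success, or
   an errno value on failure.  */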
int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)).c_str (),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)).c_str (),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

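/* Kill PROCESS and all of its LWPs, reaping their wait statuses, and
   mourn it.  */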
int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
	  || thread->last_status.sig () == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)).c_str (),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)).c_str (),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)).c_str (),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

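/* Detach from LWP, delivering any pending signal it stopped for, and
   delete it from our tables.  */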
void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)).c_str (),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)).c_str (),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

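/* Detach from PROCESS and all of its LWPs, clone LWPs first and the
   thread group leader last, then mourn the process.  */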
int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first;
     otherwise, nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to PROCESS from the lwp list, free the
   process' private data, and remove the process from the process
   list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

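/* Wait for the process with PID to be reaped: collect wait statuses
   until waitpid reports that it exited or was signalled, or fails
   with ECHILD.  */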
void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return false;
}

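/* Return true if LWP still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */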
bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      scoped_restore_current_thread restore_thread;
      switch_to_thread (thread);

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

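/* Callback used to find a thread with a pending status to report.
   Returns true if THREAD matches PTID, is resumed from the client's
   perspective, and still has an interesting status pending; a thread
   whose pending status became uninteresting is re-resumed here.  */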
bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

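/* Return the LWP whose lwpid matches PTID's lwp field (or, if that
   is zero, PTID's pid field), or NULL if no such LWP is known.  */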
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

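/* Check all processes for thread group leaders that have turned
   zombie, either because the leader exited while other threads
   survive, or because another thread exec'd, and delete the stale
   leader LWPs from our tables.  */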
void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     its tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
    });
}

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
		    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}

1884 /* This function should only be called if the LWP got a SIGTRAP.
1885
1886 Handle any tracepoint steps or hits. Return true if a tracepoint
1887 event was handled, 0 otherwise. */
1888
1889 static int
1890 handle_tracepoints (struct lwp_info *lwp)
1891 {
1892 struct thread_info *tinfo = get_lwp_thread (lwp);
1893 int tpoint_related_event = 0;
1894
1895 gdb_assert (lwp->suspended == 0);
1896
1897 /* If this tracepoint hit causes a tracing stop, we'll immediately
1898 uninsert tracepoints. To do this, we temporarily pause all
1899 threads, unpatch away, and then unpause threads. We need to make
1900 sure the unpausing doesn't resume LWP too. */
1901 lwp_suspended_inc (lwp);
1902
1903 /* And we need to be sure that any all-threads-stopping doesn't try
1904 to move threads out of the jump pads, as it could deadlock the
1905 inferior (LWP could be in the jump pad, maybe even holding the
1906 lock.) */
1907
1908 /* Do any necessary step collect actions. */
1909 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1910
1911 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1912
1913 /* See if we just hit a tracepoint and do its main collect
1914 actions. */
1915 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1916
1917 lwp_suspended_decr (lwp);
1918
1919 gdb_assert (lwp->suspended == 0);
1920 gdb_assert (!stabilizing_threads
1921 || (lwp->collecting_fast_tracepoint
1922 != fast_tpoint_collect_result::not_collecting));
1923
1924 if (tpoint_related_event)
1925 {
1926 if (debug_threads)
1927 debug_printf ("got a tracepoint event\n");
1928 return 1;
1929 }
1930
1931 return 0;
1932 }
1933
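/* Return whether LWP is stopped in the middle of a fast tracepoint
   collection, judging from its stop PC and its thread area address.
   Fill in *STATUS, if not NULL, with the collection details.  */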
1934 fast_tpoint_collect_result
1935 linux_process_target::linux_fast_tracepoint_collecting
1936 (lwp_info *lwp, fast_tpoint_collect_status *status)
1937 {
1938 CORE_ADDR thread_area;
1939 struct thread_info *thread = get_lwp_thread (lwp);
1940
1941 /* Get the thread area address. This is used to recognize which
1942 thread is which when tracing with the in-process agent library.
1943 We don't read anything from the address, and treat it as opaque;
1944 it's the address itself that we assume is unique per-thread. */
1945 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1946 return fast_tpoint_collect_result::not_collecting;
1947
1948 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1949 }
1950
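/* Default implementation of low_get_thread_area: fetching the
   thread area address is not supported, so return -1.  Targets that
   support fast tracepoints are expected to override this.  */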
1951 int
1952 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1953 {
1954 return -1;
1955 }
1956
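/* Check whether LWP, stopped with wait status *WSTAT (or NULL), is
   midway through a fast tracepoint collection in the jump pad.
   Return true if the LWP should be left running until it has moved
   out of the pad; on a synchronous signal, instead adjust the PC
   back to the tracepoint address and return false so the signal is
   reported.  */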
1957 bool
1958 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1959 {
1960 scoped_restore_current_thread restore_thread;
1961 switch_to_thread (get_lwp_thread (lwp));
1962
1963 if ((wstat == NULL
1964 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1965 && supports_fast_tracepoints ()
1966 && agent_loaded_p ())
1967 {
1968 struct fast_tpoint_collect_status status;
1969
1970 if (debug_threads)
1971 debug_printf ("Checking whether LWP %ld needs to move out of the "
1972 "jump pad.\n",
1973 lwpid_of (current_thread));
1974
1975 fast_tpoint_collect_result r
1976 = linux_fast_tracepoint_collecting (lwp, &status);
1977
1978 if (wstat == NULL
1979 || (WSTOPSIG (*wstat) != SIGILL
1980 && WSTOPSIG (*wstat) != SIGFPE
1981 && WSTOPSIG (*wstat) != SIGSEGV
1982 && WSTOPSIG (*wstat) != SIGBUS))
1983 {
1984 lwp->collecting_fast_tracepoint = r;
1985
1986 if (r != fast_tpoint_collect_result::not_collecting)
1987 {
1988 if (r == fast_tpoint_collect_result::before_insn
1989 && lwp->exit_jump_pad_bkpt == NULL)
1990 {
1991 /* Haven't executed the original instruction yet.
1992 Set breakpoint there, and wait till it's hit,
1993 then single-step until exiting the jump pad. */
1994 lwp->exit_jump_pad_bkpt
1995 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1996 }
1997
1998 if (debug_threads)
1999 debug_printf ("Checking whether LWP %ld needs to move out of "
2000 "the jump pad...it does\n",
2001 lwpid_of (current_thread));
2002
2003 return true;
2004 }
2005 }
2006 else
2007 {
2008 /* If we get a synchronous signal while collecting, *and*
2009 while executing the (relocated) original instruction,
2010 reset the PC to point at the tpoint address, before
2011 reporting to GDB. Otherwise, it's an IPA lib bug: just
2012 report the signal to GDB, and pray for the best. */
2013
2014 lwp->collecting_fast_tracepoint
2015 = fast_tpoint_collect_result::not_collecting;
2016
2017 if (r != fast_tpoint_collect_result::not_collecting
2018 && (status.adjusted_insn_addr <= lwp->stop_pc
2019 && lwp->stop_pc < status.adjusted_insn_addr_end))
2020 {
2021 siginfo_t info;
2022 struct regcache *regcache;
2023
2024 /* The si_addr on a few signals references the address
2025 of the faulting instruction. Adjust that as
2026 well. */
2027 if ((WSTOPSIG (*wstat) == SIGILL
2028 || WSTOPSIG (*wstat) == SIGFPE
2029 || WSTOPSIG (*wstat) == SIGBUS
2030 || WSTOPSIG (*wstat) == SIGSEGV)
2031 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2032 (PTRACE_TYPE_ARG3) 0, &info) == 0
2033 /* Final check just to make sure we don't clobber
2034 the siginfo of non-kernel-sent signals. */
2035 && (uintptr_t) info.si_addr == lwp->stop_pc)
2036 {
2037 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2038 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2039 (PTRACE_TYPE_ARG3) 0, &info);
2040 }
2041
2042 regcache = get_thread_regcache (current_thread, 1);
2043 low_set_pc (regcache, status.tpoint_addr);
2044 lwp->stop_pc = status.tpoint_addr;
2045
2046 /* Cancel any fast tracepoint lock this thread was
2047 holding. */
2048 force_unlock_trace_buffer ();
2049 }
2050
2051 if (lwp->exit_jump_pad_bkpt != NULL)
2052 {
2053 if (debug_threads)
2054 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2055 "stopping all threads momentarily.\n");
2056
2057 stop_all_lwps (1, lwp);
2058
2059 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2060 lwp->exit_jump_pad_bkpt = NULL;
2061
2062 unstop_all_lwps (1, lwp);
2063
2064 gdb_assert (lwp->suspended >= 0);
2065 }
2066 }
2067 }
2068
2069 if (debug_threads)
2070 debug_printf ("Checking whether LWP %ld needs to move out of the "
2071 "jump pad...no\n",
2072 lwpid_of (current_thread));
2073
2074 return false;
2075 }
2076
2077 /* Enqueue one signal in the "signals to report later when out of the
2078 jump pad" list. */
2079
2080 static void
2081 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2082 {
2083 struct thread_info *thread = get_lwp_thread (lwp);
2084
2085 if (debug_threads)
2086 debug_printf ("Deferring signal %d for LWP %ld.\n",
2087 WSTOPSIG (*wstat), lwpid_of (thread));
2088
2089 if (debug_threads)
2090 {
2091 for (const auto &sig : lwp->pending_signals_to_report)
2092 debug_printf (" Already queued %d\n",
2093 sig.signal);
2094
2095 debug_printf (" (no more currently queued signals)\n");
2096 }
2097
2098 /* Don't enqueue non-RT signals if they are already in the deferred
2099 queue. (SIGSTOP being the easiest signal to see ending up here
2100 twice) */
2101 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2102 {
2103 for (const auto &sig : lwp->pending_signals_to_report)
2104 {
2105 if (sig.signal == WSTOPSIG (*wstat))
2106 {
2107 if (debug_threads)
2108 debug_printf ("Not requeuing already queued non-RT signal %d"
2109 " for LWP %ld\n",
2110 sig.signal,
2111 lwpid_of (thread));
2112 return;
2113 }
2114 }
2115 }
2116
2117 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2118
2119 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2120 &lwp->pending_signals_to_report.back ().info);
2121 }
2122
2123 /* Dequeue one signal from the "signals to report later when out of
2124 the jump pad" list. */
2125
2126 static int
2127 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2128 {
2129 struct thread_info *thread = get_lwp_thread (lwp);
2130
2131 if (!lwp->pending_signals_to_report.empty ())
2132 {
2133 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2134
2135 *wstat = W_STOPCODE (p_sig.signal);
2136 if (p_sig.info.si_signo != 0)
2137 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2138 &p_sig.info);
2139
2140 lwp->pending_signals_to_report.pop_front ();
2141
2142 if (debug_threads)
2143 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2144 WSTOPSIG (*wstat), lwpid_of (thread));
2145
2146 if (debug_threads)
2147 {
2148 for (const auto &sig : lwp->pending_signals_to_report)
2149 debug_printf (" Still queued %d\n",
2150 sig.signal);
2151
2152 debug_printf (" (no more queued signals)\n");
2153 }
2154
2155 return 1;
2156 }
2157
2158 return 0;
2159 }
2160
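/* Check whether CHILD stopped for a hardware watchpoint.  If so,
   record the stop reason and the data address.  Return true if the
   stop was caused by a watchpoint.  */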
2161 bool
2162 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2163 {
2164 scoped_restore_current_thread restore_thread;
2165 switch_to_thread (get_lwp_thread (child));
2166
2167 if (low_stopped_by_watchpoint ())
2168 {
2169 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2170 child->stopped_data_address = low_stopped_data_address ();
2171 }
2172
2173 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2174 }
2175
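/* Default implementation for targets without hardware watchpoint
   support: never report a watchpoint stop.  */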
2176 bool
2177 linux_process_target::low_stopped_by_watchpoint ()
2178 {
2179 return false;
2180 }
2181
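/* Default implementation for targets without hardware watchpoint
   support: there is no stopped data address to report.  */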
2182 CORE_ADDR
2183 linux_process_target::low_stopped_data_address ()
2184 {
2185 return 0;
2186 }
2187
2188 /* Return the ptrace options that we want to try to enable. */
2189
2190 static int
2191 linux_low_ptrace_options (int attached)
2192 {
2193 client_state &cs = get_client_state ();
2194 int options = 0;
2195
2196 if (!attached)
2197 options |= PTRACE_O_EXITKILL;
2198
2199 if (cs.report_fork_events)
2200 options |= PTRACE_O_TRACEFORK;
2201
2202 if (cs.report_vfork_events)
2203 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2204
2205 if (cs.report_exec_events)
2206 options |= PTRACE_O_TRACEEXEC;
2207
2208 options |= PTRACE_O_TRACESYSGOOD;
2209
2210 return options;
2211 }
2212
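/* Process one wait status WSTAT reported by waitpid for LWPID.
   Update the corresponding LWP's bookkeeping (stop pc, stop reason,
   ptrace options, syscall state), filter out events that need no
   reporting (e.g. delayed SIGSTOPs), and leave the rest pending on
   the LWP for later selection.  */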
2213 void
2214 linux_process_target::filter_event (int lwpid, int wstat)
2215 {
2216 client_state &cs = get_client_state ();
2217 struct lwp_info *child;
2218 struct thread_info *thread;
2219 int have_stop_pc = 0;
2220
2221 child = find_lwp_pid (ptid_t (lwpid));
2222
2223 /* Check for stop events reported by a process we didn't already
2224 know about - anything not already in our LWP list.
2225
2226 If we're expecting to receive stopped processes after
2227 fork, vfork, and clone events, then we'll just add the
2228 new one to our list and go back to waiting for the event
2229 to be reported - the stopped process might be returned
2230 from waitpid before or after the event is.
2231
2232 But note the case of a non-leader thread exec'ing after the
2233 leader having exited, and gone from our lists (because
2234 check_zombie_leaders deleted it). The non-leader thread
2235 changes its tid to the tgid. */
2236
2237 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2238 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2239 {
2240 ptid_t child_ptid;
2241
2242 /* A multi-thread exec after we had seen the leader exiting. */
2243 if (debug_threads)
2244 {
2245 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2246 "after exec.\n", lwpid);
2247 }
2248
2249 child_ptid = ptid_t (lwpid, lwpid);
2250 child = add_lwp (child_ptid);
2251 child->stopped = 1;
2252 switch_to_thread (child->thread);
2253 }
2254
2255 /* If we didn't find a process, one of two things presumably happened:
2256 - A process we started and then detached from has exited. Ignore it.
2257 - A process we are controlling has forked and the new child's stop
2258 was reported to us by the kernel. Save its PID. */
2259 if (child == NULL && WIFSTOPPED (wstat))
2260 {
2261 add_to_pid_list (&stopped_pids, lwpid, wstat);
2262 return;
2263 }
2264 else if (child == NULL)
2265 return;
2266
2267 thread = get_lwp_thread (child);
2268
2269 child->stopped = 1;
2270
2271 child->last_status = wstat;
2272
2273 /* Check if the thread has exited. */
2274 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2275 {
2276 if (debug_threads)
2277 debug_printf ("LLFE: %d exited.\n", lwpid);
2278
2279 if (finish_step_over (child))
2280 {
2281 /* Unsuspend all other LWPs, and set them back running again. */
2282 unsuspend_all_lwps (child);
2283 }
2284
2285 /* If there is at least one more LWP, then the exit signal was
2286 not the end of the debugged application and should be
2287 ignored, unless GDB wants to hear about thread exits. */
2288 if (cs.report_thread_events
2289 || last_thread_of_process_p (pid_of (thread)))
2290 {
2291 /* Events are serialized to the GDB core, and we can't
2292 report this one right now. Leave the status pending for
2293 the next time we're able to report it. */
2294 mark_lwp_dead (child, wstat);
2295 return;
2296 }
2297 else
2298 {
2299 delete_lwp (child);
2300 return;
2301 }
2302 }
2303
2304 gdb_assert (WIFSTOPPED (wstat));
2305
2306 if (WIFSTOPPED (wstat))
2307 {
2308 struct process_info *proc;
2309
2310 /* Architecture-specific setup after inferior is running. */
2311 proc = find_process_pid (pid_of (thread));
2312 if (proc->tdesc == NULL)
2313 {
2314 if (proc->attached)
2315 {
2316 /* This needs to happen after we have attached to the
2317 inferior and it is stopped for the first time, but
2318 before we access any inferior registers. */
2319 arch_setup_thread (thread);
2320 }
2321 else
2322 {
2323 /* The process is started, but GDBserver will do
2324 architecture-specific setup after the program stops at
2325 the first instruction. */
2326 child->status_pending_p = 1;
2327 child->status_pending = wstat;
2328 return;
2329 }
2330 }
2331 }
2332
2333 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2334 {
2335 struct process_info *proc = find_process_pid (pid_of (thread));
2336 int options = linux_low_ptrace_options (proc->attached);
2337
2338 linux_enable_event_reporting (lwpid, options);
2339 child->must_set_ptrace_flags = 0;
2340 }
2341
2342 /* Always update syscall_state, even if it will be filtered later. */
2343 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2344 {
2345 child->syscall_state
2346 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2347 ? TARGET_WAITKIND_SYSCALL_RETURN
2348 : TARGET_WAITKIND_SYSCALL_ENTRY);
2349 }
2350 else
2351 {
2352 /* Almost all other ptrace-stops are known to be outside of system
2353 calls, with further exceptions in handle_extended_wait. */
2354 child->syscall_state = TARGET_WAITKIND_IGNORE;
2355 }
2356
2357 /* Be careful to not overwrite stop_pc until save_stop_reason is
2358 called. */
2359 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2360 && linux_is_extended_waitstatus (wstat))
2361 {
2362 child->stop_pc = get_pc (child);
2363 if (handle_extended_wait (&child, wstat))
2364 {
2365 /* The event has been handled, so just return without
2366 reporting it. */
2367 return;
2368 }
2369 }
2370
2371 if (linux_wstatus_maybe_breakpoint (wstat))
2372 {
2373 if (save_stop_reason (child))
2374 have_stop_pc = 1;
2375 }
2376
2377 if (!have_stop_pc)
2378 child->stop_pc = get_pc (child);
2379
2380 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2381 && child->stop_expected)
2382 {
2383 if (debug_threads)
2384 debug_printf ("Expected stop.\n");
2385 child->stop_expected = 0;
2386
2387 if (thread->last_resume_kind == resume_stop)
2388 {
2389 /* We want to report the stop to the core. Treat the
2390 SIGSTOP as a normal event. */
2391 if (debug_threads)
2392 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2393 target_pid_to_str (ptid_of (thread)).c_str ());
2394 }
2395 else if (stopping_threads != NOT_STOPPING_THREADS)
2396 {
2397 /* Stopping threads. We don't want this SIGSTOP to end up
2398 pending. */
2399 if (debug_threads)
2400 debug_printf ("LLW: SIGSTOP caught for %s "
2401 "while stopping threads.\n",
2402 target_pid_to_str (ptid_of (thread)).c_str ());
2403 return;
2404 }
2405 else
2406 {
2407 /* This is a delayed SIGSTOP. Filter out the event. */
2408 if (debug_threads)
2409 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2410 child->stepping ? "step" : "continue",
2411 target_pid_to_str (ptid_of (thread)).c_str ());
2412
2413 resume_one_lwp (child, child->stepping, 0, NULL);
2414 return;
2415 }
2416 }
2417
2418 child->status_pending_p = 1;
2419 child->status_pending = wstat;
2420 return;
2421 }
2422
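/* Return true if THREAD can be single-stepped in hardware.
   Otherwise assert that software single-step breakpoints are
   already in place and return false.  */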
2423 bool
2424 linux_process_target::maybe_hw_step (thread_info *thread)
2425 {
2426 if (supports_hardware_single_step ())
2427 return true;
2428 else
2429 {
2430 /* GDBserver must insert single-step breakpoint for software
2431 single step. */
2432 gdb_assert (has_single_step_breakpoints (thread));
2433 return false;
2434 }
2435 }
2436
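/* Resume THREAD's LWP if it is stopped without any pending status
   to report, but is resumed from the core's perspective.  Used as a
   for_each_thread callback while pulling events.  */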
2437 void
2438 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2439 {
2440 struct lwp_info *lp = get_thread_lwp (thread);
2441
2442 if (lp->stopped
2443 && !lp->suspended
2444 && !lp->status_pending_p
2445 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2446 {
2447 int step = 0;
2448
2449 if (thread->last_resume_kind == resume_step)
2450 step = maybe_hw_step (thread);
2451
2452 if (debug_threads)
2453 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2454 target_pid_to_str (ptid_of (thread)).c_str (),
2455 paddress (lp->stop_pc),
2456 step);
2457
2458 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2459 }
2460 }
2461
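/* Wait for an event from any LWP matching WAIT_PTID, leaving events
   of LWPs that do not match FILTER_PTID pending.  Store the wait
   status in *WSTATP.  Return the LWP id of the event child, 0 if
   WNOHANG was set in OPTIONS and no event was found, or -1 if there
   are no unwaited-for resumed children left.  */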
2462 int
2463 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2464 ptid_t filter_ptid,
2465 int *wstatp, int options)
2466 {
2467 struct thread_info *event_thread;
2468 struct lwp_info *event_child, *requested_child;
2469 sigset_t block_mask, prev_mask;
2470
2471 retry:
2472 /* N.B. event_thread points to the thread_info struct that contains
2473 event_child. Keep them in sync. */
2474 event_thread = NULL;
2475 event_child = NULL;
2476 requested_child = NULL;
2477
2478 /* Check for a lwp with a pending status. */
2479
2480 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2481 {
2482 event_thread = find_thread_in_random ([&] (thread_info *thread)
2483 {
2484 return status_pending_p_callback (thread, filter_ptid);
2485 });
2486
2487 if (event_thread != NULL)
2488 event_child = get_thread_lwp (event_thread);
2489 if (debug_threads && event_thread)
2490 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2491 }
2492 else if (filter_ptid != null_ptid)
2493 {
2494 requested_child = find_lwp_pid (filter_ptid);
2495
2496 if (stopping_threads == NOT_STOPPING_THREADS
2497 && requested_child->status_pending_p
2498 && (requested_child->collecting_fast_tracepoint
2499 != fast_tpoint_collect_result::not_collecting))
2500 {
2501 enqueue_one_deferred_signal (requested_child,
2502 &requested_child->status_pending);
2503 requested_child->status_pending_p = 0;
2504 requested_child->status_pending = 0;
2505 resume_one_lwp (requested_child, 0, 0, NULL);
2506 }
2507
2508 if (requested_child->suspended
2509 && requested_child->status_pending_p)
2510 {
2511 internal_error (__FILE__, __LINE__,
2512 "requesting an event out of a"
2513 " suspended child?");
2514 }
2515
2516 if (requested_child->status_pending_p)
2517 {
2518 event_child = requested_child;
2519 event_thread = get_lwp_thread (event_child);
2520 }
2521 }
2522
2523 if (event_child != NULL)
2524 {
2525 if (debug_threads)
2526 debug_printf ("Got an event from pending child %ld (%04x)\n",
2527 lwpid_of (event_thread), event_child->status_pending);
2528 *wstatp = event_child->status_pending;
2529 event_child->status_pending_p = 0;
2530 event_child->status_pending = 0;
2531 switch_to_thread (event_thread);
2532 return lwpid_of (event_thread);
2533 }
2534
2535 /* But if we don't find a pending event, we'll have to wait.
2536
2537 We only enter this loop if no process has a pending wait status.
2538 Thus any action taken in response to a wait status inside this
2539 loop is responding as soon as we detect the status, not after any
2540 pending events. */
2541
2542 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2543 all signals while here. */
2544 sigfillset (&block_mask);
2545 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2546
2547 /* Always pull all events out of the kernel. We'll randomly select
2548 an event LWP out of all that have events, to prevent
2549 starvation. */
2550 while (event_child == NULL)
2551 {
2552 pid_t ret = 0;
2553
2554 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2555 quirks:
2556
2557 - If the thread group leader exits while other threads in the
2558 thread group still exist, waitpid(TGID, ...) hangs. That
2559 waitpid won't return an exit status until the other threads
2560 in the group are reaped.
2561
2562 - When a non-leader thread execs, that thread just vanishes
2563 without reporting an exit (so we'd hang if we waited for it
2564 explicitly in that case). The exec event is reported to
2565 the TGID pid. */
2566 errno = 0;
2567 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2568
2569 if (debug_threads)
2570 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2571 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2572
2573 if (ret > 0)
2574 {
2575 if (debug_threads)
2576 {
2577 debug_printf ("LLW: waitpid %ld received %s\n",
2578 (long) ret, status_to_str (*wstatp).c_str ());
2579 }
2580
2581 /* Filter all events. IOW, leave all events pending. We'll
2582 randomly select an event LWP out of all that have events
2583 below. */
2584 filter_event (ret, *wstatp);
2585 /* Retry until nothing comes out of waitpid. A single
2586 SIGCHLD can indicate more than one child stopped. */
2587 continue;
2588 }
2589
2590 /* Now that we've pulled all events out of the kernel, resume
2591 LWPs that don't have an interesting event to report. */
2592 if (stopping_threads == NOT_STOPPING_THREADS)
2593 for_each_thread ([this] (thread_info *thread)
2594 {
2595 resume_stopped_resumed_lwps (thread);
2596 });
2597
2598 /* ... and find an LWP with a status to report to the core, if
2599 any. */
2600 event_thread = find_thread_in_random ([&] (thread_info *thread)
2601 {
2602 return status_pending_p_callback (thread, filter_ptid);
2603 });
2604
2605 if (event_thread != NULL)
2606 {
2607 event_child = get_thread_lwp (event_thread);
2608 *wstatp = event_child->status_pending;
2609 event_child->status_pending_p = 0;
2610 event_child->status_pending = 0;
2611 break;
2612 }
2613
2614 /* Check for zombie thread group leaders. Those can't be reaped
2615 until all other threads in the thread group are. */
2616 check_zombie_leaders ();
2617
2618 auto not_stopped = [&] (thread_info *thread)
2619 {
2620 return not_stopped_callback (thread, wait_ptid);
2621 };
2622
2623 /* If there are no resumed children left in the set of LWPs we
2624 want to wait for, bail. We can't just block in
2625 waitpid/sigsuspend, because lwps might have been left stopped
2626 in trace-stop state, and we'd be stuck forever waiting for
2627 their status to change (which would only happen if we resumed
2628 them). Even if WNOHANG is set, this return code is preferred
2629 over 0 (below), as it is more detailed. */
2630 if (find_thread (not_stopped) == NULL)
2631 {
2632 if (debug_threads)
2633 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2634 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2635 return -1;
2636 }
2637
2638 /* No interesting event to report to the caller. */
2639 if ((options & WNOHANG))
2640 {
2641 if (debug_threads)
2642 debug_printf ("WNOHANG set, no event found\n");
2643
2644 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2645 return 0;
2646 }
2647
2648 /* Block until we get an event reported with SIGCHLD. */
2649 if (debug_threads)
2650 debug_printf ("sigsuspend'ing\n");
2651
2652 sigsuspend (&prev_mask);
2653 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2654 goto retry;
2655 }
2656
2657 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2658
2659 switch_to_thread (event_thread);
2660
2661 return lwpid_of (event_thread);
2662 }
2663
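/* Wait for an event from any LWP matching PTID.  Equivalent to
   wait_for_event_filtered with PTID used both for waiting and for
   filtering.  */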
2664 int
2665 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2666 {
2667 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2668 }
2669
2670 /* Select one LWP out of those that have events pending. */
2671
2672 static void
2673 select_event_lwp (struct lwp_info **orig_lp)
2674 {
2675 struct thread_info *event_thread = NULL;
2676
2677 /* In all-stop, give preference to the LWP that is being
2678 single-stepped. There will be at most one, and it's the LWP that
2679 the core is most interested in. If we didn't do this, then we'd
2680 have to handle pending step SIGTRAPs somehow in case the core
2681 later continues the previously-stepped thread, otherwise we'd
2682 report the pending SIGTRAP, and the core, not having stepped the
2683 thread, wouldn't understand what the trap was for, and therefore
2684 would report it to the user as a random signal. */
2685 if (!non_stop)
2686 {
2687 event_thread = find_thread ([] (thread_info *thread)
2688 {
2689 lwp_info *lp = get_thread_lwp (thread);
2690
2691 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2692 && thread->last_resume_kind == resume_step
2693 && lp->status_pending_p);
2694 });
2695
2696 if (event_thread != NULL)
2697 {
2698 if (debug_threads)
2699 debug_printf ("SEL: Select single-step %s\n",
2700 target_pid_to_str (ptid_of (event_thread)).c_str ());
2701 }
2702 }
2703 if (event_thread == NULL)
2704 {
2705 /* No single-stepping LWP. Select one at random, out of those
2706 which have had events. */
2707
2708 event_thread = find_thread_in_random ([&] (thread_info *thread)
2709 {
2710 lwp_info *lp = get_thread_lwp (thread);
2711
2712 /* Only resumed LWPs that have an event pending. */
2713 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2714 && lp->status_pending_p);
2715 });
2716 }
2717
2718 if (event_thread != NULL)
2719 {
2720 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2721
2722 /* Switch the event LWP. */
2723 *orig_lp = event_lp;
2724 }
2725 }
2726
2727 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2728 non-NULL. */
2729
2730 static void
2731 unsuspend_all_lwps (struct lwp_info *except)
2732 {
2733 for_each_thread ([&] (thread_info *thread)
2734 {
2735 lwp_info *lwp = get_thread_lwp (thread);
2736
2737 if (lwp != except)
2738 lwp_suspended_decr (lwp);
2739 });
2740 }
2741
2742 static bool lwp_running (thread_info *thread);
2743
2744 /* Stabilize threads (move out of jump pads).
2745
2746 If a thread is midway collecting a fast tracepoint, we need to
2747 finish the collection and move it out of the jump pad before
2748 reporting the signal.
2749
2750 This avoids recursion while collecting (when a signal arrives
2751 midway, and the signal handler itself collects), which would trash
2752 the trace buffer. In case the user set a breakpoint in a signal
2753 handler, this avoids the backtrace showing the jump pad, etc..
2754 Most importantly, there are certain things we can't do safely if
2755 threads are stopped in a jump pad (or in its callee's). For
2756 example:
2757
2758 - starting a new trace run. A thread still collecting from the
2759 previous run could trash the trace buffer when resumed. The
2760 trace buffer control structures would have been reset but the
2761 thread had no way to tell. The thread could even be midway
2762 through memcpy'ing to the buffer, which would mean that when
2763 resumed, it would clobber the trace buffer set up for the new run.
2764
2765 - we can't rewrite/reuse the jump pads for new tracepoints
2766 safely. Say you do tstart while a thread is stopped midway through
2767 a collection. When the thread is later resumed, it finishes the
2768 collection, and returns to the jump pad, to execute the original
2769 instruction that was under the tracepoint jump at the time the
2770 older run had been started. If the jump pad had been rewritten
2771 since for something else in the new run, the thread would now
2772 execute the wrong / random instructions. */
2773
2774 void
2775 linux_process_target::stabilize_threads ()
2776 {
2777 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2778 {
2779 return stuck_in_jump_pad (thread);
2780 });
2781
2782 if (thread_stuck != NULL)
2783 {
2784 if (debug_threads)
2785 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2786 lwpid_of (thread_stuck));
2787 return;
2788 }
2789
2790 scoped_restore_current_thread restore_thread;
2791
2792 stabilizing_threads = 1;
2793
2794 /* Kick 'em all. */
2795 for_each_thread ([this] (thread_info *thread)
2796 {
2797 move_out_of_jump_pad (thread);
2798 });
2799
2800 /* Loop until all are stopped out of the jump pads. */
2801 while (find_thread (lwp_running) != NULL)
2802 {
2803 struct target_waitstatus ourstatus;
2804 struct lwp_info *lwp;
2805 int wstat;
2806
2807 /* Note that we go through the full wait event loop. While
2808 moving threads out of the jump pad, we need to be able to step
2809 over internal breakpoints and such. */
2810 wait_1 (minus_one_ptid, &ourstatus, 0);
2811
2812 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2813 {
2814 lwp = get_thread_lwp (current_thread);
2815
2816 /* Lock it. */
2817 lwp_suspended_inc (lwp);
2818
2819 if (ourstatus.sig () != GDB_SIGNAL_0
2820 || current_thread->last_resume_kind == resume_stop)
2821 {
2822 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2823 enqueue_one_deferred_signal (lwp, &wstat);
2824 }
2825 }
2826 }
2827
2828 unsuspend_all_lwps (NULL);
2829
2830 stabilizing_threads = 0;
2831
2832 if (debug_threads)
2833 {
2834 thread_stuck = find_thread ([this] (thread_info *thread)
2835 {
2836 return stuck_in_jump_pad (thread);
2837 });
2838
2839 if (thread_stuck != NULL)
2840 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2841 lwpid_of (thread_stuck));
2842 }
2843 }
2844
2845 /* Convenience function that is called when the kernel reports an
2846 event that is not passed out to GDB. */
2847
2848 static ptid_t
2849 ignore_event (struct target_waitstatus *ourstatus)
2850 {
2851 /* If we got an event, there may still be others, as a single
2852 SIGCHLD can indicate more than one child stopped. This forces
2853 another target_wait call. */
2854 async_file_mark ();
2855
2856 ourstatus->set_ignore ();
2857 return null_ptid;
2858 }
2859
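/* Handle EVENT_CHILD's exit event.  If it was not the last thread
   of its process, report a thread exit (when the client asked for
   thread events) or ignore the event, and delete the LWP; process
   exits are passed through.  Return the ptid of the exited
   thread.  */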
2860 ptid_t
2861 linux_process_target::filter_exit_event (lwp_info *event_child,
2862 target_waitstatus *ourstatus)
2863 {
2864 client_state &cs = get_client_state ();
2865 struct thread_info *thread = get_lwp_thread (event_child);
2866 ptid_t ptid = ptid_of (thread);
2867
2868 if (!last_thread_of_process_p (pid_of (thread)))
2869 {
2870 if (cs.report_thread_events)
2871 ourstatus->set_thread_exited (0);
2872 else
2873 ourstatus->set_ignore ();
2874
2875 delete_lwp (event_child);
2876 }
2877 return ptid;
2878 }
2879
2880 /* Returns 1 if GDB is interested in any event_child syscalls. */
2881
2882 static int
2883 gdb_catching_syscalls_p (struct lwp_info *event_child)
2884 {
2885 struct thread_info *thread = get_lwp_thread (event_child);
2886 struct process_info *proc = get_thread_process (thread);
2887
2888 return !proc->syscalls_to_catch.empty ();
2889 }
2890
2891 bool
2892 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2893 {
2894 int sysno;
2895 struct thread_info *thread = get_lwp_thread (event_child);
2896 struct process_info *proc = get_thread_process (thread);
2897
2898 if (proc->syscalls_to_catch.empty ())
2899 return false;
2900
2901 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2902 return true;
2903
2904 get_syscall_trapinfo (event_child, &sysno);
2905
2906 for (int iter : proc->syscalls_to_catch)
2907 if (iter == sysno)
2908 return true;
2909
2910 return false;
2911 }
2912
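/* The guts of the wait target op.  Wait for an event of interest to
   GDB from any of the LWPs matching PTID, fill in *OURSTATUS with
   it, and return the ptid of the event thread.  Return null_ptid
   (with *OURSTATUS set to ignore or no-resumed) when there is
   nothing to report.  */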
2913 ptid_t
2914 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2915 target_wait_flags target_options)
2916 {
2917 client_state &cs = get_client_state ();
2918 int w;
2919 struct lwp_info *event_child;
2920 int options;
2921 int pid;
2922 int step_over_finished;
2923 int bp_explains_trap;
2924 int maybe_internal_trap;
2925 int report_to_gdb;
2926 int trace_event;
2927 int in_step_range;
2928 int any_resumed;
2929
2930 if (debug_threads)
2931 {
2932 debug_enter ();
2933 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid).c_str ());
2934 }
2935
2936 /* Translate generic target options into linux options. */
2937 options = __WALL;
2938 if (target_options & TARGET_WNOHANG)
2939 options |= WNOHANG;
2940
2941 bp_explains_trap = 0;
2942 trace_event = 0;
2943 in_step_range = 0;
2944 ourstatus->set_ignore ();
2945
2946 auto status_pending_p_any = [&] (thread_info *thread)
2947 {
2948 return status_pending_p_callback (thread, minus_one_ptid);
2949 };
2950
2951 auto not_stopped = [&] (thread_info *thread)
2952 {
2953 return not_stopped_callback (thread, minus_one_ptid);
2954 };
2955
2956 /* Find a resumed LWP, if any. */
2957 if (find_thread (status_pending_p_any) != NULL)
2958 any_resumed = 1;
2959 else if (find_thread (not_stopped) != NULL)
2960 any_resumed = 1;
2961 else
2962 any_resumed = 0;
2963
2964 if (step_over_bkpt == null_ptid)
2965 pid = wait_for_event (ptid, &w, options);
2966 else
2967 {
2968 if (debug_threads)
2969 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2970 target_pid_to_str (step_over_bkpt).c_str ());
2971 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2972 }
2973
2974 if (pid == 0 || (pid == -1 && !any_resumed))
2975 {
2976 gdb_assert (target_options & TARGET_WNOHANG);
2977
2978 if (debug_threads)
2979 {
2980 debug_printf ("wait_1 ret = null_ptid, "
2981 "TARGET_WAITKIND_IGNORE\n");
2982 debug_exit ();
2983 }
2984
2985 ourstatus->set_ignore ();
2986 return null_ptid;
2987 }
2988 else if (pid == -1)
2989 {
2990 if (debug_threads)
2991 {
2992 debug_printf ("wait_1 ret = null_ptid, "
2993 "TARGET_WAITKIND_NO_RESUMED\n");
2994 debug_exit ();
2995 }
2996
2997 ourstatus->set_no_resumed ();
2998 return null_ptid;
2999 }
3000
3001 event_child = get_thread_lwp (current_thread);
3002
3003 /* wait_for_event only returns an exit status for the last
3004 child of a process. Report it. */
3005 if (WIFEXITED (w) || WIFSIGNALED (w))
3006 {
3007 if (WIFEXITED (w))
3008 {
3009 ourstatus->set_exited (WEXITSTATUS (w));
3010
3011 if (debug_threads)
3012 {
3013 debug_printf ("wait_1 ret = %s, exited with "
3014 "retcode %d\n",
3015 target_pid_to_str (ptid_of (current_thread)).c_str (),
3016 WEXITSTATUS (w));
3017 debug_exit ();
3018 }
3019 }
3020 else
3021 {
3022 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3023
3024 if (debug_threads)
3025 {
3026 debug_printf ("wait_1 ret = %s, terminated with "
3027 "signal %d\n",
3028 target_pid_to_str (ptid_of (current_thread)).c_str (),
3029 WTERMSIG (w));
3030 debug_exit ();
3031 }
3032 }
3033
3034 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3035 return filter_exit_event (event_child, ourstatus);
3036
3037 return ptid_of (current_thread);
3038 }
3039
3040 /* If a step-over executes a breakpoint instruction, then in the
3041 case of hardware single step it means a gdb/gdbserver breakpoint
3042 had been planted on top of a permanent breakpoint, while in the
3043 case of software single step it may just mean that gdbserver hit
3044 the reinsert breakpoint. The PC has been adjusted by
3045 save_stop_reason to point at the breakpoint address. So in the
3046 case of hardware single step, advance the PC manually past the
3047 breakpoint; in the case of software single step, advance only if
3048 it's not the single_step_breakpoint we are hitting. This
3049 avoids the program trapping a permanent breakpoint
3050 forever. */
3051 if (step_over_bkpt != null_ptid
3052 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3053 && (event_child->stepping
3054 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3055 {
3056 int increment_pc = 0;
3057 int breakpoint_kind = 0;
3058 CORE_ADDR stop_pc = event_child->stop_pc;
3059
3060 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3061 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3062
3063 if (debug_threads)
3064 {
3065 debug_printf ("step-over for %s executed software breakpoint\n",
3066 target_pid_to_str (ptid_of (current_thread)).c_str ());
3067 }
3068
3069 if (increment_pc != 0)
3070 {
3071 struct regcache *regcache
3072 = get_thread_regcache (current_thread, 1);
3073
3074 event_child->stop_pc += increment_pc;
3075 low_set_pc (regcache, event_child->stop_pc);
3076
3077 if (!low_breakpoint_at (event_child->stop_pc))
3078 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3079 }
3080 }
3081
3082 /* If this event was not handled before, and is not a SIGTRAP, we
3083 report it. SIGILL and SIGSEGV are also treated as traps in case
3084 a breakpoint is inserted at the current PC. If this target does
3085 not support internal breakpoints at all, we also report the
3086 SIGTRAP without further processing; it's of no concern to us. */
3087 maybe_internal_trap
3088 = (low_supports_breakpoints ()
3089 && (WSTOPSIG (w) == SIGTRAP
3090 || ((WSTOPSIG (w) == SIGILL
3091 || WSTOPSIG (w) == SIGSEGV)
3092 && low_breakpoint_at (event_child->stop_pc))));
3093
3094 if (maybe_internal_trap)
3095 {
3096 /* Handle anything that requires bookkeeping before deciding to
3097 report the event or continue waiting. */
3098
3099 /* First check if we can explain the SIGTRAP with an internal
3100 breakpoint, or if we should possibly report the event to GDB.
3101 Do this before anything that may remove or insert a
3102 breakpoint. */
3103 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3104
3105 /* We have a SIGTRAP, possibly a step-over dance has just
3106 finished. If so, tweak the state machine accordingly,
3107 reinsert breakpoints and delete any single-step
3108 breakpoints. */
3109 step_over_finished = finish_step_over (event_child);
3110
3111 /* Now invoke the callbacks of any internal breakpoints there. */
3112 check_breakpoints (event_child->stop_pc);
3113
3114 /* Handle tracepoint data collecting. This may overflow the
3115 trace buffer, and cause a tracing stop, removing
3116 breakpoints. */
3117 trace_event = handle_tracepoints (event_child);
3118
3119 if (bp_explains_trap)
3120 {
3121 if (debug_threads)
3122 debug_printf ("Hit a gdbserver breakpoint.\n");
3123 }
3124 }
3125 else
3126 {
3127 /* We have some other signal, possibly a step-over dance was in
3128 progress, and it should be cancelled too. */
3129 step_over_finished = finish_step_over (event_child);
3130 }
3131
3132 /* We have all the data we need. Either report the event to GDB, or
3133 resume threads and keep waiting for more. */
3134
3135 /* If we're collecting a fast tracepoint, finish the collection and
3136 move out of the jump pad before delivering a signal. See
3137 stabilize_threads. */
3138
3139 if (WIFSTOPPED (w)
3140 && WSTOPSIG (w) != SIGTRAP
3141 && supports_fast_tracepoints ()
3142 && agent_loaded_p ())
3143 {
3144 if (debug_threads)
3145 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3146 "to defer or adjust it.\n",
3147 WSTOPSIG (w), lwpid_of (current_thread));
3148
3149 /* Allow debugging the jump pad itself. */
3150 if (current_thread->last_resume_kind != resume_step
3151 && maybe_move_out_of_jump_pad (event_child, &w))
3152 {
3153 enqueue_one_deferred_signal (event_child, &w);
3154
3155 if (debug_threads)
3156 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3157 WSTOPSIG (w), lwpid_of (current_thread));
3158
3159 resume_one_lwp (event_child, 0, 0, NULL);
3160
3161 if (debug_threads)
3162 debug_exit ();
3163 return ignore_event (ourstatus);
3164 }
3165 }
3166
3167 if (event_child->collecting_fast_tracepoint
3168 != fast_tpoint_collect_result::not_collecting)
3169 {
3170 if (debug_threads)
3171 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3172 "Check if we're already there.\n",
3173 lwpid_of (current_thread),
3174 (int) event_child->collecting_fast_tracepoint);
3175
3176 trace_event = 1;
3177
3178 event_child->collecting_fast_tracepoint
3179 = linux_fast_tracepoint_collecting (event_child, NULL);
3180
3181 if (event_child->collecting_fast_tracepoint
3182 != fast_tpoint_collect_result::before_insn)
3183 {
3184 /* No longer need this breakpoint. */
3185 if (event_child->exit_jump_pad_bkpt != NULL)
3186 {
3187 if (debug_threads)
3188 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3189 "stopping all threads momentarily.\n");
3190
3191 /* Other running threads could hit this breakpoint.
3192 We don't handle moribund locations like GDB does,
3193 instead we always pause all threads when removing
3194 breakpoints, so that any step-over or
3195 decr_pc_after_break adjustment is always taken
3196 care of while the breakpoint is still
3197 inserted. */
3198 stop_all_lwps (1, event_child);
3199
3200 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3201 event_child->exit_jump_pad_bkpt = NULL;
3202
3203 unstop_all_lwps (1, event_child);
3204
3205 gdb_assert (event_child->suspended >= 0);
3206 }
3207 }
3208
3209 if (event_child->collecting_fast_tracepoint
3210 == fast_tpoint_collect_result::not_collecting)
3211 {
3212 if (debug_threads)
3213 debug_printf ("fast tracepoint finished "
3214 "collecting successfully.\n");
3215
3216 /* We may have a deferred signal to report. */
3217 if (dequeue_one_deferred_signal (event_child, &w))
3218 {
3219 if (debug_threads)
3220 debug_printf ("dequeued one signal.\n");
3221 }
3222 else
3223 {
3224 if (debug_threads)
3225 debug_printf ("no deferred signals.\n");
3226
3227 if (stabilizing_threads)
3228 {
3229 ourstatus->set_stopped (GDB_SIGNAL_0);
3230
3231 if (debug_threads)
3232 {
3233 debug_printf ("wait_1 ret = %s, stopped "
3234 "while stabilizing threads\n",
3235 target_pid_to_str
3236 (ptid_of (current_thread)).c_str ());
3237 debug_exit ();
3238 }
3239
3240 return ptid_of (current_thread);
3241 }
3242 }
3243 }
3244 }
3245
3246 /* Check whether GDB would be interested in this event. */
3247
3248 /* Check if GDB is interested in this syscall. */
3249 if (WIFSTOPPED (w)
3250 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3251 && !gdb_catch_this_syscall (event_child))
3252 {
3253 if (debug_threads)
3254 {
3255 debug_printf ("Ignored syscall for LWP %ld.\n",
3256 lwpid_of (current_thread));
3257 }
3258
3259 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3260
3261 if (debug_threads)
3262 debug_exit ();
3263 return ignore_event (ourstatus);
3264 }
3265
3266 /* If GDB is not interested in this signal, don't stop other
3267 threads, and don't report it to GDB. Just resume the inferior
3268 right away. We do this for threading-related signals as well as
3269 any that GDB specifically requested we ignore. But never ignore
3270 SIGSTOP if we sent it ourselves, and do not ignore signals when
3271 stepping - they may require special handling to skip the signal
3272 handler. Also never ignore signals that could be caused by a
3273 breakpoint. */
3274 if (WIFSTOPPED (w)
3275 && current_thread->last_resume_kind != resume_step
3276 && (
3277 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3278 (current_process ()->priv->thread_db != NULL
3279 && (WSTOPSIG (w) == __SIGRTMIN
3280 || WSTOPSIG (w) == __SIGRTMIN + 1))
3281 ||
3282 #endif
3283 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3284 && !(WSTOPSIG (w) == SIGSTOP
3285 && current_thread->last_resume_kind == resume_stop)
3286 && !linux_wstatus_maybe_breakpoint (w))))
3287 {
3288 siginfo_t info, *info_p;
3289
3290 if (debug_threads)
3291 debug_printf ("Ignored signal %d for LWP %ld.\n",
3292 WSTOPSIG (w), lwpid_of (current_thread));
3293
3294 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3295 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3296 info_p = &info;
3297 else
3298 info_p = NULL;
3299
3300 if (step_over_finished)
3301 {
3302 /* We cancelled this thread's step-over above. We still
3303 need to unsuspend all other LWPs, and set them back
3304 running again while the signal handler runs. */
3305 unsuspend_all_lwps (event_child);
3306
3307 /* Enqueue the pending signal info so that proceed_all_lwps
3308 doesn't lose it. */
3309 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3310
3311 proceed_all_lwps ();
3312 }
3313 else
3314 {
3315 resume_one_lwp (event_child, event_child->stepping,
3316 WSTOPSIG (w), info_p);
3317 }
3318
3319 if (debug_threads)
3320 debug_exit ();
3321
3322 return ignore_event (ourstatus);
3323 }
3324
3325 /* Note that all addresses are always "out of the step range" when
3326 there's no range to begin with. */
3327 in_step_range = lwp_in_step_range (event_child);
3328
3329 /* If GDB wanted this thread to single step, and the thread is out
3330 of the step range, we always want to report the SIGTRAP, and let
3331 GDB handle it. Watchpoints should always be reported. So should
3332 signals we can't explain. A SIGTRAP we can't explain could be a
3333 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3334 we do, we'd be able to handle GDB breakpoints on top of internal
3335 breakpoints, by handling the internal breakpoint and still
3336 reporting the event to GDB. If we don't, we're out of luck, GDB
3337 won't see the breakpoint hit. If we see a single-step event but
3338 the thread should be continuing, don't pass the trap to gdb.
3339 That indicates that we had previously finished a single-step but
3340 left the single-step pending -- see
3341 complete_ongoing_step_over. */
3342 report_to_gdb = (!maybe_internal_trap
3343 || (current_thread->last_resume_kind == resume_step
3344 && !in_step_range)
3345 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3346 || (!in_step_range
3347 && !bp_explains_trap
3348 && !trace_event
3349 && !step_over_finished
3350 && !(current_thread->last_resume_kind == resume_continue
3351 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3352 || (gdb_breakpoint_here (event_child->stop_pc)
3353 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3354 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3355 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3356
3357 run_breakpoint_commands (event_child->stop_pc);
3358
3359 /* We found no reason GDB would want us to stop. We either hit one
3360 of our own breakpoints, or finished an internal step GDB
3361 shouldn't know about. */
3362 if (!report_to_gdb)
3363 {
3364 if (debug_threads)
3365 {
3366 if (bp_explains_trap)
3367 debug_printf ("Hit a gdbserver breakpoint.\n");
3368 if (step_over_finished)
3369 debug_printf ("Step-over finished.\n");
3370 if (trace_event)
3371 debug_printf ("Tracepoint event.\n");
3372 if (lwp_in_step_range (event_child))
3373 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3374 paddress (event_child->stop_pc),
3375 paddress (event_child->step_range_start),
3376 paddress (event_child->step_range_end));
3377 }
3378
3379 /* We're not reporting this breakpoint to GDB, so apply the
3380 decr_pc_after_break adjustment to the inferior's regcache
3381 ourselves. */
3382
3383 if (low_supports_breakpoints ())
3384 {
3385 struct regcache *regcache
3386 = get_thread_regcache (current_thread, 1);
3387 low_set_pc (regcache, event_child->stop_pc);
3388 }
3389
3390 if (step_over_finished)
3391 {
3392 /* If we have finished stepping over a breakpoint, we've
3393 stopped and suspended all LWPs momentarily except the
3394 stepping one. This is where we resume them all again.
3395 We're going to keep waiting, so use proceed, which
3396 handles stepping over the next breakpoint. */
3397 unsuspend_all_lwps (event_child);
3398 }
3399 else
3400 {
3401 /* Remove the single-step breakpoints if any. Note that
3402 there aren't any single-step breakpoints if we finished stepping
3403 over. */
3404 if (supports_software_single_step ()
3405 && has_single_step_breakpoints (current_thread))
3406 {
3407 stop_all_lwps (0, event_child);
3408 delete_single_step_breakpoints (current_thread);
3409 unstop_all_lwps (0, event_child);
3410 }
3411 }
3412
3413 if (debug_threads)
3414 debug_printf ("proceeding all threads.\n");
3415 proceed_all_lwps ();
3416
3417 if (debug_threads)
3418 debug_exit ();
3419
3420 return ignore_event (ourstatus);
3421 }
3422
3423 if (debug_threads)
3424 {
3425 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3426 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3427 lwpid_of (get_lwp_thread (event_child)),
3428 event_child->waitstatus.to_string ().c_str ());
3429 if (current_thread->last_resume_kind == resume_step)
3430 {
3431 if (event_child->step_range_start == event_child->step_range_end)
3432 debug_printf ("GDB wanted to single-step, reporting event.\n");
3433 else if (!lwp_in_step_range (event_child))
3434 debug_printf ("Out of step range, reporting event.\n");
3435 }
3436 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3437 debug_printf ("Stopped by watchpoint.\n");
3438 else if (gdb_breakpoint_here (event_child->stop_pc))
3439 debug_printf ("Stopped by GDB breakpoint.\n");
3441 debug_printf ("Hit a non-gdbserver trap event.\n");
3442 }
3443
3444 /* Alright, we're going to report a stop. */
3445
3446 /* Remove single-step breakpoints. */
3447 if (supports_software_single_step ())
3448 {
3449 /* Whether to remove single-step breakpoints. If true, stop all
3450 lwps, so that other threads won't hit the breakpoint left in
3451 stale memory. */
3452 int remove_single_step_breakpoints_p = 0;
3453
3454 if (non_stop)
3455 {
3456 remove_single_step_breakpoints_p
3457 = has_single_step_breakpoints (current_thread);
3458 }
3459 else
3460 {
3461 /* In all-stop, a stop reply cancels all previous resume
3462 requests. Delete all single-step breakpoints. */
3463
3464 find_thread ([&] (thread_info *thread) {
3465 if (has_single_step_breakpoints (thread))
3466 {
3467 remove_single_step_breakpoints_p = 1;
3468 return true;
3469 }
3470
3471 return false;
3472 });
3473 }
3474
3475 if (remove_single_step_breakpoints_p)
3476 {
3477 /* If we remove single-step breakpoints from memory, stop all lwps,
3478 so that other threads won't hit the breakpoint in the stale
3479 memory. */
3480 stop_all_lwps (0, event_child);
3481
3482 if (non_stop)
3483 {
3484 gdb_assert (has_single_step_breakpoints (current_thread));
3485 delete_single_step_breakpoints (current_thread);
3486 }
3487 else
3488 {
3489 for_each_thread ([] (thread_info *thread){
3490 if (has_single_step_breakpoints (thread))
3491 delete_single_step_breakpoints (thread);
3492 });
3493 }
3494
3495 unstop_all_lwps (0, event_child);
3496 }
3497 }
3498
3499 if (!stabilizing_threads)
3500 {
3501 /* In all-stop, stop all threads. */
3502 if (!non_stop)
3503 stop_all_lwps (0, NULL);
3504
3505 if (step_over_finished)
3506 {
3507 if (!non_stop)
3508 {
3509 /* If we were doing a step-over, all other threads but
3510 the stepping one had been paused in start_step_over,
3511 with their suspend counts incremented. We don't want
3512 to do a full unstop/unpause, because we're in
3513 all-stop mode (so we want threads stopped), but we
3514 still need to unsuspend the other threads, to
3515 decrement their `suspended' count back. */
3516 unsuspend_all_lwps (event_child);
3517 }
3518 else
3519 {
3520 /* If we just finished a step-over, then all threads had
3521 been momentarily paused. In all-stop, that's fine,
3522 we want threads stopped by now anyway. In non-stop,
3523 we need to re-resume threads that GDB wanted to be
3524 running. */
3525 unstop_all_lwps (1, event_child);
3526 }
3527 }
3528
3529 /* If we're not waiting for a specific LWP, choose an event LWP
3530 from among those that have had events. Giving equal priority
3531 to all LWPs that have had events helps prevent
3532 starvation. */
3533 if (ptid == minus_one_ptid)
3534 {
3535 event_child->status_pending_p = 1;
3536 event_child->status_pending = w;
3537
3538 select_event_lwp (&event_child);
3539
3540 /* current_thread and event_child must stay in sync. */
3541 switch_to_thread (get_lwp_thread (event_child));
3542
3543 event_child->status_pending_p = 0;
3544 w = event_child->status_pending;
3545 }
3546
3547
3548 /* Stabilize threads (move out of jump pads). */
3549 if (!non_stop)
3550 target_stabilize_threads ();
3551 }
3552 else
3553 {
3554 /* If we just finished a step-over, then all threads had been
3555 momentarily paused. In all-stop, that's fine, we want
3556 threads stopped by now anyway. In non-stop, we need to
3557 re-resume threads that GDB wanted to be running. */
3558 if (step_over_finished)
3559 unstop_all_lwps (1, event_child);
3560 }
3561
3562 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3563 {
3564 /* If the reported event is an exit, fork, vfork or exec, let
3565 GDB know. */
3566
3567 /* Break the unreported fork relationship chain. */
3568 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3569 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3570 {
3571 event_child->fork_relative->fork_relative = NULL;
3572 event_child->fork_relative = NULL;
3573 }
3574
3575 *ourstatus = event_child->waitstatus;
3576 /* Clear the event lwp's waitstatus since we handled it already. */
3577 event_child->waitstatus.set_ignore ();
3578 }
3579 else
3580 {
3581 /* The actual stop signal is overwritten below. */
3582 ourstatus->set_stopped (GDB_SIGNAL_0);
3583 }
3584
3585 /* Now that we've selected our final event LWP, un-adjust its PC if
3586 it was a software breakpoint, and the client doesn't know we can
3587 adjust the breakpoint ourselves. */
3588 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3589 && !cs.swbreak_feature)
3590 {
3591 int decr_pc = low_decr_pc_after_break ();
3592
3593 if (decr_pc != 0)
3594 {
3595 struct regcache *regcache
3596 = get_thread_regcache (current_thread, 1);
3597 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3598 }
3599 }
3600
3601 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3602 {
3603 int syscall_number;
3604
3605 get_syscall_trapinfo (event_child, &syscall_number);
3606 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3607 ourstatus->set_syscall_entry (syscall_number);
3608 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3609 ourstatus->set_syscall_return (syscall_number);
3610 else
3611 gdb_assert_not_reached ("unexpected syscall state");
3612 }
3613 else if (current_thread->last_resume_kind == resume_stop
3614 && WSTOPSIG (w) == SIGSTOP)
3615 {
3616 /* A thread that has been requested to stop by GDB with vCont;t,
3617 and it stopped cleanly, so report as SIG0. The use of
3618 SIGSTOP is an implementation detail. */
3619 ourstatus->set_stopped (GDB_SIGNAL_0);
3620 }
3621 else if (current_thread->last_resume_kind == resume_stop
3622 && WSTOPSIG (w) != SIGSTOP)
3623 {
3624 /* A thread that has been requested to stop by GDB with vCont;t,
3625 but it stopped for other reasons. */
3626 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3627 }
3628 else if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
3629 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3630
3631 gdb_assert (step_over_bkpt == null_ptid);
3632
3633 if (debug_threads)
3634 {
3635 debug_printf ("wait_1 ret = %s, %d, %d\n",
3636 target_pid_to_str (ptid_of (current_thread)).c_str (),
3637 ourstatus->kind (), ourstatus->sig ());
3638 debug_exit ();
3639 }
3640
3641 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3642 return filter_exit_event (event_child, ourstatus);
3643
3644 return ptid_of (current_thread);
3645 }
3646
3647 /* Get rid of any pending event in the pipe. */
3648 static void
3649 async_file_flush (void)
3650 {
3651 int ret;
3652 char buf;
3653
3654 do
3655 ret = read (linux_event_pipe[0], &buf, 1);
3656 while (ret >= 0 || (ret == -1 && errno == EINTR));
3657 }
3658
3659 /* Put something in the pipe, so the event loop wakes up. */
3660 static void
3661 async_file_mark (void)
3662 {
3663 int ret;
3664
3665 async_file_flush ();
3666
3667 do
3668 ret = write (linux_event_pipe[1], "+", 1);
3669 while (ret == 0 || (ret == -1 && errno == EINTR));
3670
3671 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3672 be awakened anyway. */
3673 }
3674
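/* Implementation of the wait target op.  Loop over wait_1 until it
   reports something, unless TARGET_WNOHANG was requested, and keep
   the async event pipe in sync with any pending events.  */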
3675 ptid_t
3676 linux_process_target::wait (ptid_t ptid,
3677 target_waitstatus *ourstatus,
3678 target_wait_flags target_options)
3679 {
3680 ptid_t event_ptid;
3681
3682 /* Flush the async file first. */
3683 if (target_is_async_p ())
3684 async_file_flush ();
3685
3686 do
3687 {
3688 event_ptid = wait_1 (ptid, ourstatus, target_options);
3689 }
3690 while ((target_options & TARGET_WNOHANG) == 0
3691 && event_ptid == null_ptid
3692 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3693
3694 /* If at least one stop was reported, there may be more. A single
3695 SIGCHLD can signal more than one child stop. */
3696 if (target_is_async_p ()
3697 && (target_options & TARGET_WNOHANG) != 0
3698 && event_ptid != null_ptid)
3699 async_file_mark ();
3700
3701 return event_ptid;
3702 }
3703
3704 /* Send a signal to an LWP. */
3705
3706 static int
3707 kill_lwp (unsigned long lwpid, int signo)
3708 {
3709 int ret;
3710
3711 errno = 0;
3712 ret = syscall (__NR_tkill, lwpid, signo);
3713 if (errno == ENOSYS)
3714 {
3715 /* If tkill fails, then we are not using nptl threads, a
3716 configuration we no longer support. */
3717 perror_with_name (("tkill"));
3718 }
3719 return ret;
3720 }
3721
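/* Stop LWP by sending it a SIGSTOP, unless a stop is already
   pending.  */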
3722 void
3723 linux_stop_lwp (struct lwp_info *lwp)
3724 {
3725 send_sigstop (lwp);
3726 }
3727
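/* Send a SIGSTOP to LWP, unless one is already pending, and note
   that a stop is now expected from it.  */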
3728 static void
3729 send_sigstop (struct lwp_info *lwp)
3730 {
3731 int pid;
3732
3733 pid = lwpid_of (get_lwp_thread (lwp));
3734
3735 /* If we already have a pending stop signal for this process, don't
3736 send another. */
3737 if (lwp->stop_expected)
3738 {
3739 if (debug_threads)
3740 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3741
3742 return;
3743 }
3744
3745 if (debug_threads)
3746 debug_printf ("Sending sigstop to lwp %d\n", pid);
3747
3748 lwp->stop_expected = 1;
3749 kill_lwp (pid, SIGSTOP);
3750 }
3751
3752 static void
3753 send_sigstop (thread_info *thread, lwp_info *except)
3754 {
3755 struct lwp_info *lwp = get_thread_lwp (thread);
3756
3757 /* Ignore EXCEPT. */
3758 if (lwp == except)
3759 return;
3760
3761 if (lwp->stopped)
3762 return;
3763
3764 send_sigstop (lwp);
3765 }
3766
3767 /* Increment the suspend count of an LWP, and stop it, if not stopped
3768 yet. */
3769 static void
3770 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3771 {
3772 struct lwp_info *lwp = get_thread_lwp (thread);
3773
3774 /* Ignore EXCEPT. */
3775 if (lwp == except)
3776 return;
3777
3778 lwp_suspended_inc (lwp);
3779
3780 send_sigstop (thread, except);
3781 }
3782
3783 static void
3784 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3785 {
3786 /* Store the exit status for later. */
3787 lwp->status_pending_p = 1;
3788 lwp->status_pending = wstat;
3789
3790 /* Store in waitstatus as well, as there's nothing else to process
3791 for this event. */
3792 if (WIFEXITED (wstat))
3793 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3794 else if (WIFSIGNALED (wstat))
3795 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3796
3797 /* Prevent trying to stop it. */
3798 lwp->stopped = 1;
3799
3800 /* No further stops are expected from a dead lwp. */
3801 lwp->stop_expected = 0;
3802 }
3803
3804 /* Return true if LWP has exited already, and has a pending exit event
3805 to report to GDB. */
3806
3807 static int
3808 lwp_is_marked_dead (struct lwp_info *lwp)
3809 {
3810 return (lwp->status_pending_p
3811 && (WIFEXITED (lwp->status_pending)
3812 || WIFSIGNALED (lwp->status_pending)));
3813 }
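/* For reference, the wait-status encodings tested above come from
   <sys/wait.h>. A worked example using the traditional glibc
   encoding (illustrative only - portable code must always go
   through the macros, never the raw bits):

     int wstat;

     wstat = 0x0300;   // exited with code 3:
                       //   WIFEXITED (wstat) && WEXITSTATUS (wstat) == 3
     wstat = 0x0009;   // terminated by signal 9:
                       //   WIFSIGNALED (wstat) && WTERMSIG (wstat) == SIGKILL
     wstat = 0x057f;   // stopped by signal 5:
                       //   WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP

   mark_lwp_dead above stores only the first two shapes; a stopped
   status would match neither branch and the LWP stays alive.  */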
3814
3815 void
3816 linux_process_target::wait_for_sigstop ()
3817 {
3818 struct thread_info *saved_thread;
3819 ptid_t saved_tid;
3820 int wstat;
3821 int ret;
3822
3823 saved_thread = current_thread;
3824 if (saved_thread != NULL)
3825 saved_tid = saved_thread->id;
3826 else
3827 saved_tid = null_ptid; /* avoid bogus unused warning */
3828
3829 scoped_restore_current_thread restore_thread;
3830
3831 if (debug_threads)
3832 debug_printf ("wait_for_sigstop: pulling events\n");
3833
3834 /* Passing NULL_PTID as filter indicates we want all events to be
3835 left pending. Eventually this returns when there are no
3836 unwaited-for children left. */
3837 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3838 gdb_assert (ret == -1);
3839
3840 if (saved_thread == NULL || mythread_alive (saved_tid))
3841 return;
3842 else
3843 {
3844 if (debug_threads)
3845 debug_printf ("Previously current thread died.\n");
3846
3847 /* We can't change the current inferior behind GDB's back;
3848 otherwise, a subsequent command may apply to the wrong
3849 process. */
3850 restore_thread.dont_restore ();
3851 switch_to_thread (nullptr);
3852 }
3853 }
3854
3855 bool
3856 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3857 {
3858 struct lwp_info *lwp = get_thread_lwp (thread);
3859
3860 if (lwp->suspended != 0)
3861 {
3862 internal_error (__FILE__, __LINE__,
3863 "LWP %ld is suspended, suspended=%d\n",
3864 lwpid_of (thread), lwp->suspended);
3865 }
3866 gdb_assert (lwp->stopped);
3867
3868 /* Allow debugging the jump pad, gdb_collect, etc. */
3869 return (supports_fast_tracepoints ()
3870 && agent_loaded_p ()
3871 && (gdb_breakpoint_here (lwp->stop_pc)
3872 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3873 || thread->last_resume_kind == resume_step)
3874 && (linux_fast_tracepoint_collecting (lwp, NULL)
3875 != fast_tpoint_collect_result::not_collecting));
3876 }
3877
3878 void
3879 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3880 {
3881 struct lwp_info *lwp = get_thread_lwp (thread);
3882 int *wstat;
3883
3884 if (lwp->suspended != 0)
3885 {
3886 internal_error (__FILE__, __LINE__,
3887 "LWP %ld is suspended, suspended=%d\n",
3888 lwpid_of (thread), lwp->suspended);
3889 }
3890 gdb_assert (lwp->stopped);
3891
3892 /* For gdb_breakpoint_here. */
3893 scoped_restore_current_thread restore_thread;
3894 switch_to_thread (thread);
3895
3896 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3897
3898 /* Allow debugging the jump pad, gdb_collect, etc. */
3899 if (!gdb_breakpoint_here (lwp->stop_pc)
3900 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3901 && thread->last_resume_kind != resume_step
3902 && maybe_move_out_of_jump_pad (lwp, wstat))
3903 {
3904 if (debug_threads)
3905 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3906 lwpid_of (thread));
3907
3908 if (wstat)
3909 {
3910 lwp->status_pending_p = 0;
3911 enqueue_one_deferred_signal (lwp, wstat);
3912
3913 if (debug_threads)
3914 debug_printf ("Signal %d for LWP %ld deferred "
3915 "(in jump pad)\n",
3916 WSTOPSIG (*wstat), lwpid_of (thread));
3917 }
3918
3919 resume_one_lwp (lwp, 0, 0, NULL);
3920 }
3921 else
3922 lwp_suspended_inc (lwp);
3923 }
3924
3925 static bool
3926 lwp_running (thread_info *thread)
3927 {
3928 struct lwp_info *lwp = get_thread_lwp (thread);
3929
3930 if (lwp_is_marked_dead (lwp))
3931 return false;
3932
3933 return !lwp->stopped;
3934 }
3935
3936 void
3937 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3938 {
3939 /* Should not be called recursively. */
3940 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3941
3942 if (debug_threads)
3943 {
3944 debug_enter ();
3945 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3946 suspend ? "stop-and-suspend" : "stop",
3947 (except != NULL
3948 ? target_pid_to_str
3949 (ptid_of (get_lwp_thread (except))).c_str ()
3950 : "none"));
3951 }
3952
3953 stopping_threads = (suspend
3954 ? STOPPING_AND_SUSPENDING_THREADS
3955 : STOPPING_THREADS);
3956
3957 if (suspend)
3958 for_each_thread ([&] (thread_info *thread)
3959 {
3960 suspend_and_send_sigstop (thread, except);
3961 });
3962 else
3963 for_each_thread ([&] (thread_info *thread)
3964 {
3965 send_sigstop (thread, except);
3966 });
3967
3968 wait_for_sigstop ();
3969 stopping_threads = NOT_STOPPING_THREADS;
3970
3971 if (debug_threads)
3972 {
3973 debug_printf ("stop_all_lwps done, setting stopping_threads "
3974 "back to !stopping\n");
3975 debug_exit ();
3976 }
3977 }
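/* In effect, stop_all_lwps is a two-phase barrier: phase one sends
   SIGSTOP to (and, in stop-and-suspend mode, suspends) every LWP
   other than EXCEPT; phase two, wait_for_sigstop, reaps wait
   statuses until no unwaited-for children remain, leaving all
   events pending. Callers typically undo it later with
   unstop_all_lwps, passing matching UNSUSPEND/EXCEPT arguments.  */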
3978
3979 /* Enqueue one signal in the chain of signals which need to be
3980 delivered to this process on next resume. */
3981
3982 static void
3983 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3984 {
3985 lwp->pending_signals.emplace_back (signal);
3986 if (info == nullptr)
3987 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3988 else
3989 lwp->pending_signals.back ().info = *info;
3990 }
3991
3992 void
3993 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3994 {
3995 struct thread_info *thread = get_lwp_thread (lwp);
3996 struct regcache *regcache = get_thread_regcache (thread, 1);
3997
3998 scoped_restore_current_thread restore_thread;
3999
4000 switch_to_thread (thread);
4001 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4002
4003 for (CORE_ADDR pc : next_pcs)
4004 set_single_step_breakpoint (pc, current_ptid);
4005 }
4006
4007 int
4008 linux_process_target::single_step (lwp_info* lwp)
4009 {
4010 int step = 0;
4011
4012 if (supports_hardware_single_step ())
4013 {
4014 step = 1;
4015 }
4016 else if (supports_software_single_step ())
4017 {
4018 install_software_single_step_breakpoints (lwp);
4019 step = 0;
4020 }
4021 else
4022 {
4023 if (debug_threads)
4024 debug_printf ("stepping is not implemented on this target");
4025 }
4026
4027 return step;
4028 }
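/* The flag computed above ultimately selects the ptrace resume
   request in resume_one_lwp_throw further down. A condensed sketch
   of that dispatch (illustrative; LWPID, SIGNAL and
   CATCHING_SYSCALLS are placeholders for the real state consulted
   there):

     long request = step ? PTRACE_SINGLESTEP
                         : (catching_syscalls ? PTRACE_SYSCALL
                                              : PTRACE_CONT);
     ptrace (request, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) signal);

   On software single-step targets STEP stays 0: the "step" is
   emulated by the single-step breakpoints installed above, and the
   LWP is simply continued onto them.  */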
4029
4030 /* The signal can be delivered to the inferior if we are not trying to
4031 finish a fast tracepoint collect. Since a signal can be delivered
4032 during the step-over, the program may go to the signal handler and
4033 trap again after returning from the signal handler. We can live
4034 with such spurious double traps. */
4035
4036 static int
4037 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4038 {
4039 return (lwp->collecting_fast_tracepoint
4040 == fast_tpoint_collect_result::not_collecting);
4041 }
4042
4043 void
4044 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4045 int signal, siginfo_t *info)
4046 {
4047 struct thread_info *thread = get_lwp_thread (lwp);
4048 int ptrace_request;
4049 struct process_info *proc = get_thread_process (thread);
4050
4051 /* Note that the target description may not be initialised
4052 (proc->tdesc == NULL) at this point, because the program hasn't
4053 stopped at the first instruction yet. This means GDBserver skips
4054 the extra traps from the wrapper program (see option --wrapper).
4055 Code in this function that requires register access should be
4056 guarded by a check that proc->tdesc is not NULL, or similar. */
4057
4058 if (lwp->stopped == 0)
4059 return;
4060
4061 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
4062
4063 fast_tpoint_collect_result fast_tp_collecting
4064 = lwp->collecting_fast_tracepoint;
4065
4066 gdb_assert (!stabilizing_threads
4067 || (fast_tp_collecting
4068 != fast_tpoint_collect_result::not_collecting));
4069
4070 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4071 user used the "jump" command, or "set $pc = foo"). */
4072 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4073 {
4074 /* Collecting 'while-stepping' actions doesn't make sense
4075 anymore. */
4076 release_while_stepping_state_list (thread);
4077 }
4078
4079 /* If we have pending signals or status, and a new signal, enqueue the
4080 signal. Also enqueue the signal if it can't be delivered to the
4081 inferior right now. */
4082 if (signal != 0
4083 && (lwp->status_pending_p
4084 || !lwp->pending_signals.empty ()
4085 || !lwp_signal_can_be_delivered (lwp)))
4086 {
4087 enqueue_pending_signal (lwp, signal, info);
4088
4089 /* Postpone any pending signal. It was enqueued above. */
4090 signal = 0;
4091 }
4092
4093 if (lwp->status_pending_p)
4094 {
4095 if (debug_threads)
4096 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4097 " has pending status\n",
4098 lwpid_of (thread), step ? "step" : "continue",
4099 lwp->stop_expected ? "expected" : "not expected");
4100 return;
4101 }
4102
4103 scoped_restore_current_thread restore_thread;
4104 switch_to_thread (thread);
4105
4106 /* This bit needs some thinking about. If we get a signal that
4107 we must report while a single-step reinsert is still pending,
4108 we often end up resuming the thread. It might be better to
4109 (ew) allow a stack of pending events; then we could be sure that
4110 the reinsert happened right away and not lose any signals.
4111
4112 Making this stack would also shrink the window in which breakpoints are
4113 uninserted (see comment in linux_wait_for_lwp) but not enough for
4114 complete correctness, so it won't solve that problem. It may be
4115 worthwhile just to solve this one, however. */
4116 if (lwp->bp_reinsert != 0)
4117 {
4118 if (debug_threads)
4119 debug_printf (" pending reinsert at 0x%s\n",
4120 paddress (lwp->bp_reinsert));
4121
4122 if (supports_hardware_single_step ())
4123 {
4124 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4125 {
4126 if (step == 0)
4127 warning ("BAD - reinserting but not stepping.");
4128 if (lwp->suspended)
4129 warning ("BAD - reinserting and suspended(%d).",
4130 lwp->suspended);
4131 }
4132 }
4133
4134 step = maybe_hw_step (thread);
4135 }
4136
4137 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4138 {
4139 if (debug_threads)
4140 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4141 " (exit-jump-pad-bkpt)\n",
4142 lwpid_of (thread));
4143 }
4144 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4145 {
4146 if (debug_threads)
4147 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4148 " single-stepping\n",
4149 lwpid_of (thread));
4150
4151 if (supports_hardware_single_step ())
4152 step = 1;
4153 else
4154 {
4155 internal_error (__FILE__, __LINE__,
4156 "moving out of jump pad single-stepping"
4157 " not implemented on this target");
4158 }
4159 }
4160
4161 /* If we have while-stepping actions in this thread, set it stepping.
4162 If we have a signal to deliver, it may or may not be set to
4163 SIG_IGN; we don't know. Assume so, and allow collecting
4164 while-stepping into a signal handler. A possible smart thing to
4165 do would be to set an internal breakpoint at the signal return
4166 address, continue, and carry on catching this while-stepping
4167 action only when that breakpoint is hit. A future
4168 enhancement. */
4169 if (thread->while_stepping != NULL)
4170 {
4171 if (debug_threads)
4172 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4173 lwpid_of (thread));
4174
4175 step = single_step (lwp);
4176 }
4177
4178 if (proc->tdesc != NULL && low_supports_breakpoints ())
4179 {
4180 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4181
4182 lwp->stop_pc = low_get_pc (regcache);
4183
4184 if (debug_threads)
4185 {
4186 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4187 (long) lwp->stop_pc);
4188 }
4189 }
4190
4191 /* If we have pending signals, consume one if it can be delivered to
4192 the inferior. */
4193 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4194 {
4195 const pending_signal &p_sig = lwp->pending_signals.front ();
4196
4197 signal = p_sig.signal;
4198 if (p_sig.info.si_signo != 0)
4199 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4200 &p_sig.info);
4201
4202 lwp->pending_signals.pop_front ();
4203 }
4204
4205 if (debug_threads)
4206 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4207 lwpid_of (thread), step ? "step" : "continue", signal,
4208 lwp->stop_expected ? "expected" : "not expected");
4209
4210 low_prepare_to_resume (lwp);
4211
4212 regcache_invalidate_thread (thread);
4213 errno = 0;
4214 lwp->stepping = step;
4215 if (step)
4216 ptrace_request = PTRACE_SINGLESTEP;
4217 else if (gdb_catching_syscalls_p (lwp))
4218 ptrace_request = PTRACE_SYSCALL;
4219 else
4220 ptrace_request = PTRACE_CONT;
4221 ptrace (ptrace_request,
4222 lwpid_of (thread),
4223 (PTRACE_TYPE_ARG3) 0,
4224 /* Coerce to a uintptr_t first to avoid potential gcc warning
4225 of coercing an 8 byte integer to a 4 byte pointer. */
4226 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4227
4228 if (errno)
4229 perror_with_name ("resuming thread");
4230
4231 /* Successfully resumed. Clear state that no longer makes sense,
4232 and mark the LWP as running. Must not do this before resuming
4233 otherwise if that fails other code will be confused. E.g., we'd
4234 later try to stop the LWP and hang forever waiting for a stop
4235 status. Note that we must not throw after this is cleared,
4236 otherwise handle_zombie_lwp_error would get confused. */
4237 lwp->stopped = 0;
4238 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4239 }
4240
4241 void
4242 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4243 {
4244 /* Nop. */
4245 }
4246
4247 /* Called when we try to resume a stopped LWP and that errors out. If
4248 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4249 or about to become one), discard the error, clear any pending status
4250 the LWP may have, and return true (we'll collect the exit status
4251 soon enough). Otherwise, return false. */
4252
4253 static int
4254 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4255 {
4256 struct thread_info *thread = get_lwp_thread (lp);
4257
4258 /* If we get an error after resuming the LWP successfully, we'd
4259 confuse !T state for the LWP being gone. */
4260 gdb_assert (lp->stopped);
4261
4262 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4263 because even if ptrace failed with ESRCH, the tracee may be "not
4264 yet fully dead", but already refusing ptrace requests. In that
4265 case the tracee has 'R (Running)' state for a little bit
4266 (observed in Linux 3.18). See also the note on ESRCH in the
4267 ptrace(2) man page. Instead, check whether the LWP has any state
4268 other than ptrace-stopped. */
4269
4270 /* Don't assume anything if /proc/PID/status can't be read. */
4271 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4272 {
4273 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4274 lp->status_pending_p = 0;
4275 return 1;
4276 }
4277 return 0;
4278 }
4279
4280 void
4281 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4282 siginfo_t *info)
4283 {
4284 try
4285 {
4286 resume_one_lwp_throw (lwp, step, signal, info);
4287 }
4288 catch (const gdb_exception_error &ex)
4289 {
4290 if (!check_ptrace_stopped_lwp_gone (lwp))
4291 throw;
4292 }
4293 }
4294
4295 /* This function is called once per thread via for_each_thread.
4296 We look up which resume request applies to THREAD and mark it with a
4297 pointer to the appropriate resume request.
4298
4299 This algorithm is O(threads * resume elements), but resume elements
4300 is small (and will remain small at least until GDB supports thread
4301 suspension). */
4302
4303 static void
4304 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4305 {
4306 struct lwp_info *lwp = get_thread_lwp (thread);
4307
4308 for (int ndx = 0; ndx < n; ndx++)
4309 {
4310 ptid_t ptid = resume[ndx].thread;
4311 if (ptid == minus_one_ptid
4312 || ptid == thread->id
4313 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4314 of PID'. */
4315 || (ptid.pid () == pid_of (thread)
4316 && (ptid.is_pid ()
4317 || ptid.lwp () == -1)))
4318 {
4319 if (resume[ndx].kind == resume_stop
4320 && thread->last_resume_kind == resume_stop)
4321 {
4322 if (debug_threads)
4323 debug_printf ("already %s LWP %ld at GDB's request\n",
4324 (thread->last_status.kind ()
4325 == TARGET_WAITKIND_STOPPED)
4326 ? "stopped"
4327 : "stopping",
4328 lwpid_of (thread));
4329
4330 continue;
4331 }
4332
4333 /* Ignore (wildcard) resume requests for already-resumed
4334 threads. */
4335 if (resume[ndx].kind != resume_stop
4336 && thread->last_resume_kind != resume_stop)
4337 {
4338 if (debug_threads)
4339 debug_printf ("already %s LWP %ld at GDB's request\n",
4340 (thread->last_resume_kind
4341 == resume_step)
4342 ? "stepping"
4343 : "continuing",
4344 lwpid_of (thread));
4345 continue;
4346 }
4347
4348 /* Don't let wildcard resumes resume fork children that GDB
4349 does not yet know are new fork children. */
4350 if (lwp->fork_relative != NULL)
4351 {
4352 struct lwp_info *rel = lwp->fork_relative;
4353
4354 if (rel->status_pending_p
4355 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4356 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4357 {
4358 if (debug_threads)
4359 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4360 lwpid_of (thread));
4361 continue;
4362 }
4363 }
4364
4365 /* If the thread has a pending event that has already been
4366 reported to GDBserver core, but GDB has not pulled the
4367 event out of the vStopped queue yet, likewise, ignore the
4368 (wildcard) resume request. */
4369 if (in_queued_stop_replies (thread->id))
4370 {
4371 if (debug_threads)
4372 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4373 lwpid_of (thread));
4374 continue;
4375 }
4376
4377 lwp->resume = &resume[ndx];
4378 thread->last_resume_kind = lwp->resume->kind;
4379
4380 lwp->step_range_start = lwp->resume->step_range_start;
4381 lwp->step_range_end = lwp->resume->step_range_end;
4382
4383 /* If we had a deferred signal to report, dequeue one now.
4384 This can happen if LWP gets more than one signal while
4385 trying to get out of a jump pad. */
4386 if (lwp->stopped
4387 && !lwp->status_pending_p
4388 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4389 {
4390 lwp->status_pending_p = 1;
4391
4392 if (debug_threads)
4393 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4394 "leaving status pending.\n",
4395 WSTOPSIG (lwp->status_pending),
4396 lwpid_of (thread));
4397 }
4398
4399 return;
4400 }
4401 }
4402
4403 /* No resume action for this thread. */
4404 lwp->resume = NULL;
4405 }
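/* Worked example of the matching rules above (illustrative): for a
   vCont like "s:p4d2.5;c:p4d2.-1" GDB sends two resume entries, a
   step for thread (1234, 5) followed by a process-wide wildcard
   continue. Scanning the entries in order, thread (1234, 5) matches
   the step entry first and stops there; every other thread of
   process 1234 falls through to the wildcard continue via the
   pid-with-lwp-of-minus-one check.  */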
4406
4407 bool
4408 linux_process_target::resume_status_pending (thread_info *thread)
4409 {
4410 struct lwp_info *lwp = get_thread_lwp (thread);
4411
4412 /* LWPs which will not be resumed are not interesting, because
4413 we might not wait for them next time through linux_wait. */
4414 if (lwp->resume == NULL)
4415 return false;
4416
4417 return thread_still_has_status_pending (thread);
4418 }
4419
4420 bool
4421 linux_process_target::thread_needs_step_over (thread_info *thread)
4422 {
4423 struct lwp_info *lwp = get_thread_lwp (thread);
4424 CORE_ADDR pc;
4425 struct process_info *proc = get_thread_process (thread);
4426
4427 /* GDBserver is skipping the extra traps from the wrapper program,
4428 so there is no need to do a step-over. */
4429 if (proc->tdesc == NULL)
4430 return false;
4431
4432 /* LWPs which will not be resumed are not interesting, because we
4433 might not wait for them next time through linux_wait. */
4434
4435 if (!lwp->stopped)
4436 {
4437 if (debug_threads)
4438 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4439 lwpid_of (thread));
4440 return false;
4441 }
4442
4443 if (thread->last_resume_kind == resume_stop)
4444 {
4445 if (debug_threads)
4446 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4447 " stopped\n",
4448 lwpid_of (thread));
4449 return false;
4450 }
4451
4452 gdb_assert (lwp->suspended >= 0);
4453
4454 if (lwp->suspended)
4455 {
4456 if (debug_threads)
4457 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4458 lwpid_of (thread));
4459 return false;
4460 }
4461
4462 if (lwp->status_pending_p)
4463 {
4464 if (debug_threads)
4465 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4466 " status.\n",
4467 lwpid_of (thread));
4468 return false;
4469 }
4470
4471 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4472 or we have. */
4473 pc = get_pc (lwp);
4474
4475 /* If the PC has changed since we stopped, then don't do anything,
4476 and let the breakpoint/tracepoint be hit. This happens if, for
4477 instance, GDB handled the decr_pc_after_break subtraction itself,
4478 GDB is OOL stepping this thread, or the user has issued a "jump"
4479 command, or poked the thread's registers herself. */
4480 if (pc != lwp->stop_pc)
4481 {
4482 if (debug_threads)
4483 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4484 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4485 lwpid_of (thread),
4486 paddress (lwp->stop_pc), paddress (pc));
4487 return false;
4488 }
4489
4490 /* On software single step target, resume the inferior with signal
4491 rather than stepping over. */
4492 if (supports_software_single_step ()
4493 && !lwp->pending_signals.empty ()
4494 && lwp_signal_can_be_delivered (lwp))
4495 {
4496 if (debug_threads)
4497 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4498 " signals.\n",
4499 lwpid_of (thread));
4500
4501 return false;
4502 }
4503
4504 scoped_restore_current_thread restore_thread;
4505 switch_to_thread (thread);
4506
4507 /* We can only step over breakpoints we know about. */
4508 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4509 {
4510 /* Don't step over a breakpoint that GDB expects to hit
4511 though. If the condition is being evaluated on the target's side
4512 and it evaluates to false, step over this breakpoint as well. */
4513 if (gdb_breakpoint_here (pc)
4514 && gdb_condition_true_at_breakpoint (pc)
4515 && gdb_no_commands_at_breakpoint (pc))
4516 {
4517 if (debug_threads)
4518 debug_printf ("Need step over [LWP %ld]? yes, but found"
4519 " GDB breakpoint at 0x%s; skipping step over\n",
4520 lwpid_of (thread), paddress (pc));
4521
4522 return false;
4523 }
4524 else
4525 {
4526 if (debug_threads)
4527 debug_printf ("Need step over [LWP %ld]? yes, "
4528 "found breakpoint at 0x%s\n",
4529 lwpid_of (thread), paddress (pc));
4530
4531 /* We've found an LWP that needs stepping over --- return true so
4532 that find_thread stops looking. */
4533 return true;
4534 }
4535 }
4536
4537 if (debug_threads)
4538 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4539 " at 0x%s\n",
4540 lwpid_of (thread), paddress (pc));
4541
4542 return false;
4543 }
4544
4545 void
4546 linux_process_target::start_step_over (lwp_info *lwp)
4547 {
4548 struct thread_info *thread = get_lwp_thread (lwp);
4549 CORE_ADDR pc;
4550
4551 if (debug_threads)
4552 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4553 lwpid_of (thread));
4554
4555 stop_all_lwps (1, lwp);
4556
4557 if (lwp->suspended != 0)
4558 {
4559 internal_error (__FILE__, __LINE__,
4560 "LWP %ld suspended=%d\n", lwpid_of (thread),
4561 lwp->suspended);
4562 }
4563
4564 if (debug_threads)
4565 debug_printf ("Done stopping all threads for step-over.\n");
4566
4567 /* Note, we should always reach here with an already adjusted PC,
4568 either by GDB (if we're resuming due to GDB's request), or by our
4569 caller, if we just finished handling an internal breakpoint GDB
4570 shouldn't care about. */
4571 pc = get_pc (lwp);
4572
4573 bool step = false;
4574 {
4575 scoped_restore_current_thread restore_thread;
4576 switch_to_thread (thread);
4577
4578 lwp->bp_reinsert = pc;
4579 uninsert_breakpoints_at (pc);
4580 uninsert_fast_tracepoint_jumps_at (pc);
4581
4582 step = single_step (lwp);
4583 }
4584
4585 resume_one_lwp (lwp, step, 0, NULL);
4586
4587 /* Require next event from this LWP. */
4588 step_over_bkpt = thread->id;
4589 }
4590
4591 bool
4592 linux_process_target::finish_step_over (lwp_info *lwp)
4593 {
4594 if (lwp->bp_reinsert != 0)
4595 {
4596 scoped_restore_current_thread restore_thread;
4597
4598 if (debug_threads)
4599 debug_printf ("Finished step over.\n");
4600
4601 switch_to_thread (get_lwp_thread (lwp));
4602
4603 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4604 may be no breakpoint to reinsert there by now. */
4605 reinsert_breakpoints_at (lwp->bp_reinsert);
4606 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4607
4608 lwp->bp_reinsert = 0;
4609
4610 /* Delete any single-step breakpoints. No longer needed. We
4611 don't have to worry about other threads hitting this trap,
4612 and later not being able to explain it, because we were
4613 stepping over a breakpoint, and we hold all threads but
4614 LWP stopped while doing that. */
4615 if (!supports_hardware_single_step ())
4616 {
4617 gdb_assert (has_single_step_breakpoints (current_thread));
4618 delete_single_step_breakpoints (current_thread);
4619 }
4620
4621 step_over_bkpt = null_ptid;
4622 return true;
4623 }
4624 else
4625 return false;
4626 }
4627
4628 void
4629 linux_process_target::complete_ongoing_step_over ()
4630 {
4631 if (step_over_bkpt != null_ptid)
4632 {
4633 struct lwp_info *lwp;
4634 int wstat;
4635 int ret;
4636
4637 if (debug_threads)
4638 debug_printf ("detach: step over in progress, finish it first\n");
4639
4640 /* Passing NULL_PTID as filter indicates we want all events to
4641 be left pending. Eventually this returns when there are no
4642 unwaited-for children left. */
4643 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4644 __WALL);
4645 gdb_assert (ret == -1);
4646
4647 lwp = find_lwp_pid (step_over_bkpt);
4648 if (lwp != NULL)
4649 {
4650 finish_step_over (lwp);
4651
4652 /* If we got our step SIGTRAP, don't leave it pending,
4653 otherwise we would report it to GDB as a spurious
4654 SIGTRAP. */
4655 gdb_assert (lwp->status_pending_p);
4656 if (WIFSTOPPED (lwp->status_pending)
4657 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4658 {
4659 thread_info *thread = get_lwp_thread (lwp);
4660 if (thread->last_resume_kind != resume_step)
4661 {
4662 if (debug_threads)
4663 debug_printf ("detach: discard step-over SIGTRAP\n");
4664
4665 lwp->status_pending_p = 0;
4666 lwp->status_pending = 0;
4667 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4668 }
4669 else
4670 {
4671 if (debug_threads)
4672 debug_printf ("detach: resume_step, "
4673 "not discarding step-over SIGTRAP\n");
4674 }
4675 }
4676 }
4677 step_over_bkpt = null_ptid;
4678 unsuspend_all_lwps (lwp);
4679 }
4680 }
4681
4682 void
4683 linux_process_target::resume_one_thread (thread_info *thread,
4684 bool leave_all_stopped)
4685 {
4686 struct lwp_info *lwp = get_thread_lwp (thread);
4687 int leave_pending;
4688
4689 if (lwp->resume == NULL)
4690 return;
4691
4692 if (lwp->resume->kind == resume_stop)
4693 {
4694 if (debug_threads)
4695 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4696
4697 if (!lwp->stopped)
4698 {
4699 if (debug_threads)
4700 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4701
4702 /* Stop the thread, and wait for the event asynchronously,
4703 through the event loop. */
4704 send_sigstop (lwp);
4705 }
4706 else
4707 {
4708 if (debug_threads)
4709 debug_printf ("already stopped LWP %ld\n",
4710 lwpid_of (thread));
4711
4712 /* The LWP may have been stopped in an internal event that
4713 was not meant to be notified back to GDB (e.g., gdbserver
4714 breakpoint), so we should be reporting a stop event in
4715 this case too. */
4716
4717 /* If the thread already has a pending SIGSTOP, this is a
4718 no-op. Otherwise, something later will presumably resume
4719 the thread and this will cause it to cancel any pending
4720 operation, due to last_resume_kind == resume_stop. If
4721 the thread already has a pending status to report, we
4722 will still report it the next time we wait - see
4723 status_pending_p_callback. */
4724
4725 /* If we already have a pending signal to report, then
4726 there's no need to queue a SIGSTOP, as this means we're
4727 midway through moving the LWP out of the jumppad, and we
4728 will report the pending signal as soon as that is
4729 finished. */
4730 if (lwp->pending_signals_to_report.empty ())
4731 send_sigstop (lwp);
4732 }
4733
4734 /* For stop requests, we're done. */
4735 lwp->resume = NULL;
4736 thread->last_status.set_ignore ();
4737 return;
4738 }
4739
4740 /* If this thread, which is about to be resumed, has a pending status,
4741 then don't resume it - we can just report the pending status.
4742 Likewise if it is suspended, because, e.g., another thread is
4743 stepping past a breakpoint. Make sure to queue any signals that
4744 would otherwise be sent. In all-stop mode, we base this decision
4745 on whether *any* thread has a pending status. If there's a
4746 thread that needs the step-over-breakpoint dance, then don't
4747 resume any other thread but that particular one. */
4748 leave_pending = (lwp->suspended
4749 || lwp->status_pending_p
4750 || leave_all_stopped);
4751
4752 /* If we have a new signal, enqueue the signal. */
4753 if (lwp->resume->sig != 0)
4754 {
4755 siginfo_t info, *info_p;
4756
4757 /* If this is the same signal we were previously stopped by,
4758 make sure to queue its siginfo. */
4759 if (WIFSTOPPED (lwp->last_status)
4760 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4761 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4762 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4763 info_p = &info;
4764 else
4765 info_p = NULL;
4766
4767 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4768 }
4769
4770 if (!leave_pending)
4771 {
4772 if (debug_threads)
4773 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4774
4775 proceed_one_lwp (thread, NULL);
4776 }
4777 else
4778 {
4779 if (debug_threads)
4780 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4781 }
4782
4783 thread->last_status.set_ignore ();
4784 lwp->resume = NULL;
4785 }
4786
4787 void
4788 linux_process_target::resume (thread_resume *resume_info, size_t n)
4789 {
4790 struct thread_info *need_step_over = NULL;
4791
4792 if (debug_threads)
4793 {
4794 debug_enter ();
4795 debug_printf ("linux_resume:\n");
4796 }
4797
4798 for_each_thread ([&] (thread_info *thread)
4799 {
4800 linux_set_resume_request (thread, resume_info, n);
4801 });
4802
4803 /* If there is a thread which would otherwise be resumed, which has
4804 a pending status, then don't resume any threads - we can just
4805 report the pending status. Make sure to queue any signals that
4806 would otherwise be sent. In non-stop mode, we'll apply this
4807 logic to each thread individually. In all-stop mode, we consume
4808 all pending events before considering whether to start a step-over. */
4809 bool any_pending = false;
4810 if (!non_stop)
4811 any_pending = find_thread ([this] (thread_info *thread)
4812 {
4813 return resume_status_pending (thread);
4814 }) != nullptr;
4815
4816 /* If there is a thread which would otherwise be resumed, which is
4817 stopped at a breakpoint that needs stepping over, then don't
4818 resume any threads - have it step over the breakpoint with all
4819 other threads stopped, then resume all threads again. Make sure
4820 to queue any signals that would otherwise be delivered or
4821 queued. */
4822 if (!any_pending && low_supports_breakpoints ())
4823 need_step_over = find_thread ([this] (thread_info *thread)
4824 {
4825 return thread_needs_step_over (thread);
4826 });
4827
4828 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4829
4830 if (debug_threads)
4831 {
4832 if (need_step_over != NULL)
4833 debug_printf ("Not resuming all, need step over\n");
4834 else if (any_pending)
4835 debug_printf ("Not resuming, all-stop and found "
4836 "an LWP with pending status\n");
4837 else
4838 debug_printf ("Resuming, no pending status or step over needed\n");
4839 }
4840
4841 /* Even if we're leaving threads stopped, queue all signals we'd
4842 otherwise deliver. */
4843 for_each_thread ([&] (thread_info *thread)
4844 {
4845 resume_one_thread (thread, leave_all_stopped);
4846 });
4847
4848 if (need_step_over)
4849 start_step_over (get_thread_lwp (need_step_over));
4850
4851 if (debug_threads)
4852 {
4853 debug_printf ("linux_resume done\n");
4854 debug_exit ();
4855 }
4856
4857 /* We may have events that were pending that can/should be sent to
4858 the client now. Trigger a linux_wait call. */
4859 if (target_is_async_p ())
4860 async_file_mark ();
4861 }
4862
4863 void
4864 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4865 {
4866 struct lwp_info *lwp = get_thread_lwp (thread);
4867 int step;
4868
4869 if (lwp == except)
4870 return;
4871
4872 if (debug_threads)
4873 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4874
4875 if (!lwp->stopped)
4876 {
4877 if (debug_threads)
4878 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4879 return;
4880 }
4881
4882 if (thread->last_resume_kind == resume_stop
4883 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4884 {
4885 if (debug_threads)
4886 debug_printf (" client wants LWP %ld to remain stopped\n",
4887 lwpid_of (thread));
4888 return;
4889 }
4890
4891 if (lwp->status_pending_p)
4892 {
4893 if (debug_threads)
4894 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4895 lwpid_of (thread));
4896 return;
4897 }
4898
4899 gdb_assert (lwp->suspended >= 0);
4900
4901 if (lwp->suspended)
4902 {
4903 if (debug_threads)
4904 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4905 return;
4906 }
4907
4908 if (thread->last_resume_kind == resume_stop
4909 && lwp->pending_signals_to_report.empty ()
4910 && (lwp->collecting_fast_tracepoint
4911 == fast_tpoint_collect_result::not_collecting))
4912 {
4913 /* We haven't reported this LWP as stopped yet (otherwise, the
4914 last_status.kind check above would catch it, and we wouldn't
4915 reach here). This LWP may have been momentarily paused by a
4916 stop_all_lwps call while handling, for example, another LWP's
4917 step-over. In that case, the pending expected SIGSTOP signal
4918 that was queued at vCont;t handling time will have already
4919 been consumed by wait_for_sigstop, and so we need to requeue
4920 another one here. Note that if the LWP already has a SIGSTOP
4921 pending, this is a no-op. */
4922
4923 if (debug_threads)
4924 debug_printf ("Client wants LWP %ld to stop. "
4925 "Making sure it has a SIGSTOP pending\n",
4926 lwpid_of (thread));
4927
4928 send_sigstop (lwp);
4929 }
4930
4931 if (thread->last_resume_kind == resume_step)
4932 {
4933 if (debug_threads)
4934 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4935 lwpid_of (thread));
4936
4937 /* If resume_step is requested by GDB, install single-step
4938 breakpoints when the thread is about to be actually resumed if
4939 the single-step breakpoints weren't removed. */
4940 if (supports_software_single_step ()
4941 && !has_single_step_breakpoints (thread))
4942 install_software_single_step_breakpoints (lwp);
4943
4944 step = maybe_hw_step (thread);
4945 }
4946 else if (lwp->bp_reinsert != 0)
4947 {
4948 if (debug_threads)
4949 debug_printf (" stepping LWP %ld, reinsert set\n",
4950 lwpid_of (thread));
4951
4952 step = maybe_hw_step (thread);
4953 }
4954 else
4955 step = 0;
4956
4957 resume_one_lwp (lwp, step, 0, NULL);
4958 }
4959
4960 void
4961 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4962 lwp_info *except)
4963 {
4964 struct lwp_info *lwp = get_thread_lwp (thread);
4965
4966 if (lwp == except)
4967 return;
4968
4969 lwp_suspended_decr (lwp);
4970
4971 proceed_one_lwp (thread, except);
4972 }
4973
4974 void
4975 linux_process_target::proceed_all_lwps ()
4976 {
4977 struct thread_info *need_step_over;
4978
4979 /* If there is a thread which would otherwise be resumed, which is
4980 stopped at a breakpoint that needs stepping over, then don't
4981 resume any threads - have it step over the breakpoint with all
4982 other threads stopped, then resume all threads again. */
4983
4984 if (low_supports_breakpoints ())
4985 {
4986 need_step_over = find_thread ([this] (thread_info *thread)
4987 {
4988 return thread_needs_step_over (thread);
4989 });
4990
4991 if (need_step_over != NULL)
4992 {
4993 if (debug_threads)
4994 debug_printf ("proceed_all_lwps: found "
4995 "thread %ld needing a step-over\n",
4996 lwpid_of (need_step_over));
4997
4998 start_step_over (get_thread_lwp (need_step_over));
4999 return;
5000 }
5001 }
5002
5003 if (debug_threads)
5004 debug_printf ("Proceeding, no step-over needed\n");
5005
5006 for_each_thread ([this] (thread_info *thread)
5007 {
5008 proceed_one_lwp (thread, NULL);
5009 });
5010 }
5011
5012 void
5013 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5014 {
5015 if (debug_threads)
5016 {
5017 debug_enter ();
5018 if (except)
5019 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5020 lwpid_of (get_lwp_thread (except)));
5021 else
5022 debug_printf ("unstopping all lwps\n");
5023 }
5024
5025 if (unsuspend)
5026 for_each_thread ([&] (thread_info *thread)
5027 {
5028 unsuspend_and_proceed_one_lwp (thread, except);
5029 });
5030 else
5031 for_each_thread ([&] (thread_info *thread)
5032 {
5033 proceed_one_lwp (thread, except);
5034 });
5035
5036 if (debug_threads)
5037 {
5038 debug_printf ("unstop_all_lwps done\n");
5039 debug_exit ();
5040 }
5041 }
5042
5043
5044 #ifdef HAVE_LINUX_REGSETS
5045
5046 #define use_linux_regsets 1
5047
5048 /* Returns true if REGSET has been disabled. */
5049
5050 static int
5051 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5052 {
5053 return (info->disabled_regsets != NULL
5054 && info->disabled_regsets[regset - info->regsets]);
5055 }
5056
5057 /* Disable REGSET. */
5058
5059 static void
5060 disable_regset (struct regsets_info *info, struct regset_info *regset)
5061 {
5062 int dr_offset;
5063
5064 dr_offset = regset - info->regsets;
5065 if (info->disabled_regsets == NULL)
5066 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5067 info->disabled_regsets[dr_offset] = 1;
5068 }
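/* DR_OFFSET is plain pointer arithmetic: REGSET points into the
   INFO->regsets array, so "regset - info->regsets" is its element
   index, reused as the index into the parallel DISABLED_REGSETS
   byte array. For example, disabling the third regset
   (&info->regsets[2]) computes 2 and sets disabled_regsets[2].  */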
5069
5070 static int
5071 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5072 struct regcache *regcache)
5073 {
5074 struct regset_info *regset;
5075 int saw_general_regs = 0;
5076 int pid;
5077 struct iovec iov;
5078
5079 pid = lwpid_of (current_thread);
5080 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5081 {
5082 void *buf, *data;
5083 int nt_type, res;
5084
5085 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5086 continue;
5087
5088 buf = xmalloc (regset->size);
5089
5090 nt_type = regset->nt_type;
5091 if (nt_type)
5092 {
5093 iov.iov_base = buf;
5094 iov.iov_len = regset->size;
5095 data = (void *) &iov;
5096 }
5097 else
5098 data = buf;
5099
5100 #ifndef __sparc__
5101 res = ptrace (regset->get_request, pid,
5102 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5103 #else
5104 res = ptrace (regset->get_request, pid, data, nt_type);
5105 #endif
5106 if (res < 0)
5107 {
5108 if (errno == EIO
5109 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5110 {
5111 /* If we get EIO on a regset, or an EINVAL and the regset is
5112 optional, do not try it again for this process mode. */
5113 disable_regset (regsets_info, regset);
5114 }
5115 else if (errno == ENODATA)
5116 {
5117 /* ENODATA may be returned if the regset is currently
5118 not "active". This can happen in normal operation,
5119 so suppress the warning in this case. */
5120 }
5121 else if (errno == ESRCH)
5122 {
5123 /* At this point, ESRCH should mean the process is
5124 already gone, in which case we simply ignore attempts
5125 to read its registers. */
5126 }
5127 else
5128 {
5129 char s[256];
5130 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5131 pid);
5132 perror (s);
5133 }
5134 }
5135 else
5136 {
5137 if (regset->type == GENERAL_REGS)
5138 saw_general_regs = 1;
5139 regset->store_function (regcache, buf);
5140 }
5141 free (buf);
5142 }
5143 if (saw_general_regs)
5144 return 0;
5145 else
5146 return 1;
5147 }
5148
5149 static int
5150 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5151 struct regcache *regcache)
5152 {
5153 struct regset_info *regset;
5154 int saw_general_regs = 0;
5155 int pid;
5156 struct iovec iov;
5157
5158 pid = lwpid_of (current_thread);
5159 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5160 {
5161 void *buf, *data;
5162 int nt_type, res;
5163
5164 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5165 || regset->fill_function == NULL)
5166 continue;
5167
5168 buf = xmalloc (regset->size);
5169
5170 /* First fill the buffer with the current register set contents,
5171 in case there are any items in the kernel's regset that are
5172 not in gdbserver's regcache. */
5173
5174 nt_type = regset->nt_type;
5175 if (nt_type)
5176 {
5177 iov.iov_base = buf;
5178 iov.iov_len = regset->size;
5179 data = (void *) &iov;
5180 }
5181 else
5182 data = buf;
5183
5184 #ifndef __sparc__
5185 res = ptrace (regset->get_request, pid,
5186 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5187 #else
5188 res = ptrace (regset->get_request, pid, data, nt_type);
5189 #endif
5190
5191 if (res == 0)
5192 {
5193 /* Then overlay our cached registers on that. */
5194 regset->fill_function (regcache, buf);
5195
5196 /* Only now do we write the register set. */
5197 #ifndef __sparc__
5198 res = ptrace (regset->set_request, pid,
5199 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5200 #else
5201 res = ptrace (regset->set_request, pid, data, nt_type);
5202 #endif
5203 }
5204
5205 if (res < 0)
5206 {
5207 if (errno == EIO
5208 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5209 {
5210 /* If we get EIO on a regset, or an EINVAL and the regset is
5211 optional, do not try it again for this process mode. */
5212 disable_regset (regsets_info, regset);
5213 }
5214 else if (errno == ESRCH)
5215 {
5216 /* At this point, ESRCH should mean the process is
5217 already gone, in which case we simply ignore attempts
5218 to change its registers. See also the related
5219 comment in resume_one_lwp. */
5220 free (buf);
5221 return 0;
5222 }
5223 else
5224 {
5225 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5226 }
5227 }
5228 else if (regset->type == GENERAL_REGS)
5229 saw_general_regs = 1;
5230 free (buf);
5231 }
5232 if (saw_general_regs)
5233 return 0;
5234 else
5235 return 1;
5236 }
5237
5238 #else /* !HAVE_LINUX_REGSETS */
5239
5240 #define use_linux_regsets 0
5241 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5242 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5243
5244 #endif
5245
5246 /* Return 1 if register REGNO is supported by one of the regset ptrace
5247 calls or 0 if it has to be transferred individually. */
5248
5249 static int
5250 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5251 {
5252 unsigned char mask = 1 << (regno % 8);
5253 size_t index = regno / 8;
5254
5255 return (use_linux_regsets
5256 && (regs_info->regset_bitmap == NULL
5257 || (regs_info->regset_bitmap[index] & mask) != 0));
5258 }
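/* The bitmap packs one bit per register, eight registers per byte.
   Worked example (illustrative): for REGNO == 19, INDEX is
   19 / 8 == 2 and MASK is 1 << (19 % 8) == 0x08, so register 19 is
   regset-transferred iff bit 3 of regset_bitmap[2] is set - or
   unconditionally when the bitmap is NULL.  */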
5259
5260 #ifdef HAVE_LINUX_USRREGS
5261
5262 static int
5263 register_addr (const struct usrregs_info *usrregs, int regnum)
5264 {
5265 int addr;
5266
5267 if (regnum < 0 || regnum >= usrregs->num_regs)
5268 error ("Invalid register number %d.", regnum);
5269
5270 addr = usrregs->regmap[regnum];
5271
5272 return addr;
5273 }
5274
5275
5276 void
5277 linux_process_target::fetch_register (const usrregs_info *usrregs,
5278 regcache *regcache, int regno)
5279 {
5280 CORE_ADDR regaddr;
5281 int i, size;
5282 char *buf;
5283 int pid;
5284
5285 if (regno >= usrregs->num_regs)
5286 return;
5287 if (low_cannot_fetch_register (regno))
5288 return;
5289
5290 regaddr = register_addr (usrregs, regno);
5291 if (regaddr == -1)
5292 return;
5293
5294 size = ((register_size (regcache->tdesc, regno)
5295 + sizeof (PTRACE_XFER_TYPE) - 1)
5296 & -sizeof (PTRACE_XFER_TYPE));
5297 buf = (char *) alloca (size);
5298
5299 pid = lwpid_of (current_thread);
5300 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5301 {
5302 errno = 0;
5303 *(PTRACE_XFER_TYPE *) (buf + i) =
5304 ptrace (PTRACE_PEEKUSER, pid,
5305 /* Coerce to a uintptr_t first to avoid potential gcc warning
5306 of coercing an 8 byte integer to a 4 byte pointer. */
5307 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5308 regaddr += sizeof (PTRACE_XFER_TYPE);
5309 if (errno != 0)
5310 {
5311 /* Mark register REGNO unavailable. */
5312 supply_register (regcache, regno, NULL);
5313 return;
5314 }
5315 }
5316
5317 low_supply_ptrace_register (regcache, regno, buf);
5318 }
5319
5320 void
5321 linux_process_target::store_register (const usrregs_info *usrregs,
5322 regcache *regcache, int regno)
5323 {
5324 CORE_ADDR regaddr;
5325 int i, size;
5326 char *buf;
5327 int pid;
5328
5329 if (regno >= usrregs->num_regs)
5330 return;
5331 if (low_cannot_store_register (regno))
5332 return;
5333
5334 regaddr = register_addr (usrregs, regno);
5335 if (regaddr == -1)
5336 return;
5337
5338 size = ((register_size (regcache->tdesc, regno)
5339 + sizeof (PTRACE_XFER_TYPE) - 1)
5340 & -sizeof (PTRACE_XFER_TYPE));
5341 buf = (char *) alloca (size);
5342 memset (buf, 0, size);
5343
5344 low_collect_ptrace_register (regcache, regno, buf);
5345
5346 pid = lwpid_of (current_thread);
5347 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5348 {
5349 errno = 0;
5350 ptrace (PTRACE_POKEUSER, pid,
5351 /* Coerce to a uintptr_t first to avoid potential gcc warning
5352 about coercing an 8 byte integer to a 4 byte pointer. */
5353 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5354 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5355 if (errno != 0)
5356 {
5357 /* At this point, ESRCH should mean the process is
5358 already gone, in which case we simply ignore attempts
5359 to change its registers. See also the related
5360 comment in resume_one_lwp. */
5361 if (errno == ESRCH)
5362 return;
5363
5365 if (!low_cannot_store_register (regno))
5366 error ("writing register %d: %s", regno, safe_strerror (errno));
5367 }
5368 regaddr += sizeof (PTRACE_XFER_TYPE);
5369 }
5370 }
5371 #endif /* HAVE_LINUX_USRREGS */
5372
5373 void
5374 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5375 int regno, char *buf)
5376 {
5377 collect_register (regcache, regno, buf);
5378 }
5379
5380 void
5381 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5382 int regno, const char *buf)
5383 {
5384 supply_register (regcache, regno, buf);
5385 }
5386
5387 void
5388 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5389 regcache *regcache,
5390 int regno, int all)
5391 {
5392 #ifdef HAVE_LINUX_USRREGS
5393 struct usrregs_info *usr = regs_info->usrregs;
5394
5395 if (regno == -1)
5396 {
5397 for (regno = 0; regno < usr->num_regs; regno++)
5398 if (all || !linux_register_in_regsets (regs_info, regno))
5399 fetch_register (usr, regcache, regno);
5400 }
5401 else
5402 fetch_register (usr, regcache, regno);
5403 #endif
5404 }
5405
5406 void
5407 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5408 regcache *regcache,
5409 int regno, int all)
5410 {
5411 #ifdef HAVE_LINUX_USRREGS
5412 struct usrregs_info *usr = regs_info->usrregs;
5413
5414 if (regno == -1)
5415 {
5416 for (regno = 0; regno < usr->num_regs; regno++)
5417 if (all || !linux_register_in_regsets (regs_info, regno))
5418 store_register (usr, regcache, regno);
5419 }
5420 else
5421 store_register (usr, regcache, regno);
5422 #endif
5423 }
5424
5425 void
5426 linux_process_target::fetch_registers (regcache *regcache, int regno)
5427 {
5428 int use_regsets;
5429 int all = 0;
5430 const regs_info *regs_info = get_regs_info ();
5431
5432 if (regno == -1)
5433 {
5434 if (regs_info->usrregs != NULL)
5435 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5436 low_fetch_register (regcache, regno);
5437
5438 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5439 if (regs_info->usrregs != NULL)
5440 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5441 }
5442 else
5443 {
5444 if (low_fetch_register (regcache, regno))
5445 return;
5446
5447 use_regsets = linux_register_in_regsets (regs_info, regno);
5448 if (use_regsets)
5449 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5450 regcache);
5451 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5452 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5453 }
5454 }
5455
5456 void
5457 linux_process_target::store_registers (regcache *regcache, int regno)
5458 {
5459 int use_regsets;
5460 int all = 0;
5461 const regs_info *regs_info = get_regs_info ();
5462
5463 if (regno == -1)
5464 {
5465 all = regsets_store_inferior_registers (regs_info->regsets_info,
5466 regcache);
5467 if (regs_info->usrregs != NULL)
5468 usr_store_inferior_registers (regs_info, regcache, regno, all);
5469 }
5470 else
5471 {
5472 use_regsets = linux_register_in_regsets (regs_info, regno);
5473 if (use_regsets)
5474 all = regsets_store_inferior_registers (regs_info->regsets_info,
5475 regcache);
5476 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5477 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5478 }
5479 }
5480
5481 bool
5482 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5483 {
5484 return false;
5485 }
5486
5487 /* A wrapper for the read_memory target op. */
5488
5489 static int
5490 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5491 {
5492 return the_target->read_memory (memaddr, myaddr, len);
5493 }
5494
5495 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5496 to debugger memory starting at MYADDR. */
5497
5498 int
5499 linux_process_target::read_memory (CORE_ADDR memaddr,
5500 unsigned char *myaddr, int len)
5501 {
5502 int pid = lwpid_of (current_thread);
5503 PTRACE_XFER_TYPE *buffer;
5504 CORE_ADDR addr;
5505 int count;
5506 char filename[64];
5507 int i;
5508 int ret;
5509 int fd;
5510
5511 /* Try using /proc. Don't bother for one word. */
5512 if (len >= 3 * sizeof (long))
5513 {
5514 int bytes;
5515
5516 /* We could keep this file open and cache it - possibly one per
5517 thread. That requires some juggling, but is even faster. */
5518 sprintf (filename, "/proc/%d/mem", pid);
5519 fd = open (filename, O_RDONLY | O_LARGEFILE);
5520 if (fd == -1)
5521 goto no_proc;
5522
5523 /* If pread64 is available, use it. It's faster if the kernel
5524 supports it (only one syscall), and it's 64-bit safe even on
5525 32-bit platforms (for instance, SPARC debugging a SPARC64
5526 application). */
5527 #ifdef HAVE_PREAD64
5528 bytes = pread64 (fd, myaddr, len, memaddr);
5529 #else
5530 bytes = -1;
5531 if (lseek (fd, memaddr, SEEK_SET) != -1)
5532 bytes = read (fd, myaddr, len);
5533 #endif
5534
5535 close (fd);
5536 if (bytes == len)
5537 return 0;
5538
5539 /* Some data was read, we'll try to get the rest with ptrace. */
5540 if (bytes > 0)
5541 {
5542 memaddr += bytes;
5543 myaddr += bytes;
5544 len -= bytes;
5545 }
5546 }
5547
5548 no_proc:
5549 /* Round starting address down to longword boundary. */
5550 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5551 /* Round ending address up; get number of longwords that makes. */
5552 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5553 / sizeof (PTRACE_XFER_TYPE));
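/* Worked example (illustrative, assuming an 8-byte
   PTRACE_XFER_TYPE): for memaddr == 0x1003 and len == 10, the
   transfer covers 0x1003..0x100c. ADDR rounds down to 0x1000 and
   COUNT is ((0x100d - 0x1000) + 7) / 8 == 2, i.e. the words at
   0x1000 and 0x1008; the final memcpy below then skips the three
   leading slop bytes. */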
5554 /* Allocate buffer of that many longwords. */
5555 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5556
5557 /* Read all the longwords */
5558 errno = 0;
5559 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5560 {
5561 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5562 about coercing an 8 byte integer to a 4 byte pointer. */
5563 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5564 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5565 (PTRACE_TYPE_ARG4) 0);
5566 if (errno)
5567 break;
5568 }
5569 ret = errno;
5570
5571 /* Copy appropriate bytes out of the buffer. */
5572 if (i > 0)
5573 {
5574 i *= sizeof (PTRACE_XFER_TYPE);
5575 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5576 memcpy (myaddr,
5577 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5578 i < len ? i : len);
5579 }
5580
5581 return ret;
5582 }
5583
5584 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5585 memory at MEMADDR. On failure (cannot write to the inferior)
5586 returns the value of errno. Always succeeds if LEN is zero. */
5587
5588 int
5589 linux_process_target::write_memory (CORE_ADDR memaddr,
5590 const unsigned char *myaddr, int len)
5591 {
5592 int i;
5593 /* Round starting address down to longword boundary. */
5594 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5595 /* Round ending address up; get number of longwords that makes. */
5596 int count
5597 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5598 / sizeof (PTRACE_XFER_TYPE);
5599
5600 /* Allocate buffer of that many longwords. */
5601 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5602
5603 int pid = lwpid_of (current_thread);
5604
5605 if (len == 0)
5606 {
5607 /* Zero length write always succeeds. */
5608 return 0;
5609 }
5610
5611 if (debug_threads)
5612 {
5613 /* Dump up to four bytes. */
5614 char str[4 * 2 + 1];
5615 char *p = str;
5616 int dump = len < 4 ? len : 4;
5617
5618 for (i = 0; i < dump; i++)
5619 {
5620 sprintf (p, "%02x", myaddr[i]);
5621 p += 2;
5622 }
5623 *p = '\0';
5624
5625 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5626 str, (long) memaddr, pid);
5627 }
5628
5629 /* Fill start and end extra bytes of buffer with existing memory data. */
5630
5631 errno = 0;
5632 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5633 about coercing an 8 byte integer to a 4 byte pointer. */
5634 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5635 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5636 (PTRACE_TYPE_ARG4) 0);
5637 if (errno)
5638 return errno;
5639
5640 if (count > 1)
5641 {
5642 errno = 0;
5643 buffer[count - 1]
5644 = ptrace (PTRACE_PEEKTEXT, pid,
5645 /* Coerce to a uintptr_t first to avoid potential gcc warning
5646 about coercing an 8 byte integer to a 4 byte pointer. */
5647 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5648 * sizeof (PTRACE_XFER_TYPE)),
5649 (PTRACE_TYPE_ARG4) 0);
5650 if (errno)
5651 return errno;
5652 }
5653
5654 /* Copy data to be written over corresponding part of buffer. */
5655
5656 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5657 myaddr, len);
5658
5659 /* Write the entire buffer. */
5660
5661 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5662 {
5663 errno = 0;
5664 ptrace (PTRACE_POKETEXT, pid,
5665 /* Coerce to a uintptr_t first to avoid potential gcc warning
5666 about coercing an 8 byte integer to a 4 byte pointer. */
5667 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5668 (PTRACE_TYPE_ARG4) buffer[i]);
5669 if (errno)
5670 return errno;
5671 }
5672
5673 return 0;
5674 }
5675
5676 void
5677 linux_process_target::look_up_symbols ()
5678 {
5679 #ifdef USE_THREAD_DB
5680 struct process_info *proc = current_process ();
5681
5682 if (proc->priv->thread_db != NULL)
5683 return;
5684
5685 thread_db_init ();
5686 #endif
5687 }
5688
5689 void
5690 linux_process_target::request_interrupt ()
5691 {
5692 /* Send a SIGINT to the process group. This acts just as if the user
5693 had typed a ^C on the controlling terminal. */
5694 ::kill (-signal_pid, SIGINT);
5695 }
5696
5697 bool
5698 linux_process_target::supports_read_auxv ()
5699 {
5700 return true;
5701 }
5702
5703 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5704 to debugger memory starting at MYADDR. */
5705
5706 int
5707 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5708 unsigned int len)
5709 {
5710 char filename[PATH_MAX];
5711 int fd, n;
5712 int pid = lwpid_of (current_thread);
5713
5714 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5715
5716 fd = open (filename, O_RDONLY);
5717 if (fd < 0)
5718 return -1;
5719
5720 if (offset != (CORE_ADDR) 0
5721 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5722 n = -1;
5723 else
5724 n = read (fd, myaddr, len);
5725
5726 close (fd);
5727
5728 return n;
5729 }
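/* A minimal caller sketch (hypothetical buffer): e.g.

     unsigned char ent[16];
     if (the_target->read_auxv (0, ent, sizeof ent) == sizeof ent)
       ...

   fetches the first entry of the vector, a (a_type, a_val) pair of
   target-sized words, just as linux_get_auxv later in this file does
   in a loop.  */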
5730
5731 int
5732 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5733 int size, raw_breakpoint *bp)
5734 {
5735 if (type == raw_bkpt_type_sw)
5736 return insert_memory_breakpoint (bp);
5737 else
5738 return low_insert_point (type, addr, size, bp);
5739 }
5740
5741 int
5742 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5743 int size, raw_breakpoint *bp)
5744 {
5745 /* Unsupported (see target.h). */
5746 return 1;
5747 }
5748
5749 int
5750 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5751 int size, raw_breakpoint *bp)
5752 {
5753 if (type == raw_bkpt_type_sw)
5754 return remove_memory_breakpoint (bp);
5755 else
5756 return low_remove_point (type, addr, size, bp);
5757 }
5758
5759 int
5760 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5761 int size, raw_breakpoint *bp)
5762 {
5763 /* Unsupported (see target.h). */
5764 return 1;
5765 }
5766
5767 /* Implement the stopped_by_sw_breakpoint target_ops
5768 method. */
5769
5770 bool
5771 linux_process_target::stopped_by_sw_breakpoint ()
5772 {
5773 struct lwp_info *lwp = get_thread_lwp (current_thread);
5774
5775 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5776 }
5777
5778 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5779 method. */
5780
5781 bool
5782 linux_process_target::supports_stopped_by_sw_breakpoint ()
5783 {
5784 return USE_SIGTRAP_SIGINFO;
5785 }
5786
5787 /* Implement the stopped_by_hw_breakpoint target_ops
5788 method. */
5789
5790 bool
5791 linux_process_target::stopped_by_hw_breakpoint ()
5792 {
5793 struct lwp_info *lwp = get_thread_lwp (current_thread);
5794
5795 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5796 }
5797
5798 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5799 method. */
5800
5801 bool
5802 linux_process_target::supports_stopped_by_hw_breakpoint ()
5803 {
5804 return USE_SIGTRAP_SIGINFO;
5805 }
5806
5807 /* Implement the supports_hardware_single_step target_ops method. */
5808
5809 bool
5810 linux_process_target::supports_hardware_single_step ()
5811 {
5812 return true;
5813 }
5814
5815 bool
5816 linux_process_target::stopped_by_watchpoint ()
5817 {
5818 struct lwp_info *lwp = get_thread_lwp (current_thread);
5819
5820 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5821 }
5822
5823 CORE_ADDR
5824 linux_process_target::stopped_data_address ()
5825 {
5826 struct lwp_info *lwp = get_thread_lwp (current_thread);
5827
5828 return lwp->stopped_data_address;
5829 }
5830
5831 /* This is only used for targets that define PT_TEXT_ADDR,
5832 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5833 target presumably has different ways of acquiring this information,
5834 such as loadmaps. */
5835
5836 bool
5837 linux_process_target::supports_read_offsets ()
5838 {
5839 #ifdef SUPPORTS_READ_OFFSETS
5840 return true;
5841 #else
5842 return false;
5843 #endif
5844 }
5845
5846 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5847 to tell gdb about. */
5848
5849 int
5850 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5851 {
5852 #ifdef SUPPORTS_READ_OFFSETS
5853 unsigned long text, text_end, data;
5854 int pid = lwpid_of (current_thread);
5855
5856 errno = 0;
5857
5858 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5859 (PTRACE_TYPE_ARG4) 0);
5860 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5861 (PTRACE_TYPE_ARG4) 0);
5862 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5863 (PTRACE_TYPE_ARG4) 0);
5864
5865 if (errno == 0)
5866 {
5867 /* Both text and data offsets produced at compile-time (and so
5868 used by gdb) are relative to the beginning of the program,
5869 with the data segment immediately following the text segment.
5870 However, the actual runtime layout in memory may put the data
5871 somewhere else, so when we send gdb a data base-address, we
5872 use the real data base address and subtract the compile-time
5873 data base-address from it (which is just the length of the
5874 text segment). BSS immediately follows data in both
5875 cases. */
5876 *text_p = text;
5877 *data_p = data - (text_end - text);
5878
5879 return 1;
5880 }
5881 return 0;
5882 #else
5883 gdb_assert_not_reached ("target op read_offsets not supported");
5884 #endif
5885 }
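/* For instance (hypothetical values): if PT_TEXT_ADDR reports
   0x8000000, PT_TEXT_END_ADDR 0x8004000 and PT_DATA_ADDR 0xa000000,
   GDB is sent text = 0x8000000 and data = 0xa000000 - 0x4000
   = 0x9ffc000, so that adding the compile-time data offset (the text
   length) lands on the real data segment.  */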
5886
5887 bool
5888 linux_process_target::supports_get_tls_address ()
5889 {
5890 #ifdef USE_THREAD_DB
5891 return true;
5892 #else
5893 return false;
5894 #endif
5895 }
5896
5897 int
5898 linux_process_target::get_tls_address (thread_info *thread,
5899 CORE_ADDR offset,
5900 CORE_ADDR load_module,
5901 CORE_ADDR *address)
5902 {
5903 #ifdef USE_THREAD_DB
5904 return thread_db_get_tls_address (thread, offset, load_module, address);
5905 #else
5906 return -1;
5907 #endif
5908 }
5909
5910 bool
5911 linux_process_target::supports_qxfer_osdata ()
5912 {
5913 return true;
5914 }
5915
5916 int
5917 linux_process_target::qxfer_osdata (const char *annex,
5918 unsigned char *readbuf,
5919 unsigned const char *writebuf,
5920 CORE_ADDR offset, int len)
5921 {
5922 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5923 }
5924
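/* Convert the siginfo in SIGINFO and INF_SIGINFO between the layout
   ptrace uses and the layout the current inferior expects.
   DIRECTION == 0 converts SIGINFO into INF_SIGINFO; DIRECTION == 1
   converts INF_SIGINFO back into SIGINFO.  */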
5925 void
5926 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5927 gdb_byte *inf_siginfo, int direction)
5928 {
5929 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5930
5931 /* If there was no callback, or the callback didn't do anything,
5932 then just do a straight memcpy. */
5933 if (!done)
5934 {
5935 if (direction == 1)
5936 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5937 else
5938 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5939 }
5940 }
5941
5942 bool
5943 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5944 int direction)
5945 {
5946 return false;
5947 }
5948
5949 bool
5950 linux_process_target::supports_qxfer_siginfo ()
5951 {
5952 return true;
5953 }
5954
5955 int
5956 linux_process_target::qxfer_siginfo (const char *annex,
5957 unsigned char *readbuf,
5958 unsigned const char *writebuf,
5959 CORE_ADDR offset, int len)
5960 {
5961 int pid;
5962 siginfo_t siginfo;
5963 gdb_byte inf_siginfo[sizeof (siginfo_t)];
5964
5965 if (current_thread == NULL)
5966 return -1;
5967
5968 pid = lwpid_of (current_thread);
5969
5970 if (debug_threads)
5971 debug_printf ("%s siginfo for lwp %d.\n",
5972 readbuf != NULL ? "Reading" : "Writing",
5973 pid);
5974
5975 if (offset >= sizeof (siginfo))
5976 return -1;
5977
5978 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5979 return -1;
5980
5981 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5982 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5983 inferior with a 64-bit GDBSERVER should look the same as debugging it
5984 with a 32-bit GDBSERVER, we need to convert it. */
5985 siginfo_fixup (&siginfo, inf_siginfo, 0);
5986
5987 if (offset + len > sizeof (siginfo))
5988 len = sizeof (siginfo) - offset;
5989
5990 if (readbuf != NULL)
5991 memcpy (readbuf, inf_siginfo + offset, len);
5992 else
5993 {
5994 memcpy (inf_siginfo + offset, writebuf, len);
5995
5996 /* Convert back to ptrace layout before flushing it out. */
5997 siginfo_fixup (&siginfo, inf_siginfo, 1);
5998
5999 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6000 return -1;
6001 }
6002
6003 return len;
6004 }
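/* E.g. with sizeof (siginfo) == 128 (a hypothetical size), a request
   with offset == 120 and len == 16 is clamped above to len == 8, so
   the reply never runs past the end of the structure.  */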
6005
6006 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6007 it lets us notice when children change state; and it acts as the
6008 handler for the sigsuspend in my_waitpid. */
6009
6010 static void
6011 sigchld_handler (int signo)
6012 {
6013 int old_errno = errno;
6014
6015 if (debug_threads)
6016 {
6017 do
6018 {
6019 /* Use the async signal safe debug function. */
6020 if (debug_write ("sigchld_handler\n",
6021 sizeof ("sigchld_handler\n") - 1) < 0)
6022 break; /* Just ignore. */
6023 } while (0);
6024 }
6025
6026 if (target_is_async_p ())
6027 async_file_mark (); /* Trigger a linux_wait. */
6028
6029 errno = old_errno;
6030 }
6031
6032 bool
6033 linux_process_target::supports_non_stop ()
6034 {
6035 return true;
6036 }
6037
6038 bool
6039 linux_process_target::async (bool enable)
6040 {
6041 bool previous = target_is_async_p ();
6042
6043 if (debug_threads)
6044 debug_printf ("linux_async (%d), previous=%d\n",
6045 enable, previous);
6046
6047 if (previous != enable)
6048 {
6049 sigset_t mask;
6050 sigemptyset (&mask);
6051 sigaddset (&mask, SIGCHLD);
6052
6053 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6054
6055 if (enable)
6056 {
6057 if (pipe (linux_event_pipe) == -1)
6058 {
6059 linux_event_pipe[0] = -1;
6060 linux_event_pipe[1] = -1;
6061 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6062
6063 warning ("creating event pipe failed.");
6064 return previous;
6065 }
6066
6067 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6068 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6069
6070 /* Register the event loop handler. */
6071 add_file_handler (linux_event_pipe[0],
6072 handle_target_event, NULL,
6073 "linux-low");
6074
6075 /* Always trigger a linux_wait. */
6076 async_file_mark ();
6077 }
6078 else
6079 {
6080 delete_file_handler (linux_event_pipe[0]);
6081
6082 close (linux_event_pipe[0]);
6083 close (linux_event_pipe[1]);
6084 linux_event_pipe[0] = -1;
6085 linux_event_pipe[1] = -1;
6086 }
6087
6088 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6089 }
6090
6091 return previous;
6092 }
6093
6094 int
6095 linux_process_target::start_non_stop (bool nonstop)
6096 {
6097 /* Register or unregister from the event loop accordingly. */
6098 target_async (nonstop);
6099
6100 if (target_is_async_p () != (nonstop != false))
6101 return -1;
6102
6103 return 0;
6104 }
6105
6106 bool
6107 linux_process_target::supports_multi_process ()
6108 {
6109 return true;
6110 }
6111
6112 /* Check if fork events are supported. */
6113
6114 bool
6115 linux_process_target::supports_fork_events ()
6116 {
6117 return linux_supports_tracefork ();
6118 }
6119
6120 /* Check if vfork events are supported. */
6121
6122 bool
6123 linux_process_target::supports_vfork_events ()
6124 {
6125 return linux_supports_tracefork ();
6126 }
6127
6128 /* Check if exec events are supported. */
6129
6130 bool
6131 linux_process_target::supports_exec_events ()
6132 {
6133 return linux_supports_traceexec ();
6134 }
6135
6136 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6137 ptrace flags for all inferiors. This is in case the new GDB connection
6138 doesn't support the same set of events that the previous one did. */
6139
6140 void
6141 linux_process_target::handle_new_gdb_connection ()
6142 {
6143 /* Request that all the lwps reset their ptrace options. */
6144 for_each_thread ([] (thread_info *thread)
6145 {
6146 struct lwp_info *lwp = get_thread_lwp (thread);
6147
6148 if (!lwp->stopped)
6149 {
6150 /* Stop the lwp so we can modify its ptrace options. */
6151 lwp->must_set_ptrace_flags = 1;
6152 linux_stop_lwp (lwp);
6153 }
6154 else
6155 {
6156 /* Already stopped; go ahead and set the ptrace options. */
6157 struct process_info *proc = find_process_pid (pid_of (thread));
6158 int options = linux_low_ptrace_options (proc->attached);
6159
6160 linux_enable_event_reporting (lwpid_of (thread), options);
6161 lwp->must_set_ptrace_flags = 0;
6162 }
6163 });
6164 }
6165
6166 int
6167 linux_process_target::handle_monitor_command (char *mon)
6168 {
6169 #ifdef USE_THREAD_DB
6170 return thread_db_handle_monitor_command (mon);
6171 #else
6172 return 0;
6173 #endif
6174 }
6175
6176 int
6177 linux_process_target::core_of_thread (ptid_t ptid)
6178 {
6179 return linux_common_core_of_thread (ptid);
6180 }
6181
6182 bool
6183 linux_process_target::supports_disable_randomization ()
6184 {
6185 return true;
6186 }
6187
6188 bool
6189 linux_process_target::supports_agent ()
6190 {
6191 return true;
6192 }
6193
6194 bool
6195 linux_process_target::supports_range_stepping ()
6196 {
6197 if (supports_software_single_step ())
6198 return true;
6199
6200 return low_supports_range_stepping ();
6201 }
6202
6203 bool
6204 linux_process_target::low_supports_range_stepping ()
6205 {
6206 return false;
6207 }
6208
6209 bool
6210 linux_process_target::supports_pid_to_exec_file ()
6211 {
6212 return true;
6213 }
6214
6215 const char *
6216 linux_process_target::pid_to_exec_file (int pid)
6217 {
6218 return linux_proc_pid_to_exec_file (pid);
6219 }
6220
6221 bool
6222 linux_process_target::supports_multifs ()
6223 {
6224 return true;
6225 }
6226
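/* The multifs_* methods below act on FILENAME as seen from PID's own
   mount namespace, so paths resolve the way the inferior sees them
   even when gdbserver runs in a different namespace.  */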
6227 int
6228 linux_process_target::multifs_open (int pid, const char *filename,
6229 int flags, mode_t mode)
6230 {
6231 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6232 }
6233
6234 int
6235 linux_process_target::multifs_unlink (int pid, const char *filename)
6236 {
6237 return linux_mntns_unlink (pid, filename);
6238 }
6239
6240 ssize_t
6241 linux_process_target::multifs_readlink (int pid, const char *filename,
6242 char *buf, size_t bufsiz)
6243 {
6244 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6245 }
6246
6247 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6248 struct target_loadseg
6249 {
6250 /* Core address to which the segment is mapped. */
6251 Elf32_Addr addr;
6252 /* VMA recorded in the program header. */
6253 Elf32_Addr p_vaddr;
6254 /* Size of this segment in memory. */
6255 Elf32_Word p_memsz;
6256 };
6257
6258 # if defined PT_GETDSBT
6259 struct target_loadmap
6260 {
6261 /* Protocol version number, must be zero. */
6262 Elf32_Word version;
6263 /* Pointer to the DSBT table, its size, and the DSBT index. */
6264 unsigned *dsbt_table;
6265 unsigned dsbt_size, dsbt_index;
6266 /* Number of segments in this map. */
6267 Elf32_Word nsegs;
6268 /* The actual memory map. */
6269 struct target_loadseg segs[/*nsegs*/];
6270 };
6271 # define LINUX_LOADMAP PT_GETDSBT
6272 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6273 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6274 # else
6275 struct target_loadmap
6276 {
6277 /* Protocol version number, must be zero. */
6278 Elf32_Half version;
6279 /* Number of segments in this map. */
6280 Elf32_Half nsegs;
6281 /* The actual memory map. */
6282 struct target_loadseg segs[/*nsegs*/];
6283 };
6284 # define LINUX_LOADMAP PTRACE_GETFDPIC
6285 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6286 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6287 # endif
6288
6289 bool
6290 linux_process_target::supports_read_loadmap ()
6291 {
6292 return true;
6293 }
6294
6295 int
6296 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6297 unsigned char *myaddr, unsigned int len)
6298 {
6299 int pid = lwpid_of (current_thread);
6300 int addr = -1;
6301 struct target_loadmap *data = NULL;
6302 unsigned int actual_length, copy_length;
6303
6304 if (strcmp (annex, "exec") == 0)
6305 addr = (int) LINUX_LOADMAP_EXEC;
6306 else if (strcmp (annex, "interp") == 0)
6307 addr = (int) LINUX_LOADMAP_INTERP;
6308 else
6309 return -1;
6310
6311 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6312 return -1;
6313
6314 if (data == NULL)
6315 return -1;
6316
6317 actual_length = sizeof (struct target_loadmap)
6318 + sizeof (struct target_loadseg) * data->nsegs;
6319
6320 if (offset < 0 || offset > actual_length)
6321 return -1;
6322
6323 copy_length = actual_length - offset < len ? actual_length - offset : len;
6324 memcpy (myaddr, (char *) data + offset, copy_length);
6325 return copy_length;
6326 }
6327 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6328
6329 bool
6330 linux_process_target::supports_catch_syscall ()
6331 {
6332 return (low_supports_catch_syscall ()
6333 && linux_supports_tracesysgood ());
6334 }
6335
6336 bool
6337 linux_process_target::low_supports_catch_syscall ()
6338 {
6339 return false;
6340 }
6341
6342 CORE_ADDR
6343 linux_process_target::read_pc (regcache *regcache)
6344 {
6345 if (!low_supports_breakpoints ())
6346 return 0;
6347
6348 return low_get_pc (regcache);
6349 }
6350
6351 void
6352 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6353 {
6354 gdb_assert (low_supports_breakpoints ());
6355
6356 low_set_pc (regcache, pc);
6357 }
6358
6359 bool
6360 linux_process_target::supports_thread_stopped ()
6361 {
6362 return true;
6363 }
6364
6365 bool
6366 linux_process_target::thread_stopped (thread_info *thread)
6367 {
6368 return get_thread_lwp (thread)->stopped;
6369 }
6370
6371 /* This exposes stop-all-threads functionality to other modules. */
6372
6373 void
6374 linux_process_target::pause_all (bool freeze)
6375 {
6376 stop_all_lwps (freeze, NULL);
6377 }
6378
6379 /* This exposes unstop-all-threads functionality to other gdbserver
6380 modules. */
6381
6382 void
6383 linux_process_target::unpause_all (bool unfreeze)
6384 {
6385 unstop_all_lwps (unfreeze, NULL);
6386 }
6387
6388 int
6389 linux_process_target::prepare_to_access_memory ()
6390 {
6391 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6392 running LWP. */
6393 if (non_stop)
6394 target_pause_all (true);
6395 return 0;
6396 }
6397
6398 void
6399 linux_process_target::done_accessing_memory ()
6400 {
6401 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6402 running LWP. */
6403 if (non_stop)
6404 target_unpause_all (true);
6405 }
6406
6407 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6408
6409 static int
6410 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6411 CORE_ADDR *phdr_memaddr, int *num_phdr)
6412 {
6413 char filename[PATH_MAX];
6414 int fd;
6415 const int auxv_size = is_elf64
6416 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6417 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6418
6419 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6420
6421 fd = open (filename, O_RDONLY);
6422 if (fd < 0)
6423 return 1;
6424
6425 *phdr_memaddr = 0;
6426 *num_phdr = 0;
6427 while (read (fd, buf, auxv_size) == auxv_size
6428 && (*phdr_memaddr == 0 || *num_phdr == 0))
6429 {
6430 if (is_elf64)
6431 {
6432 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6433
6434 switch (aux->a_type)
6435 {
6436 case AT_PHDR:
6437 *phdr_memaddr = aux->a_un.a_val;
6438 break;
6439 case AT_PHNUM:
6440 *num_phdr = aux->a_un.a_val;
6441 break;
6442 }
6443 }
6444 else
6445 {
6446 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6447
6448 switch (aux->a_type)
6449 {
6450 case AT_PHDR:
6451 *phdr_memaddr = aux->a_un.a_val;
6452 break;
6453 case AT_PHNUM:
6454 *num_phdr = aux->a_un.a_val;
6455 break;
6456 }
6457 }
6458 }
6459
6460 close (fd);
6461
6462 if (*phdr_memaddr == 0 || *num_phdr == 0)
6463 {
6464 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6465 "phdr_memaddr = %ld, phdr_num = %d",
6466 (long) *phdr_memaddr, *num_phdr);
6467 return 2;
6468 }
6469
6470 return 0;
6471 }
6472
6473 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6474
6475 static CORE_ADDR
6476 get_dynamic (const int pid, const int is_elf64)
6477 {
6478 CORE_ADDR phdr_memaddr, relocation;
6479 int num_phdr, i;
6480 unsigned char *phdr_buf;
6481 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6482
6483 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6484 return 0;
6485
6486 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6487 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6488
6489 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6490 return 0;
6491
6492 /* Compute relocation: it is expected to be 0 for "regular" executables,
6493 non-zero for PIE ones. */
6494 relocation = -1;
6495 for (i = 0; relocation == -1 && i < num_phdr; i++)
6496 if (is_elf64)
6497 {
6498 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6499
6500 if (p->p_type == PT_PHDR)
6501 relocation = phdr_memaddr - p->p_vaddr;
6502 }
6503 else
6504 {
6505 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6506
6507 if (p->p_type == PT_PHDR)
6508 relocation = phdr_memaddr - p->p_vaddr;
6509 }
6510
6511 if (relocation == -1)
6512 {
6513 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6514 any real-world executable, including a PIE one, always has PT_PHDR
6515 present. PT_PHDR is absent from some shared libraries and from fpc
6516 (Free Pascal 2.4) binaries, but neither of those needs or provides
6517 DT_DEBUG anyway (fpc binaries are statically linked).
6518
6519 Therefore if DT_DEBUG exists, PT_PHDR always exists as well.
6520
6521 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6522
6523 return 0;
6524 }
6525
6526 for (i = 0; i < num_phdr; i++)
6527 {
6528 if (is_elf64)
6529 {
6530 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6531
6532 if (p->p_type == PT_DYNAMIC)
6533 return p->p_vaddr + relocation;
6534 }
6535 else
6536 {
6537 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6538
6539 if (p->p_type == PT_DYNAMIC)
6540 return p->p_vaddr + relocation;
6541 }
6542 }
6543
6544 return 0;
6545 }
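/* E.g. (a hypothetical PIE): if the program headers record
   p_vaddr == 0x40 for PT_PHDR but the auxv reports them mapped at
   0x555555554040, the relocation computed above is 0x555555554000,
   and that bias is added to PT_DYNAMIC's p_vaddr to find &_DYNAMIC.  */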
6546
6547 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6548 can be 0 if the inferior does not yet have the library list initialized.
6549 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6550 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6551
6552 static CORE_ADDR
6553 get_r_debug (const int pid, const int is_elf64)
6554 {
6555 CORE_ADDR dynamic_memaddr;
6556 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6557 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6558 CORE_ADDR map = -1;
6559
6560 dynamic_memaddr = get_dynamic (pid, is_elf64);
6561 if (dynamic_memaddr == 0)
6562 return map;
6563
6564 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6565 {
6566 if (is_elf64)
6567 {
6568 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6569 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6570 union
6571 {
6572 Elf64_Xword map;
6573 unsigned char buf[sizeof (Elf64_Xword)];
6574 }
6575 rld_map;
6576 #endif
6577 #ifdef DT_MIPS_RLD_MAP
6578 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6579 {
6580 if (linux_read_memory (dyn->d_un.d_val,
6581 rld_map.buf, sizeof (rld_map.buf)) == 0)
6582 return rld_map.map;
6583 else
6584 break;
6585 }
6586 #endif /* DT_MIPS_RLD_MAP */
6587 #ifdef DT_MIPS_RLD_MAP_REL
6588 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6589 {
6590 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6591 rld_map.buf, sizeof (rld_map.buf)) == 0)
6592 return rld_map.map;
6593 else
6594 break;
6595 }
6596 #endif /* DT_MIPS_RLD_MAP_REL */
6597
6598 if (dyn->d_tag == DT_DEBUG && map == -1)
6599 map = dyn->d_un.d_val;
6600
6601 if (dyn->d_tag == DT_NULL)
6602 break;
6603 }
6604 else
6605 {
6606 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6607 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6608 union
6609 {
6610 Elf32_Word map;
6611 unsigned char buf[sizeof (Elf32_Word)];
6612 }
6613 rld_map;
6614 #endif
6615 #ifdef DT_MIPS_RLD_MAP
6616 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6617 {
6618 if (linux_read_memory (dyn->d_un.d_val,
6619 rld_map.buf, sizeof (rld_map.buf)) == 0)
6620 return rld_map.map;
6621 else
6622 break;
6623 }
6624 #endif /* DT_MIPS_RLD_MAP */
6625 #ifdef DT_MIPS_RLD_MAP_REL
6626 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6627 {
6628 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6629 rld_map.buf, sizeof (rld_map.buf)) == 0)
6630 return rld_map.map;
6631 else
6632 break;
6633 }
6634 #endif /* DT_MIPS_RLD_MAP_REL */
6635
6636 if (dyn->d_tag == DT_DEBUG && map == -1)
6637 map = dyn->d_un.d_val;
6638
6639 if (dyn->d_tag == DT_NULL)
6640 break;
6641 }
6642
6643 dynamic_memaddr += dyn_size;
6644 }
6645
6646 return map;
6647 }
6648
6649 /* Read one pointer from MEMADDR in the inferior. */
6650
6651 static int
6652 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6653 {
6654 int ret;
6655
6656 /* Go through a union so this works on either big or little endian
6657 hosts, when the inferior's pointer size is smaller than the size
6658 of CORE_ADDR. It is assumed that the inferior's endianness is the
6659 same as the superior's. */
6660 union
6661 {
6662 CORE_ADDR core_addr;
6663 unsigned int ui;
6664 unsigned char uc;
6665 } addr;
6666
6667 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6668 if (ret == 0)
6669 {
6670 if (ptr_size == sizeof (CORE_ADDR))
6671 *ptr = addr.core_addr;
6672 else if (ptr_size == sizeof (unsigned int))
6673 *ptr = addr.ui;
6674 else
6675 gdb_assert_not_reached ("unhandled pointer size");
6676 }
6677 return ret;
6678 }
6679
6680 bool
6681 linux_process_target::supports_qxfer_libraries_svr4 ()
6682 {
6683 return true;
6684 }
6685
6686 struct link_map_offsets
6687 {
6688 /* Offset and size of r_debug.r_version. */
6689 int r_version_offset;
6690
6691 /* Offset and size of r_debug.r_map. */
6692 int r_map_offset;
6693
6694 /* Offset to l_addr field in struct link_map. */
6695 int l_addr_offset;
6696
6697 /* Offset to l_name field in struct link_map. */
6698 int l_name_offset;
6699
6700 /* Offset to l_ld field in struct link_map. */
6701 int l_ld_offset;
6702
6703 /* Offset to l_next field in struct link_map. */
6704 int l_next_offset;
6705
6706 /* Offset to l_prev field in struct link_map. */
6707 int l_prev_offset;
6708 };
6709
6710 /* Construct qXfer:libraries-svr4:read reply. */
6711
6712 int
6713 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6714 unsigned char *readbuf,
6715 unsigned const char *writebuf,
6716 CORE_ADDR offset, int len)
6717 {
6718 struct process_info_private *const priv = current_process ()->priv;
6719 char filename[PATH_MAX];
6720 int pid, is_elf64;
6721
6722 static const struct link_map_offsets lmo_32bit_offsets =
6723 {
6724 0, /* r_version offset. */
6725 4, /* r_debug.r_map offset. */
6726 0, /* l_addr offset in link_map. */
6727 4, /* l_name offset in link_map. */
6728 8, /* l_ld offset in link_map. */
6729 12, /* l_next offset in link_map. */
6730 16 /* l_prev offset in link_map. */
6731 };
6732
6733 static const struct link_map_offsets lmo_64bit_offsets =
6734 {
6735 0, /* r_version offset. */
6736 8, /* r_debug.r_map offset. */
6737 0, /* l_addr offset in link_map. */
6738 8, /* l_name offset in link_map. */
6739 16, /* l_ld offset in link_map. */
6740 24, /* l_next offset in link_map. */
6741 32 /* l_prev offset in link_map. */
6742 };
6743 const struct link_map_offsets *lmo;
6744 unsigned int machine;
6745 int ptr_size;
6746 CORE_ADDR lm_addr = 0, lm_prev = 0;
6747 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6748 int header_done = 0;
6749
6750 if (writebuf != NULL)
6751 return -2;
6752 if (readbuf == NULL)
6753 return -1;
6754
6755 pid = lwpid_of (current_thread);
6756 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6757 is_elf64 = elf_64_file_p (filename, &machine);
6758 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6759 ptr_size = is_elf64 ? 8 : 4;
6760
6761 while (annex[0] != '\0')
6762 {
6763 const char *sep;
6764 CORE_ADDR *addrp;
6765 int name_len;
6766
6767 sep = strchr (annex, '=');
6768 if (sep == NULL)
6769 break;
6770
6771 name_len = sep - annex;
6772 if (name_len == 5 && startswith (annex, "start"))
6773 addrp = &lm_addr;
6774 else if (name_len == 4 && startswith (annex, "prev"))
6775 addrp = &lm_prev;
6776 else
6777 {
6778 annex = strchr (sep, ';');
6779 if (annex == NULL)
6780 break;
6781 annex++;
6782 continue;
6783 }
6784
6785 annex = decode_address_to_semicolon (addrp, sep + 1);
6786 }
6787
6788 if (lm_addr == 0)
6789 {
6790 int r_version = 0;
6791
6792 if (priv->r_debug == 0)
6793 priv->r_debug = get_r_debug (pid, is_elf64);
6794
6795 /* We failed to find DT_DEBUG. This situation will not change
6796 for this inferior, so do not retry it. Report it to GDB as
6797 E01; see GDB's solib-svr4.c for the reasons. */
6798 if (priv->r_debug == (CORE_ADDR) -1)
6799 return -1;
6800
6801 if (priv->r_debug != 0)
6802 {
6803 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6804 (unsigned char *) &r_version,
6805 sizeof (r_version)) != 0
6806 || r_version < 1)
6807 {
6808 warning ("unexpected r_debug version %d", r_version);
6809 }
6810 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6811 &lm_addr, ptr_size) != 0)
6812 {
6813 warning ("unable to read r_map from 0x%lx",
6814 (long) priv->r_debug + lmo->r_map_offset);
6815 }
6816 }
6817 }
6818
6819 std::string document = "<library-list-svr4 version=\"1.0\"";
6820
6821 while (lm_addr
6822 && read_one_ptr (lm_addr + lmo->l_name_offset,
6823 &l_name, ptr_size) == 0
6824 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6825 &l_addr, ptr_size) == 0
6826 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6827 &l_ld, ptr_size) == 0
6828 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6829 &l_prev, ptr_size) == 0
6830 && read_one_ptr (lm_addr + lmo->l_next_offset,
6831 &l_next, ptr_size) == 0)
6832 {
6833 unsigned char libname[PATH_MAX];
6834
6835 if (lm_prev != l_prev)
6836 {
6837 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6838 (long) lm_prev, (long) l_prev);
6839 break;
6840 }
6841
6842 /* Ignore the first entry even if it has a valid name, as that entry
6843 corresponds to the main executable. The first entry should not be
6844 skipped if the dynamic loader was loaded late by a static executable
6845 (see the solib-svr4.c parameter ignore_first). But in that case the
6846 main executable does not have PT_DYNAMIC present, and this function
6847 has already exited above due to a failed get_r_debug. */
6848 if (lm_prev == 0)
6849 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6850 else
6851 {
6852 /* Not checking for error because reading may stop before
6853 we've got PATH_MAX worth of characters. */
6854 libname[0] = '\0';
6855 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6856 libname[sizeof (libname) - 1] = '\0';
6857 if (libname[0] != '\0')
6858 {
6859 if (!header_done)
6860 {
6861 /* Terminate `<library-list-svr4'. */
6862 document += '>';
6863 header_done = 1;
6864 }
6865
6866 string_appendf (document, "<library name=\"");
6867 xml_escape_text_append (&document, (char *) libname);
6868 string_appendf (document, "\" lm=\"0x%lx\" "
6869 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6870 (unsigned long) lm_addr, (unsigned long) l_addr,
6871 (unsigned long) l_ld);
6872 }
6873 }
6874
6875 lm_prev = lm_addr;
6876 lm_addr = l_next;
6877 }
6878
6879 if (!header_done)
6880 {
6881 /* Empty list; terminate `<library-list-svr4'. */
6882 document += "/>";
6883 }
6884 else
6885 document += "</library-list-svr4>";
6886
6887 int document_len = document.length ();
6888 if (offset < document_len)
6889 document_len -= offset;
6890 else
6891 document_len = 0;
6892 if (len > document_len)
6893 len = document_len;
6894
6895 memcpy (readbuf, document.data () + offset, len);
6896
6897 return len;
6898 }
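/* A reply built above might look like (values hypothetical):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
   l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdb80"/>
   </library-list-svr4>  */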
6899
6900 #ifdef HAVE_LINUX_BTRACE
6901
6902 btrace_target_info *
6903 linux_process_target::enable_btrace (ptid_t ptid,
6904 const btrace_config *conf)
6905 {
6906 return linux_enable_btrace (ptid, conf);
6907 }
6908
6909 /* See to_disable_btrace target method. */
6910
6911 int
6912 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6913 {
6914 enum btrace_error err;
6915
6916 err = linux_disable_btrace (tinfo);
6917 return (err == BTRACE_ERR_NONE ? 0 : -1);
6918 }
6919
6920 /* Encode an Intel Processor Trace configuration. */
6921
6922 static void
6923 linux_low_encode_pt_config (struct buffer *buffer,
6924 const struct btrace_data_pt_config *config)
6925 {
6926 buffer_grow_str (buffer, "<pt-config>\n");
6927
6928 switch (config->cpu.vendor)
6929 {
6930 case CV_INTEL:
6931 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6932 "model=\"%u\" stepping=\"%u\"/>\n",
6933 config->cpu.family, config->cpu.model,
6934 config->cpu.stepping);
6935 break;
6936
6937 default:
6938 break;
6939 }
6940
6941 buffer_grow_str (buffer, "</pt-config>\n");
6942 }
6943
6944 /* Encode a raw buffer. */
6945
6946 static void
6947 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6948 unsigned int size)
6949 {
6950 if (size == 0)
6951 return;
6952
6953 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6954 buffer_grow_str (buffer, "<raw>\n");
6955
6956 while (size-- > 0)
6957 {
6958 char elem[2];
6959
6960 elem[0] = tohex ((*data >> 4) & 0xf);
6961 elem[1] = tohex (*data++ & 0xf);
6962
6963 buffer_grow (buffer, elem, 2);
6964 }
6965
6966 buffer_grow_str (buffer, "</raw>\n");
6967 }
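/* E.g. the two raw bytes { 0x0f, 0xa2 } are emitted as the four
   characters "0fa2" between the <raw> tags.  */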
6968
6969 /* See to_read_btrace target method. */
6970
6971 int
6972 linux_process_target::read_btrace (btrace_target_info *tinfo,
6973 buffer *buffer,
6974 enum btrace_read_type type)
6975 {
6976 struct btrace_data btrace;
6977 enum btrace_error err;
6978
6979 err = linux_read_btrace (&btrace, tinfo, type);
6980 if (err != BTRACE_ERR_NONE)
6981 {
6982 if (err == BTRACE_ERR_OVERFLOW)
6983 buffer_grow_str0 (buffer, "E.Overflow.");
6984 else
6985 buffer_grow_str0 (buffer, "E.Generic Error.");
6986
6987 return -1;
6988 }
6989
6990 switch (btrace.format)
6991 {
6992 case BTRACE_FORMAT_NONE:
6993 buffer_grow_str0 (buffer, "E.No Trace.");
6994 return -1;
6995
6996 case BTRACE_FORMAT_BTS:
6997 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6998 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6999
7000 for (const btrace_block &block : *btrace.variant.bts.blocks)
7001 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7002 paddress (block.begin), paddress (block.end));
7003
7004 buffer_grow_str0 (buffer, "</btrace>\n");
7005 break;
7006
7007 case BTRACE_FORMAT_PT:
7008 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7009 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7010 buffer_grow_str (buffer, "<pt>\n");
7011
7012 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7013
7014 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7015 btrace.variant.pt.size);
7016
7017 buffer_grow_str (buffer, "</pt>\n");
7018 buffer_grow_str0 (buffer, "</btrace>\n");
7019 break;
7020
7021 default:
7022 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7023 return -1;
7024 }
7025
7026 return 0;
7027 }
7028
7029 /* See to_btrace_conf target method. */
7030
7031 int
7032 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7033 buffer *buffer)
7034 {
7035 const struct btrace_config *conf;
7036
7037 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7038 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7039
7040 conf = linux_btrace_conf (tinfo);
7041 if (conf != NULL)
7042 {
7043 switch (conf->format)
7044 {
7045 case BTRACE_FORMAT_NONE:
7046 break;
7047
7048 case BTRACE_FORMAT_BTS:
7049 buffer_xml_printf (buffer, "<bts");
7050 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7051 buffer_xml_printf (buffer, " />\n");
7052 break;
7053
7054 case BTRACE_FORMAT_PT:
7055 buffer_xml_printf (buffer, "<pt");
7056 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7057 buffer_xml_printf (buffer, "/>\n");
7058 break;
7059 }
7060 }
7061
7062 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7063 return 0;
7064 }
7065 #endif /* HAVE_LINUX_BTRACE */
7066
7067 /* See nat/linux-nat.h. */
7068
7069 ptid_t
7070 current_lwp_ptid (void)
7071 {
7072 return ptid_of (current_thread);
7073 }
7074
7075 const char *
7076 linux_process_target::thread_name (ptid_t thread)
7077 {
7078 return linux_proc_tid_get_name (thread);
7079 }
7080
7081 #if USE_THREAD_DB
7082 bool
7083 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7084 int *handle_len)
7085 {
7086 return thread_db_thread_handle (ptid, handle, handle_len);
7087 }
7088 #endif
7089
7090 thread_info *
7091 linux_process_target::thread_pending_parent (thread_info *thread)
7092 {
7093 lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
7094
7095 if (parent == nullptr)
7096 return nullptr;
7097
7098 return get_lwp_thread (parent);
7099 }
7100
7101 thread_info *
7102 linux_process_target::thread_pending_child (thread_info *thread)
7103 {
7104 lwp_info *child = get_thread_lwp (thread)->pending_child ();
7105
7106 if (child == nullptr)
7107 return nullptr;
7108
7109 return get_lwp_thread (child);
7110 }
7111
7112 /* Default implementation of linux_target_ops method "set_pc" for a
7113 32-bit pc register that is literally named "pc". */
7114
7115 void
7116 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7117 {
7118 uint32_t newpc = pc;
7119
7120 supply_register_by_name (regcache, "pc", &newpc);
7121 }
7122
7123 /* Default implementation of linux_target_ops method "get_pc" for a
7124 32-bit pc register that is literally named "pc". */
7125
7126 CORE_ADDR
7127 linux_get_pc_32bit (struct regcache *regcache)
7128 {
7129 uint32_t pc;
7130
7131 collect_register_by_name (regcache, "pc", &pc);
7132 if (debug_threads)
7133 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7134 return pc;
7135 }
7136
7137 /* Default implementation of linux_target_ops method "set_pc" for a
7138 64-bit pc register that is literally named "pc". */
7139
7140 void
7141 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7142 {
7143 uint64_t newpc = pc;
7144
7145 supply_register_by_name (regcache, "pc", &newpc);
7146 }
7147
7148 /* Default implementation of linux_target_ops method "get_pc" for a
7149 64-bit pc register that is literally named "pc". */
7150
7151 CORE_ADDR
7152 linux_get_pc_64bit (struct regcache *regcache)
7153 {
7154 uint64_t pc;
7155
7156 collect_register_by_name (regcache, "pc", &pc);
7157 if (debug_threads)
7158 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7159 return pc;
7160 }
7161
7162 /* See linux-low.h. */
7163
7164 int
7165 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7166 {
7167 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7168 int offset = 0;
7169
7170 gdb_assert (wordsize == 4 || wordsize == 8);
7171
7172 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7173 {
7174 if (wordsize == 4)
7175 {
7176 uint32_t *data_p = (uint32_t *) data;
7177 if (data_p[0] == match)
7178 {
7179 *valp = data_p[1];
7180 return 1;
7181 }
7182 }
7183 else
7184 {
7185 uint64_t *data_p = (uint64_t *) data;
7186 if (data_p[0] == match)
7187 {
7188 *valp = data_p[1];
7189 return 1;
7190 }
7191 }
7192
7193 offset += 2 * wordsize;
7194 }
7195
7196 return 0;
7197 }
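/* E.g. with wordsize == 8, each iteration above reads one 16-byte
   (a_type, a_val) pair; a match on AT_HWCAP returns the accompanying
   value in *VALP.  */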
7198
7199 /* See linux-low.h. */
7200
7201 CORE_ADDR
7202 linux_get_hwcap (int wordsize)
7203 {
7204 CORE_ADDR hwcap = 0;
7205 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7206 return hwcap;
7207 }
7208
7209 /* See linux-low.h. */
7210
7211 CORE_ADDR
7212 linux_get_hwcap2 (int wordsize)
7213 {
7214 CORE_ADDR hwcap2 = 0;
7215 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7216 return hwcap2;
7217 }
7218
7219 #ifdef HAVE_LINUX_REGSETS
7220 void
7221 initialize_regsets_info (struct regsets_info *info)
7222 {
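/* Count the regsets, relying on the sentinel entry with size < 0
   that terminates the REGSETS array.  */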
7223 for (info->num_regsets = 0;
7224 info->regsets[info->num_regsets].size >= 0;
7225 info->num_regsets++)
7226 ;
7227 }
7228 #endif
7229
7230 void
7231 initialize_low (void)
7232 {
7233 struct sigaction sigchld_action;
7234
7235 memset (&sigchld_action, 0, sizeof (sigchld_action));
7236 set_target_ops (the_linux_target);
7237
7238 linux_ptrace_init_warnings ();
7239 linux_proc_init_warnings ();
7240
7241 sigchld_action.sa_handler = sigchld_handler;
7242 sigemptyset (&sigchld_action.sa_mask);
7243 sigchld_action.sa_flags = SA_RESTART;
7244 sigaction (SIGCHLD, &sigchld_action, NULL);
7245
7246 initialize_low_arch ();
7247
7248 linux_check_ptrace_features ();
7249 }