/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
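
/* Illustrative sketch (added; not part of the original file): the
   intended pairing of the two helpers above.  When a stop arrives for
   a PID we don't know yet, it is recorded; later, when the fork/clone
   event for that PID shows up, handle_extended_wait pulls the saved
   status back out instead of calling waitpid a second time:

     int status;
     add_to_pid_list (&stopped_pids, new_pid, wstat);
     ...
     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... the initial stop was already collected ...  */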

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
                          "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF file at all.  Set
   *MACHINE to the e_machine field, or to EM_NONE for a non-ELF
   file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}

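/* Handle a GNU/Linux extended wait response.  Comment added for
   clarity; the protocol is evident from the returns below: on a
   clone, fork or vfork event the new LWP is added to our list and 1
   is returned so the trap is not reported to higher layers; on an
   exec event, *ORIG_EVENT_LWP is repointed at a new LWP representing
   the new program and 0 is returned so the event is reported.  */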
int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
                                            int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;

          ptid = ptid_t (new_pid, new_pid);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_of (event_thr).lwp (),
                            ptid.pid ());
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = add_linux_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.set_stopped (GDB_SIGNAL_0);

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, child will
                 hit it, so uninsert single-step breakpoints from parent
                 (and child).  Once vfork child is done, reinsert
                 them back to parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          target_desc_up tdesc = allocate_target_description ();
          copy_target_description (tdesc.get (), parent_proc->tdesc);
          child_proc->tdesc = tdesc.release ();

          /* Clone arch-specific process data.  */
          low_new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.set_forked (ptid);
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.set_vforked (ptid);

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints are cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && supports_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.set_thread_created ();
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.set_vfork_done ();

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.set_execd
        (make_unique_xstrdup
         (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.set_ignore ();

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)).c_str ());
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          low_set_pc (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)).c_str ());
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)).c_str ());
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)).c_str ());
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  lwp_info *lwp = new lwp_info;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}
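
/* Note (added; a sketch of standard ptrace semantics, not original to
   this file): because the child calls PTRACE_TRACEME above, the
   kernel stops it with a SIGTRAP when it subsequently execs the
   program, and that stop is what the parent later collects with
   waitpid.  fork_inferior runs this callback in the child between
   fork and exec, conceptually:

     pid = fork ();
     if (pid == 0)
       {
         linux_ptrace_fun ();    // PTRACE_TRACEME, setpgid, stdio setup
         execv (program, argv);  // stops with SIGTRAP under ptrace
       }  */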

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
                                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an LWP.  Returns 0 on success, or an errno value on
   failure (comment added; the contract is evident from the callers,
   which treat a non-zero result as the errno from PTRACE_ATTACH).  */

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns 1 if a new LWP was found, 0
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, safe_strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

/* Return true if the process with pid PID has no thread other than
   its leader (comment added; see the note below the function).  */

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}
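
/* Illustrative note (added): the lambda above relies on find_thread's
   early-exit contract -- iteration stops at the first thread for
   which the callback returns true.  Returning true only on the
   second PID match means find_thread yields non-NULL exactly when a
   second thread exists, so a NULL result means the thread we have is
   the last one of the process.  */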

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)).c_str (),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)).c_str (),
                    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}
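
/* Background note (added; standard waitpid semantics, not original to
   this file): a plain waitpid only reaps children that report via
   SIGCHLD.  An LWP created by raw clone with a different exit signal
   makes waitpid fail with ECHILD and must be reaped with __WCLONE,
   hence the two-step fallback above:

     res = my_waitpid (lwpid, &wstat, 0);           // fork-style child?
     if (res == -1 && errno == ECHILD)
       res = my_waitpid (lwpid, &wstat, __WCLONE);  // clone-style child  */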

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id).c_str ());
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
          || thread->last_status.sig () == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)).c_str ());
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)).c_str (),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)).c_str (),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)).c_str (),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
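
/* Usage note (added): the host signal number returned above is passed
   straight to the kernel as the final argument of the detach request
   in detach_one_lwp below, so the signal is delivered as the tracee
   resumes:

     sig = get_detach_signal (thread);
     ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (long) sig);  */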

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)).c_str ());

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, safe_strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)).c_str (),
                 safe_strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)).c_str (),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
         If the thread group exits, we must reap the zombie clone lwps
         before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
        return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

/* Wait until process PID is fully reaped: keep collecting wait
   statuses until we see it exit (or get killed by a signal), or until
   waitpid reports there are no children left to wait for (comment
   added; this restates what the loop below does).  */

void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !low_breakpoint_at (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
                                                 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

/* Find the LWP identified by PTID.  If PTID's lwp field is zero, the
   pid field is used as the lwp id instead (comment added; the
   fallback is implemented in the lambda below).  */

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
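
/* Example (added): callers that only have a process id can look up
   the leader lwp by passing a ptid with no lwp component, as kill and
   detach above do:

     lwp_info *leader = find_lwp_pid (ptid_t (pid));

   ptid_t (pid) leaves the lwp field zero, so the lookup falls back to
   matching the pid against each thread's lwp id.  */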

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (ptid_t (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL && !leader_lp->stopped
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving place to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            debug_printf ("CZL: Thread group leader %d zombie "
                          "(it exited, or another thread execd).\n",
                          leader_pid);

          delete_lwp (leader_lp);
        }
    });
}

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
                    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
                      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
                      lwp->suspended);
    }
}
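
/* Note (added): the suspend count is a nesting counter, so every
   lwp_suspended_inc must be paired with a lwp_suspended_decr once the
   reason for the suspension is gone.  handle_tracepoints below, for
   example, brackets its tracepoint processing with exactly such a
   pair so that any all-threads unpausing can't resume the LWP
   mid-collect.  */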
1897
1898 /* This function should only be called if the LWP got a SIGTRAP.
1899
1900 Handle any tracepoint steps or hits. Return true if a tracepoint
1901 event was handled, 0 otherwise. */
1902
1903 static int
1904 handle_tracepoints (struct lwp_info *lwp)
1905 {
1906 struct thread_info *tinfo = get_lwp_thread (lwp);
1907 int tpoint_related_event = 0;
1908
1909 gdb_assert (lwp->suspended == 0);
1910
1911 /* If this tracepoint hit causes a tracing stop, we'll immediately
1912 uninsert tracepoints. To do this, we temporarily pause all
1913 threads, unpatch away, and then unpause the threads. We need to make
1914 sure the unpausing doesn't resume LWP too. */
1915 lwp_suspended_inc (lwp);
1916
1917 /* And we need to be sure that any all-threads-stopping doesn't try
1918 to move threads out of the jump pads, as it could deadlock the
1919 inferior (LWP could be in the jump pad, maybe even holding the
1920 lock). */
1921
1922 /* Do any necessary step collect actions. */
1923 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1924
1925 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1926
1927 /* See if we just hit a tracepoint and do its main collect
1928 actions. */
1929 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1930
1931 lwp_suspended_decr (lwp);
1932
1933 gdb_assert (lwp->suspended == 0);
1934 gdb_assert (!stabilizing_threads
1935 || (lwp->collecting_fast_tracepoint
1936 != fast_tpoint_collect_result::not_collecting));
1937
1938 if (tpoint_related_event)
1939 {
1940 if (debug_threads)
1941 debug_printf ("got a tracepoint event\n");
1942 return 1;
1943 }
1944
1945 return 0;
1946 }
1947
1948 fast_tpoint_collect_result
1949 linux_process_target::linux_fast_tracepoint_collecting
1950 (lwp_info *lwp, fast_tpoint_collect_status *status)
1951 {
1952 CORE_ADDR thread_area;
1953 struct thread_info *thread = get_lwp_thread (lwp);
1954
1955 /* Get the thread area address. This is used to recognize which
1956 thread is which when tracing with the in-process agent library.
1957 We don't read anything from the address, and treat it as opaque;
1958 it's the address itself that we assume is unique per-thread. */
1959 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1960 return fast_tpoint_collect_result::not_collecting;
1961
1962 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1963 }
1964
1965 int
1966 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1967 {
1968 return -1;
1969 }
1970
1971 bool
1972 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1973 {
1974 struct thread_info *saved_thread;
1975
1976 saved_thread = current_thread;
1977 current_thread = get_lwp_thread (lwp);
1978
1979 if ((wstat == NULL
1980 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1981 && supports_fast_tracepoints ()
1982 && agent_loaded_p ())
1983 {
1984 struct fast_tpoint_collect_status status;
1985
1986 if (debug_threads)
1987 debug_printf ("Checking whether LWP %ld needs to move out of the "
1988 "jump pad.\n",
1989 lwpid_of (current_thread));
1990
1991 fast_tpoint_collect_result r
1992 = linux_fast_tracepoint_collecting (lwp, &status);
1993
1994 if (wstat == NULL
1995 || (WSTOPSIG (*wstat) != SIGILL
1996 && WSTOPSIG (*wstat) != SIGFPE
1997 && WSTOPSIG (*wstat) != SIGSEGV
1998 && WSTOPSIG (*wstat) != SIGBUS))
1999 {
2000 lwp->collecting_fast_tracepoint = r;
2001
2002 if (r != fast_tpoint_collect_result::not_collecting)
2003 {
2004 if (r == fast_tpoint_collect_result::before_insn
2005 && lwp->exit_jump_pad_bkpt == NULL)
2006 {
2007 /* Haven't executed the original instruction yet.
2008 Set a breakpoint there, and wait till it's hit,
2009 then single-step until exiting the jump pad. */
2010 lwp->exit_jump_pad_bkpt
2011 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2012 }
2013
2014 if (debug_threads)
2015 debug_printf ("Checking whether LWP %ld needs to move out of "
2016 "the jump pad...it does\n",
2017 lwpid_of (current_thread));
2018 current_thread = saved_thread;
2019
2020 return true;
2021 }
2022 }
2023 else
2024 {
2025 /* If we get a synchronous signal while collecting, *and*
2026 while executing the (relocated) original instruction,
2027 reset the PC to point at the tpoint address, before
2028 reporting to GDB. Otherwise, it's an IPA lib bug: just
2029 report the signal to GDB, and pray for the best. */
2030
2031 lwp->collecting_fast_tracepoint
2032 = fast_tpoint_collect_result::not_collecting;
2033
2034 if (r != fast_tpoint_collect_result::not_collecting
2035 && (status.adjusted_insn_addr <= lwp->stop_pc
2036 && lwp->stop_pc < status.adjusted_insn_addr_end))
2037 {
2038 siginfo_t info;
2039 struct regcache *regcache;
2040
2041 /* The si_addr on a few signals references the address
2042 of the faulting instruction. Adjust that as
2043 well. */
2044 if ((WSTOPSIG (*wstat) == SIGILL
2045 || WSTOPSIG (*wstat) == SIGFPE
2046 || WSTOPSIG (*wstat) == SIGBUS
2047 || WSTOPSIG (*wstat) == SIGSEGV)
2048 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2049 (PTRACE_TYPE_ARG3) 0, &info) == 0
2050 /* Final check just to make sure we don't clobber
2051 the siginfo of non-kernel-sent signals. */
2052 && (uintptr_t) info.si_addr == lwp->stop_pc)
2053 {
2054 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2055 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2056 (PTRACE_TYPE_ARG3) 0, &info);
2057 }
2058
2059 regcache = get_thread_regcache (current_thread, 1);
2060 low_set_pc (regcache, status.tpoint_addr);
2061 lwp->stop_pc = status.tpoint_addr;
2062
2063 /* Cancel any fast tracepoint lock this thread was
2064 holding. */
2065 force_unlock_trace_buffer ();
2066 }
2067
2068 if (lwp->exit_jump_pad_bkpt != NULL)
2069 {
2070 if (debug_threads)
2071 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2072 "stopping all threads momentarily.\n");
2073
2074 stop_all_lwps (1, lwp);
2075
2076 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2077 lwp->exit_jump_pad_bkpt = NULL;
2078
2079 unstop_all_lwps (1, lwp);
2080
2081 gdb_assert (lwp->suspended >= 0);
2082 }
2083 }
2084 }
2085
2086 if (debug_threads)
2087 debug_printf ("Checking whether LWP %ld needs to move out of the "
2088 "jump pad...no\n",
2089 lwpid_of (current_thread));
2090
2091 current_thread = saved_thread;
2092 return false;
2093 }
2094
2095 /* Enqueue one signal in the "signals to report later when out of the
2096 jump pad" list. */
2097
2098 static void
2099 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2100 {
2101 struct thread_info *thread = get_lwp_thread (lwp);
2102
2103 if (debug_threads)
2104 debug_printf ("Deferring signal %d for LWP %ld.\n",
2105 WSTOPSIG (*wstat), lwpid_of (thread));
2106
2107 if (debug_threads)
2108 {
2109 for (const auto &sig : lwp->pending_signals_to_report)
2110 debug_printf (" Already queued %d\n",
2111 sig.signal);
2112
2113 debug_printf (" (no more currently queued signals)\n");
2114 }
2115
2116 /* Don't enqueue non-RT signals if they are already in the deferred
2117 queue. (SIGSTOP being the easiest signal to see ending up here
2118 twice.) */
2119 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2120 {
2121 for (const auto &sig : lwp->pending_signals_to_report)
2122 {
2123 if (sig.signal == WSTOPSIG (*wstat))
2124 {
2125 if (debug_threads)
2126 debug_printf ("Not requeuing already queued non-RT signal %d"
2127 " for LWP %ld\n",
2128 sig.signal,
2129 lwpid_of (thread));
2130 return;
2131 }
2132 }
2133 }
2134
2135 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2136
2137 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2138 &lwp->pending_signals_to_report.back ().info);
2139 }
2140
2141 /* Dequeue one signal from the "signals to report later when out of
2142 the jump pad" list. */
2143
2144 static int
2145 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2146 {
2147 struct thread_info *thread = get_lwp_thread (lwp);
2148
2149 if (!lwp->pending_signals_to_report.empty ())
2150 {
2151 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2152
2153 *wstat = W_STOPCODE (p_sig.signal);
2154 if (p_sig.info.si_signo != 0)
2155 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2156 &p_sig.info);
2157
2158 lwp->pending_signals_to_report.pop_front ();
2159
2160 if (debug_threads)
2161 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2162 WSTOPSIG (*wstat), lwpid_of (thread));
2163
2164 if (debug_threads)
2165 {
2166 for (const auto &sig : lwp->pending_signals_to_report)
2167 debug_printf (" Still queued %d\n",
2168 sig.signal);
2169
2170 debug_printf (" (no more queued signals)\n");
2171 }
2172
2173 return 1;
2174 }
2175
2176 return 0;
2177 }
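/* Editorial sketch (assumes the glibc-style wait status encoding, or
   the W_STOPCODE fallback from gdbsupport/gdb_wait.h): the dequeued
   signal is re-encoded with W_STOPCODE so that the standard wait
   macros decode it back, which is what the rest of the event loop
   expects.  */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <assert.h>

static void
example_stopcode_roundtrip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  /* The standard macros recover the stop signal.  */
  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
}
#endif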
2178
2179 bool
2180 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2181 {
2182 struct thread_info *saved_thread = current_thread;
2183 current_thread = get_lwp_thread (child);
2184
2185 if (low_stopped_by_watchpoint ())
2186 {
2187 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2188 child->stopped_data_address = low_stopped_data_address ();
2189 }
2190
2191 current_thread = saved_thread;
2192
2193 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2194 }
2195
2196 bool
2197 linux_process_target::low_stopped_by_watchpoint ()
2198 {
2199 return false;
2200 }
2201
2202 CORE_ADDR
2203 linux_process_target::low_stopped_data_address ()
2204 {
2205 return 0;
2206 }
2207
2208 /* Return the ptrace options that we want to try to enable. */
2209
2210 static int
2211 linux_low_ptrace_options (int attached)
2212 {
2213 client_state &cs = get_client_state ();
2214 int options = 0;
2215
2216 if (!attached)
2217 options |= PTRACE_O_EXITKILL;
2218
2219 if (cs.report_fork_events)
2220 options |= PTRACE_O_TRACEFORK;
2221
2222 if (cs.report_vfork_events)
2223 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2224
2225 if (cs.report_exec_events)
2226 options |= PTRACE_O_TRACEEXEC;
2227
2228 options |= PTRACE_O_TRACESYSGOOD;
2229
2230 return options;
2231 }
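/* Editorial sketch of how the options computed above are typically
   installed on a stopped tracee; linux_enable_event_reporting in
   nat/linux-ptrace.c does this for real, including feature probing
   for older kernels.  Hypothetical, guarded out of the build.  */
#if 0
#include <sys/ptrace.h>

static void
example_set_ptrace_options (int lwpid, int options)
{
  /* The tracee must be in a ptrace-stop, or this fails with ESRCH.  */
  ptrace (PTRACE_SETOPTIONS, lwpid, (void *) 0, (void *) (long) options);
}
#endif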
2232
2233 void
2234 linux_process_target::filter_event (int lwpid, int wstat)
2235 {
2236 client_state &cs = get_client_state ();
2237 struct lwp_info *child;
2238 struct thread_info *thread;
2239 int have_stop_pc = 0;
2240
2241 child = find_lwp_pid (ptid_t (lwpid));
2242
2243 /* Check for stop events reported by a process we didn't already
2244 know about - anything not already in our LWP list.
2245
2246 If we're expecting to receive stopped processes after
2247 fork, vfork, and clone events, then we'll just add the
2248 new one to our list and go back to waiting for the event
2249 to be reported - the stopped process might be returned
2250 from waitpid before or after the event is.
2251
2252 But note the case of a non-leader thread exec'ing after the
2253 leader has exited and gone from our lists (because
2254 check_zombie_leaders deleted it). The non-leader thread
2255 changes its tid to the tgid. */
2256
2257 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2258 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2259 {
2260 ptid_t child_ptid;
2261
2262 /* A multi-thread exec after we had seen the leader exiting. */
2263 if (debug_threads)
2264 {
2265 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2266 "after exec.\n", lwpid);
2267 }
2268
2269 child_ptid = ptid_t (lwpid, lwpid);
2270 child = add_lwp (child_ptid);
2271 child->stopped = 1;
2272 current_thread = child->thread;
2273 }
2274
2275 /* If we didn't find a process, one of two things presumably happened:
2276 - A process we started and then detached from has exited. Ignore it.
2277 - A process we are controlling has forked and the new child's stop
2278 was reported to us by the kernel. Save its PID. */
2279 if (child == NULL && WIFSTOPPED (wstat))
2280 {
2281 add_to_pid_list (&stopped_pids, lwpid, wstat);
2282 return;
2283 }
2284 else if (child == NULL)
2285 return;
2286
2287 thread = get_lwp_thread (child);
2288
2289 child->stopped = 1;
2290
2291 child->last_status = wstat;
2292
2293 /* Check if the thread has exited. */
2294 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2295 {
2296 if (debug_threads)
2297 debug_printf ("LLFE: %d exited.\n", lwpid);
2298
2299 if (finish_step_over (child))
2300 {
2301 /* Unsuspend all other LWPs, and set them back running again. */
2302 unsuspend_all_lwps (child);
2303 }
2304
2305 /* If there is at least one more LWP, then the exit signal was
2306 not the end of the debugged application and should be
2307 ignored, unless GDB wants to hear about thread exits. */
2308 if (cs.report_thread_events
2309 || last_thread_of_process_p (pid_of (thread)))
2310 {
2311 /* Since events are serialized to the GDB core, we can't
2312 report this one right now. Leave the status pending for
2313 the next time we're able to report it. */
2314 mark_lwp_dead (child, wstat);
2315 return;
2316 }
2317 else
2318 {
2319 delete_lwp (child);
2320 return;
2321 }
2322 }
2323
2324 gdb_assert (WIFSTOPPED (wstat));
2325
2326 if (WIFSTOPPED (wstat))
2327 {
2328 struct process_info *proc;
2329
2330 /* Architecture-specific setup after the inferior is running. */
2331 proc = find_process_pid (pid_of (thread));
2332 if (proc->tdesc == NULL)
2333 {
2334 if (proc->attached)
2335 {
2336 /* This needs to happen after we have attached to the
2337 inferior and it is stopped for the first time, but
2338 before we access any inferior registers. */
2339 arch_setup_thread (thread);
2340 }
2341 else
2342 {
2343 /* The process is started, but GDBserver will do
2344 architecture-specific setup after the program stops at
2345 the first instruction. */
2346 child->status_pending_p = 1;
2347 child->status_pending = wstat;
2348 return;
2349 }
2350 }
2351 }
2352
2353 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2354 {
2355 struct process_info *proc = find_process_pid (pid_of (thread));
2356 int options = linux_low_ptrace_options (proc->attached);
2357
2358 linux_enable_event_reporting (lwpid, options);
2359 child->must_set_ptrace_flags = 0;
2360 }
2361
2362 /* Always update syscall_state, even if it will be filtered later. */
2363 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2364 {
2365 child->syscall_state
2366 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2367 ? TARGET_WAITKIND_SYSCALL_RETURN
2368 : TARGET_WAITKIND_SYSCALL_ENTRY);
2369 }
2370 else
2371 {
2372 /* Almost all other ptrace-stops are known to be outside of system
2373 calls, with further exceptions in handle_extended_wait. */
2374 child->syscall_state = TARGET_WAITKIND_IGNORE;
2375 }
2376
2377 /* Be careful to not overwrite stop_pc until save_stop_reason is
2378 called. */
2379 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2380 && linux_is_extended_waitstatus (wstat))
2381 {
2382 child->stop_pc = get_pc (child);
2383 if (handle_extended_wait (&child, wstat))
2384 {
2385 /* The event has been handled, so just return without
2386 reporting it. */
2387 return;
2388 }
2389 }
2390
2391 if (linux_wstatus_maybe_breakpoint (wstat))
2392 {
2393 if (save_stop_reason (child))
2394 have_stop_pc = 1;
2395 }
2396
2397 if (!have_stop_pc)
2398 child->stop_pc = get_pc (child);
2399
2400 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2401 && child->stop_expected)
2402 {
2403 if (debug_threads)
2404 debug_printf ("Expected stop.\n");
2405 child->stop_expected = 0;
2406
2407 if (thread->last_resume_kind == resume_stop)
2408 {
2409 /* We want to report the stop to the core. Treat the
2410 SIGSTOP as a normal event. */
2411 if (debug_threads)
2412 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2413 target_pid_to_str (ptid_of (thread)).c_str ());
2414 }
2415 else if (stopping_threads != NOT_STOPPING_THREADS)
2416 {
2417 /* Stopping threads. We don't want this SIGSTOP to end up
2418 pending. */
2419 if (debug_threads)
2420 debug_printf ("LLW: SIGSTOP caught for %s "
2421 "while stopping threads.\n",
2422 target_pid_to_str (ptid_of (thread)).c_str ());
2423 return;
2424 }
2425 else
2426 {
2427 /* This is a delayed SIGSTOP. Filter out the event. */
2428 if (debug_threads)
2429 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2430 child->stepping ? "step" : "continue",
2431 target_pid_to_str (ptid_of (thread)).c_str ());
2432
2433 resume_one_lwp (child, child->stepping, 0, NULL);
2434 return;
2435 }
2436 }
2437
2438 child->status_pending_p = 1;
2439 child->status_pending = wstat;
2440 return;
2441 }
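/* Editorial sketch (standard Linux ptrace encoding): extended events
   such as PTRACE_EVENT_EXEC arrive as a SIGTRAP stop with the event
   number in the high bits of the wait status, which is what the
   linux_ptrace_get_extended_event check above relies on.  */
#if 0
static int
example_extended_event (int wstat)
{
  /* E.g. equals PTRACE_EVENT_EXEC for the exec case handled above.  */
  return wstat >> 16;
}
#endif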
2442
2443 bool
2444 linux_process_target::maybe_hw_step (thread_info *thread)
2445 {
2446 if (supports_hardware_single_step ())
2447 return true;
2448 else
2449 {
2450 /* GDBserver must insert a single-step breakpoint for software
2451 single step. */
2452 gdb_assert (has_single_step_breakpoints (thread));
2453 return false;
2454 }
2455 }
2456
2457 void
2458 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2459 {
2460 struct lwp_info *lp = get_thread_lwp (thread);
2461
2462 if (lp->stopped
2463 && !lp->suspended
2464 && !lp->status_pending_p
2465 && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2466 {
2467 int step = 0;
2468
2469 if (thread->last_resume_kind == resume_step)
2470 step = maybe_hw_step (thread);
2471
2472 if (debug_threads)
2473 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2474 target_pid_to_str (ptid_of (thread)).c_str (),
2475 paddress (lp->stop_pc),
2476 step);
2477
2478 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2479 }
2480 }
2481
2482 int
2483 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2484 ptid_t filter_ptid,
2485 int *wstatp, int options)
2486 {
2487 struct thread_info *event_thread;
2488 struct lwp_info *event_child, *requested_child;
2489 sigset_t block_mask, prev_mask;
2490
2491 retry:
2492 /* N.B. event_thread points to the thread_info struct that contains
2493 event_child. Keep them in sync. */
2494 event_thread = NULL;
2495 event_child = NULL;
2496 requested_child = NULL;
2497
2498 /* Check for an LWP with a pending status. */
2499
2500 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2501 {
2502 event_thread = find_thread_in_random ([&] (thread_info *thread)
2503 {
2504 return status_pending_p_callback (thread, filter_ptid);
2505 });
2506
2507 if (event_thread != NULL)
2508 event_child = get_thread_lwp (event_thread);
2509 if (debug_threads && event_thread)
2510 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2511 }
2512 else if (filter_ptid != null_ptid)
2513 {
2514 requested_child = find_lwp_pid (filter_ptid);
2515
2516 if (stopping_threads == NOT_STOPPING_THREADS
2517 && requested_child->status_pending_p
2518 && (requested_child->collecting_fast_tracepoint
2519 != fast_tpoint_collect_result::not_collecting))
2520 {
2521 enqueue_one_deferred_signal (requested_child,
2522 &requested_child->status_pending);
2523 requested_child->status_pending_p = 0;
2524 requested_child->status_pending = 0;
2525 resume_one_lwp (requested_child, 0, 0, NULL);
2526 }
2527
2528 if (requested_child->suspended
2529 && requested_child->status_pending_p)
2530 {
2531 internal_error (__FILE__, __LINE__,
2532 "requesting an event out of a"
2533 " suspended child?");
2534 }
2535
2536 if (requested_child->status_pending_p)
2537 {
2538 event_child = requested_child;
2539 event_thread = get_lwp_thread (event_child);
2540 }
2541 }
2542
2543 if (event_child != NULL)
2544 {
2545 if (debug_threads)
2546 debug_printf ("Got an event from pending child %ld (%04x)\n",
2547 lwpid_of (event_thread), event_child->status_pending);
2548 *wstatp = event_child->status_pending;
2549 event_child->status_pending_p = 0;
2550 event_child->status_pending = 0;
2551 current_thread = event_thread;
2552 return lwpid_of (event_thread);
2553 }
2554
2555 /* But if we don't find a pending event, we'll have to wait.
2556
2557 We only enter this loop if no process has a pending wait status.
2558 Thus any action taken in response to a wait status inside this
2559 loop is responding as soon as we detect the status, not after any
2560 pending events. */
2561
2562 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2563 all signals while here. */
2564 sigfillset (&block_mask);
2565 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2566
2567 /* Always pull all events out of the kernel. We'll randomly select
2568 an event LWP out of all that have events, to prevent
2569 starvation. */
2570 while (event_child == NULL)
2571 {
2572 pid_t ret = 0;
2573
2574 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2575 quirks:
2576
2577 - If the thread group leader exits while other threads in the
2578 thread group still exist, waitpid(TGID, ...) hangs. That
2579 waitpid won't return an exit status until the other threads
2580 in the group are reaped.
2581
2582 - When a non-leader thread execs, that thread just vanishes
2583 without reporting an exit (so we'd hang if we waited for it
2584 explicitly in that case). The exec event is reported to
2585 the TGID pid. */
2586 errno = 0;
2587 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2588
2589 if (debug_threads)
2590 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2591 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2592
2593 if (ret > 0)
2594 {
2595 if (debug_threads)
2596 {
2597 debug_printf ("LLW: waitpid %ld received %s\n",
2598 (long) ret, status_to_str (*wstatp).c_str ());
2599 }
2600
2601 /* Filter all events. IOW, leave all events pending. We'll
2602 randomly select an event LWP out of all that have events
2603 below. */
2604 filter_event (ret, *wstatp);
2605 /* Retry until nothing comes out of waitpid. A single
2606 SIGCHLD can indicate more than one child stopped. */
2607 continue;
2608 }
2609
2610 /* Now that we've pulled all events out of the kernel, resume
2611 LWPs that don't have an interesting event to report. */
2612 if (stopping_threads == NOT_STOPPING_THREADS)
2613 for_each_thread ([this] (thread_info *thread)
2614 {
2615 resume_stopped_resumed_lwps (thread);
2616 });
2617
2618 /* ... and find an LWP with a status to report to the core, if
2619 any. */
2620 event_thread = find_thread_in_random ([&] (thread_info *thread)
2621 {
2622 return status_pending_p_callback (thread, filter_ptid);
2623 });
2624
2625 if (event_thread != NULL)
2626 {
2627 event_child = get_thread_lwp (event_thread);
2628 *wstatp = event_child->status_pending;
2629 event_child->status_pending_p = 0;
2630 event_child->status_pending = 0;
2631 break;
2632 }
2633
2634 /* Check for zombie thread group leaders. Those can't be reaped
2635 until all other threads in the thread group are. */
2636 check_zombie_leaders ();
2637
2638 auto not_stopped = [&] (thread_info *thread)
2639 {
2640 return not_stopped_callback (thread, wait_ptid);
2641 };
2642
2643 /* If there are no resumed children left in the set of LWPs we
2644 want to wait for, bail. We can't just block in
2645 waitpid/sigsuspend, because lwps might have been left stopped
2646 in trace-stop state, and we'd be stuck forever waiting for
2647 their status to change (which would only happen if we resumed
2648 them). Even if WNOHANG is set, this return code is preferred
2649 over 0 (below), as it is more detailed. */
2650 if (find_thread (not_stopped) == NULL)
2651 {
2652 if (debug_threads)
2653 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2654 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2655 return -1;
2656 }
2657
2658 /* No interesting event to report to the caller. */
2659 if ((options & WNOHANG))
2660 {
2661 if (debug_threads)
2662 debug_printf ("WNOHANG set, no event found\n");
2663
2664 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2665 return 0;
2666 }
2667
2668 /* Block until we get an event reported with SIGCHLD. */
2669 if (debug_threads)
2670 debug_printf ("sigsuspend'ing\n");
2671
2672 sigsuspend (&prev_mask);
2673 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2674 goto retry;
2675 }
2676
2677 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2678
2679 current_thread = event_thread;
2680
2681 return lwpid_of (event_thread);
2682 }
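/* Editorial sketch of the race-free wait pattern used above: block
   SIGCHLD, drain every pending event with waitpid(-1, ..., WNOHANG),
   and only then sigsuspend with the previous mask, so a SIGCHLD
   delivered between the last waitpid and the sigsuspend cannot be
   lost.  Hypothetical stand-alone loop, error handling elided.  */
#if 0
#include <signal.h>
#include <sys/wait.h>

static void
example_drain_then_suspend (void)
{
  sigset_t block_mask, prev_mask;
  int wstat;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      pid_t pid = waitpid (-1, &wstat, __WALL | WNOHANG);

      if (pid > 0)
        {
          /* Record the event; keep draining, since one SIGCHLD can
             stand for several stopped children.  */
          continue;
        }

      /* Nothing pending: atomically restore the old mask and wait
         for the next SIGCHLD.  */
      sigsuspend (&prev_mask);
    }
}
#endif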
2683
2684 int
2685 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2686 {
2687 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2688 }
2689
2690 /* Select one LWP out of those that have events pending. */
2691
2692 static void
2693 select_event_lwp (struct lwp_info **orig_lp)
2694 {
2695 struct thread_info *event_thread = NULL;
2696
2697 /* In all-stop, give preference to the LWP that is being
2698 single-stepped. There will be at most one, and it's the LWP that
2699 the core is most interested in. If we didn't do this, then we'd
2700 have to handle pending step SIGTRAPs somehow in case the core
2701 later continues the previously-stepped thread, otherwise we'd
2702 report the pending SIGTRAP, and the core, not having stepped the
2703 thread, wouldn't understand what the trap was for, and therefore
2704 would report it to the user as a random signal. */
2705 if (!non_stop)
2706 {
2707 event_thread = find_thread ([] (thread_info *thread)
2708 {
2709 lwp_info *lp = get_thread_lwp (thread);
2710
2711 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2712 && thread->last_resume_kind == resume_step
2713 && lp->status_pending_p);
2714 });
2715
2716 if (event_thread != NULL)
2717 {
2718 if (debug_threads)
2719 debug_printf ("SEL: Select single-step %s\n",
2720 target_pid_to_str (ptid_of (event_thread)).c_str ());
2721 }
2722 }
2723 if (event_thread == NULL)
2724 {
2725 /* No single-stepping LWP. Select one at random, out of those
2726 which have had events. */
2727
2728 event_thread = find_thread_in_random ([&] (thread_info *thread)
2729 {
2730 lwp_info *lp = get_thread_lwp (thread);
2731
2732 /* Only resumed LWPs that have an event pending. */
2733 return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2734 && lp->status_pending_p);
2735 });
2736 }
2737
2738 if (event_thread != NULL)
2739 {
2740 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2741
2742 /* Switch the event LWP. */
2743 *orig_lp = event_lp;
2744 }
2745 }
2746
2747 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2748 non-NULL. */
2749
2750 static void
2751 unsuspend_all_lwps (struct lwp_info *except)
2752 {
2753 for_each_thread ([&] (thread_info *thread)
2754 {
2755 lwp_info *lwp = get_thread_lwp (thread);
2756
2757 if (lwp != except)
2758 lwp_suspended_decr (lwp);
2759 });
2760 }
2761
2762 static bool lwp_running (thread_info *thread);
2763
2764 /* Stabilize threads (move out of jump pads).
2765
2766 If a thread is midway through collecting a fast tracepoint, we need to
2767 finish the collection and move it out of the jump pad before
2768 reporting the signal.
2769
2770 This avoids recursion while collecting (when a signal arrives
2771 midway, and the signal handler itself collects), which would trash
2772 the trace buffer. In case the user set a breakpoint in a signal
2773 handler, this avoids the backtrace showing the jump pad, etc.
2774 Most importantly, there are certain things we can't do safely if
2775 threads are stopped in a jump pad (or in its callees). For
2776 example:
2777
2778 - starting a new trace run. A thread still collecting from the
2779 previous run could trash the trace buffer when resumed. The trace
2780 buffer control structures would have been reset, but the thread had
2781 no way to tell. The thread could even be midway through memcpy'ing
2782 to the buffer, which would mean that when resumed, it would clobber
2783 the trace buffer that had been set for a new run.
2784
2785 - we can't rewrite/reuse the jump pads for new tracepoints
2786 safely. Say you do tstart while a thread is stopped midway through
2787 collecting. When the thread is later resumed, it finishes the
2788 collection, and returns to the jump pad, to execute the original
2789 instruction that was under the tracepoint jump at the time the
2790 older run had been started. If the jump pad had been rewritten
2791 since for something else in the new run, the thread would now
2792 execute the wrong / random instructions. */
2793
2794 void
2795 linux_process_target::stabilize_threads ()
2796 {
2797 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2798 {
2799 return stuck_in_jump_pad (thread);
2800 });
2801
2802 if (thread_stuck != NULL)
2803 {
2804 if (debug_threads)
2805 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2806 lwpid_of (thread_stuck));
2807 return;
2808 }
2809
2810 thread_info *saved_thread = current_thread;
2811
2812 stabilizing_threads = 1;
2813
2814 /* Kick 'em all. */
2815 for_each_thread ([this] (thread_info *thread)
2816 {
2817 move_out_of_jump_pad (thread);
2818 });
2819
2820 /* Loop until all are stopped out of the jump pads. */
2821 while (find_thread (lwp_running) != NULL)
2822 {
2823 struct target_waitstatus ourstatus;
2824 struct lwp_info *lwp;
2825 int wstat;
2826
2827 /* Note that we go through the full wait event loop. While
2828 moving threads out of jump pad, we need to be able to step
2829 over internal breakpoints and such. */
2830 wait_1 (minus_one_ptid, &ourstatus, 0);
2831
2832 if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2833 {
2834 lwp = get_thread_lwp (current_thread);
2835
2836 /* Lock it. */
2837 lwp_suspended_inc (lwp);
2838
2839 if (ourstatus.sig () != GDB_SIGNAL_0
2840 || current_thread->last_resume_kind == resume_stop)
2841 {
2842 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2843 enqueue_one_deferred_signal (lwp, &wstat);
2844 }
2845 }
2846 }
2847
2848 unsuspend_all_lwps (NULL);
2849
2850 stabilizing_threads = 0;
2851
2852 current_thread = saved_thread;
2853
2854 if (debug_threads)
2855 {
2856 thread_stuck = find_thread ([this] (thread_info *thread)
2857 {
2858 return stuck_in_jump_pad (thread);
2859 });
2860
2861 if (thread_stuck != NULL)
2862 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2863 lwpid_of (thread_stuck));
2864 }
2865 }
2866
2867 /* Convenience function that is called when the kernel reports an
2868 event that is not passed out to GDB. */
2869
2870 static ptid_t
2871 ignore_event (struct target_waitstatus *ourstatus)
2872 {
2873 /* If we got an event, there may still be others, as a single
2874 SIGCHLD can indicate more than one child stopped. This forces
2875 another target_wait call. */
2876 async_file_mark ();
2877
2878 ourstatus->set_ignore ();
2879 return null_ptid;
2880 }
2881
2882 ptid_t
2883 linux_process_target::filter_exit_event (lwp_info *event_child,
2884 target_waitstatus *ourstatus)
2885 {
2886 client_state &cs = get_client_state ();
2887 struct thread_info *thread = get_lwp_thread (event_child);
2888 ptid_t ptid = ptid_of (thread);
2889
2890 if (!last_thread_of_process_p (pid_of (thread)))
2891 {
2892 if (cs.report_thread_events)
2893 ourstatus->set_thread_exited (0);
2894 else
2895 ourstatus->set_ignore ();
2896
2897 delete_lwp (event_child);
2898 }
2899 return ptid;
2900 }
2901
2902 /* Returns 1 if GDB is interested in any event_child syscalls. */
2903
2904 static int
2905 gdb_catching_syscalls_p (struct lwp_info *event_child)
2906 {
2907 struct thread_info *thread = get_lwp_thread (event_child);
2908 struct process_info *proc = get_thread_process (thread);
2909
2910 return !proc->syscalls_to_catch.empty ();
2911 }
2912
2913 bool
2914 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2915 {
2916 int sysno;
2917 struct thread_info *thread = get_lwp_thread (event_child);
2918 struct process_info *proc = get_thread_process (thread);
2919
2920 if (proc->syscalls_to_catch.empty ())
2921 return false;
2922
2923 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2924 return true;
2925
2926 get_syscall_trapinfo (event_child, &sysno);
2927
2928 for (int iter : proc->syscalls_to_catch)
2929 if (iter == sysno)
2930 return true;
2931
2932 return false;
2933 }
2934
2935 ptid_t
2936 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2937 target_wait_flags target_options)
2938 {
2939 client_state &cs = get_client_state ();
2940 int w;
2941 struct lwp_info *event_child;
2942 int options;
2943 int pid;
2944 int step_over_finished;
2945 int bp_explains_trap;
2946 int maybe_internal_trap;
2947 int report_to_gdb;
2948 int trace_event;
2949 int in_step_range;
2950 int any_resumed;
2951
2952 if (debug_threads)
2953 {
2954 debug_enter ();
2955 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid).c_str ());
2956 }
2957
2958 /* Translate generic target options into linux options. */
2959 options = __WALL;
2960 if (target_options & TARGET_WNOHANG)
2961 options |= WNOHANG;
2962
2963 bp_explains_trap = 0;
2964 trace_event = 0;
2965 in_step_range = 0;
2966 ourstatus->set_ignore ();
2967
2968 auto status_pending_p_any = [&] (thread_info *thread)
2969 {
2970 return status_pending_p_callback (thread, minus_one_ptid);
2971 };
2972
2973 auto not_stopped = [&] (thread_info *thread)
2974 {
2975 return not_stopped_callback (thread, minus_one_ptid);
2976 };
2977
2978 /* Find a resumed LWP, if any. */
2979 if (find_thread (status_pending_p_any) != NULL)
2980 any_resumed = 1;
2981 else if (find_thread (not_stopped) != NULL)
2982 any_resumed = 1;
2983 else
2984 any_resumed = 0;
2985
2986 if (step_over_bkpt == null_ptid)
2987 pid = wait_for_event (ptid, &w, options);
2988 else
2989 {
2990 if (debug_threads)
2991 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2992 target_pid_to_str (step_over_bkpt).c_str ());
2993 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2994 }
2995
2996 if (pid == 0 || (pid == -1 && !any_resumed))
2997 {
2998 gdb_assert (target_options & TARGET_WNOHANG);
2999
3000 if (debug_threads)
3001 {
3002 debug_printf ("wait_1 ret = null_ptid, "
3003 "TARGET_WAITKIND_IGNORE\n");
3004 debug_exit ();
3005 }
3006
3007 ourstatus->set_ignore ();
3008 return null_ptid;
3009 }
3010 else if (pid == -1)
3011 {
3012 if (debug_threads)
3013 {
3014 debug_printf ("wait_1 ret = null_ptid, "
3015 "TARGET_WAITKIND_NO_RESUMED\n");
3016 debug_exit ();
3017 }
3018
3019 ourstatus->set_no_resumed ();
3020 return null_ptid;
3021 }
3022
3023 event_child = get_thread_lwp (current_thread);
3024
3025 /* wait_for_event only returns an exit status for the last
3026 child of a process. Report it. */
3027 if (WIFEXITED (w) || WIFSIGNALED (w))
3028 {
3029 if (WIFEXITED (w))
3030 {
3031 ourstatus->set_exited (WEXITSTATUS (w));
3032
3033 if (debug_threads)
3034 {
3035 debug_printf ("wait_1 ret = %s, exited with "
3036 "retcode %d\n",
3037 target_pid_to_str (ptid_of (current_thread)).c_str (),
3038 WEXITSTATUS (w));
3039 debug_exit ();
3040 }
3041 }
3042 else
3043 {
3044 ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3045
3046 if (debug_threads)
3047 {
3048 debug_printf ("wait_1 ret = %s, terminated with "
3049 "signal %d\n",
3050 target_pid_to_str (ptid_of (current_thread)).c_str (),
3051 WTERMSIG (w));
3052 debug_exit ();
3053 }
3054 }
3055
3056 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3057 return filter_exit_event (event_child, ourstatus);
3058
3059 return ptid_of (current_thread);
3060 }
3061
3062 /* If step-over executes a breakpoint instruction, in the case of a
3063 hardware single step it means a gdb/gdbserver breakpoint had been
3064 planted on top of a permanent breakpoint, in the case of a software
3065 single step it may just mean that gdbserver hit the reinsert breakpoint.
3066 The PC has been adjusted by save_stop_reason to point at
3067 the breakpoint address.
3068 So in the case of a hardware single step, advance the PC manually
3069 past the breakpoint; in the case of a software single step, advance only
3070 if it's not the single_step_breakpoint we are hitting.
3071 This prevents the program from trapping a permanent breakpoint
3072 forever. */
3073 if (step_over_bkpt != null_ptid
3074 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3075 && (event_child->stepping
3076 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3077 {
3078 int increment_pc = 0;
3079 int breakpoint_kind = 0;
3080 CORE_ADDR stop_pc = event_child->stop_pc;
3081
3082 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3083 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3084
3085 if (debug_threads)
3086 {
3087 debug_printf ("step-over for %s executed software breakpoint\n",
3088 target_pid_to_str (ptid_of (current_thread)).c_str ());
3089 }
3090
3091 if (increment_pc != 0)
3092 {
3093 struct regcache *regcache
3094 = get_thread_regcache (current_thread, 1);
3095
3096 event_child->stop_pc += increment_pc;
3097 low_set_pc (regcache, event_child->stop_pc);
3098
3099 if (!low_breakpoint_at (event_child->stop_pc))
3100 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3101 }
3102 }
3103
3104 /* If this event was not handled before, and is not a SIGTRAP, we
3105 report it. SIGILL and SIGSEGV are also treated as traps in case
3106 a breakpoint is inserted at the current PC. If this target does
3107 not support internal breakpoints at all, we also report the
3108 SIGTRAP without further processing; it's of no concern to us. */
3109 maybe_internal_trap
3110 = (low_supports_breakpoints ()
3111 && (WSTOPSIG (w) == SIGTRAP
3112 || ((WSTOPSIG (w) == SIGILL
3113 || WSTOPSIG (w) == SIGSEGV)
3114 && low_breakpoint_at (event_child->stop_pc))));
3115
3116 if (maybe_internal_trap)
3117 {
3118 /* Handle anything that requires bookkeeping before deciding to
3119 report the event or continue waiting. */
3120
3121 /* First check if we can explain the SIGTRAP with an internal
3122 breakpoint, or if we should possibly report the event to GDB.
3123 Do this before anything that may remove or insert a
3124 breakpoint. */
3125 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3126
3127 /* We have a SIGTRAP; possibly a step-over dance has just
3128 finished. If so, tweak the state machine accordingly,
3129 reinsert breakpoints and delete any single-step
3130 breakpoints. */
3131 step_over_finished = finish_step_over (event_child);
3132
3133 /* Now invoke the callbacks of any internal breakpoints there. */
3134 check_breakpoints (event_child->stop_pc);
3135
3136 /* Handle tracepoint data collecting. This may overflow the
3137 trace buffer, and cause a tracing stop, removing
3138 breakpoints. */
3139 trace_event = handle_tracepoints (event_child);
3140
3141 if (bp_explains_trap)
3142 {
3143 if (debug_threads)
3144 debug_printf ("Hit a gdbserver breakpoint.\n");
3145 }
3146 }
3147 else
3148 {
3149 /* We have some other signal, possibly a step-over dance was in
3150 progress, and it should be cancelled too. */
3151 step_over_finished = finish_step_over (event_child);
3152 }
3153
3154 /* We have all the data we need. Either report the event to GDB, or
3155 resume threads and keep waiting for more. */
3156
3157 /* If we're collecting a fast tracepoint, finish the collection and
3158 move out of the jump pad before delivering a signal. See
3159 linux_stabilize_threads. */
3160
3161 if (WIFSTOPPED (w)
3162 && WSTOPSIG (w) != SIGTRAP
3163 && supports_fast_tracepoints ()
3164 && agent_loaded_p ())
3165 {
3166 if (debug_threads)
3167 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3168 "to defer or adjust it.\n",
3169 WSTOPSIG (w), lwpid_of (current_thread));
3170
3171 /* Allow debugging the jump pad itself. */
3172 if (current_thread->last_resume_kind != resume_step
3173 && maybe_move_out_of_jump_pad (event_child, &w))
3174 {
3175 enqueue_one_deferred_signal (event_child, &w);
3176
3177 if (debug_threads)
3178 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3179 WSTOPSIG (w), lwpid_of (current_thread));
3180
3181 resume_one_lwp (event_child, 0, 0, NULL);
3182
3183 if (debug_threads)
3184 debug_exit ();
3185 return ignore_event (ourstatus);
3186 }
3187 }
3188
3189 if (event_child->collecting_fast_tracepoint
3190 != fast_tpoint_collect_result::not_collecting)
3191 {
3192 if (debug_threads)
3193 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3194 "Check if we're already there.\n",
3195 lwpid_of (current_thread),
3196 (int) event_child->collecting_fast_tracepoint);
3197
3198 trace_event = 1;
3199
3200 event_child->collecting_fast_tracepoint
3201 = linux_fast_tracepoint_collecting (event_child, NULL);
3202
3203 if (event_child->collecting_fast_tracepoint
3204 != fast_tpoint_collect_result::before_insn)
3205 {
3206 /* No longer need this breakpoint. */
3207 if (event_child->exit_jump_pad_bkpt != NULL)
3208 {
3209 if (debug_threads)
3210 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3211 "stopping all threads momentarily.\n");
3212
3213 /* Other running threads could hit this breakpoint.
3214 We don't handle moribund locations like GDB does,
3215 instead we always pause all threads when removing
3216 breakpoints, so that any step-over or
3217 decr_pc_after_break adjustment is always taken
3218 care of while the breakpoint is still
3219 inserted. */
3220 stop_all_lwps (1, event_child);
3221
3222 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3223 event_child->exit_jump_pad_bkpt = NULL;
3224
3225 unstop_all_lwps (1, event_child);
3226
3227 gdb_assert (event_child->suspended >= 0);
3228 }
3229 }
3230
3231 if (event_child->collecting_fast_tracepoint
3232 == fast_tpoint_collect_result::not_collecting)
3233 {
3234 if (debug_threads)
3235 debug_printf ("fast tracepoint finished "
3236 "collecting successfully.\n");
3237
3238 /* We may have a deferred signal to report. */
3239 if (dequeue_one_deferred_signal (event_child, &w))
3240 {
3241 if (debug_threads)
3242 debug_printf ("dequeued one signal.\n");
3243 }
3244 else
3245 {
3246 if (debug_threads)
3247 debug_printf ("no deferred signals.\n");
3248
3249 if (stabilizing_threads)
3250 {
3251 ourstatus->set_stopped (GDB_SIGNAL_0);
3252
3253 if (debug_threads)
3254 {
3255 debug_printf ("wait_1 ret = %s, stopped "
3256 "while stabilizing threads\n",
3257 target_pid_to_str
3258 (ptid_of (current_thread)).c_str ());
3259 debug_exit ();
3260 }
3261
3262 return ptid_of (current_thread);
3263 }
3264 }
3265 }
3266 }
3267
3268 /* Check whether GDB would be interested in this event. */
3269
3270 /* Check if GDB is interested in this syscall. */
3271 if (WIFSTOPPED (w)
3272 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3273 && !gdb_catch_this_syscall (event_child))
3274 {
3275 if (debug_threads)
3276 {
3277 debug_printf ("Ignored syscall for LWP %ld.\n",
3278 lwpid_of (current_thread));
3279 }
3280
3281 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3282
3283 if (debug_threads)
3284 debug_exit ();
3285 return ignore_event (ourstatus);
3286 }
3287
3288 /* If GDB is not interested in this signal, don't stop other
3289 threads, and don't report it to GDB. Just resume the inferior
3290 right away. We do this for threading-related signals as well as
3291 any that GDB specifically requested we ignore. But never ignore
3292 SIGSTOP if we sent it ourselves, and do not ignore signals when
3293 stepping - they may require special handling to skip the signal
3294 handler. Also never ignore signals that could be caused by a
3295 breakpoint. */
3296 if (WIFSTOPPED (w)
3297 && current_thread->last_resume_kind != resume_step
3298 && (
3299 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3300 (current_process ()->priv->thread_db != NULL
3301 && (WSTOPSIG (w) == __SIGRTMIN
3302 || WSTOPSIG (w) == __SIGRTMIN + 1))
3303 ||
3304 #endif
3305 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3306 && !(WSTOPSIG (w) == SIGSTOP
3307 && current_thread->last_resume_kind == resume_stop)
3308 && !linux_wstatus_maybe_breakpoint (w))))
3309 {
3310 siginfo_t info, *info_p;
3311
3312 if (debug_threads)
3313 debug_printf ("Ignored signal %d for LWP %ld.\n",
3314 WSTOPSIG (w), lwpid_of (current_thread));
3315
3316 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3317 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3318 info_p = &info;
3319 else
3320 info_p = NULL;
3321
3322 if (step_over_finished)
3323 {
3324 /* We cancelled this thread's step-over above. We still
3325 need to unsuspend all other LWPs, and set them back
3326 running again while the signal handler runs. */
3327 unsuspend_all_lwps (event_child);
3328
3329 /* Enqueue the pending signal info so that proceed_all_lwps
3330 doesn't lose it. */
3331 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3332
3333 proceed_all_lwps ();
3334 }
3335 else
3336 {
3337 resume_one_lwp (event_child, event_child->stepping,
3338 WSTOPSIG (w), info_p);
3339 }
3340
3341 if (debug_threads)
3342 debug_exit ();
3343
3344 return ignore_event (ourstatus);
3345 }
3346
3347 /* Note that all addresses are always "out of the step range" when
3348 there's no range to begin with. */
3349 in_step_range = lwp_in_step_range (event_child);
3350
3351 /* If GDB wanted this thread to single step, and the thread is out
3352 of the step range, we always want to report the SIGTRAP, and let
3353 GDB handle it. Watchpoints should always be reported. So should
3354 signals we can't explain. A SIGTRAP we can't explain could be a
3355 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3356 do, we'd be able to handle GDB breakpoints on top of internal
3357 breakpoints, by handling the internal breakpoint and still
3358 reporting the event to GDB. If we don't, we're out of luck, GDB
3359 won't see the breakpoint hit. If we see a single-step event but
3360 the thread should be continuing, don't pass the trap to gdb.
3361 That indicates that we had previously finished a single-step but
3362 left the single-step pending -- see
3363 complete_ongoing_step_over. */
3364 report_to_gdb = (!maybe_internal_trap
3365 || (current_thread->last_resume_kind == resume_step
3366 && !in_step_range)
3367 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3368 || (!in_step_range
3369 && !bp_explains_trap
3370 && !trace_event
3371 && !step_over_finished
3372 && !(current_thread->last_resume_kind == resume_continue
3373 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3374 || (gdb_breakpoint_here (event_child->stop_pc)
3375 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3376 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3377 || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3378
3379 run_breakpoint_commands (event_child->stop_pc);
3380
3381 /* We found no reason GDB would want us to stop. We either hit one
3382 of our own breakpoints, or finished an internal step GDB
3383 shouldn't know about. */
3384 if (!report_to_gdb)
3385 {
3386 if (debug_threads)
3387 {
3388 if (bp_explains_trap)
3389 debug_printf ("Hit a gdbserver breakpoint.\n");
3390 if (step_over_finished)
3391 debug_printf ("Step-over finished.\n");
3392 if (trace_event)
3393 debug_printf ("Tracepoint event.\n");
3394 if (lwp_in_step_range (event_child))
3395 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3396 paddress (event_child->stop_pc),
3397 paddress (event_child->step_range_start),
3398 paddress (event_child->step_range_end));
3399 }
3400
3401 /* We're not reporting this breakpoint to GDB, so apply the
3402 decr_pc_after_break adjustment to the inferior's regcache
3403 ourselves. */
3404
3405 if (low_supports_breakpoints ())
3406 {
3407 struct regcache *regcache
3408 = get_thread_regcache (current_thread, 1);
3409 low_set_pc (regcache, event_child->stop_pc);
3410 }
3411
3412 if (step_over_finished)
3413 {
3414 /* If we have finished stepping over a breakpoint, we've
3415 stopped and suspended all LWPs momentarily except the
3416 stepping one. This is where we resume them all again.
3417 We're going to keep waiting, so use proceed, which
3418 handles stepping over the next breakpoint. */
3419 unsuspend_all_lwps (event_child);
3420 }
3421 else
3422 {
3423 /* Remove the single-step breakpoints if any. Note that
3424 there is no single-step breakpoint if we finished stepping
3425 over. */
3426 if (supports_software_single_step ()
3427 && has_single_step_breakpoints (current_thread))
3428 {
3429 stop_all_lwps (0, event_child);
3430 delete_single_step_breakpoints (current_thread);
3431 unstop_all_lwps (0, event_child);
3432 }
3433 }
3434
3435 if (debug_threads)
3436 debug_printf ("proceeding all threads.\n");
3437 proceed_all_lwps ();
3438
3439 if (debug_threads)
3440 debug_exit ();
3441
3442 return ignore_event (ourstatus);
3443 }
3444
3445 if (debug_threads)
3446 {
3447 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3448 {
3449 std::string str
3450 = target_waitstatus_to_string (&event_child->waitstatus);
3451
3452 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3453 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3454 }
3455 if (current_thread->last_resume_kind == resume_step)
3456 {
3457 if (event_child->step_range_start == event_child->step_range_end)
3458 debug_printf ("GDB wanted to single-step, reporting event.\n");
3459 else if (!lwp_in_step_range (event_child))
3460 debug_printf ("Out of step range, reporting event.\n");
3461 }
3462 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3463 debug_printf ("Stopped by watchpoint.\n");
3464 else if (gdb_breakpoint_here (event_child->stop_pc))
3465 debug_printf ("Stopped by GDB breakpoint.\n");
3466 debug_printf ("Hit a non-gdbserver trap event.\n");
3468 }
3469
3470 /* Alright, we're going to report a stop. */
3471
3472 /* Remove single-step breakpoints. */
3473 if (supports_software_single_step ())
3474 {
3475 /* Decide whether to remove single-step breakpoints. If so, stop
3476 all LWPs, so that other threads won't hit a breakpoint left in
3477 stale memory. */
3478 int remove_single_step_breakpoints_p = 0;
3479
3480 if (non_stop)
3481 {
3482 remove_single_step_breakpoints_p
3483 = has_single_step_breakpoints (current_thread);
3484 }
3485 else
3486 {
3487 /* In all-stop, a stop reply cancels all previous resume
3488 requests. Delete all single-step breakpoints. */
3489
3490 find_thread ([&] (thread_info *thread) {
3491 if (has_single_step_breakpoints (thread))
3492 {
3493 remove_single_step_breakpoints_p = 1;
3494 return true;
3495 }
3496
3497 return false;
3498 });
3499 }
3500
3501 if (remove_single_step_breakpoints_p)
3502 {
3503 /* If we remove single-step breakpoints from memory, stop all
3504 LWPs, so that other threads won't hit a breakpoint left in
3505 stale memory. */
3506 stop_all_lwps (0, event_child);
3507
3508 if (non_stop)
3509 {
3510 gdb_assert (has_single_step_breakpoints (current_thread));
3511 delete_single_step_breakpoints (current_thread);
3512 }
3513 else
3514 {
3515 for_each_thread ([] (thread_info *thread){
3516 if (has_single_step_breakpoints (thread))
3517 delete_single_step_breakpoints (thread);
3518 });
3519 }
3520
3521 unstop_all_lwps (0, event_child);
3522 }
3523 }
3524
3525 if (!stabilizing_threads)
3526 {
3527 /* In all-stop, stop all threads. */
3528 if (!non_stop)
3529 stop_all_lwps (0, NULL);
3530
3531 if (step_over_finished)
3532 {
3533 if (!non_stop)
3534 {
3535 /* If we were doing a step-over, all other threads but
3536 the stepping one had been paused in start_step_over,
3537 with their suspend counts incremented. We don't want
3538 to do a full unstop/unpause, because we're in
3539 all-stop mode (so we want threads stopped), but we
3540 still need to unsuspend the other threads, to
3541 decrement their `suspended' count back. */
3542 unsuspend_all_lwps (event_child);
3543 }
3544 else
3545 {
3546 /* If we just finished a step-over, then all threads had
3547 been momentarily paused. In all-stop, that's fine,
3548 we want threads stopped by now anyway. In non-stop,
3549 we need to re-resume threads that GDB wanted to be
3550 running. */
3551 unstop_all_lwps (1, event_child);
3552 }
3553 }
3554
3555 /* If we're not waiting for a specific LWP, choose an event LWP
3556 from among those that have had events. Giving equal priority
3557 to all LWPs that have had events helps prevent
3558 starvation. */
3559 if (ptid == minus_one_ptid)
3560 {
3561 event_child->status_pending_p = 1;
3562 event_child->status_pending = w;
3563
3564 select_event_lwp (&event_child);
3565
3566 /* current_thread and event_child must stay in sync. */
3567 current_thread = get_lwp_thread (event_child);
3568
3569 event_child->status_pending_p = 0;
3570 w = event_child->status_pending;
3571 }
3572
3573
3574 /* Stabilize threads (move out of jump pads). */
3575 if (!non_stop)
3576 target_stabilize_threads ();
3577 }
3578 else
3579 {
3580 /* If we just finished a step-over, then all threads had been
3581 momentarily paused. In all-stop, that's fine, we want
3582 threads stopped by now anyway. In non-stop, we need to
3583 re-resume threads that GDB wanted to be running. */
3584 if (step_over_finished)
3585 unstop_all_lwps (1, event_child);
3586 }
3587
3588 if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3589 {
3590 /* If the reported event is an exit, fork, vfork or exec, let
3591 GDB know. */
3592
3593 /* Break the unreported fork relationship chain. */
3594 if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3595 || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3596 {
3597 event_child->fork_relative->fork_relative = NULL;
3598 event_child->fork_relative = NULL;
3599 }
3600
3601 *ourstatus = event_child->waitstatus;
3602 /* Clear the event lwp's waitstatus since we handled it already. */
3603 event_child->waitstatus.set_ignore ();
3604 }
3605 else
3606 {
3607 /* The actual stop signal is overwritten below. */
3608 ourstatus->set_stopped (GDB_SIGNAL_0);
3609 }
3610
3611 /* Now that we've selected our final event LWP, un-adjust its PC if
3612 it was a software breakpoint, and the client doesn't know we can
3613 adjust the breakpoint ourselves. */
3614 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3615 && !cs.swbreak_feature)
3616 {
3617 int decr_pc = low_decr_pc_after_break ();
3618
3619 if (decr_pc != 0)
3620 {
3621 struct regcache *regcache
3622 = get_thread_regcache (current_thread, 1);
3623 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3624 }
3625 }
3626
3627 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3628 {
3629 int syscall_number;
3630
3631 get_syscall_trapinfo (event_child, &syscall_number);
3632 if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3633 ourstatus->set_syscall_entry (syscall_number);
3634 else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3635 ourstatus->set_syscall_return (syscall_number);
3636 else
3637 gdb_assert_not_reached ("unexpected syscall state");
3638 }
3639 else if (current_thread->last_resume_kind == resume_stop
3640 && WSTOPSIG (w) == SIGSTOP)
3641 {
3642 /* A thread that has been requested to stop by GDB with vCont;t
3643 stopped cleanly, so report it as GDB_SIGNAL_0. The use of
3644 SIGSTOP is an implementation detail. */
3645 ourstatus->set_stopped (GDB_SIGNAL_0);
3646 }
3647 else if (current_thread->last_resume_kind == resume_stop
3648 && WSTOPSIG (w) != SIGSTOP)
3649 {
3650 /* A thread that has been requested to stop by GDB with vCont;t,
3651 but it stopped for some other reason. */
3652 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3653 }
3654 else if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
3655 ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3656
3657 gdb_assert (step_over_bkpt == null_ptid);
3658
3659 if (debug_threads)
3660 {
3661 debug_printf ("wait_1 ret = %s, %d, %d\n",
3662 target_pid_to_str (ptid_of (current_thread)).c_str (),
3663 ourstatus->kind (), ourstatus->sig ());
3664 debug_exit ();
3665 }
3666
3667 if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3668 return filter_exit_event (event_child, ourstatus);
3669
3670 return ptid_of (current_thread);
3671 }
3672
3673 /* Get rid of any pending event in the pipe. */
3674 static void
3675 async_file_flush (void)
3676 {
3677 int ret;
3678 char buf;
3679
3680 do
3681 ret = read (linux_event_pipe[0], &buf, 1);
3682 while (ret >= 0 || (ret == -1 && errno == EINTR));
3683 }
3684
3685 /* Put something in the pipe, so the event loop wakes up. */
3686 static void
3687 async_file_mark (void)
3688 {
3689 int ret;
3690
3691 async_file_flush ();
3692
3693 do
3694 ret = write (linux_event_pipe[1], "+", 1);
3695 while (ret == 0 || (ret == -1 && errno == EINTR));
3696
3697 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3698 be awakened anyway. */
3699 }
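/* Editorial sketch of the self-pipe setup the two helpers above
   assume: a non-blocking pipe whose read end is watched by the event
   loop, so that a one-byte write wakes select/poll.  The real pipe,
   linux_event_pipe, is created elsewhere in this file; this
   hypothetical version only illustrates the flags involved.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_event_pipe[2];

static int
example_make_event_pipe (void)
{
  if (pipe (example_event_pipe) != 0)
    return -1;

  /* Non-blocking on both ends: the marker write must not block when
     the pipe is full, and the flush read must not block when it is
     empty.  */
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif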
3700
3701 ptid_t
3702 linux_process_target::wait (ptid_t ptid,
3703 target_waitstatus *ourstatus,
3704 target_wait_flags target_options)
3705 {
3706 ptid_t event_ptid;
3707
3708 /* Flush the async file first. */
3709 if (target_is_async_p ())
3710 async_file_flush ();
3711
3712 do
3713 {
3714 event_ptid = wait_1 (ptid, ourstatus, target_options);
3715 }
3716 while ((target_options & TARGET_WNOHANG) == 0
3717 && event_ptid == null_ptid
3718 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3719
3720 /* If at least one stop was reported, there may be more. A single
3721 SIGCHLD can signal more than one child stop. */
3722 if (target_is_async_p ()
3723 && (target_options & TARGET_WNOHANG) != 0
3724 && event_ptid != null_ptid)
3725 async_file_mark ();
3726
3727 return event_ptid;
3728 }
3729
3730 /* Send a signal to an LWP. */
3731
3732 static int
3733 kill_lwp (unsigned long lwpid, int signo)
3734 {
3735 int ret;
3736
3737 errno = 0;
3738 ret = syscall (__NR_tkill, lwpid, signo);
3739 if (errno == ENOSYS)
3740 {
3741 /* If tkill fails, then we are not using nptl threads, a
3742 configuration we no longer support. */
3743 perror_with_name (("tkill"));
3744 }
3745 return ret;
3746 }
3747
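/* kill(2) addresses a whole thread group; tkill directs the signal at
   a single LWP.  The sketch below (illustrative only, not built as
   part of gdbserver) shows the modern tgkill(2) variant, whose extra
   thread-group argument protects against the TID being recycled.  */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Send SIGNO to thread TID of thread group TGID.  Returns 0 on
   success, -1 with errno set on failure.  */
static int
tgkill_lwp (pid_t tgid, pid_t tid, int signo)
{
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif
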
3748 void
3749 linux_stop_lwp (struct lwp_info *lwp)
3750 {
3751 send_sigstop (lwp);
3752 }
3753
3754 static void
3755 send_sigstop (struct lwp_info *lwp)
3756 {
3757 int pid;
3758
3759 pid = lwpid_of (get_lwp_thread (lwp));
3760
3761 /* If we already have a pending stop signal for this process, don't
3762 send another. */
3763 if (lwp->stop_expected)
3764 {
3765 if (debug_threads)
3766 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3767
3768 return;
3769 }
3770
3771 if (debug_threads)
3772 debug_printf ("Sending sigstop to lwp %d\n", pid);
3773
3774 lwp->stop_expected = 1;
3775 kill_lwp (pid, SIGSTOP);
3776 }
3777
3778 static void
3779 send_sigstop (thread_info *thread, lwp_info *except)
3780 {
3781 struct lwp_info *lwp = get_thread_lwp (thread);
3782
3783 /* Ignore EXCEPT. */
3784 if (lwp == except)
3785 return;
3786
3787 if (lwp->stopped)
3788 return;
3789
3790 send_sigstop (lwp);
3791 }
3792
3793 /* Increment the suspend count of an LWP, and stop it, if not stopped
3794 yet. */
3795 static void
3796 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3797 {
3798 struct lwp_info *lwp = get_thread_lwp (thread);
3799
3800 /* Ignore EXCEPT. */
3801 if (lwp == except)
3802 return;
3803
3804 lwp_suspended_inc (lwp);
3805
3806 send_sigstop (thread, except);
3807 }
3808
3809 static void
3810 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3811 {
3812 /* Store the exit status for later. */
3813 lwp->status_pending_p = 1;
3814 lwp->status_pending = wstat;
3815
3816 /* Store in waitstatus as well, as there's nothing else to process
3817 for this event. */
3818 if (WIFEXITED (wstat))
3819 lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3820 else if (WIFSIGNALED (wstat))
3821 lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3822
3823 /* Prevent trying to stop it. */
3824 lwp->stopped = 1;
3825
3826 /* No further stops are expected from a dead lwp. */
3827 lwp->stop_expected = 0;
3828 }
3829
3830 /* Return true if LWP has exited already, and has a pending exit event
3831 to report to GDB. */
3832
3833 static int
3834 lwp_is_marked_dead (struct lwp_info *lwp)
3835 {
3836 return (lwp->status_pending_p
3837 && (WIFEXITED (lwp->status_pending)
3838 || WIFSIGNALED (lwp->status_pending)));
3839 }
3840
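/* For reference, a minimal standalone sketch (not built as part of
   gdbserver) of decoding a raw wait status like the one mark_lwp_dead
   stores in status_pending, using the standard wstat macros.  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
describe_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, status %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif
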
3841 void
3842 linux_process_target::wait_for_sigstop ()
3843 {
3844 struct thread_info *saved_thread;
3845 ptid_t saved_tid;
3846 int wstat;
3847 int ret;
3848
3849 saved_thread = current_thread;
3850 if (saved_thread != NULL)
3851 saved_tid = saved_thread->id;
3852 else
3853 saved_tid = null_ptid; /* avoid bogus unused warning */
3854
3855 if (debug_threads)
3856 debug_printf ("wait_for_sigstop: pulling events\n");
3857
3858 /* Passing NULL_PTID as filter indicates we want all events to be
3859 left pending. Eventually this returns when there are no
3860 unwaited-for children left. */
3861 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3862 gdb_assert (ret == -1);
3863
3864 if (saved_thread == NULL || mythread_alive (saved_tid))
3865 current_thread = saved_thread;
3866 else
3867 {
3868 if (debug_threads)
3869 debug_printf ("Previously current thread died.\n");
3870
3871	      /* We can't change the current inferior behind GDB's back;
3872		 otherwise, a subsequent command may apply to the wrong
3873		 process.  */
3874 current_thread = NULL;
3875 }
3876 }
3877
3878 bool
3879 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3880 {
3881 struct lwp_info *lwp = get_thread_lwp (thread);
3882
3883 if (lwp->suspended != 0)
3884 {
3885 internal_error (__FILE__, __LINE__,
3886 "LWP %ld is suspended, suspended=%d\n",
3887 lwpid_of (thread), lwp->suspended);
3888 }
3889 gdb_assert (lwp->stopped);
3890
3891	  /* Allow debugging the jump pad, gdb_collect, etc.  */
3892 return (supports_fast_tracepoints ()
3893 && agent_loaded_p ()
3894 && (gdb_breakpoint_here (lwp->stop_pc)
3895 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3896 || thread->last_resume_kind == resume_step)
3897 && (linux_fast_tracepoint_collecting (lwp, NULL)
3898 != fast_tpoint_collect_result::not_collecting));
3899 }
3900
3901 void
3902 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3903 {
3904 struct thread_info *saved_thread;
3905 struct lwp_info *lwp = get_thread_lwp (thread);
3906 int *wstat;
3907
3908 if (lwp->suspended != 0)
3909 {
3910 internal_error (__FILE__, __LINE__,
3911 "LWP %ld is suspended, suspended=%d\n",
3912 lwpid_of (thread), lwp->suspended);
3913 }
3914 gdb_assert (lwp->stopped);
3915
3916 /* For gdb_breakpoint_here. */
3917 saved_thread = current_thread;
3918 current_thread = thread;
3919
3920 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3921
3922 /* Allow debugging the jump pad, gdb_collect, etc. */
3923 if (!gdb_breakpoint_here (lwp->stop_pc)
3924 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3925 && thread->last_resume_kind != resume_step
3926 && maybe_move_out_of_jump_pad (lwp, wstat))
3927 {
3928 if (debug_threads)
3929 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3930 lwpid_of (thread));
3931
3932 if (wstat)
3933 {
3934 lwp->status_pending_p = 0;
3935 enqueue_one_deferred_signal (lwp, wstat);
3936
3937 if (debug_threads)
3938 debug_printf ("Signal %d for LWP %ld deferred "
3939 "(in jump pad)\n",
3940 WSTOPSIG (*wstat), lwpid_of (thread));
3941 }
3942
3943 resume_one_lwp (lwp, 0, 0, NULL);
3944 }
3945 else
3946 lwp_suspended_inc (lwp);
3947
3948 current_thread = saved_thread;
3949 }
3950
3951 static bool
3952 lwp_running (thread_info *thread)
3953 {
3954 struct lwp_info *lwp = get_thread_lwp (thread);
3955
3956 if (lwp_is_marked_dead (lwp))
3957 return false;
3958
3959 return !lwp->stopped;
3960 }
3961
3962 void
3963 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3964 {
3965 /* Should not be called recursively. */
3966 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3967
3968 if (debug_threads)
3969 {
3970 debug_enter ();
3971 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3972 suspend ? "stop-and-suspend" : "stop",
3973 (except != NULL
3974 ? target_pid_to_str
3975 (ptid_of (get_lwp_thread (except))).c_str ()
3976 : "none"));
3977 }
3978
3979 stopping_threads = (suspend
3980 ? STOPPING_AND_SUSPENDING_THREADS
3981 : STOPPING_THREADS);
3982
3983 if (suspend)
3984 for_each_thread ([&] (thread_info *thread)
3985 {
3986 suspend_and_send_sigstop (thread, except);
3987 });
3988 else
3989 for_each_thread ([&] (thread_info *thread)
3990 {
3991 send_sigstop (thread, except);
3992 });
3993
3994 wait_for_sigstop ();
3995 stopping_threads = NOT_STOPPING_THREADS;
3996
3997 if (debug_threads)
3998 {
3999 debug_printf ("stop_all_lwps done, setting stopping_threads "
4000 "back to !stopping\n");
4001 debug_exit ();
4002 }
4003 }
4004
4005 /* Enqueue one signal in the chain of signals which need to be
4006 delivered to this process on next resume. */
4007
4008 static void
4009 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4010 {
4011 lwp->pending_signals.emplace_back (signal);
4012 if (info == nullptr)
4013 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
4014 else
4015 lwp->pending_signals.back ().info = *info;
4016 }
4017
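/* A queued signal carries its siginfo so it can be re-injected
   faithfully later.  This standalone sketch (illustrative only, not
   built as part of gdbserver) shows the ptrace pair involved: the
   siginfo is read at signal-stop time and written back just before
   resuming with that signal, as resume_one_lwp_throw does below.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Capture the stopped tracee TID's siginfo into *INFO.  */
static long
capture_siginfo (pid_t tid, siginfo_t *info)
{
  return ptrace (PTRACE_GETSIGINFO, tid, (void *) 0, info);
}

/* Restore *INFO so the tracee observes unmodified siginfo when the
   deferred signal is finally delivered.  */
static long
reinject_siginfo (pid_t tid, siginfo_t *info)
{
  return ptrace (PTRACE_SETSIGINFO, tid, (void *) 0, info);
}
#endif
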
4018 void
4019 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4020 {
4021 struct thread_info *thread = get_lwp_thread (lwp);
4022 struct regcache *regcache = get_thread_regcache (thread, 1);
4023
4024 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4025
4026 current_thread = thread;
4027 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4028
4029 for (CORE_ADDR pc : next_pcs)
4030 set_single_step_breakpoint (pc, current_ptid);
4031 }
4032
4033 int
4034 linux_process_target::single_step (lwp_info* lwp)
4035 {
4036 int step = 0;
4037
4038 if (supports_hardware_single_step ())
4039 {
4040 step = 1;
4041 }
4042 else if (supports_software_single_step ())
4043 {
4044 install_software_single_step_breakpoints (lwp);
4045 step = 0;
4046 }
4047 else
4048 {
4049 if (debug_threads)
4050 debug_printf ("stepping is not implemented on this target");
4051 }
4052
4053 return step;
4054 }
4055
4056	/* The signal can be delivered to the inferior if we are not trying to
4057	   finish a fast tracepoint collect.  Since a signal can be delivered
4058	   during the step-over, the program may enter the signal handler and
4059	   trap again after returning from it.  We can live with such spurious
4060	   double traps.  */
4061
4062 static int
4063 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4064 {
4065 return (lwp->collecting_fast_tracepoint
4066 == fast_tpoint_collect_result::not_collecting);
4067 }
4068
4069 void
4070 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4071 int signal, siginfo_t *info)
4072 {
4073 struct thread_info *thread = get_lwp_thread (lwp);
4074 struct thread_info *saved_thread;
4075 int ptrace_request;
4076 struct process_info *proc = get_thread_process (thread);
4077
4078	  /* Note that the target description may not be initialised yet
4079	     (proc->tdesc == NULL) at this point, because the program hasn't
4080	     stopped at its first instruction.  This happens while GDBserver is
4081	     skipping the extra traps from the wrapper program (see option
4082	     --wrapper).  Code in this function that requires register access
4083	     should therefore be guarded by a check on proc->tdesc.  */
4084
4085 if (lwp->stopped == 0)
4086 return;
4087
4088 gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
4089
4090 fast_tpoint_collect_result fast_tp_collecting
4091 = lwp->collecting_fast_tracepoint;
4092
4093 gdb_assert (!stabilizing_threads
4094 || (fast_tp_collecting
4095 != fast_tpoint_collect_result::not_collecting));
4096
4097 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4098 user used the "jump" command, or "set $pc = foo"). */
4099 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4100 {
4101 /* Collecting 'while-stepping' actions doesn't make sense
4102 anymore. */
4103 release_while_stepping_state_list (thread);
4104 }
4105
4106 /* If we have pending signals or status, and a new signal, enqueue the
4107 signal. Also enqueue the signal if it can't be delivered to the
4108 inferior right now. */
4109 if (signal != 0
4110 && (lwp->status_pending_p
4111 || !lwp->pending_signals.empty ()
4112 || !lwp_signal_can_be_delivered (lwp)))
4113 {
4114 enqueue_pending_signal (lwp, signal, info);
4115
4116 /* Postpone any pending signal. It was enqueued above. */
4117 signal = 0;
4118 }
4119
4120 if (lwp->status_pending_p)
4121 {
4122 if (debug_threads)
4123 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4124 " has pending status\n",
4125 lwpid_of (thread), step ? "step" : "continue",
4126 lwp->stop_expected ? "expected" : "not expected");
4127 return;
4128 }
4129
4130 saved_thread = current_thread;
4131 current_thread = thread;
4132
4133 /* This bit needs some thinking about. If we get a signal that
4134 we must report while a single-step reinsert is still pending,
4135 we often end up resuming the thread. It might be better to
4136 (ew) allow a stack of pending events; then we could be sure that
4137 the reinsert happened right away and not lose any signals.
4138
4139 Making this stack would also shrink the window in which breakpoints are
4140 uninserted (see comment in linux_wait_for_lwp) but not enough for
4141 complete correctness, so it won't solve that problem. It may be
4142 worthwhile just to solve this one, however. */
4143 if (lwp->bp_reinsert != 0)
4144 {
4145 if (debug_threads)
4146 debug_printf (" pending reinsert at 0x%s\n",
4147 paddress (lwp->bp_reinsert));
4148
4149 if (supports_hardware_single_step ())
4150 {
4151 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4152 {
4153 if (step == 0)
4154 warning ("BAD - reinserting but not stepping.");
4155 if (lwp->suspended)
4156 warning ("BAD - reinserting and suspended(%d).",
4157 lwp->suspended);
4158 }
4159 }
4160
4161 step = maybe_hw_step (thread);
4162 }
4163
4164 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4165 {
4166 if (debug_threads)
4167 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4168 " (exit-jump-pad-bkpt)\n",
4169 lwpid_of (thread));
4170 }
4171 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4172 {
4173 if (debug_threads)
4174 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4175 " single-stepping\n",
4176 lwpid_of (thread));
4177
4178 if (supports_hardware_single_step ())
4179 step = 1;
4180 else
4181 {
4182 internal_error (__FILE__, __LINE__,
4183 "moving out of jump pad single-stepping"
4184 " not implemented on this target");
4185 }
4186 }
4187
4188	  /* If this thread has while-stepping actions, set it stepping.
4189	     If we have a signal to deliver, its handler may or may not be
4190	     set to SIG_IGN; we don't know.  Assume it is, and allow collecting
4191 while-stepping into a signal handler. A possible smart thing to
4192 do would be to set an internal breakpoint at the signal return
4193 address, continue, and carry on catching this while-stepping
4194 action only when that breakpoint is hit. A future
4195 enhancement. */
4196 if (thread->while_stepping != NULL)
4197 {
4198 if (debug_threads)
4199 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4200 lwpid_of (thread));
4201
4202 step = single_step (lwp);
4203 }
4204
4205 if (proc->tdesc != NULL && low_supports_breakpoints ())
4206 {
4207 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4208
4209 lwp->stop_pc = low_get_pc (regcache);
4210
4211 if (debug_threads)
4212 {
4213 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4214 (long) lwp->stop_pc);
4215 }
4216 }
4217
4218 /* If we have pending signals, consume one if it can be delivered to
4219 the inferior. */
4220 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4221 {
4222 const pending_signal &p_sig = lwp->pending_signals.front ();
4223
4224 signal = p_sig.signal;
4225 if (p_sig.info.si_signo != 0)
4226 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4227 &p_sig.info);
4228
4229 lwp->pending_signals.pop_front ();
4230 }
4231
4232 if (debug_threads)
4233 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4234 lwpid_of (thread), step ? "step" : "continue", signal,
4235 lwp->stop_expected ? "expected" : "not expected");
4236
4237 low_prepare_to_resume (lwp);
4238
4239 regcache_invalidate_thread (thread);
4240 errno = 0;
4241 lwp->stepping = step;
4242 if (step)
4243 ptrace_request = PTRACE_SINGLESTEP;
4244 else if (gdb_catching_syscalls_p (lwp))
4245 ptrace_request = PTRACE_SYSCALL;
4246 else
4247 ptrace_request = PTRACE_CONT;
4248 ptrace (ptrace_request,
4249 lwpid_of (thread),
4250 (PTRACE_TYPE_ARG3) 0,
4251 /* Coerce to a uintptr_t first to avoid potential gcc warning
4252	     about coercing an 8 byte integer to a 4 byte pointer.  */
4253 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4254
4255 current_thread = saved_thread;
4256 if (errno)
4257 perror_with_name ("resuming thread");
4258
4259 /* Successfully resumed. Clear state that no longer makes sense,
4260	     and mark the LWP as running.  Must not do this before resuming;
4261	     otherwise, if that fails, other code will be confused.  E.g., we'd
4262 later try to stop the LWP and hang forever waiting for a stop
4263 status. Note that we must not throw after this is cleared,
4264 otherwise handle_zombie_lwp_error would get confused. */
4265 lwp->stopped = 0;
4266 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4267 }
4268
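/* The essence of the resume primitive above, as a standalone sketch
   (illustrative only, not built as part of gdbserver): a tracee in
   signal-delivery-stop is restarted with PTRACE_CONT, PTRACE_SYSCALL
   or PTRACE_SINGLESTEP, and the data argument chooses which signal,
   if any, it receives upon resumption.  */
#if 0
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Resume TID, single-stepping if STEP, delivering SIGNO (0 for
   none).  Returns 0 on success or the errno set by ptrace.  */
static int
resume_tracee (pid_t tid, int step, int signo)
{
  errno = 0;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, tid,
	  (void *) 0, (void *) (long) signo);
  return errno;
}
#endif
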
4269 void
4270 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4271 {
4272 /* Nop. */
4273 }
4274
4275 /* Called when we try to resume a stopped LWP and that errors out. If
4276	   the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4277	   or about to become one), discard the error, clear any pending status
4278 the LWP may have, and return true (we'll collect the exit status
4279 soon enough). Otherwise, return false. */
4280
4281 static int
4282 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4283 {
4284 struct thread_info *thread = get_lwp_thread (lp);
4285
4286 /* If we get an error after resuming the LWP successfully, we'd
4287 confuse !T state for the LWP being gone. */
4288 gdb_assert (lp->stopped);
4289
4290 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4291 because even if ptrace failed with ESRCH, the tracee may be "not
4292 yet fully dead", but already refusing ptrace requests. In that
4293 case the tracee has 'R (Running)' state for a little bit
4294 (observed in Linux 3.18). See also the note on ESRCH in the
4295 ptrace(2) man page. Instead, check whether the LWP has any state
4296 other than ptrace-stopped. */
4297
4298 /* Don't assume anything if /proc/PID/status can't be read. */
4299 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4300 {
4301 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4302 lp->status_pending_p = 0;
4303 return 1;
4304 }
4305 return 0;
4306 }
4307
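/* The check above ultimately parses /proc/PID/status.  A standalone
   sketch (illustrative only, not built as part of gdbserver) of that
   probe: look for a "State:" line reporting tracing stop.  Recent
   kernels print "t (tracing stop)"; very old ones used 'T'.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
pid_is_trace_stopped (int pid)
{
  char path[64], line[256];
  int stopped = 0;
  FILE *f;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* E.g. "State:\tt (tracing stop)".  */
	stopped = (strstr (line, "(tracing stop)") != NULL);
	break;
      }
  fclose (f);
  return stopped;
}
#endif
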
4308 void
4309 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4310 siginfo_t *info)
4311 {
4312 try
4313 {
4314 resume_one_lwp_throw (lwp, step, signal, info);
4315 }
4316 catch (const gdb_exception_error &ex)
4317 {
4318 if (!check_ptrace_stopped_lwp_gone (lwp))
4319 throw;
4320 }
4321 }
4322
4323 /* This function is called once per thread via for_each_thread.
4324 We look up which resume request applies to THREAD and mark it with a
4325 pointer to the appropriate resume request.
4326
4327 This algorithm is O(threads * resume elements), but resume elements
4328 is small (and will remain small at least until GDB supports thread
4329 suspension). */
4330
4331 static void
4332 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4333 {
4334 struct lwp_info *lwp = get_thread_lwp (thread);
4335
4336 for (int ndx = 0; ndx < n; ndx++)
4337 {
4338 ptid_t ptid = resume[ndx].thread;
4339 if (ptid == minus_one_ptid
4340 || ptid == thread->id
4341 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4342 of PID'. */
4343 || (ptid.pid () == pid_of (thread)
4344 && (ptid.is_pid ()
4345 || ptid.lwp () == -1)))
4346 {
4347 if (resume[ndx].kind == resume_stop
4348 && thread->last_resume_kind == resume_stop)
4349 {
4350 if (debug_threads)
4351 debug_printf ("already %s LWP %ld at GDB's request\n",
4352 (thread->last_status.kind ()
4353 == TARGET_WAITKIND_STOPPED)
4354 ? "stopped"
4355 : "stopping",
4356 lwpid_of (thread));
4357
4358 continue;
4359 }
4360
4361 /* Ignore (wildcard) resume requests for already-resumed
4362 threads. */
4363 if (resume[ndx].kind != resume_stop
4364 && thread->last_resume_kind != resume_stop)
4365 {
4366 if (debug_threads)
4367 debug_printf ("already %s LWP %ld at GDB's request\n",
4368 (thread->last_resume_kind
4369 == resume_step)
4370 ? "stepping"
4371 : "continuing",
4372 lwpid_of (thread));
4373 continue;
4374 }
4375
4376 /* Don't let wildcard resumes resume fork children that GDB
4377 does not yet know are new fork children. */
4378 if (lwp->fork_relative != NULL)
4379 {
4380 struct lwp_info *rel = lwp->fork_relative;
4381
4382 if (rel->status_pending_p
4383 && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4384 || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4385 {
4386 if (debug_threads)
4387 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4388 lwpid_of (thread));
4389 continue;
4390 }
4391 }
4392
4393 /* If the thread has a pending event that has already been
4394 reported to GDBserver core, but GDB has not pulled the
4395 event out of the vStopped queue yet, likewise, ignore the
4396 (wildcard) resume request. */
4397 if (in_queued_stop_replies (thread->id))
4398 {
4399 if (debug_threads)
4400 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4401 lwpid_of (thread));
4402 continue;
4403 }
4404
4405 lwp->resume = &resume[ndx];
4406 thread->last_resume_kind = lwp->resume->kind;
4407
4408 lwp->step_range_start = lwp->resume->step_range_start;
4409 lwp->step_range_end = lwp->resume->step_range_end;
4410
4411 /* If we had a deferred signal to report, dequeue one now.
4412	     This can happen if the LWP gets more than one signal while
4413 trying to get out of a jump pad. */
4414 if (lwp->stopped
4415 && !lwp->status_pending_p
4416 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4417 {
4418 lwp->status_pending_p = 1;
4419
4420 if (debug_threads)
4421 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4422 "leaving status pending.\n",
4423 WSTOPSIG (lwp->status_pending),
4424 lwpid_of (thread));
4425 }
4426
4427 return;
4428 }
4429 }
4430
4431 /* No resume action for this thread. */
4432 lwp->resume = NULL;
4433 }
4434
4435 bool
4436 linux_process_target::resume_status_pending (thread_info *thread)
4437 {
4438 struct lwp_info *lwp = get_thread_lwp (thread);
4439
4440 /* LWPs which will not be resumed are not interesting, because
4441 we might not wait for them next time through linux_wait. */
4442 if (lwp->resume == NULL)
4443 return false;
4444
4445 return thread_still_has_status_pending (thread);
4446 }
4447
4448 bool
4449 linux_process_target::thread_needs_step_over (thread_info *thread)
4450 {
4451 struct lwp_info *lwp = get_thread_lwp (thread);
4452 struct thread_info *saved_thread;
4453 CORE_ADDR pc;
4454 struct process_info *proc = get_thread_process (thread);
4455
4456	  /* GDBserver is still skipping the extra traps from the wrapper
4457	     program, so there is no need to step over anything.  */
4458 if (proc->tdesc == NULL)
4459 return false;
4460
4461 /* LWPs which will not be resumed are not interesting, because we
4462 might not wait for them next time through linux_wait. */
4463
4464 if (!lwp->stopped)
4465 {
4466 if (debug_threads)
4467 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4468 lwpid_of (thread));
4469 return false;
4470 }
4471
4472 if (thread->last_resume_kind == resume_stop)
4473 {
4474 if (debug_threads)
4475 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4476 " stopped\n",
4477 lwpid_of (thread));
4478 return false;
4479 }
4480
4481 gdb_assert (lwp->suspended >= 0);
4482
4483 if (lwp->suspended)
4484 {
4485 if (debug_threads)
4486 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4487 lwpid_of (thread));
4488 return false;
4489 }
4490
4491 if (lwp->status_pending_p)
4492 {
4493 if (debug_threads)
4494 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4495 " status.\n",
4496 lwpid_of (thread));
4497 return false;
4498 }
4499
4500 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4501 or we have. */
4502 pc = get_pc (lwp);
4503
4504 /* If the PC has changed since we stopped, then don't do anything,
4505 and let the breakpoint/tracepoint be hit. This happens if, for
4506 instance, GDB handled the decr_pc_after_break subtraction itself,
4507 GDB is OOL stepping this thread, or the user has issued a "jump"
4508	     command, or poked the thread's registers herself.  */
4509 if (pc != lwp->stop_pc)
4510 {
4511 if (debug_threads)
4512 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4513 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4514 lwpid_of (thread),
4515 paddress (lwp->stop_pc), paddress (pc));
4516 return false;
4517 }
4518
4519	  /* On a software single-step target, resume the inferior with a
4520	     signal rather than stepping over.  */
4521 if (supports_software_single_step ()
4522 && !lwp->pending_signals.empty ()
4523 && lwp_signal_can_be_delivered (lwp))
4524 {
4525 if (debug_threads)
4526 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4527 " signals.\n",
4528 lwpid_of (thread));
4529
4530 return false;
4531 }
4532
4533 saved_thread = current_thread;
4534 current_thread = thread;
4535
4536 /* We can only step over breakpoints we know about. */
4537 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4538 {
4539 /* Don't step over a breakpoint that GDB expects to hit
4540	 though.  If the condition is being evaluated on the target's side
4541	 and it evaluates to false, step over this breakpoint as well.  */
4542 if (gdb_breakpoint_here (pc)
4543 && gdb_condition_true_at_breakpoint (pc)
4544 && gdb_no_commands_at_breakpoint (pc))
4545 {
4546 if (debug_threads)
4547 debug_printf ("Need step over [LWP %ld]? yes, but found"
4548 " GDB breakpoint at 0x%s; skipping step over\n",
4549 lwpid_of (thread), paddress (pc));
4550
4551 current_thread = saved_thread;
4552 return false;
4553 }
4554 else
4555 {
4556 if (debug_threads)
4557 debug_printf ("Need step over [LWP %ld]? yes, "
4558 "found breakpoint at 0x%s\n",
4559 lwpid_of (thread), paddress (pc));
4560
4561	      /* We've found an LWP that needs stepping over --- return true
4562		 so that find_thread stops looking.  */
4563 current_thread = saved_thread;
4564
4565 return true;
4566 }
4567 }
4568
4569 current_thread = saved_thread;
4570
4571 if (debug_threads)
4572 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4573 " at 0x%s\n",
4574 lwpid_of (thread), paddress (pc));
4575
4576 return false;
4577 }
4578
4579 void
4580 linux_process_target::start_step_over (lwp_info *lwp)
4581 {
4582 struct thread_info *thread = get_lwp_thread (lwp);
4583 struct thread_info *saved_thread;
4584 CORE_ADDR pc;
4585 int step;
4586
4587 if (debug_threads)
4588 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4589 lwpid_of (thread));
4590
4591 stop_all_lwps (1, lwp);
4592
4593 if (lwp->suspended != 0)
4594 {
4595 internal_error (__FILE__, __LINE__,
4596 "LWP %ld suspended=%d\n", lwpid_of (thread),
4597 lwp->suspended);
4598 }
4599
4600 if (debug_threads)
4601 debug_printf ("Done stopping all threads for step-over.\n");
4602
4603 /* Note, we should always reach here with an already adjusted PC,
4604 either by GDB (if we're resuming due to GDB's request), or by our
4605 caller, if we just finished handling an internal breakpoint GDB
4606 shouldn't care about. */
4607 pc = get_pc (lwp);
4608
4609 saved_thread = current_thread;
4610 current_thread = thread;
4611
4612 lwp->bp_reinsert = pc;
4613 uninsert_breakpoints_at (pc);
4614 uninsert_fast_tracepoint_jumps_at (pc);
4615
4616 step = single_step (lwp);
4617
4618 current_thread = saved_thread;
4619
4620 resume_one_lwp (lwp, step, 0, NULL);
4621
4622 /* Require next event from this LWP. */
4623 step_over_bkpt = thread->id;
4624 }
4625
4626 bool
4627 linux_process_target::finish_step_over (lwp_info *lwp)
4628 {
4629 if (lwp->bp_reinsert != 0)
4630 {
4631 struct thread_info *saved_thread = current_thread;
4632
4633 if (debug_threads)
4634 debug_printf ("Finished step over.\n");
4635
4636 current_thread = get_lwp_thread (lwp);
4637
4638 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4639 may be no breakpoint to reinsert there by now. */
4640 reinsert_breakpoints_at (lwp->bp_reinsert);
4641 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4642
4643 lwp->bp_reinsert = 0;
4644
4645 /* Delete any single-step breakpoints. No longer needed. We
4646 don't have to worry about other threads hitting this trap,
4647 and later not being able to explain it, because we were
4648 stepping over a breakpoint, and we hold all threads but
4649 LWP stopped while doing that. */
4650 if (!supports_hardware_single_step ())
4651 {
4652 gdb_assert (has_single_step_breakpoints (current_thread));
4653 delete_single_step_breakpoints (current_thread);
4654 }
4655
4656 step_over_bkpt = null_ptid;
4657 current_thread = saved_thread;
4658 return true;
4659 }
4660 else
4661 return false;
4662 }
4663
4664 void
4665 linux_process_target::complete_ongoing_step_over ()
4666 {
4667 if (step_over_bkpt != null_ptid)
4668 {
4669 struct lwp_info *lwp;
4670 int wstat;
4671 int ret;
4672
4673 if (debug_threads)
4674 debug_printf ("detach: step over in progress, finish it first\n");
4675
4676 /* Passing NULL_PTID as filter indicates we want all events to
4677 be left pending. Eventually this returns when there are no
4678 unwaited-for children left. */
4679 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4680 __WALL);
4681 gdb_assert (ret == -1);
4682
4683 lwp = find_lwp_pid (step_over_bkpt);
4684 if (lwp != NULL)
4685 {
4686 finish_step_over (lwp);
4687
4688 /* If we got our step SIGTRAP, don't leave it pending,
4689 otherwise we would report it to GDB as a spurious
4690 SIGTRAP. */
4691 gdb_assert (lwp->status_pending_p);
4692 if (WIFSTOPPED (lwp->status_pending)
4693 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4694 {
4695 thread_info *thread = get_lwp_thread (lwp);
4696 if (thread->last_resume_kind != resume_step)
4697 {
4698 if (debug_threads)
4699 debug_printf ("detach: discard step-over SIGTRAP\n");
4700
4701 lwp->status_pending_p = 0;
4702 lwp->status_pending = 0;
4703 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4704 }
4705 else
4706 {
4707 if (debug_threads)
4708 debug_printf ("detach: resume_step, "
4709 "not discarding step-over SIGTRAP\n");
4710 }
4711 }
4712 }
4713 step_over_bkpt = null_ptid;
4714 unsuspend_all_lwps (lwp);
4715 }
4716 }
4717
4718 void
4719 linux_process_target::resume_one_thread (thread_info *thread,
4720 bool leave_all_stopped)
4721 {
4722 struct lwp_info *lwp = get_thread_lwp (thread);
4723 int leave_pending;
4724
4725 if (lwp->resume == NULL)
4726 return;
4727
4728 if (lwp->resume->kind == resume_stop)
4729 {
4730 if (debug_threads)
4731 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4732
4733 if (!lwp->stopped)
4734 {
4735 if (debug_threads)
4736 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4737
4738 /* Stop the thread, and wait for the event asynchronously,
4739 through the event loop. */
4740 send_sigstop (lwp);
4741 }
4742 else
4743 {
4744 if (debug_threads)
4745 debug_printf ("already stopped LWP %ld\n",
4746 lwpid_of (thread));
4747
4748 /* The LWP may have been stopped in an internal event that
4749 was not meant to be notified back to GDB (e.g., gdbserver
4750 breakpoint), so we should be reporting a stop event in
4751 this case too. */
4752
4753 /* If the thread already has a pending SIGSTOP, this is a
4754 no-op. Otherwise, something later will presumably resume
4755 the thread and this will cause it to cancel any pending
4756 operation, due to last_resume_kind == resume_stop. If
4757 the thread already has a pending status to report, we
4758 will still report it the next time we wait - see
4759 status_pending_p_callback. */
4760
4761 /* If we already have a pending signal to report, then
4762 there's no need to queue a SIGSTOP, as this means we're
4763 midway through moving the LWP out of the jumppad, and we
4764 will report the pending signal as soon as that is
4765 finished. */
4766 if (lwp->pending_signals_to_report.empty ())
4767 send_sigstop (lwp);
4768 }
4769
4770 /* For stop requests, we're done. */
4771 lwp->resume = NULL;
4772 thread->last_status.set_ignore ();
4773 return;
4774 }
4775
4776	  /* If this thread, which is about to be resumed, has a pending
4777	     status, then don't resume it - we can just report the pending
4778	     status.  Likewise if it is suspended, because e.g., another thread
4779	     is stepping past a breakpoint.  Make sure to queue any signals
4780	     that would otherwise be sent.  In all-stop mode, we make this
4781	     decision based on whether *any* thread has a pending status.  If
4782	     there's a thread that needs the step-over-breakpoint dance, then
4783	     don't resume any other thread but that particular one.  */
4784 leave_pending = (lwp->suspended
4785 || lwp->status_pending_p
4786 || leave_all_stopped);
4787
4788 /* If we have a new signal, enqueue the signal. */
4789 if (lwp->resume->sig != 0)
4790 {
4791 siginfo_t info, *info_p;
4792
4793 /* If this is the same signal we were previously stopped by,
4794 make sure to queue its siginfo. */
4795 if (WIFSTOPPED (lwp->last_status)
4796 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4797 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4798 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4799 info_p = &info;
4800 else
4801 info_p = NULL;
4802
4803 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4804 }
4805
4806 if (!leave_pending)
4807 {
4808 if (debug_threads)
4809 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4810
4811 proceed_one_lwp (thread, NULL);
4812 }
4813 else
4814 {
4815 if (debug_threads)
4816 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4817 }
4818
4819 thread->last_status.set_ignore ();
4820 lwp->resume = NULL;
4821 }
4822
4823 void
4824 linux_process_target::resume (thread_resume *resume_info, size_t n)
4825 {
4826 struct thread_info *need_step_over = NULL;
4827
4828 if (debug_threads)
4829 {
4830 debug_enter ();
4831 debug_printf ("linux_resume:\n");
4832 }
4833
4834 for_each_thread ([&] (thread_info *thread)
4835 {
4836 linux_set_resume_request (thread, resume_info, n);
4837 });
4838
4839 /* If there is a thread which would otherwise be resumed, which has
4840 a pending status, then don't resume any threads - we can just
4841 report the pending status. Make sure to queue any signals that
4842 would otherwise be sent. In non-stop mode, we'll apply this
4843 logic to each thread individually. We consume all pending events
4844	     before considering whether to start a step-over (in all-stop).  */
4845 bool any_pending = false;
4846 if (!non_stop)
4847 any_pending = find_thread ([this] (thread_info *thread)
4848 {
4849 return resume_status_pending (thread);
4850 }) != nullptr;
4851
4852 /* If there is a thread which would otherwise be resumed, which is
4853 stopped at a breakpoint that needs stepping over, then don't
4854 resume any threads - have it step over the breakpoint with all
4855 other threads stopped, then resume all threads again. Make sure
4856 to queue any signals that would otherwise be delivered or
4857 queued. */
4858 if (!any_pending && low_supports_breakpoints ())
4859 need_step_over = find_thread ([this] (thread_info *thread)
4860 {
4861 return thread_needs_step_over (thread);
4862 });
4863
4864 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4865
4866 if (debug_threads)
4867 {
4868 if (need_step_over != NULL)
4869 debug_printf ("Not resuming all, need step over\n");
4870 else if (any_pending)
4871 debug_printf ("Not resuming, all-stop and found "
4872 "an LWP with pending status\n");
4873 else
4874 debug_printf ("Resuming, no pending status or step over needed\n");
4875 }
4876
4877 /* Even if we're leaving threads stopped, queue all signals we'd
4878 otherwise deliver. */
4879 for_each_thread ([&] (thread_info *thread)
4880 {
4881 resume_one_thread (thread, leave_all_stopped);
4882 });
4883
4884 if (need_step_over)
4885 start_step_over (get_thread_lwp (need_step_over));
4886
4887 if (debug_threads)
4888 {
4889 debug_printf ("linux_resume done\n");
4890 debug_exit ();
4891 }
4892
4893 /* We may have events that were pending that can/should be sent to
4894 the client now. Trigger a linux_wait call. */
4895 if (target_is_async_p ())
4896 async_file_mark ();
4897 }
4898
4899 void
4900 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4901 {
4902 struct lwp_info *lwp = get_thread_lwp (thread);
4903 int step;
4904
4905 if (lwp == except)
4906 return;
4907
4908 if (debug_threads)
4909 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4910
4911 if (!lwp->stopped)
4912 {
4913 if (debug_threads)
4914 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4915 return;
4916 }
4917
4918 if (thread->last_resume_kind == resume_stop
4919 && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4920 {
4921 if (debug_threads)
4922	debug_printf ("  client wants LWP %ld to remain stopped\n",
4923 lwpid_of (thread));
4924 return;
4925 }
4926
4927 if (lwp->status_pending_p)
4928 {
4929 if (debug_threads)
4930 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4931 lwpid_of (thread));
4932 return;
4933 }
4934
4935 gdb_assert (lwp->suspended >= 0);
4936
4937 if (lwp->suspended)
4938 {
4939 if (debug_threads)
4940 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4941 return;
4942 }
4943
4944 if (thread->last_resume_kind == resume_stop
4945 && lwp->pending_signals_to_report.empty ()
4946 && (lwp->collecting_fast_tracepoint
4947 == fast_tpoint_collect_result::not_collecting))
4948 {
4949 /* We haven't reported this LWP as stopped yet (otherwise, the
4950 last_status.kind check above would catch it, and we wouldn't
4951	 reach here).  This LWP may have been momentarily paused by a
4952	 stop_all_lwps call while handling, for example, another LWP's
4953 step-over. In that case, the pending expected SIGSTOP signal
4954 that was queued at vCont;t handling time will have already
4955 been consumed by wait_for_sigstop, and so we need to requeue
4956 another one here. Note that if the LWP already has a SIGSTOP
4957 pending, this is a no-op. */
4958
4959 if (debug_threads)
4960 debug_printf ("Client wants LWP %ld to stop. "
4961 "Making sure it has a SIGSTOP pending\n",
4962 lwpid_of (thread));
4963
4964 send_sigstop (lwp);
4965 }
4966
4967 if (thread->last_resume_kind == resume_step)
4968 {
4969 if (debug_threads)
4970 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4971 lwpid_of (thread));
4972
4973	      /* If GDB requested resume_step, install single-step breakpoints
4974		 when the thread is about to actually be resumed, unless they
4975		 are already in place.  */
4976 if (supports_software_single_step ()
4977 && !has_single_step_breakpoints (thread))
4978 install_software_single_step_breakpoints (lwp);
4979
4980 step = maybe_hw_step (thread);
4981 }
4982 else if (lwp->bp_reinsert != 0)
4983 {
4984 if (debug_threads)
4985 debug_printf (" stepping LWP %ld, reinsert set\n",
4986 lwpid_of (thread));
4987
4988 step = maybe_hw_step (thread);
4989 }
4990 else
4991 step = 0;
4992
4993 resume_one_lwp (lwp, step, 0, NULL);
4994 }
4995
4996 void
4997 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4998 lwp_info *except)
4999 {
5000 struct lwp_info *lwp = get_thread_lwp (thread);
5001
5002 if (lwp == except)
5003 return;
5004
5005 lwp_suspended_decr (lwp);
5006
5007 proceed_one_lwp (thread, except);
5008 }
5009
5010 void
5011 linux_process_target::proceed_all_lwps ()
5012 {
5013 struct thread_info *need_step_over;
5014
5015 /* If there is a thread which would otherwise be resumed, which is
5016 stopped at a breakpoint that needs stepping over, then don't
5017 resume any threads - have it step over the breakpoint with all
5018 other threads stopped, then resume all threads again. */
5019
5020 if (low_supports_breakpoints ())
5021 {
5022 need_step_over = find_thread ([this] (thread_info *thread)
5023 {
5024 return thread_needs_step_over (thread);
5025 });
5026
5027 if (need_step_over != NULL)
5028 {
5029 if (debug_threads)
5030 debug_printf ("proceed_all_lwps: found "
5031 "thread %ld needing a step-over\n",
5032 lwpid_of (need_step_over));
5033
5034 start_step_over (get_thread_lwp (need_step_over));
5035 return;
5036 }
5037 }
5038
5039 if (debug_threads)
5040 debug_printf ("Proceeding, no step-over needed\n");
5041
5042 for_each_thread ([this] (thread_info *thread)
5043 {
5044 proceed_one_lwp (thread, NULL);
5045 });
5046 }
5047
5048 void
5049 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5050 {
5051 if (debug_threads)
5052 {
5053 debug_enter ();
5054 if (except)
5055 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5056 lwpid_of (get_lwp_thread (except)));
5057 else
5058 debug_printf ("unstopping all lwps\n");
5059 }
5060
5061 if (unsuspend)
5062 for_each_thread ([&] (thread_info *thread)
5063 {
5064 unsuspend_and_proceed_one_lwp (thread, except);
5065 });
5066 else
5067 for_each_thread ([&] (thread_info *thread)
5068 {
5069 proceed_one_lwp (thread, except);
5070 });
5071
5072 if (debug_threads)
5073 {
5074 debug_printf ("unstop_all_lwps done\n");
5075 debug_exit ();
5076 }
5077 }
5078
5079
5080 #ifdef HAVE_LINUX_REGSETS
5081
5082 #define use_linux_regsets 1
5083
5084 /* Returns true if REGSET has been disabled. */
5085
5086 static int
5087 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5088 {
5089 return (info->disabled_regsets != NULL
5090 && info->disabled_regsets[regset - info->regsets]);
5091 }
5092
5093 /* Disable REGSET. */
5094
5095 static void
5096 disable_regset (struct regsets_info *info, struct regset_info *regset)
5097 {
5098 int dr_offset;
5099
5100 dr_offset = regset - info->regsets;
5101 if (info->disabled_regsets == NULL)
5102 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5103 info->disabled_regsets[dr_offset] = 1;
5104 }
5105
5106 static int
5107 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5108 struct regcache *regcache)
5109 {
5110 struct regset_info *regset;
5111 int saw_general_regs = 0;
5112 int pid;
5113 struct iovec iov;
5114
5115 pid = lwpid_of (current_thread);
5116 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5117 {
5118 void *buf, *data;
5119 int nt_type, res;
5120
5121 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5122 continue;
5123
5124 buf = xmalloc (regset->size);
5125
5126 nt_type = regset->nt_type;
5127 if (nt_type)
5128 {
5129 iov.iov_base = buf;
5130 iov.iov_len = regset->size;
5131 data = (void *) &iov;
5132 }
5133 else
5134 data = buf;
5135
5136 #ifndef __sparc__
5137 res = ptrace (regset->get_request, pid,
5138 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5139 #else
5140 res = ptrace (regset->get_request, pid, data, nt_type);
5141 #endif
5142 if (res < 0)
5143 {
5144 if (errno == EIO
5145 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5146 {
5147 /* If we get EIO on a regset, or an EINVAL and the regset is
5148 optional, do not try it again for this process mode. */
5149 disable_regset (regsets_info, regset);
5150 }
5151 else if (errno == ENODATA)
5152 {
5153 /* ENODATA may be returned if the regset is currently
5154 not "active". This can happen in normal operation,
5155 so suppress the warning in this case. */
5156 }
5157 else if (errno == ESRCH)
5158 {
5159 /* At this point, ESRCH should mean the process is
5160 already gone, in which case we simply ignore attempts
5161 to read its registers. */
5162 }
5163 else
5164 {
5165 char s[256];
5166 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5167 pid);
5168 perror (s);
5169 }
5170 }
5171 else
5172 {
5173 if (regset->type == GENERAL_REGS)
5174 saw_general_regs = 1;
5175 regset->store_function (regcache, buf);
5176 }
5177 free (buf);
5178 }
5179 if (saw_general_regs)
5180 return 0;
5181 else
5182 return 1;
5183 }
5184
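/* The NT_TYPE branch above uses the iovec flavour of regset access.
   A standalone sketch (illustrative only, not built as part of
   gdbserver): PTRACE_GETREGSET takes the NT_* note type where the
   address argument would normally go, plus a struct iovec; the kernel
   shrinks iov_len to the amount of data it actually supplied.
   Assumes an arch whose <sys/user.h> provides user_regs_struct.  */
#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

/* Fetch the general-purpose registers of TID into *REGS.  */
static long
fetch_gregs (pid_t tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif
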
5185 static int
5186 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5187 struct regcache *regcache)
5188 {
5189 struct regset_info *regset;
5190 int saw_general_regs = 0;
5191 int pid;
5192 struct iovec iov;
5193
5194 pid = lwpid_of (current_thread);
5195 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5196 {
5197 void *buf, *data;
5198 int nt_type, res;
5199
5200 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5201 || regset->fill_function == NULL)
5202 continue;
5203
5204 buf = xmalloc (regset->size);
5205
5206 /* First fill the buffer with the current register set contents,
5207 in case there are any items in the kernel's regset that are
5208 not in gdbserver's regcache. */
5209
5210 nt_type = regset->nt_type;
5211 if (nt_type)
5212 {
5213 iov.iov_base = buf;
5214 iov.iov_len = regset->size;
5215 data = (void *) &iov;
5216 }
5217 else
5218 data = buf;
5219
5220 #ifndef __sparc__
5221 res = ptrace (regset->get_request, pid,
5222 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5223 #else
5224 res = ptrace (regset->get_request, pid, data, nt_type);
5225 #endif
5226
5227 if (res == 0)
5228 {
5229 /* Then overlay our cached registers on that. */
5230 regset->fill_function (regcache, buf);
5231
5232 /* Only now do we write the register set. */
5233 #ifndef __sparc__
5234 res = ptrace (regset->set_request, pid,
5235 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5236 #else
5237 res = ptrace (regset->set_request, pid, data, nt_type);
5238 #endif
5239 }
5240
5241 if (res < 0)
5242 {
5243 if (errno == EIO
5244 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5245 {
5246 /* If we get EIO on a regset, or an EINVAL and the regset is
5247 optional, do not try it again for this process mode. */
5248 disable_regset (regsets_info, regset);
5249 }
5250 else if (errno == ESRCH)
5251 {
5252 /* At this point, ESRCH should mean the process is
5253 already gone, in which case we simply ignore attempts
5254 to change its registers. See also the related
5255 comment in resume_one_lwp. */
5256 free (buf);
5257 return 0;
5258 }
5259 else
5260 {
5261 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5262 }
5263 }
5264 else if (regset->type == GENERAL_REGS)
5265 saw_general_regs = 1;
5266 free (buf);
5267 }
5268 if (saw_general_regs)
5269 return 0;
5270 else
5271 return 1;
5272 }
5273
5274 #else /* !HAVE_LINUX_REGSETS */
5275
5276 #define use_linux_regsets 0
5277 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5278 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5279
5280 #endif
5281
5282 /* Return 1 if register REGNO is supported by one of the regset ptrace
5283 calls or 0 if it has to be transferred individually. */
5284
5285 static int
5286 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5287 {
5288 unsigned char mask = 1 << (regno % 8);
5289 size_t index = regno / 8;
5290
5291 return (use_linux_regsets
5292 && (regs_info->regset_bitmap == NULL
5293 || (regs_info->regset_bitmap[index] & mask) != 0));
5294 }
5295
5296 #ifdef HAVE_LINUX_USRREGS
5297
5298 static int
5299 register_addr (const struct usrregs_info *usrregs, int regnum)
5300 {
5301 int addr;
5302
5303 if (regnum < 0 || regnum >= usrregs->num_regs)
5304 error ("Invalid register number %d.", regnum);
5305
5306 addr = usrregs->regmap[regnum];
5307
5308 return addr;
5309 }
5310
5311
5312 void
5313 linux_process_target::fetch_register (const usrregs_info *usrregs,
5314 regcache *regcache, int regno)
5315 {
5316 CORE_ADDR regaddr;
5317 int i, size;
5318 char *buf;
5319 int pid;
5320
5321 if (regno >= usrregs->num_regs)
5322 return;
5323 if (low_cannot_fetch_register (regno))
5324 return;
5325
5326 regaddr = register_addr (usrregs, regno);
5327 if (regaddr == -1)
5328 return;
5329
5330 size = ((register_size (regcache->tdesc, regno)
5331 + sizeof (PTRACE_XFER_TYPE) - 1)
5332 & -sizeof (PTRACE_XFER_TYPE));
5333 buf = (char *) alloca (size);
5334
5335 pid = lwpid_of (current_thread);
5336 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5337 {
5338 errno = 0;
5339 *(PTRACE_XFER_TYPE *) (buf + i) =
5340 ptrace (PTRACE_PEEKUSER, pid,
5341 /* Coerce to a uintptr_t first to avoid potential gcc warning
5342	     about coercing an 8 byte integer to a 4 byte pointer.  */
5343 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5344 regaddr += sizeof (PTRACE_XFER_TYPE);
5345 if (errno != 0)
5346 {
5347 /* Mark register REGNO unavailable. */
5348 supply_register (regcache, regno, NULL);
5349 return;
5350 }
5351 }
5352
5353 low_supply_ptrace_register (regcache, regno, buf);
5354 }
5355
5356 void
5357 linux_process_target::store_register (const usrregs_info *usrregs,
5358 regcache *regcache, int regno)
5359 {
5360 CORE_ADDR regaddr;
5361 int i, size;
5362 char *buf;
5363 int pid;
5364
5365 if (regno >= usrregs->num_regs)
5366 return;
5367 if (low_cannot_store_register (regno))
5368 return;
5369
5370 regaddr = register_addr (usrregs, regno);
5371 if (regaddr == -1)
5372 return;
5373
5374 size = ((register_size (regcache->tdesc, regno)
5375 + sizeof (PTRACE_XFER_TYPE) - 1)
5376 & -sizeof (PTRACE_XFER_TYPE));
5377 buf = (char *) alloca (size);
5378 memset (buf, 0, size);
5379
5380 low_collect_ptrace_register (regcache, regno, buf);
5381
5382 pid = lwpid_of (current_thread);
5383 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5384 {
5385 errno = 0;
5386 ptrace (PTRACE_POKEUSER, pid,
5387 /* Coerce to a uintptr_t first to avoid potential gcc warning
5388 about coercing an 8 byte integer to a 4 byte pointer. */
5389 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5390 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5391 if (errno != 0)
5392 {
5393 /* At this point, ESRCH should mean the process is
5394 already gone, in which case we simply ignore attempts
5395 to change its registers. See also the related
5396 comment in resume_one_lwp. */
5397 if (errno == ESRCH)
5398 return;
5399
5400
5401 if (!low_cannot_store_register (regno))
5402 error ("writing register %d: %s", regno, safe_strerror (errno));
5403 }
5404 regaddr += sizeof (PTRACE_XFER_TYPE);
5405 }
5406 }
5407 #endif /* HAVE_LINUX_USRREGS */
5408
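/* fetch_register and store_register above transfer USER-area
   registers one word at a time.  A standalone sketch (illustrative
   only, not built as part of gdbserver) of the awkward
   PTRACE_PEEKUSER error convention: the word comes back as the
   return value, so errors are visible only through errno.  */
#if 0
#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Read one word at USER-area OFFSET of TID into *WORD.  Returns 0 on
   success, -1 on error.  */
static int
peek_user_word (pid_t tid, long offset, long *word)
{
  errno = 0;
  *word = ptrace (PTRACE_PEEKUSER, tid, (void *) offset, (void *) 0);
  return errno == 0 ? 0 : -1;
}
#endif
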
5409 void
5410 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5411 int regno, char *buf)
5412 {
5413 collect_register (regcache, regno, buf);
5414 }
5415
5416 void
5417 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5418 int regno, const char *buf)
5419 {
5420 supply_register (regcache, regno, buf);
5421 }
5422
5423 void
5424 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5425 regcache *regcache,
5426 int regno, int all)
5427 {
5428 #ifdef HAVE_LINUX_USRREGS
5429 struct usrregs_info *usr = regs_info->usrregs;
5430
5431 if (regno == -1)
5432 {
5433 for (regno = 0; regno < usr->num_regs; regno++)
5434 if (all || !linux_register_in_regsets (regs_info, regno))
5435 fetch_register (usr, regcache, regno);
5436 }
5437 else
5438 fetch_register (usr, regcache, regno);
5439 #endif
5440 }
5441
5442 void
5443 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5444 regcache *regcache,
5445 int regno, int all)
5446 {
5447 #ifdef HAVE_LINUX_USRREGS
5448 struct usrregs_info *usr = regs_info->usrregs;
5449
5450 if (regno == -1)
5451 {
5452 for (regno = 0; regno < usr->num_regs; regno++)
5453 if (all || !linux_register_in_regsets (regs_info, regno))
5454 store_register (usr, regcache, regno);
5455 }
5456 else
5457 store_register (usr, regcache, regno);
5458 #endif
5459 }
5460
5461 void
5462 linux_process_target::fetch_registers (regcache *regcache, int regno)
5463 {
5464 int use_regsets;
5465 int all = 0;
5466 const regs_info *regs_info = get_regs_info ();
5467
5468 if (regno == -1)
5469 {
5470 if (regs_info->usrregs != NULL)
5471 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5472 low_fetch_register (regcache, regno);
5473
5474 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5475 if (regs_info->usrregs != NULL)
5476 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5477 }
5478 else
5479 {
5480 if (low_fetch_register (regcache, regno))
5481 return;
5482
5483 use_regsets = linux_register_in_regsets (regs_info, regno);
5484 if (use_regsets)
5485 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5486 regcache);
5487 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5488 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5489 }
5490 }
5491
5492 void
5493 linux_process_target::store_registers (regcache *regcache, int regno)
5494 {
5495 int use_regsets;
5496 int all = 0;
5497 const regs_info *regs_info = get_regs_info ();
5498
5499 if (regno == -1)
5500 {
5501 all = regsets_store_inferior_registers (regs_info->regsets_info,
5502 regcache);
5503 if (regs_info->usrregs != NULL)
5504 usr_store_inferior_registers (regs_info, regcache, regno, all);
5505 }
5506 else
5507 {
5508 use_regsets = linux_register_in_regsets (regs_info, regno);
5509 if (use_regsets)
5510 all = regsets_store_inferior_registers (regs_info->regsets_info,
5511 regcache);
5512 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5513 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5514 }
5515 }
5516
5517 bool
5518 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5519 {
5520 return false;
5521 }
5522
5523 /* A wrapper for the read_memory target op. */
5524
5525 static int
5526 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5527 {
5528 return the_target->read_memory (memaddr, myaddr, len);
5529 }
5530
5531 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5532 to debugger memory starting at MYADDR. */
5533
5534 int
5535 linux_process_target::read_memory (CORE_ADDR memaddr,
5536 unsigned char *myaddr, int len)
5537 {
5538 int pid = lwpid_of (current_thread);
5539 PTRACE_XFER_TYPE *buffer;
5540 CORE_ADDR addr;
5541 int count;
5542 char filename[64];
5543 int i;
5544 int ret;
5545 int fd;
5546
5547 /* Try using /proc. Don't bother for one word. */
5548 if (len >= 3 * sizeof (long))
5549 {
5550 int bytes;
5551
5552 /* We could keep this file open and cache it - possibly one per
5553 thread. That requires some juggling, but is even faster. */
5554 sprintf (filename, "/proc/%d/mem", pid);
5555 fd = open (filename, O_RDONLY | O_LARGEFILE);
5556 if (fd == -1)
5557 goto no_proc;
5558
5559 /* If pread64 is available, use it. It's faster if the kernel
5560 supports it (only one syscall), and it's 64-bit safe even on
5561 32-bit platforms (for instance, SPARC debugging a SPARC64
5562 application). */
5563 #ifdef HAVE_PREAD64
5564 bytes = pread64 (fd, myaddr, len, memaddr);
5565 #else
5566 bytes = -1;
5567 if (lseek (fd, memaddr, SEEK_SET) != -1)
5568 bytes = read (fd, myaddr, len);
5569 #endif
5570
5571 close (fd);
5572 if (bytes == len)
5573 return 0;
5574
5575	      /* Some data was read; we'll try to get the rest with ptrace.  */
5576 if (bytes > 0)
5577 {
5578 memaddr += bytes;
5579 myaddr += bytes;
5580 len -= bytes;
5581 }
5582 }
5583
5584 no_proc:
5585 /* Round starting address down to longword boundary. */
5586 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5587 /* Round ending address up; get number of longwords that makes. */
5588 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5589 / sizeof (PTRACE_XFER_TYPE));
5590 /* Allocate buffer of that many longwords. */
5591 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5592
5593	  /* Read all the longwords.  */
5594 errno = 0;
5595 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5596 {
5597 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5598 about coercing an 8 byte integer to a 4 byte pointer. */
5599 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5600 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5601 (PTRACE_TYPE_ARG4) 0);
5602 if (errno)
5603 break;
5604 }
5605 ret = errno;
5606
5607 /* Copy appropriate bytes out of the buffer. */
5608 if (i > 0)
5609 {
5610 i *= sizeof (PTRACE_XFER_TYPE);
5611 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5612 memcpy (myaddr,
5613 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5614 i < len ? i : len);
5615 }
5616
5617 return ret;
5618 }
5619
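/* The fast path above reads inferior memory through /proc/PID/mem,
   replacing one ptrace round trip per word with a single pread.  A
   standalone sketch (illustrative only, not built as part of
   gdbserver), assuming a glibc system where _GNU_SOURCE exposes
   pread64.  */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read LEN bytes at ADDR in PID's address space into BUF.  Returns
   the number of bytes read, or -1 on error.  */
static ssize_t
proc_mem_read (int pid, unsigned long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;
  n = pread64 (fd, buf, len, (off64_t) addr);
  close (fd);
  return n;
}
#endif
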
5620 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5621 memory at MEMADDR. On failure (cannot write to the inferior)
5622 returns the value of errno. Always succeeds if LEN is zero. */
5623
5624 int
5625 linux_process_target::write_memory (CORE_ADDR memaddr,
5626 const unsigned char *myaddr, int len)
5627 {
5628 int i;
5629 /* Round starting address down to longword boundary. */
5630 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5631 /* Round ending address up; get number of longwords that makes. */
5632 int count
5633 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5634 / sizeof (PTRACE_XFER_TYPE);
5635
5636 /* Allocate buffer of that many longwords. */
5637 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5638
5639 int pid = lwpid_of (current_thread);
5640
5641 if (len == 0)
5642 {
5643 /* Zero length write always succeeds. */
5644 return 0;
5645 }
5646
5647 if (debug_threads)
5648 {
5649 /* Dump up to four bytes. */
5650 char str[4 * 2 + 1];
5651 char *p = str;
5652 int dump = len < 4 ? len : 4;
5653
5654 for (i = 0; i < dump; i++)
5655 {
5656 sprintf (p, "%02x", myaddr[i]);
5657 p += 2;
5658 }
5659 *p = '\0';
5660
5661 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5662 str, (long) memaddr, pid);
5663 }
5664
5665 /* Fill start and end extra bytes of buffer with existing memory data. */
5666
5667 errno = 0;
5668 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5669 about coercing an 8 byte integer to a 4 byte pointer. */
5670 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5671 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5672 (PTRACE_TYPE_ARG4) 0);
5673 if (errno)
5674 return errno;
5675
5676 if (count > 1)
5677 {
5678 errno = 0;
5679 buffer[count - 1]
5680 = ptrace (PTRACE_PEEKTEXT, pid,
5681 /* Coerce to a uintptr_t first to avoid a potential gcc warning
5682 about coercing an 8-byte integer to a 4-byte pointer. */
5683 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5684 * sizeof (PTRACE_XFER_TYPE)),
5685 (PTRACE_TYPE_ARG4) 0);
5686 if (errno)
5687 return errno;
5688 }
5689
5690 /* Copy data to be written over corresponding part of buffer. */
5691
5692 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5693 myaddr, len);
5694
5695 /* Write the entire buffer. */
5696
5697 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5698 {
5699 errno = 0;
5700 ptrace (PTRACE_POKETEXT, pid,
5701 /* Coerce to a uintptr_t first to avoid a potential gcc warning
5702 about coercing an 8-byte integer to a 4-byte pointer. */
5703 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5704 (PTRACE_TYPE_ARG4) buffer[i]);
5705 if (errno)
5706 return errno;
5707 }
5708
5709 return 0;
5710 }
5711
5712 void
5713 linux_process_target::look_up_symbols ()
5714 {
5715 #ifdef USE_THREAD_DB
5716 struct process_info *proc = current_process ();
5717
5718 if (proc->priv->thread_db != NULL)
5719 return;
5720
5721 thread_db_init ();
5722 #endif
5723 }
5724
5725 void
5726 linux_process_target::request_interrupt ()
5727 {
5728 /* Send a SIGINT to the process group. This acts just as if the
5729 user had typed a ^C on the controlling terminal. */
5730 ::kill (-signal_pid, SIGINT);
5731 }
5732
5733 bool
5734 linux_process_target::supports_read_auxv ()
5735 {
5736 return true;
5737 }
5738
5739 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5740 to debugger memory starting at MYADDR. */
5741
5742 int
5743 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5744 unsigned int len)
5745 {
5746 char filename[PATH_MAX];
5747 int fd, n;
5748 int pid = lwpid_of (current_thread);
5749
5750 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5751
5752 fd = open (filename, O_RDONLY);
5753 if (fd < 0)
5754 return -1;
5755
5756 if (offset != (CORE_ADDR) 0
5757 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5758 n = -1;
5759 else
5760 n = read (fd, myaddr, len);
5761
5762 close (fd);
5763
5764 return n;
5765 }
5766
5767 int
5768 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5769 int size, raw_breakpoint *bp)
5770 {
5771 if (type == raw_bkpt_type_sw)
5772 return insert_memory_breakpoint (bp);
5773 else
5774 return low_insert_point (type, addr, size, bp);
5775 }
5776
5777 int
5778 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5779 int size, raw_breakpoint *bp)
5780 {
5781 /* Unsupported (see target.h). */
5782 return 1;
5783 }
5784
5785 int
5786 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5787 int size, raw_breakpoint *bp)
5788 {
5789 if (type == raw_bkpt_type_sw)
5790 return remove_memory_breakpoint (bp);
5791 else
5792 return low_remove_point (type, addr, size, bp);
5793 }
5794
5795 int
5796 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5797 int size, raw_breakpoint *bp)
5798 {
5799 /* Unsupported (see target.h). */
5800 return 1;
5801 }
5802
5803 /* Implement the stopped_by_sw_breakpoint target_ops
5804 method. */
5805
5806 bool
5807 linux_process_target::stopped_by_sw_breakpoint ()
5808 {
5809 struct lwp_info *lwp = get_thread_lwp (current_thread);
5810
5811 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5812 }
5813
5814 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5815 method. */
5816
5817 bool
5818 linux_process_target::supports_stopped_by_sw_breakpoint ()
5819 {
5820 return USE_SIGTRAP_SIGINFO;
5821 }
5822
5823 /* Implement the stopped_by_hw_breakpoint target_ops
5824 method. */
5825
5826 bool
5827 linux_process_target::stopped_by_hw_breakpoint ()
5828 {
5829 struct lwp_info *lwp = get_thread_lwp (current_thread);
5830
5831 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5832 }
5833
5834 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5835 method. */
5836
5837 bool
5838 linux_process_target::supports_stopped_by_hw_breakpoint ()
5839 {
5840 return USE_SIGTRAP_SIGINFO;
5841 }
5842
5843 /* Implement the supports_hardware_single_step target_ops method. */
5844
5845 bool
5846 linux_process_target::supports_hardware_single_step ()
5847 {
5848 return true;
5849 }
5850
5851 bool
5852 linux_process_target::stopped_by_watchpoint ()
5853 {
5854 struct lwp_info *lwp = get_thread_lwp (current_thread);
5855
5856 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5857 }
5858
5859 CORE_ADDR
5860 linux_process_target::stopped_data_address ()
5861 {
5862 struct lwp_info *lwp = get_thread_lwp (current_thread);
5863
5864 return lwp->stopped_data_address;
5865 }
5866
5867 /* This is only used for targets that define PT_TEXT_ADDR,
5868 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5869 target presumably has other ways of acquiring this information,
5870 such as loadmaps. */
5871
5872 bool
5873 linux_process_target::supports_read_offsets ()
5874 {
5875 #ifdef SUPPORTS_READ_OFFSETS
5876 return true;
5877 #else
5878 return false;
5879 #endif
5880 }
5881
5882 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5883 to tell gdb about. */
5884
5885 int
5886 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5887 {
5888 #ifdef SUPPORTS_READ_OFFSETS
5889 unsigned long text, text_end, data;
5890 int pid = lwpid_of (current_thread);
5891
5892 errno = 0;
5893
5894 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5895 (PTRACE_TYPE_ARG4) 0);
5896 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5897 (PTRACE_TYPE_ARG4) 0);
5898 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5899 (PTRACE_TYPE_ARG4) 0);
5900
5901 if (errno == 0)
5902 {
5903 /* Both text and data offsets produced at compile-time (and so
5904 used by gdb) are relative to the beginning of the program,
5905 with the data segment immediately following the text segment.
5906 However, the actual runtime layout in memory may put the data
5907 somewhere else, so when we send gdb a data base-address, we
5908 use the real data base address and subtract the compile-time
5909 data base-address from it (which is just the length of the
5910 text segment). BSS immediately follows data in both
5911 cases. */
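/* Worked example (values assumed for illustration): if ptrace
reports text = 0x10000, text_end = 0x14000 and data = 0x20000, the
text segment is 0x4000 bytes long, so gdb is told the data offset
is 0x20000 - 0x4000 = 0x1c000. */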
5912 *text_p = text;
5913 *data_p = data - (text_end - text);
5914
5915 return 1;
5916 }
5917 return 0;
5918 #else
5919 gdb_assert_not_reached ("target op read_offsets not supported");
5920 #endif
5921 }
5922
5923 bool
5924 linux_process_target::supports_get_tls_address ()
5925 {
5926 #ifdef USE_THREAD_DB
5927 return true;
5928 #else
5929 return false;
5930 #endif
5931 }
5932
5933 int
5934 linux_process_target::get_tls_address (thread_info *thread,
5935 CORE_ADDR offset,
5936 CORE_ADDR load_module,
5937 CORE_ADDR *address)
5938 {
5939 #ifdef USE_THREAD_DB
5940 return thread_db_get_tls_address (thread, offset, load_module, address);
5941 #else
5942 return -1;
5943 #endif
5944 }
5945
5946 bool
5947 linux_process_target::supports_qxfer_osdata ()
5948 {
5949 return true;
5950 }
5951
5952 int
5953 linux_process_target::qxfer_osdata (const char *annex,
5954 unsigned char *readbuf,
5955 unsigned const char *writebuf,
5956 CORE_ADDR offset, int len)
5957 {
5958 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5959 }
5960
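/* Convert SIGINFO between the layout ptrace uses ("native") and the
layout the inferior expects. When DIRECTION is 0, fill INF_SIGINFO
from SIGINFO; when DIRECTION is 1, fill SIGINFO from INF_SIGINFO.
The low target's fixup hook gets the first chance to convert. */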
5961 void
5962 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5963 gdb_byte *inf_siginfo, int direction)
5964 {
5965 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5966
5967 /* If there was no callback, or the callback didn't do anything,
5968 then just do a straight memcpy. */
5969 if (!done)
5970 {
5971 if (direction == 1)
5972 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5973 else
5974 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5975 }
5976 }
5977
5978 bool
5979 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5980 int direction)
5981 {
5982 return false;
5983 }
5984
5985 bool
5986 linux_process_target::supports_qxfer_siginfo ()
5987 {
5988 return true;
5989 }
5990
5991 int
5992 linux_process_target::qxfer_siginfo (const char *annex,
5993 unsigned char *readbuf,
5994 unsigned const char *writebuf,
5995 CORE_ADDR offset, int len)
5996 {
5997 int pid;
5998 siginfo_t siginfo;
5999 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6000
6001 if (current_thread == NULL)
6002 return -1;
6003
6004 pid = lwpid_of (current_thread);
6005
6006 if (debug_threads)
6007 debug_printf ("%s siginfo for lwp %d.\n",
6008 readbuf != NULL ? "Reading" : "Writing",
6009 pid);
6010
6011 if (offset >= sizeof (siginfo))
6012 return -1;
6013
6014 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6015 return -1;
6016
6017 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6018 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6019 inferior with a 64-bit GDBSERVER should look the same as debugging it
6020 with a 32-bit GDBSERVER, we need to convert it. */
6021 siginfo_fixup (&siginfo, inf_siginfo, 0);
6022
6023 if (offset + len > sizeof (siginfo))
6024 len = sizeof (siginfo) - offset;
6025
6026 if (readbuf != NULL)
6027 memcpy (readbuf, inf_siginfo + offset, len);
6028 else
6029 {
6030 memcpy (inf_siginfo + offset, writebuf, len);
6031
6032 /* Convert back to ptrace layout before flushing it out. */
6033 siginfo_fixup (&siginfo, inf_siginfo, 1);
6034
6035 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6036 return -1;
6037 }
6038
6039 return len;
6040 }
6041
6042 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6043 it lets us notice when children change state; and it acts as the
6044 handler for the sigsuspend in my_waitpid. */
6045
6046 static void
6047 sigchld_handler (int signo)
6048 {
6049 int old_errno = errno;
6050
6051 if (debug_threads)
6052 {
6053 do
6054 {
6055 /* Use the async-signal-safe debug function. */
6056 if (debug_write ("sigchld_handler\n",
6057 sizeof ("sigchld_handler\n") - 1) < 0)
6058 break; /* just ignore */
6059 } while (0);
6060 }
6061
6062 if (target_is_async_p ())
6063 async_file_mark (); /* trigger a linux_wait */
6064
6065 errno = old_errno;
6066 }
6067
6068 bool
6069 linux_process_target::supports_non_stop ()
6070 {
6071 return true;
6072 }
6073
6074 bool
6075 linux_process_target::async (bool enable)
6076 {
6077 bool previous = target_is_async_p ();
6078
6079 if (debug_threads)
6080 debug_printf ("linux_async (%d), previous=%d\n",
6081 enable, previous);
6082
6083 if (previous != enable)
6084 {
6085 sigset_t mask;
6086 sigemptyset (&mask);
6087 sigaddset (&mask, SIGCHLD);
6088
6089 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6090
6091 if (enable)
6092 {
6093 if (pipe (linux_event_pipe) == -1)
6094 {
6095 linux_event_pipe[0] = -1;
6096 linux_event_pipe[1] = -1;
6097 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6098
6099 warning ("creating event pipe failed.");
6100 return previous;
6101 }
6102
6103 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6104 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6105
6106 /* Register the event loop handler. */
6107 add_file_handler (linux_event_pipe[0],
6108 handle_target_event, NULL,
6109 "linux-low");
6110
6111 /* Always trigger a linux_wait. */
6112 async_file_mark ();
6113 }
6114 else
6115 {
6116 delete_file_handler (linux_event_pipe[0]);
6117
6118 close (linux_event_pipe[0]);
6119 close (linux_event_pipe[1]);
6120 linux_event_pipe[0] = -1;
6121 linux_event_pipe[1] = -1;
6122 }
6123
6124 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6125 }
6126
6127 return previous;
6128 }
6129
6130 int
6131 linux_process_target::start_non_stop (bool nonstop)
6132 {
6133 /* Register or unregister from event-loop accordingly. */
6134 target_async (nonstop);
6135
6136 if (target_is_async_p () != (nonstop != false))
6137 return -1;
6138
6139 return 0;
6140 }
6141
6142 bool
6143 linux_process_target::supports_multi_process ()
6144 {
6145 return true;
6146 }
6147
6148 /* Check if fork events are supported. */
6149
6150 bool
6151 linux_process_target::supports_fork_events ()
6152 {
6153 return linux_supports_tracefork ();
6154 }
6155
6156 /* Check if vfork events are supported. */
6157
6158 bool
6159 linux_process_target::supports_vfork_events ()
6160 {
6161 return linux_supports_tracefork ();
6162 }
6163
6164 /* Check if exec events are supported. */
6165
6166 bool
6167 linux_process_target::supports_exec_events ()
6168 {
6169 return linux_supports_traceexec ();
6170 }
6171
6172 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6173 ptrace flags for all inferiors. This is in case the new GDB connection
6174 doesn't support the same set of events that the previous one did. */
6175
6176 void
6177 linux_process_target::handle_new_gdb_connection ()
6178 {
6179 /* Request that all the lwps reset their ptrace options. */
6180 for_each_thread ([] (thread_info *thread)
6181 {
6182 struct lwp_info *lwp = get_thread_lwp (thread);
6183
6184 if (!lwp->stopped)
6185 {
6186 /* Stop the lwp so we can modify its ptrace options. */
6187 lwp->must_set_ptrace_flags = 1;
6188 linux_stop_lwp (lwp);
6189 }
6190 else
6191 {
6192 /* Already stopped; go ahead and set the ptrace options. */
6193 struct process_info *proc = find_process_pid (pid_of (thread));
6194 int options = linux_low_ptrace_options (proc->attached);
6195
6196 linux_enable_event_reporting (lwpid_of (thread), options);
6197 lwp->must_set_ptrace_flags = 0;
6198 }
6199 });
6200 }
6201
6202 int
6203 linux_process_target::handle_monitor_command (char *mon)
6204 {
6205 #ifdef USE_THREAD_DB
6206 return thread_db_handle_monitor_command (mon);
6207 #else
6208 return 0;
6209 #endif
6210 }
6211
6212 int
6213 linux_process_target::core_of_thread (ptid_t ptid)
6214 {
6215 return linux_common_core_of_thread (ptid);
6216 }
6217
6218 bool
6219 linux_process_target::supports_disable_randomization ()
6220 {
6221 return true;
6222 }
6223
6224 bool
6225 linux_process_target::supports_agent ()
6226 {
6227 return true;
6228 }
6229
6230 bool
6231 linux_process_target::supports_range_stepping ()
6232 {
6233 if (supports_software_single_step ())
6234 return true;
6235
6236 return low_supports_range_stepping ();
6237 }
6238
6239 bool
6240 linux_process_target::low_supports_range_stepping ()
6241 {
6242 return false;
6243 }
6244
6245 bool
6246 linux_process_target::supports_pid_to_exec_file ()
6247 {
6248 return true;
6249 }
6250
6251 const char *
6252 linux_process_target::pid_to_exec_file (int pid)
6253 {
6254 return linux_proc_pid_to_exec_file (pid);
6255 }
6256
6257 bool
6258 linux_process_target::supports_multifs ()
6259 {
6260 return true;
6261 }
6262
6263 int
6264 linux_process_target::multifs_open (int pid, const char *filename,
6265 int flags, mode_t mode)
6266 {
6267 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6268 }
6269
6270 int
6271 linux_process_target::multifs_unlink (int pid, const char *filename)
6272 {
6273 return linux_mntns_unlink (pid, filename);
6274 }
6275
6276 ssize_t
6277 linux_process_target::multifs_readlink (int pid, const char *filename,
6278 char *buf, size_t bufsiz)
6279 {
6280 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6281 }
6282
6283 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6284 struct target_loadseg
6285 {
6286 /* Core address to which the segment is mapped. */
6287 Elf32_Addr addr;
6288 /* VMA recorded in the program header. */
6289 Elf32_Addr p_vaddr;
6290 /* Size of this segment in memory. */
6291 Elf32_Word p_memsz;
6292 };
6293
6294 # if defined PT_GETDSBT
6295 struct target_loadmap
6296 {
6297 /* Protocol version number, must be zero. */
6298 Elf32_Word version;
6299 /* Pointer to the DSBT table, its size, and the DSBT index. */
6300 unsigned *dsbt_table;
6301 unsigned dsbt_size, dsbt_index;
6302 /* Number of segments in this map. */
6303 Elf32_Word nsegs;
6304 /* The actual memory map. */
6305 struct target_loadseg segs[/*nsegs*/];
6306 };
6307 # define LINUX_LOADMAP PT_GETDSBT
6308 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6309 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6310 # else
6311 struct target_loadmap
6312 {
6313 /* Protocol version number, must be zero. */
6314 Elf32_Half version;
6315 /* Number of segments in this map. */
6316 Elf32_Half nsegs;
6317 /* The actual memory map. */
6318 struct target_loadseg segs[/*nsegs*/];
6319 };
6320 # define LINUX_LOADMAP PTRACE_GETFDPIC
6321 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6322 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6323 # endif
6324
6325 bool
6326 linux_process_target::supports_read_loadmap ()
6327 {
6328 return true;
6329 }
6330
6331 int
6332 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6333 unsigned char *myaddr, unsigned int len)
6334 {
6335 int pid = lwpid_of (current_thread);
6336 int addr = -1;
6337 struct target_loadmap *data = NULL;
6338 unsigned int actual_length, copy_length;
6339
6340 if (strcmp (annex, "exec") == 0)
6341 addr = (int) LINUX_LOADMAP_EXEC;
6342 else if (strcmp (annex, "interp") == 0)
6343 addr = (int) LINUX_LOADMAP_INTERP;
6344 else
6345 return -1;
6346
6347 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6348 return -1;
6349
6350 if (data == NULL)
6351 return -1;
6352
6353 actual_length = sizeof (struct target_loadmap)
6354 + sizeof (struct target_loadseg) * data->nsegs;
6355
6356 if (offset < 0 || offset > actual_length)
6357 return -1;
6358
6359 copy_length = actual_length - offset < len ? actual_length - offset : len;
6360 memcpy (myaddr, (char *) data + offset, copy_length);
6361 return copy_length;
6362 }
6363 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6364
6365 bool
6366 linux_process_target::supports_catch_syscall ()
6367 {
6368 return (low_supports_catch_syscall ()
6369 && linux_supports_tracesysgood ());
6370 }
6371
6372 bool
6373 linux_process_target::low_supports_catch_syscall ()
6374 {
6375 return false;
6376 }
6377
6378 CORE_ADDR
6379 linux_process_target::read_pc (regcache *regcache)
6380 {
6381 if (!low_supports_breakpoints ())
6382 return 0;
6383
6384 return low_get_pc (regcache);
6385 }
6386
6387 void
6388 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6389 {
6390 gdb_assert (low_supports_breakpoints ());
6391
6392 low_set_pc (regcache, pc);
6393 }
6394
6395 bool
6396 linux_process_target::supports_thread_stopped ()
6397 {
6398 return true;
6399 }
6400
6401 bool
6402 linux_process_target::thread_stopped (thread_info *thread)
6403 {
6404 return get_thread_lwp (thread)->stopped;
6405 }
6406
6407 /* This exposes stop-all-threads functionality to other modules. */
6408
6409 void
6410 linux_process_target::pause_all (bool freeze)
6411 {
6412 stop_all_lwps (freeze, NULL);
6413 }
6414
6415 /* This exposes unstop-all-threads functionality to other gdbserver
6416 modules. */
6417
6418 void
6419 linux_process_target::unpause_all (bool unfreeze)
6420 {
6421 unstop_all_lwps (unfreeze, NULL);
6422 }
6423
6424 int
6425 linux_process_target::prepare_to_access_memory ()
6426 {
6427 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6428 running LWP. */
6429 if (non_stop)
6430 target_pause_all (true);
6431 return 0;
6432 }
6433
6434 void
6435 linux_process_target::done_accessing_memory ()
6436 {
6437 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6438 running LWP. */
6439 if (non_stop)
6440 target_unpause_all (true);
6441 }
6442
6443 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6444
6445 static int
6446 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6447 CORE_ADDR *phdr_memaddr, int *num_phdr)
6448 {
6449 char filename[PATH_MAX];
6450 int fd;
6451 const int auxv_size = is_elf64
6452 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6453 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6454
6455 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6456
6457 fd = open (filename, O_RDONLY);
6458 if (fd < 0)
6459 return 1;
6460
6461 *phdr_memaddr = 0;
6462 *num_phdr = 0;
6463 while (read (fd, buf, auxv_size) == auxv_size
6464 && (*phdr_memaddr == 0 || *num_phdr == 0))
6465 {
6466 if (is_elf64)
6467 {
6468 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6469
6470 switch (aux->a_type)
6471 {
6472 case AT_PHDR:
6473 *phdr_memaddr = aux->a_un.a_val;
6474 break;
6475 case AT_PHNUM:
6476 *num_phdr = aux->a_un.a_val;
6477 break;
6478 }
6479 }
6480 else
6481 {
6482 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6483
6484 switch (aux->a_type)
6485 {
6486 case AT_PHDR:
6487 *phdr_memaddr = aux->a_un.a_val;
6488 break;
6489 case AT_PHNUM:
6490 *num_phdr = aux->a_un.a_val;
6491 break;
6492 }
6493 }
6494 }
6495
6496 close (fd);
6497
6498 if (*phdr_memaddr == 0 || *num_phdr == 0)
6499 {
6500 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6501 "phdr_memaddr = %ld, phdr_num = %d",
6502 (long) *phdr_memaddr, *num_phdr);
6503 return 2;
6504 }
6505
6506 return 0;
6507 }
6508
6509 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6510
6511 static CORE_ADDR
6512 get_dynamic (const int pid, const int is_elf64)
6513 {
6514 CORE_ADDR phdr_memaddr, relocation;
6515 int num_phdr, i;
6516 unsigned char *phdr_buf;
6517 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6518
6519 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6520 return 0;
6521
6522 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6523 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6524
6525 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6526 return 0;
6527
6528 /* Compute relocation: it is expected to be 0 for "regular" executables,
6529 non-zero for PIE ones. */
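/* Example (addresses assumed for illustration): a PIE whose PT_PHDR
records p_vaddr = 0x40 but whose program headers are found at
phdr_memaddr = 0x555555554040 has a relocation (load bias) of
0x555555554000, which is added to each p_vaddr below. */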
6530 relocation = -1;
6531 for (i = 0; relocation == -1 && i < num_phdr; i++)
6532 if (is_elf64)
6533 {
6534 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6535
6536 if (p->p_type == PT_PHDR)
6537 relocation = phdr_memaddr - p->p_vaddr;
6538 }
6539 else
6540 {
6541 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6542
6543 if (p->p_type == PT_PHDR)
6544 relocation = phdr_memaddr - p->p_vaddr;
6545 }
6546
6547 if (relocation == -1)
6548 {
6549 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6550 real-world executables, including PIE executables, always have
6551 PT_PHDR present. PT_PHDR is missing from some shared libraries and
6552 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6553 provides DT_DEBUG anyway (fpc binaries are statically linked).
6554
6555 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6556
6557 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6558
6559 return 0;
6560 }
6561
6562 for (i = 0; i < num_phdr; i++)
6563 {
6564 if (is_elf64)
6565 {
6566 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6567
6568 if (p->p_type == PT_DYNAMIC)
6569 return p->p_vaddr + relocation;
6570 }
6571 else
6572 {
6573 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6574
6575 if (p->p_type == PT_DYNAMIC)
6576 return p->p_vaddr + relocation;
6577 }
6578 }
6579
6580 return 0;
6581 }
6582
6583 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6584 can be 0 if the inferior does not yet have the library list initialized.
6585 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6586 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
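
/* Note on the MIPS tags handled below: DT_MIPS_RLD_MAP's d_val is an
absolute address from which the map pointer is read, while
DT_MIPS_RLD_MAP_REL's d_val is relative to the address of the dynamic
entry itself, hence the d_val + dynamic_memaddr read. */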
6587
6588 static CORE_ADDR
6589 get_r_debug (const int pid, const int is_elf64)
6590 {
6591 CORE_ADDR dynamic_memaddr;
6592 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6593 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6594 CORE_ADDR map = -1;
6595
6596 dynamic_memaddr = get_dynamic (pid, is_elf64);
6597 if (dynamic_memaddr == 0)
6598 return map;
6599
6600 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6601 {
6602 if (is_elf64)
6603 {
6604 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6605 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6606 union
6607 {
6608 Elf64_Xword map;
6609 unsigned char buf[sizeof (Elf64_Xword)];
6610 }
6611 rld_map;
6612 #endif
6613 #ifdef DT_MIPS_RLD_MAP
6614 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6615 {
6616 if (linux_read_memory (dyn->d_un.d_val,
6617 rld_map.buf, sizeof (rld_map.buf)) == 0)
6618 return rld_map.map;
6619 else
6620 break;
6621 }
6622 #endif /* DT_MIPS_RLD_MAP */
6623 #ifdef DT_MIPS_RLD_MAP_REL
6624 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6625 {
6626 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6627 rld_map.buf, sizeof (rld_map.buf)) == 0)
6628 return rld_map.map;
6629 else
6630 break;
6631 }
6632 #endif /* DT_MIPS_RLD_MAP_REL */
6633
6634 if (dyn->d_tag == DT_DEBUG && map == -1)
6635 map = dyn->d_un.d_val;
6636
6637 if (dyn->d_tag == DT_NULL)
6638 break;
6639 }
6640 else
6641 {
6642 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6643 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6644 union
6645 {
6646 Elf32_Word map;
6647 unsigned char buf[sizeof (Elf32_Word)];
6648 }
6649 rld_map;
6650 #endif
6651 #ifdef DT_MIPS_RLD_MAP
6652 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6653 {
6654 if (linux_read_memory (dyn->d_un.d_val,
6655 rld_map.buf, sizeof (rld_map.buf)) == 0)
6656 return rld_map.map;
6657 else
6658 break;
6659 }
6660 #endif /* DT_MIPS_RLD_MAP */
6661 #ifdef DT_MIPS_RLD_MAP_REL
6662 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6663 {
6664 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6665 rld_map.buf, sizeof (rld_map.buf)) == 0)
6666 return rld_map.map;
6667 else
6668 break;
6669 }
6670 #endif /* DT_MIPS_RLD_MAP_REL */
6671
6672 if (dyn->d_tag == DT_DEBUG && map == -1)
6673 map = dyn->d_un.d_val;
6674
6675 if (dyn->d_tag == DT_NULL)
6676 break;
6677 }
6678
6679 dynamic_memaddr += dyn_size;
6680 }
6681
6682 return map;
6683 }
6684
6685 /* Read one pointer from MEMADDR in the inferior. */
6686
6687 static int
6688 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6689 {
6690 int ret;
6691
6692 /* Go through a union so this works on either big- or little-endian
6693 hosts, when the inferior's pointer size is smaller than the size
6694 of CORE_ADDR. The inferior's endianness is assumed to be the
6695 same as the superior's. */
6696 union
6697 {
6698 CORE_ADDR core_addr;
6699 unsigned int ui;
6700 unsigned char uc;
6701 } addr;
6702
6703 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6704 if (ret == 0)
6705 {
6706 if (ptr_size == sizeof (CORE_ADDR))
6707 *ptr = addr.core_addr;
6708 else if (ptr_size == sizeof (unsigned int))
6709 *ptr = addr.ui;
6710 else
6711 gdb_assert_not_reached ("unhandled pointer size");
6712 }
6713 return ret;
6714 }
6715
6716 bool
6717 linux_process_target::supports_qxfer_libraries_svr4 ()
6718 {
6719 return true;
6720 }
6721
6722 struct link_map_offsets
6723 {
6724 /* Offset and size of r_debug.r_version. */
6725 int r_version_offset;
6726
6727 /* Offset and size of r_debug.r_map. */
6728 int r_map_offset;
6729
6730 /* Offset to l_addr field in struct link_map. */
6731 int l_addr_offset;
6732
6733 /* Offset to l_name field in struct link_map. */
6734 int l_name_offset;
6735
6736 /* Offset to l_ld field in struct link_map. */
6737 int l_ld_offset;
6738
6739 /* Offset to l_next field in struct link_map. */
6740 int l_next_offset;
6741
6742 /* Offset to l_prev field in struct link_map. */
6743 int l_prev_offset;
6744 };
6745
6746 /* Construct qXfer:libraries-svr4:read reply. */
6747
6748 int
6749 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6750 unsigned char *readbuf,
6751 unsigned const char *writebuf,
6752 CORE_ADDR offset, int len)
6753 {
6754 struct process_info_private *const priv = current_process ()->priv;
6755 char filename[PATH_MAX];
6756 int pid, is_elf64;
6757
6758 static const struct link_map_offsets lmo_32bit_offsets =
6759 {
6760 0, /* r_version offset. */
6761 4, /* r_debug.r_map offset. */
6762 0, /* l_addr offset in link_map. */
6763 4, /* l_name offset in link_map. */
6764 8, /* l_ld offset in link_map. */
6765 12, /* l_next offset in link_map. */
6766 16 /* l_prev offset in link_map. */
6767 };
6768
6769 static const struct link_map_offsets lmo_64bit_offsets =
6770 {
6771 0, /* r_version offset. */
6772 8, /* r_debug.r_map offset. */
6773 0, /* l_addr offset in link_map. */
6774 8, /* l_name offset in link_map. */
6775 16, /* l_ld offset in link_map. */
6776 24, /* l_next offset in link_map. */
6777 32 /* l_prev offset in link_map. */
6778 };
6779 const struct link_map_offsets *lmo;
6780 unsigned int machine;
6781 int ptr_size;
6782 CORE_ADDR lm_addr = 0, lm_prev = 0;
6783 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6784 int header_done = 0;
6785
6786 if (writebuf != NULL)
6787 return -2;
6788 if (readbuf == NULL)
6789 return -1;
6790
6791 pid = lwpid_of (current_thread);
6792 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6793 is_elf64 = elf_64_file_p (filename, &machine);
6794 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6795 ptr_size = is_elf64 ? 8 : 4;
6796
6797 while (annex[0] != '\0')
6798 {
6799 const char *sep;
6800 CORE_ADDR *addrp;
6801 int name_len;
6802
6803 sep = strchr (annex, '=');
6804 if (sep == NULL)
6805 break;
6806
6807 name_len = sep - annex;
6808 if (name_len == 5 && startswith (annex, "start"))
6809 addrp = &lm_addr;
6810 else if (name_len == 4 && startswith (annex, "prev"))
6811 addrp = &lm_prev;
6812 else
6813 {
6814 annex = strchr (sep, ';');
6815 if (annex == NULL)
6816 break;
6817 annex++;
6818 continue;
6819 }
6820
6821 annex = decode_address_to_semicolon (addrp, sep + 1);
6822 }
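/* For example (annex contents assumed for illustration), an annex of
"start=7f0012345678;prev=0;" resumes the link-map walk from that
address instead of re-reading it from r_debug.r_map. */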
6823
6824 if (lm_addr == 0)
6825 {
6826 int r_version = 0;
6827
6828 if (priv->r_debug == 0)
6829 priv->r_debug = get_r_debug (pid, is_elf64);
6830
6831 /* We failed to find DT_DEBUG. Such a situation will not change
6832 for this inferior - do not retry it. Report it to GDB as
6833 E01; see solib-svr4.c on the GDB side for the reasons. */
6834 if (priv->r_debug == (CORE_ADDR) -1)
6835 return -1;
6836
6837 if (priv->r_debug != 0)
6838 {
6839 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6840 (unsigned char *) &r_version,
6841 sizeof (r_version)) != 0
6842 || r_version < 1)
6843 {
6844 warning ("unexpected r_debug version %d", r_version);
6845 }
6846 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6847 &lm_addr, ptr_size) != 0)
6848 {
6849 warning ("unable to read r_map from 0x%lx",
6850 (long) priv->r_debug + lmo->r_map_offset);
6851 }
6852 }
6853 }
6854
6855 std::string document = "<library-list-svr4 version=\"1.0\"";
6856
6857 while (lm_addr
6858 && read_one_ptr (lm_addr + lmo->l_name_offset,
6859 &l_name, ptr_size) == 0
6860 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6861 &l_addr, ptr_size) == 0
6862 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6863 &l_ld, ptr_size) == 0
6864 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6865 &l_prev, ptr_size) == 0
6866 && read_one_ptr (lm_addr + lmo->l_next_offset,
6867 &l_next, ptr_size) == 0)
6868 {
6869 unsigned char libname[PATH_MAX];
6870
6871 if (lm_prev != l_prev)
6872 {
6873 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6874 (long) lm_prev, (long) l_prev);
6875 break;
6876 }
6877
6878 /* Ignore the first entry even if it has a valid name, as the first
6879 entry corresponds to the main executable. The first entry should
6880 not be skipped if the dynamic loader was loaded late by a static
6881 executable (see solib-svr4.c parameter ignore_first). But in such
6882 a case the main executable does not have PT_DYNAMIC present, and
6883 this function will already have exited above as get_r_debug failed. */
6884 if (lm_prev == 0)
6885 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6886 else
6887 {
6888 /* Not checking for error because reading may stop before
6889 we've got PATH_MAX worth of characters. */
6890 libname[0] = '\0';
6891 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6892 libname[sizeof (libname) - 1] = '\0';
6893 if (libname[0] != '\0')
6894 {
6895 if (!header_done)
6896 {
6897 /* Terminate `<library-list-svr4'. */
6898 document += '>';
6899 header_done = 1;
6900 }
6901
6902 string_appendf (document, "<library name=\"");
6903 xml_escape_text_append (&document, (char *) libname);
6904 string_appendf (document, "\" lm=\"0x%lx\" "
6905 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6906 (unsigned long) lm_addr, (unsigned long) l_addr,
6907 (unsigned long) l_ld);
6908 }
6909 }
6910
6911 lm_prev = lm_addr;
6912 lm_addr = l_next;
6913 }
6914
6915 if (!header_done)
6916 {
6917 /* Empty list; terminate `<library-list-svr4'. */
6918 document += "/>";
6919 }
6920 else
6921 document += "</library-list-svr4>";
6922
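/* The assembled reply has this shape (field values illustrative):
<library-list-svr4 version="1.0" main-lm="...">
<library name="..." lm="..." l_addr="..." l_ld="..."/>
</library-list-svr4>
It is clamped below to the OFFSET/LEN window that GDB requested. */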
6923 int document_len = document.length ();
6924 if (offset < document_len)
6925 document_len -= offset;
6926 else
6927 document_len = 0;
6928 if (len > document_len)
6929 len = document_len;
6930
6931 memcpy (readbuf, document.data () + offset, len);
6932
6933 return len;
6934 }
6935
6936 #ifdef HAVE_LINUX_BTRACE
6937
6938 btrace_target_info *
6939 linux_process_target::enable_btrace (ptid_t ptid,
6940 const btrace_config *conf)
6941 {
6942 return linux_enable_btrace (ptid, conf);
6943 }
6944
6945 /* See to_disable_btrace target method. */
6946
6947 int
6948 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6949 {
6950 enum btrace_error err;
6951
6952 err = linux_disable_btrace (tinfo);
6953 return (err == BTRACE_ERR_NONE ? 0 : -1);
6954 }
6955
6956 /* Encode an Intel Processor Trace configuration. */
6957
6958 static void
6959 linux_low_encode_pt_config (struct buffer *buffer,
6960 const struct btrace_data_pt_config *config)
6961 {
6962 buffer_grow_str (buffer, "<pt-config>\n");
6963
6964 switch (config->cpu.vendor)
6965 {
6966 case CV_INTEL:
6967 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6968 "model=\"%u\" stepping=\"%u\"/>\n",
6969 config->cpu.family, config->cpu.model,
6970 config->cpu.stepping);
6971 break;
6972
6973 default:
6974 break;
6975 }
6976
6977 buffer_grow_str (buffer, "</pt-config>\n");
6978 }
6979
6980 /* Encode a raw buffer. */
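/* For example (input assumed for illustration), the bytes
{ 0xa5, 0x00 } are emitted as "a500". */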
6981
6982 static void
6983 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6984 unsigned int size)
6985 {
6986 if (size == 0)
6987 return;
6988
6989 /* We use hex encoding - see gdbsupport/rsp-low.h. */
6990 buffer_grow_str (buffer, "<raw>\n");
6991
6992 while (size-- > 0)
6993 {
6994 char elem[2];
6995
6996 elem[0] = tohex ((*data >> 4) & 0xf);
6997 elem[1] = tohex (*data++ & 0xf);
6998
6999 buffer_grow (buffer, elem, 2);
7000 }
7001
7002 buffer_grow_str (buffer, "</raw>\n");
7003 }
7004
7005 /* See to_read_btrace target method. */
7006
7007 int
7008 linux_process_target::read_btrace (btrace_target_info *tinfo,
7009 buffer *buffer,
7010 enum btrace_read_type type)
7011 {
7012 struct btrace_data btrace;
7013 enum btrace_error err;
7014
7015 err = linux_read_btrace (&btrace, tinfo, type);
7016 if (err != BTRACE_ERR_NONE)
7017 {
7018 if (err == BTRACE_ERR_OVERFLOW)
7019 buffer_grow_str0 (buffer, "E.Overflow.");
7020 else
7021 buffer_grow_str0 (buffer, "E.Generic Error.");
7022
7023 return -1;
7024 }
7025
7026 switch (btrace.format)
7027 {
7028 case BTRACE_FORMAT_NONE:
7029 buffer_grow_str0 (buffer, "E.No Trace.");
7030 return -1;
7031
7032 case BTRACE_FORMAT_BTS:
7033 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7034 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7035
7036 for (const btrace_block &block : *btrace.variant.bts.blocks)
7037 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7038 paddress (block.begin), paddress (block.end));
7039
7040 buffer_grow_str0 (buffer, "</btrace>\n");
7041 break;
7042
7043 case BTRACE_FORMAT_PT:
7044 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7045 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7046 buffer_grow_str (buffer, "<pt>\n");
7047
7048 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7049
7050 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7051 btrace.variant.pt.size);
7052
7053 buffer_grow_str (buffer, "</pt>\n");
7054 buffer_grow_str0 (buffer, "</btrace>\n");
7055 break;
7056
7057 default:
7058 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7059 return -1;
7060 }
7061
7062 return 0;
7063 }
7064
7065 /* See to_btrace_conf target method. */
7066
7067 int
7068 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7069 buffer *buffer)
7070 {
7071 const struct btrace_config *conf;
7072
7073 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7074 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7075
7076 conf = linux_btrace_conf (tinfo);
7077 if (conf != NULL)
7078 {
7079 switch (conf->format)
7080 {
7081 case BTRACE_FORMAT_NONE:
7082 break;
7083
7084 case BTRACE_FORMAT_BTS:
7085 buffer_xml_printf (buffer, "<bts");
7086 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7087 buffer_xml_printf (buffer, " />\n");
7088 break;
7089
7090 case BTRACE_FORMAT_PT:
7091 buffer_xml_printf (buffer, "<pt");
7092 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7093 buffer_xml_printf (buffer, "/>\n");
7094 break;
7095 }
7096 }
7097
7098 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7099 return 0;
7100 }
7101 #endif /* HAVE_LINUX_BTRACE */
7102
7103 /* See nat/linux-nat.h. */
7104
7105 ptid_t
7106 current_lwp_ptid (void)
7107 {
7108 return ptid_of (current_thread);
7109 }
7110
7111 const char *
7112 linux_process_target::thread_name (ptid_t thread)
7113 {
7114 return linux_proc_tid_get_name (thread);
7115 }
7116
7117 #if USE_THREAD_DB
7118 bool
7119 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7120 int *handle_len)
7121 {
7122 return thread_db_thread_handle (ptid, handle, handle_len);
7123 }
7124 #endif
7125
7126 /* Default implementation of linux_target_ops method "set_pc" for a
7127 32-bit pc register that is literally named "pc". */
7128
7129 void
7130 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7131 {
7132 uint32_t newpc = pc;
7133
7134 supply_register_by_name (regcache, "pc", &newpc);
7135 }
7136
7137 /* Default implementation of linux_target_ops method "get_pc" for a
7138 32-bit pc register that is literally named "pc". */
7139
7140 CORE_ADDR
7141 linux_get_pc_32bit (struct regcache *regcache)
7142 {
7143 uint32_t pc;
7144
7145 collect_register_by_name (regcache, "pc", &pc);
7146 if (debug_threads)
7147 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7148 return pc;
7149 }
7150
7151 /* Default implementation of linux_target_ops method "set_pc" for a
7152 64-bit pc register that is literally named "pc". */
7153
7154 void
7155 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7156 {
7157 uint64_t newpc = pc;
7158
7159 supply_register_by_name (regcache, "pc", &newpc);
7160 }
7161
7162 /* Default implementation of linux_target_ops method "get_pc" for a
7163 64-bit pc register that is literally named "pc". */
7164
7165 CORE_ADDR
7166 linux_get_pc_64bit (struct regcache *regcache)
7167 {
7168 uint64_t pc;
7169
7170 collect_register_by_name (regcache, "pc", &pc);
7171 if (debug_threads)
7172 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7173 return pc;
7174 }
7175
7176 /* See linux-low.h. */
7177
7178 int
7179 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7180 {
7181 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7182 int offset = 0;
7183
7184 gdb_assert (wordsize == 4 || wordsize == 8);
7185
7186 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7187 {
7188 if (wordsize == 4)
7189 {
7190 uint32_t *data_p = (uint32_t *) data;
7191 if (data_p[0] == match)
7192 {
7193 *valp = data_p[1];
7194 return 1;
7195 }
7196 }
7197 else
7198 {
7199 uint64_t *data_p = (uint64_t *) data;
7200 if (data_p[0] == match)
7201 {
7202 *valp = data_p[1];
7203 return 1;
7204 }
7205 }
7206
7207 offset += 2 * wordsize;
7208 }
7209
7210 return 0;
7211 }
7212
7213 /* See linux-low.h. */
7214
7215 CORE_ADDR
7216 linux_get_hwcap (int wordsize)
7217 {
7218 CORE_ADDR hwcap = 0;
7219 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7220 return hwcap;
7221 }
7222
7223 /* See linux-low.h. */
7224
7225 CORE_ADDR
7226 linux_get_hwcap2 (int wordsize)
7227 {
7228 CORE_ADDR hwcap2 = 0;
7229 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7230 return hwcap2;
7231 }
7232
7233 #ifdef HAVE_LINUX_REGSETS
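/* Count the register sets in INFO->regsets, which is terminated by an
entry with a negative size, and store the count in INFO->num_regsets. */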
7234 void
7235 initialize_regsets_info (struct regsets_info *info)
7236 {
7237 for (info->num_regsets = 0;
7238 info->regsets[info->num_regsets].size >= 0;
7239 info->num_regsets++)
7240 ;
7241 }
7242 #endif
7243
7244 void
7245 initialize_low (void)
7246 {
7247 struct sigaction sigchld_action;
7248
7249 memset (&sigchld_action, 0, sizeof (sigchld_action));
7250 set_target_ops (the_linux_target);
7251
7252 linux_ptrace_init_warnings ();
7253 linux_proc_init_warnings ();
7254
7255 sigchld_action.sa_handler = sigchld_handler;
7256 sigemptyset (&sigchld_action.sa_mask);
7257 sigchld_action.sa_flags = SA_RESTART;
7258 sigaction (SIGCHLD, &sigchld_action, NULL);
7259
7260 initialize_low_arch ();
7261
7262 linux_check_ptrace_features ();
7263 }