/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2021 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#if (defined(__UCLIBC__) \
     && defined(HAS_NOMMU) \
     && defined(PT_TEXT_ADDR) \
     && defined(PT_DATA_ADDR) \
     && defined(PT_TEXT_END_ADDR))
#define SUPPORTS_READ_OFFSETS
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
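
/* Illustrative sketch (not part of the original file): entries in
   /proc/PID/auxv come as fixed-width records matching the inferior's
   word size, which is why the structs above exist.  Assuming
   hypothetical locals BUF (raw bytes), LEN (byte count) and HWCAP2,
   a 32-bit inferior's vector could be walked like this:

     const Elf32_auxv_t *av = (const Elf32_auxv_t *) buf;
     for (; (const char *) (av + 1) <= (const char *) buf + len
	    && av->a_type != AT_NULL;
	  av++)
       if (av->a_type == AT_HWCAP2)
	 hwcap2 = av->a_un.a_val;

   This is also why the unions carry no pointer members: the record
   width must track the inferior, not the host.  */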

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
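
/* Usage sketch (derived from this file, not a new API): the event
   machinery records a stop from a PID it does not know yet with

     add_to_pid_list (&stopped_pids, pid, status);

   and handle_extended_wait below claims it once the parent's
   fork/vfork/clone event arrives:

     int status;
     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... wait for the new LWP's initial stop explicitly ...

   pull_pid_from_list unlinks and frees the matched node, so each
   recorded stop is consumed exactly once.  */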

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void unsuspend_all_lwps (struct lwp_info *except);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
static ptid_t step_over_bkpt;

bool
linux_process_target::low_supports_breakpoints ()
{
  return false;
}

CORE_ADDR
linux_process_target::low_get_pc (regcache *regcache)
{
  return 0;
}

void
linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
{
  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
}

std::vector<CORE_ADDR>
linux_process_target::low_get_next_pcs (regcache *regcache)
{
  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
			  "implemented");
}

int
linux_process_target::low_decr_pc_after_break ()
{
  return 0;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if it is not an ELF file at all.  Store
   the ELF machine type in *MACHINE (EM_NONE for non-ELF files).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
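
/* Usage sketch (illustrative only):

     unsigned int machine;
     if (linux_pid_exe_is_elf_64_file (pid, &machine) == 1)
       ... pick a 64-bit register layout; MACHINE holds e_machine ...

   Note the tri-state result: -1 (unreadable or non-ELF executable)
   must be distinguished from 0 (a 32-bit ELF).  */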

void
linux_process_target::delete_lwp (lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  low_delete_thread (lwp->arch_private);

  delete lwp;
}

void
linux_process_target::low_delete_thread (arch_lwp_info *info)
{
  /* Default implementation should be overridden if architecture-specific
     info is being used.  */
  gdb_assert (info == nullptr);
}

process_info *
linux_process_target::add_linux_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  proc->priv->arch_private = low_new_process ();

  return proc;
}

arch_process_info *
linux_process_target::low_new_process ()
{
  return nullptr;
}

void
linux_process_target::low_delete_process (arch_process_info *info)
{
  /* Default implementation must be overridden if architecture-specific
     info exists.  */
  gdb_assert (info == nullptr);
}

void
linux_process_target::low_new_fork (process_info *parent, process_info *child)
{
  /* Nop.  */
}

void
linux_process_target::arch_setup_thread (thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  low_arch_setup ();

  current_thread = saved_thread;
}

int
linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
					    int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = add_linux_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  target_desc_up tdesc = allocate_target_description ();
	  copy_target_description (tdesc.get (), parent_proc->tdesc);
	  child_proc->tdesc = tdesc.release ();

	  /* Clone arch-specific process data.  */
	  low_new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;
	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints is cloned
	     from the parent's.  Remove them from the child process.
	     In the case of vfork, we'll reinsert them once the vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && supports_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = add_linux_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
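
/* Return-value convention of handle_extended_wait, restated from the
   code above: 0 means "report this event to the higher layers"
   (fork, vfork, vfork-done, exec), 1 means "handled internally, do
   not report" (a new clone was absorbed into the LWP list).  A
   caller in the wait machinery therefore looks roughly like:

     if (handle_extended_wait (&event_child, wstat) == 0)
       ... surface event_child->waitstatus to the client ...

   (Sketch only, with a hypothetical EVENT_CHILD; the actual caller
   lives further down in this file.)  */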

CORE_ADDR
linux_process_target::get_pc (lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (!low_supports_breakpoints ())
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = low_get_pc (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

void
linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  low_get_syscall_trapinfo (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

void
linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* By default, report an unknown system call number.  */
  *sysno = UNKNOWN_SYSCALL;
}

bool
linux_process_target::save_stop_reason (lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (!low_supports_breakpoints ())
    return false;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - low_decr_pc_after_break ();

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && low_breakpoint_at (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  low_set_pc (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return true;
}

lwp_info *
linux_process_target::add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = new lwp_info {};

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  low_new_thread (lwp);

  return lwp;
}

void
linux_process_target::low_new_thread (lwp_info *info)
{
  /* Nop.  */
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}
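
/* How the callback above is used (see create_inferior below):
   fork_inferior forks, runs linux_ptrace_fun in the child so the
   upcoming exec stops under ptrace, and then execs the program:

     pid = fork_inferior (program, str_program_args.c_str (),
			  get_environ ()->envp (), linux_ptrace_fun,
			  NULL, NULL, NULL, NULL);

   By the time fork_inferior returns, the child exists and will
   report its initial exec SIGTRAP via waitpid.  */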

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

int
linux_process_target::create_inferior (const char *program,
				       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = construct_inferior_arguments (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  add_linux_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

void
linux_process_target::post_create_inferior ()
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  low_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

int
linux_process_target::attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
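
/* A note on kill_lwp, declared above and defined later in this file:
   it must target one LWP rather than the whole process, so
   conceptually it boils down to

     syscall (SYS_tkill, lwpid, signo);

   with a fallback to kill () on hosts without tkill.  That is what
   lets attach_lwp nudge one specific thread with SIGSTOP.  (An
   assumption about the definition below; see it for details.)  */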

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = the_linux_target->attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, safe_strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_process_target::attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = add_linux_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}
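
/* Example (illustrative): last_thread_of_process_p underpins the
   zombie-leader detection in check_zombie_leaders further below.  A
   direct use would be

     if (last_thread_of_process_p (pid))
       ... only the leader is left for this tgid ...

   The lambda stops the scan as soon as a second thread of PID is
   seen, so the walk ends early for multi-threaded processes.  */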

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? safe_strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

int
linux_process_target::kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
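
/* How the result is consumed (see detach_one_lwp just below): the
   returned host signal number is handed straight to ptrace, so the
   inferior does not lose the signal on detach:

     sig = get_detach_signal (thread);
     ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (long) sig);

   A result of 0 means "detach without delivering any signal".  */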

void
linux_process_target::detach_one_lwp (lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      low_prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 safe_strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

int
linux_process_target::detach (process_info *process)
{
  struct lwp_info *main_lwp;
  /* If there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  target_stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      /* We don't actually detach from the thread group leader just yet.
	 If the thread group exits, we must reap the zombie clone lwps
	 before we're able to reap the leader.  */
      if (thread->id.pid () == thread->id.lwp ())
	return;

      lwp_info *lwp = get_thread_lwp (thread);
      detach_one_lwp (lwp);
    });

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  detach_one_lwp (main_lwp);

  mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

void
linux_process_target::mourn (process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [this] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  low_delete_process (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
void
linux_process_target::join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Don't test STATUS if waitpid failed; it would be stale or
       uninitialized.  */
    if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return true if the given thread is still alive.  */

bool
linux_process_target::thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

bool
linux_process_target::thread_still_has_status_pending (thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !low_breakpoint_at (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

bool
linux_process_target::status_pending_p_callback (thread_info *thread,
						 ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending (thread))
    {
      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
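
/* Usage sketch: a caller can act on every matching LWP by always
   returning false from the callback, e.g. sending SIGSTOP to every
   LWP of process PID except EXCEPT (hypothetical variables):

     iterate_over_lwps (ptid_t (pid), [=] (lwp_info *lwp)
       {
	 if (lwp != except)
	   send_sigstop (lwp);
	 return false;	// keep iterating
       });

   Returning true instead stops the walk and hands that LWP back to
   the caller.  */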

void
linux_process_target::check_zombie_leaders ()
{
  for_each_process ([this] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
	/* A leader zombie can mean one of two things:

	   - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	   - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     its tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	if (debug_threads)
	  debug_printf ("CZL: Thread group leader %d zombie "
			"(it exited, or another thread execd).\n",
			leader_pid);

	delete_lwp (leader_lp);
      }
    });
}
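
/* Background (an assumption about nat/linux-procfs, not code in this
   file): linux_proc_pid_is_zombie reports whether the State: line of
   /proc/PID/status reads e.g.

     State:	Z (zombie)

   which is how the check above can notice a dead leader whose exit
   status waitpid cannot deliver yet.  */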

/* Callback for `find_thread'.  Returns the first LWP that is not
   stopped.  */

static bool
not_stopped_callback (thread_info *thread, ptid_t filter)
{
  if (!thread->id.matches (filter))
    return false;

  lwp_info *lwp = get_thread_lwp (thread);

  return !lwp->stopped;
}

/* Increment LWP's suspend count.  */

static void
lwp_suspended_inc (struct lwp_info *lwp)
{
  lwp->suspended++;

  if (debug_threads && lwp->suspended > 4)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      debug_printf ("LWP %ld has a suspiciously high suspend count,"
		    " suspended=%d\n", lwpid_of (thread), lwp->suspended);
    }
}

/* Decrement LWP's suspend count.  */

static void
lwp_suspended_decr (struct lwp_info *lwp)
{
  lwp->suspended--;

  if (lwp->suspended < 0)
    {
      struct thread_info *thread = get_lwp_thread (lwp);

      internal_error (__FILE__, __LINE__,
		      "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
		      lwp->suspended);
    }
}
1911 /* This function should only be called if the LWP got a SIGTRAP.
1912
1913 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1914 event was handled, 0 otherwise. */
1915
1916 static int
1917 handle_tracepoints (struct lwp_info *lwp)
1918 {
1919 struct thread_info *tinfo = get_lwp_thread (lwp);
1920 int tpoint_related_event = 0;
1921
1922 gdb_assert (lwp->suspended == 0);
1923
1924 /* If this tracepoint hit causes a tracing stop, we'll immediately
1925 uninsert tracepoints. To do this, we temporarily pause all
1926 threads, unpatch away, and then unpause threads. We need to make
1927 sure the unpausing doesn't resume LWP too. */
1928 lwp_suspended_inc (lwp);
1929
1930 /* And we need to be sure that any all-threads-stopping doesn't try
1931 to move threads out of the jump pads, as it could deadlock the
1932 inferior (LWP could be in the jump pad, maybe even holding the
1933 lock). */
1934
1935 /* Do any necessary step collect actions. */
1936 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1937
1938 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1939
1940 /* See if we just hit a tracepoint and do its main collect
1941 actions. */
1942 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1943
1944 lwp_suspended_decr (lwp);
1945
1946 gdb_assert (lwp->suspended == 0);
1947 gdb_assert (!stabilizing_threads
1948 || (lwp->collecting_fast_tracepoint
1949 != fast_tpoint_collect_result::not_collecting));
1950
1951 if (tpoint_related_event)
1952 {
1953 if (debug_threads)
1954 debug_printf ("got a tracepoint event\n");
1955 return 1;
1956 }
1957
1958 return 0;
1959 }
1960
1961 fast_tpoint_collect_result
1962 linux_process_target::linux_fast_tracepoint_collecting
1963 (lwp_info *lwp, fast_tpoint_collect_status *status)
1964 {
1965 CORE_ADDR thread_area;
1966 struct thread_info *thread = get_lwp_thread (lwp);
1967
1968 /* Get the thread area address. This is used to recognize which
1969 thread is which when tracing with the in-process agent library.
1970 We don't read anything from the address, and treat it as opaque;
1971 it's the address itself that we assume is unique per-thread. */
1972 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1973 return fast_tpoint_collect_result::not_collecting;
1974
1975 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1976 }
1977
1978 int
1979 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1980 {
1981 return -1;
1982 }
1983
1984 bool
1985 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1986 {
1987 struct thread_info *saved_thread;
1988
1989 saved_thread = current_thread;
1990 current_thread = get_lwp_thread (lwp);
1991
1992 if ((wstat == NULL
1993 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1994 && supports_fast_tracepoints ()
1995 && agent_loaded_p ())
1996 {
1997 struct fast_tpoint_collect_status status;
1998
1999 if (debug_threads)
2000 debug_printf ("Checking whether LWP %ld needs to move out of the "
2001 "jump pad.\n",
2002 lwpid_of (current_thread));
2003
2004 fast_tpoint_collect_result r
2005 = linux_fast_tracepoint_collecting (lwp, &status);
2006
2007 if (wstat == NULL
2008 || (WSTOPSIG (*wstat) != SIGILL
2009 && WSTOPSIG (*wstat) != SIGFPE
2010 && WSTOPSIG (*wstat) != SIGSEGV
2011 && WSTOPSIG (*wstat) != SIGBUS))
2012 {
2013 lwp->collecting_fast_tracepoint = r;
2014
2015 if (r != fast_tpoint_collect_result::not_collecting)
2016 {
2017 if (r == fast_tpoint_collect_result::before_insn
2018 && lwp->exit_jump_pad_bkpt == NULL)
2019 {
2020 /* Haven't executed the original instruction yet.
2021 Set breakpoint there, and wait till it's hit,
2022 then single-step until exiting the jump pad. */
2023 lwp->exit_jump_pad_bkpt
2024 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2025 }
2026
2027 if (debug_threads)
2028 debug_printf ("Checking whether LWP %ld needs to move out of "
2029 "the jump pad...it does\n",
2030 lwpid_of (current_thread));
2031 current_thread = saved_thread;
2032
2033 return true;
2034 }
2035 }
2036 else
2037 {
2038 /* If we get a synchronous signal while collecting, *and*
2039 while executing the (relocated) original instruction,
2040 reset the PC to point at the tpoint address, before
2041 reporting to GDB. Otherwise, it's an IPA lib bug: just
2042 report the signal to GDB, and pray for the best. */
2043
2044 lwp->collecting_fast_tracepoint
2045 = fast_tpoint_collect_result::not_collecting;
2046
2047 if (r != fast_tpoint_collect_result::not_collecting
2048 && (status.adjusted_insn_addr <= lwp->stop_pc
2049 && lwp->stop_pc < status.adjusted_insn_addr_end))
2050 {
2051 siginfo_t info;
2052 struct regcache *regcache;
2053
2054 /* The si_addr on a few signals references the address
2055 of the faulting instruction. Adjust that as
2056 well. */
2057 if ((WSTOPSIG (*wstat) == SIGILL
2058 || WSTOPSIG (*wstat) == SIGFPE
2059 || WSTOPSIG (*wstat) == SIGBUS
2060 || WSTOPSIG (*wstat) == SIGSEGV)
2061 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2062 (PTRACE_TYPE_ARG3) 0, &info) == 0
2063 /* Final check just to make sure we don't clobber
2064 the siginfo of non-kernel-sent signals. */
2065 && (uintptr_t) info.si_addr == lwp->stop_pc)
2066 {
2067 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2068 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2069 (PTRACE_TYPE_ARG3) 0, &info);
2070 }
2071
2072 regcache = get_thread_regcache (current_thread, 1);
2073 low_set_pc (regcache, status.tpoint_addr);
2074 lwp->stop_pc = status.tpoint_addr;
2075
2076 /* Cancel any fast tracepoint lock this thread was
2077 holding. */
2078 force_unlock_trace_buffer ();
2079 }
2080
2081 if (lwp->exit_jump_pad_bkpt != NULL)
2082 {
2083 if (debug_threads)
2084 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2085 "stopping all threads momentarily.\n");
2086
2087 stop_all_lwps (1, lwp);
2088
2089 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2090 lwp->exit_jump_pad_bkpt = NULL;
2091
2092 unstop_all_lwps (1, lwp);
2093
2094 gdb_assert (lwp->suspended >= 0);
2095 }
2096 }
2097 }
2098
2099 if (debug_threads)
2100 debug_printf ("Checking whether LWP %ld needs to move out of the "
2101 "jump pad...no\n",
2102 lwpid_of (current_thread));
2103
2104 current_thread = saved_thread;
2105 return false;
2106 }
2107
2108 /* Enqueue one signal in the "signals to report later when out of the
2109 jump pad" list. */
2110
2111 static void
2112 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2113 {
2114 struct thread_info *thread = get_lwp_thread (lwp);
2115
2116 if (debug_threads)
2117 debug_printf ("Deferring signal %d for LWP %ld.\n",
2118 WSTOPSIG (*wstat), lwpid_of (thread));
2119
2120 if (debug_threads)
2121 {
2122 for (const auto &sig : lwp->pending_signals_to_report)
2123 debug_printf (" Already queued %d\n",
2124 sig.signal);
2125
2126 debug_printf (" (no more currently queued signals)\n");
2127 }
2128
2129 /* Don't enqueue non-RT signals if they are already in the deferred
2130 queue. (SIGSTOP being the easiest signal to see ending up here
2131 twice) */
2132 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2133 {
2134 for (const auto &sig : lwp->pending_signals_to_report)
2135 {
2136 if (sig.signal == WSTOPSIG (*wstat))
2137 {
2138 if (debug_threads)
2139 debug_printf ("Not requeuing already queued non-RT signal %d"
2140 " for LWP %ld\n",
2141 sig.signal,
2142 lwpid_of (thread));
2143 return;
2144 }
2145 }
2146 }
2147
2148 lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2149
2150 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2151 &lwp->pending_signals_to_report.back ().info);
2152 }
2153
2154 /* Dequeue one signal from the "signals to report later when out of
2155 the jump pad" list. */
2156
2157 static int
2158 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2159 {
2160 struct thread_info *thread = get_lwp_thread (lwp);
2161
2162 if (!lwp->pending_signals_to_report.empty ())
2163 {
2164 const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2165
2166 *wstat = W_STOPCODE (p_sig.signal);
2167 if (p_sig.info.si_signo != 0)
2168 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2169 &p_sig.info);
2170
2171 lwp->pending_signals_to_report.pop_front ();
2172
2173 if (debug_threads)
2174 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2175 WSTOPSIG (*wstat), lwpid_of (thread));
2176
2177 if (debug_threads)
2178 {
2179 for (const auto &sig : lwp->pending_signals_to_report)
2180 debug_printf (" Still queued %d\n",
2181 sig.signal);
2182
2183 debug_printf (" (no more queued signals)\n");
2184 }
2185
2186 return 1;
2187 }
2188
2189 return 0;
2190 }
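
/* For reference, to make the round trip above concrete: W_STOPCODE
   synthesizes a wait status that looks as if the thread had just
   stopped with the given signal. With the common definition
   (gdbsupport/gdb_wait.h supplies one when the system headers
   don't):

     #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)

   the following holds, which is what the enqueue/dequeue pair above
   relies on:

     int w = W_STOPCODE (SIGUSR1);
     gdb_assert (WIFSTOPPED (w) && WSTOPSIG (w) == SIGUSR1);  */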
2191
2192 bool
2193 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2194 {
2195 struct thread_info *saved_thread = current_thread;
2196 current_thread = get_lwp_thread (child);
2197
2198 if (low_stopped_by_watchpoint ())
2199 {
2200 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2201 child->stopped_data_address = low_stopped_data_address ();
2202 }
2203
2204 current_thread = saved_thread;
2205
2206 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2207 }
2208
2209 bool
2210 linux_process_target::low_stopped_by_watchpoint ()
2211 {
2212 return false;
2213 }
2214
2215 CORE_ADDR
2216 linux_process_target::low_stopped_data_address ()
2217 {
2218 return 0;
2219 }
2220
2221 /* Return the ptrace options that we want to try to enable. */
2222
2223 static int
2224 linux_low_ptrace_options (int attached)
2225 {
2226 client_state &cs = get_client_state ();
2227 int options = 0;
2228
2229 if (!attached)
2230 options |= PTRACE_O_EXITKILL;
2231
2232 if (cs.report_fork_events)
2233 options |= PTRACE_O_TRACEFORK;
2234
2235 if (cs.report_vfork_events)
2236 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2237
2238 if (cs.report_exec_events)
2239 options |= PTRACE_O_TRACEEXEC;
2240
2241 options |= PTRACE_O_TRACESYSGOOD;
2242
2243 return options;
2244 }
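
/* For illustration: the mask computed above is ultimately handed to
   the kernel by linux_enable_event_reporting (nat/linux-ptrace.c),
   with a call along these lines -- a sketch that elides the masking
   against the options the running kernel actually supports:

     ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) options);  */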
2245
2246 void
2247 linux_process_target::filter_event (int lwpid, int wstat)
2248 {
2249 client_state &cs = get_client_state ();
2250 struct lwp_info *child;
2251 struct thread_info *thread;
2252 int have_stop_pc = 0;
2253
2254 child = find_lwp_pid (ptid_t (lwpid));
2255
2256 /* Check for stop events reported by a process we didn't already
2257 know about - anything not already in our LWP list.
2258
2259 If we're expecting to receive stopped processes after
2260 fork, vfork, and clone events, then we'll just add the
2261 new one to our list and go back to waiting for the event
2262 to be reported - the stopped process might be returned
2263 from waitpid before or after the event is.
2264
2265 But note the case of a non-leader thread exec'ing after the
2266 leader having exited, and gone from our lists (because
2267 check_zombie_leaders deleted it). The non-leader thread
2268 changes its tid to the tgid. */
2269
2270 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2271 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2272 {
2273 ptid_t child_ptid;
2274
2275 /* A multi-thread exec after we had seen the leader exiting. */
2276 if (debug_threads)
2277 {
2278 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2279 "after exec.\n", lwpid);
2280 }
2281
2282 child_ptid = ptid_t (lwpid, lwpid, 0);
2283 child = add_lwp (child_ptid);
2284 child->stopped = 1;
2285 current_thread = child->thread;
2286 }
2287
2288 /* If we didn't find a process, one of two things presumably happened:
2289 - A process we started and then detached from has exited. Ignore it.
2290 - A process we are controlling has forked and the new child's stop
2291 was reported to us by the kernel. Save its PID. */
2292 if (child == NULL && WIFSTOPPED (wstat))
2293 {
2294 add_to_pid_list (&stopped_pids, lwpid, wstat);
2295 return;
2296 }
2297 else if (child == NULL)
2298 return;
2299
2300 thread = get_lwp_thread (child);
2301
2302 child->stopped = 1;
2303
2304 child->last_status = wstat;
2305
2306 /* Check if the thread has exited. */
2307 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2308 {
2309 if (debug_threads)
2310 debug_printf ("LLFE: %d exited.\n", lwpid);
2311
2312 if (finish_step_over (child))
2313 {
2314 /* Unsuspend all other LWPs, and set them back running again. */
2315 unsuspend_all_lwps (child);
2316 }
2317
2318 /* If there is at least one more LWP, then the exit signal was
2319 not the end of the debugged application and should be
2320 ignored, unless GDB wants to hear about thread exits. */
2321 if (cs.report_thread_events
2322 || last_thread_of_process_p (pid_of (thread)))
2323 {
2324 /* Since events are serialized to the GDB core, we can't
2325 report this one right now. Leave the status pending for
2326 the next time we're able to report it. */
2327 mark_lwp_dead (child, wstat);
2328 return;
2329 }
2330 else
2331 {
2332 delete_lwp (child);
2333 return;
2334 }
2335 }
2336
2337 gdb_assert (WIFSTOPPED (wstat));
2338
2339 if (WIFSTOPPED (wstat))
2340 {
2341 struct process_info *proc;
2342
2343 /* Architecture-specific setup after inferior is running. */
2344 proc = find_process_pid (pid_of (thread));
2345 if (proc->tdesc == NULL)
2346 {
2347 if (proc->attached)
2348 {
2349 /* This needs to happen after we have attached to the
2350 inferior and it is stopped for the first time, but
2351 before we access any inferior registers. */
2352 arch_setup_thread (thread);
2353 }
2354 else
2355 {
2356 /* The process is started, but GDBserver will do
2357 architecture-specific setup after the program stops at
2358 the first instruction. */
2359 child->status_pending_p = 1;
2360 child->status_pending = wstat;
2361 return;
2362 }
2363 }
2364 }
2365
2366 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2367 {
2368 struct process_info *proc = find_process_pid (pid_of (thread));
2369 int options = linux_low_ptrace_options (proc->attached);
2370
2371 linux_enable_event_reporting (lwpid, options);
2372 child->must_set_ptrace_flags = 0;
2373 }
2374
2375 /* Always update syscall_state, even if it will be filtered later. */
2376 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2377 {
2378 child->syscall_state
2379 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2380 ? TARGET_WAITKIND_SYSCALL_RETURN
2381 : TARGET_WAITKIND_SYSCALL_ENTRY);
2382 }
2383 else
2384 {
2385 /* Almost all other ptrace-stops are known to be outside of system
2386 calls, with further exceptions in handle_extended_wait. */
2387 child->syscall_state = TARGET_WAITKIND_IGNORE;
2388 }
2389
2390 /* Be careful to not overwrite stop_pc until save_stop_reason is
2391 called. */
2392 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2393 && linux_is_extended_waitstatus (wstat))
2394 {
2395 child->stop_pc = get_pc (child);
2396 if (handle_extended_wait (&child, wstat))
2397 {
2398 /* The event has been handled, so just return without
2399 reporting it. */
2400 return;
2401 }
2402 }
2403
2404 if (linux_wstatus_maybe_breakpoint (wstat))
2405 {
2406 if (save_stop_reason (child))
2407 have_stop_pc = 1;
2408 }
2409
2410 if (!have_stop_pc)
2411 child->stop_pc = get_pc (child);
2412
2413 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2414 && child->stop_expected)
2415 {
2416 if (debug_threads)
2417 debug_printf ("Expected stop.\n");
2418 child->stop_expected = 0;
2419
2420 if (thread->last_resume_kind == resume_stop)
2421 {
2422 /* We want to report the stop to the core. Treat the
2423 SIGSTOP as a normal event. */
2424 if (debug_threads)
2425 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2426 target_pid_to_str (ptid_of (thread)));
2427 }
2428 else if (stopping_threads != NOT_STOPPING_THREADS)
2429 {
2430 /* Stopping threads. We don't want this SIGSTOP to end up
2431 pending. */
2432 if (debug_threads)
2433 debug_printf ("LLW: SIGSTOP caught for %s "
2434 "while stopping threads.\n",
2435 target_pid_to_str (ptid_of (thread)));
2436 return;
2437 }
2438 else
2439 {
2440 /* This is a delayed SIGSTOP. Filter out the event. */
2441 if (debug_threads)
2442 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2443 child->stepping ? "step" : "continue",
2444 target_pid_to_str (ptid_of (thread)));
2445
2446 resume_one_lwp (child, child->stepping, 0, NULL);
2447 return;
2448 }
2449 }
2450
2451 child->status_pending_p = 1;
2452 child->status_pending = wstat;
2453 return;
2454 }
2455
2456 bool
2457 linux_process_target::maybe_hw_step (thread_info *thread)
2458 {
2459 if (supports_hardware_single_step ())
2460 return true;
2461 else
2462 {
2463 /* GDBserver must insert single-step breakpoint for software
2464 single step. */
2465 gdb_assert (has_single_step_breakpoints (thread));
2466 return false;
2467 }
2468 }
2469
2470 void
2471 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2472 {
2473 struct lwp_info *lp = get_thread_lwp (thread);
2474
2475 if (lp->stopped
2476 && !lp->suspended
2477 && !lp->status_pending_p
2478 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2479 {
2480 int step = 0;
2481
2482 if (thread->last_resume_kind == resume_step)
2483 step = maybe_hw_step (thread);
2484
2485 if (debug_threads)
2486 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2487 target_pid_to_str (ptid_of (thread)),
2488 paddress (lp->stop_pc),
2489 step);
2490
2491 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2492 }
2493 }
2494
2495 int
2496 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2497 ptid_t filter_ptid,
2498 int *wstatp, int options)
2499 {
2500 struct thread_info *event_thread;
2501 struct lwp_info *event_child, *requested_child;
2502 sigset_t block_mask, prev_mask;
2503
2504 retry:
2505 /* N.B. event_thread points to the thread_info struct that contains
2506 event_child. Keep them in sync. */
2507 event_thread = NULL;
2508 event_child = NULL;
2509 requested_child = NULL;
2510
2511 /* Check for a lwp with a pending status. */
2512
2513 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2514 {
2515 event_thread = find_thread_in_random ([&] (thread_info *thread)
2516 {
2517 return status_pending_p_callback (thread, filter_ptid);
2518 });
2519
2520 if (event_thread != NULL)
2521 event_child = get_thread_lwp (event_thread);
2522 if (debug_threads && event_thread)
2523 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2524 }
2525 else if (filter_ptid != null_ptid)
2526 {
2527 requested_child = find_lwp_pid (filter_ptid);
2528
2529 if (stopping_threads == NOT_STOPPING_THREADS
2530 && requested_child->status_pending_p
2531 && (requested_child->collecting_fast_tracepoint
2532 != fast_tpoint_collect_result::not_collecting))
2533 {
2534 enqueue_one_deferred_signal (requested_child,
2535 &requested_child->status_pending);
2536 requested_child->status_pending_p = 0;
2537 requested_child->status_pending = 0;
2538 resume_one_lwp (requested_child, 0, 0, NULL);
2539 }
2540
2541 if (requested_child->suspended
2542 && requested_child->status_pending_p)
2543 {
2544 internal_error (__FILE__, __LINE__,
2545 "requesting an event out of a"
2546 " suspended child?");
2547 }
2548
2549 if (requested_child->status_pending_p)
2550 {
2551 event_child = requested_child;
2552 event_thread = get_lwp_thread (event_child);
2553 }
2554 }
2555
2556 if (event_child != NULL)
2557 {
2558 if (debug_threads)
2559 debug_printf ("Got an event from pending child %ld (%04x)\n",
2560 lwpid_of (event_thread), event_child->status_pending);
2561 *wstatp = event_child->status_pending;
2562 event_child->status_pending_p = 0;
2563 event_child->status_pending = 0;
2564 current_thread = event_thread;
2565 return lwpid_of (event_thread);
2566 }
2567
2568 /* But if we don't find a pending event, we'll have to wait.
2569
2570 We only enter this loop if no process has a pending wait status.
2571 Thus any action taken in response to a wait status inside this
2572 loop is responding as soon as we detect the status, not after any
2573 pending events. */
2574
2575 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2576 all signals while here. */
2577 sigfillset (&block_mask);
2578 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2579
2580 /* Always pull all events out of the kernel. We'll randomly select
2581 an event LWP out of all that have events, to prevent
2582 starvation. */
2583 while (event_child == NULL)
2584 {
2585 pid_t ret = 0;
2586
2587 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2588 quirks:
2589
2590 - If the thread group leader exits while other threads in the
2591 thread group still exist, waitpid(TGID, ...) hangs. That
2592 waitpid won't return an exit status until the other threads
2593 in the group are reaped.
2594
2595 - When a non-leader thread execs, that thread just vanishes
2596 without reporting an exit (so we'd hang if we waited for it
2597 explicitly in that case). The exec event is reported to
2598 the TGID pid. */
2599 errno = 0;
2600 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2601
2602 if (debug_threads)
2603 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2604 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2605
2606 if (ret > 0)
2607 {
2608 if (debug_threads)
2609 {
2610 debug_printf ("LLW: waitpid %ld received %s\n",
2611 (long) ret, status_to_str (*wstatp));
2612 }
2613
2614 /* Filter all events. IOW, leave all events pending. We'll
2615 randomly select an event LWP out of all that have events
2616 below. */
2617 filter_event (ret, *wstatp);
2618 /* Retry until nothing comes out of waitpid. A single
2619 SIGCHLD can indicate more than one child stopped. */
2620 continue;
2621 }
2622
2623 /* Now that we've pulled all events out of the kernel, resume
2624 LWPs that don't have an interesting event to report. */
2625 if (stopping_threads == NOT_STOPPING_THREADS)
2626 for_each_thread ([this] (thread_info *thread)
2627 {
2628 resume_stopped_resumed_lwps (thread);
2629 });
2630
2631 /* ... and find an LWP with a status to report to the core, if
2632 any. */
2633 event_thread = find_thread_in_random ([&] (thread_info *thread)
2634 {
2635 return status_pending_p_callback (thread, filter_ptid);
2636 });
2637
2638 if (event_thread != NULL)
2639 {
2640 event_child = get_thread_lwp (event_thread);
2641 *wstatp = event_child->status_pending;
2642 event_child->status_pending_p = 0;
2643 event_child->status_pending = 0;
2644 break;
2645 }
2646
2647 /* Check for zombie thread group leaders. Those can't be reaped
2648 until all other threads in the thread group are. */
2649 check_zombie_leaders ();
2650
2651 auto not_stopped = [&] (thread_info *thread)
2652 {
2653 return not_stopped_callback (thread, wait_ptid);
2654 };
2655
2656 /* If there are no resumed children left in the set of LWPs we
2657 want to wait for, bail. We can't just block in
2658 waitpid/sigsuspend, because lwps might have been left stopped
2659 in trace-stop state, and we'd be stuck forever waiting for
2660 their status to change (which would only happen if we resumed
2661 them). Even if WNOHANG is set, this return code is preferred
2662 over 0 (below), as it is more detailed. */
2663 if (find_thread (not_stopped) == NULL)
2664 {
2665 if (debug_threads)
2666 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2667 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2668 return -1;
2669 }
2670
2671 /* No interesting event to report to the caller. */
2672 if ((options & WNOHANG))
2673 {
2674 if (debug_threads)
2675 debug_printf ("WNOHANG set, no event found\n");
2676
2677 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2678 return 0;
2679 }
2680
2681 /* Block until we get an event reported with SIGCHLD. */
2682 if (debug_threads)
2683 debug_printf ("sigsuspend'ing\n");
2684
2685 sigsuspend (&prev_mask);
2686 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2687 goto retry;
2688 }
2689
2690 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2691
2692 current_thread = event_thread;
2693
2694 return lwpid_of (event_thread);
2695 }
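
/* Distilled, the drain pattern implemented above is: pull every
   pending event out of the kernel with -1/WNOHANG, leave each one
   pending via filter_event, and only then pick one event to report.
   A sketch of just that inner loop:

     int status;
     pid_t pid;

     while ((pid = my_waitpid (-1, &status, __WALL | WNOHANG)) > 0)
       filter_event (pid, status);  // leaves the event pending

   then select a pending event at random, or sigsuspend and retry if
   none turned up.  */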
2696
2697 int
2698 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2699 {
2700 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2701 }
2702
2703 /* Select one LWP out of those that have events pending. */
2704
2705 static void
2706 select_event_lwp (struct lwp_info **orig_lp)
2707 {
2708 struct thread_info *event_thread = NULL;
2709
2710 /* In all-stop, give preference to the LWP that is being
2711 single-stepped. There will be at most one, and it's the LWP that
2712 the core is most interested in. If we didn't do this, then we'd
2713 have to handle pending step SIGTRAPs somehow in case the core
2714 later continues the previously-stepped thread, otherwise we'd
2715 report the pending SIGTRAP, and the core, not having stepped the
2716 thread, wouldn't understand what the trap was for, and therefore
2717 would report it to the user as a random signal. */
2718 if (!non_stop)
2719 {
2720 event_thread = find_thread ([] (thread_info *thread)
2721 {
2722 lwp_info *lp = get_thread_lwp (thread);
2723
2724 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2725 && thread->last_resume_kind == resume_step
2726 && lp->status_pending_p);
2727 });
2728
2729 if (event_thread != NULL)
2730 {
2731 if (debug_threads)
2732 debug_printf ("SEL: Select single-step %s\n",
2733 target_pid_to_str (ptid_of (event_thread)));
2734 }
2735 }
2736 if (event_thread == NULL)
2737 {
2738 /* No single-stepping LWP. Select one at random, out of those
2739 which have had events. */
2740
2741 event_thread = find_thread_in_random ([&] (thread_info *thread)
2742 {
2743 lwp_info *lp = get_thread_lwp (thread);
2744
2745 /* Only resumed LWPs that have an event pending. */
2746 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2747 && lp->status_pending_p);
2748 });
2749 }
2750
2751 if (event_thread != NULL)
2752 {
2753 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2754
2755 /* Switch the event LWP. */
2756 *orig_lp = event_lp;
2757 }
2758 }
2759
2760 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2761 non-NULL. */
2762
2763 static void
2764 unsuspend_all_lwps (struct lwp_info *except)
2765 {
2766 for_each_thread ([&] (thread_info *thread)
2767 {
2768 lwp_info *lwp = get_thread_lwp (thread);
2769
2770 if (lwp != except)
2771 lwp_suspended_decr (lwp);
2772 });
2773 }
2774
2775 static bool lwp_running (thread_info *thread);
2776
2777 /* Stabilize threads (move out of jump pads).
2778
2779 If a thread is midway collecting a fast tracepoint, we need to
2780 finish the collection and move it out of the jump pad before
2781 reporting the signal.
2782
2783 This avoids recursion while collecting (when a signal arrives
2784 midway, and the signal handler itself collects), which would trash
2785 the trace buffer. In case the user set a breakpoint in a signal
2786 handler, this avoids the backtrace showing the jump pad, etc..
2787 Most importantly, there are certain things we can't do safely if
2788 threads are stopped in a jump pad (or in its callee's). For
2789 example:
2790
2791 - starting a new trace run. A thread still collecting from the
2792 previous run could trash the trace buffer when resumed. The trace
2793 buffer control structures would have been reset, but the thread
2794 would have no way to tell. The thread could even be midway through
2795 memcpy'ing into the buffer, in which case, when resumed, it would
2796 clobber the trace buffer that had been set up for the new run.
2797
2798 - we can't rewrite/reuse the jump pads for new tracepoints
2799 safely. Say you do tstart while a thread is stopped midway through
2800 collecting. When the thread is later resumed, it finishes the
2801 collection, and returns to the jump pad, to execute the original
2802 instruction that was under the tracepoint jump at the time the
2803 older run had been started. If the jump pad had been rewritten
2804 since for something else in the new run, the thread would now
2805 execute the wrong / random instructions. */
2806
2807 void
2808 linux_process_target::stabilize_threads ()
2809 {
2810 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2811 {
2812 return stuck_in_jump_pad (thread);
2813 });
2814
2815 if (thread_stuck != NULL)
2816 {
2817 if (debug_threads)
2818 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2819 lwpid_of (thread_stuck));
2820 return;
2821 }
2822
2823 thread_info *saved_thread = current_thread;
2824
2825 stabilizing_threads = 1;
2826
2827 /* Kick 'em all. */
2828 for_each_thread ([this] (thread_info *thread)
2829 {
2830 move_out_of_jump_pad (thread);
2831 });
2832
2833 /* Loop until all are stopped out of the jump pads. */
2834 while (find_thread (lwp_running) != NULL)
2835 {
2836 struct target_waitstatus ourstatus;
2837 struct lwp_info *lwp;
2838 int wstat;
2839
2840 /* Note that we go through the full wait event loop. While
2841 moving threads out of jump pad, we need to be able to step
2842 over internal breakpoints and such. */
2843 wait_1 (minus_one_ptid, &ourstatus, 0);
2844
2845 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2846 {
2847 lwp = get_thread_lwp (current_thread);
2848
2849 /* Lock it. */
2850 lwp_suspended_inc (lwp);
2851
2852 if (ourstatus.value.sig != GDB_SIGNAL_0
2853 || current_thread->last_resume_kind == resume_stop)
2854 {
2855 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2856 enqueue_one_deferred_signal (lwp, &wstat);
2857 }
2858 }
2859 }
2860
2861 unsuspend_all_lwps (NULL);
2862
2863 stabilizing_threads = 0;
2864
2865 current_thread = saved_thread;
2866
2867 if (debug_threads)
2868 {
2869 thread_stuck = find_thread ([this] (thread_info *thread)
2870 {
2871 return stuck_in_jump_pad (thread);
2872 });
2873
2874 if (thread_stuck != NULL)
2875 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2876 lwpid_of (thread_stuck));
2877 }
2878 }
2879
2880 /* Convenience function that is called when the kernel reports an
2881 event that is not passed out to GDB. */
2882
2883 static ptid_t
2884 ignore_event (struct target_waitstatus *ourstatus)
2885 {
2886 /* If we got an event, there may still be others, as a single
2887 SIGCHLD can indicate more than one child stopped. This forces
2888 another target_wait call. */
2889 async_file_mark ();
2890
2891 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2892 return null_ptid;
2893 }
2894
2895 ptid_t
2896 linux_process_target::filter_exit_event (lwp_info *event_child,
2897 target_waitstatus *ourstatus)
2898 {
2899 client_state &cs = get_client_state ();
2900 struct thread_info *thread = get_lwp_thread (event_child);
2901 ptid_t ptid = ptid_of (thread);
2902
2903 if (!last_thread_of_process_p (pid_of (thread)))
2904 {
2905 if (cs.report_thread_events)
2906 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2907 else
2908 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2909
2910 delete_lwp (event_child);
2911 }
2912 return ptid;
2913 }
2914
2915 /* Returns 1 if GDB is interested in any event_child syscalls. */
2916
2917 static int
2918 gdb_catching_syscalls_p (struct lwp_info *event_child)
2919 {
2920 struct thread_info *thread = get_lwp_thread (event_child);
2921 struct process_info *proc = get_thread_process (thread);
2922
2923 return !proc->syscalls_to_catch.empty ();
2924 }
2925
2926 bool
2927 linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2928 {
2929 int sysno;
2930 struct thread_info *thread = get_lwp_thread (event_child);
2931 struct process_info *proc = get_thread_process (thread);
2932
2933 if (proc->syscalls_to_catch.empty ())
2934 return false;
2935
2936 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2937 return true;
2938
2939 get_syscall_trapinfo (event_child, &sysno);
2940
2941 for (int iter : proc->syscalls_to_catch)
2942 if (iter == sysno)
2943 return true;
2944
2945 return false;
2946 }
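
/* Usage sketch, with hypothetical syscall numbers: the
   syscalls_to_catch vector checked above is filled by the
   QCatchSyscalls packet handler in server.cc, roughly like so:

     proc->syscalls_to_catch.push_back (ANY_SYSCALL);  // "QCatchSyscalls:1"

   or, for "QCatchSyscalls:1;5;27":

     proc->syscalls_to_catch.push_back (5);
     proc->syscalls_to_catch.push_back (27);  */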
2947
2948 ptid_t
2949 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2950 target_wait_flags target_options)
2951 {
2952 client_state &cs = get_client_state ();
2953 int w;
2954 struct lwp_info *event_child;
2955 int options;
2956 int pid;
2957 int step_over_finished;
2958 int bp_explains_trap;
2959 int maybe_internal_trap;
2960 int report_to_gdb;
2961 int trace_event;
2962 int in_step_range;
2963 int any_resumed;
2964
2965 if (debug_threads)
2966 {
2967 debug_enter ();
2968 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
2969 }
2970
2971 /* Translate generic target options into linux options. */
2972 options = __WALL;
2973 if (target_options & TARGET_WNOHANG)
2974 options |= WNOHANG;
2975
2976 bp_explains_trap = 0;
2977 trace_event = 0;
2978 in_step_range = 0;
2979 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2980
2981 auto status_pending_p_any = [&] (thread_info *thread)
2982 {
2983 return status_pending_p_callback (thread, minus_one_ptid);
2984 };
2985
2986 auto not_stopped = [&] (thread_info *thread)
2987 {
2988 return not_stopped_callback (thread, minus_one_ptid);
2989 };
2990
2991 /* Find a resumed LWP, if any. */
2992 if (find_thread (status_pending_p_any) != NULL)
2993 any_resumed = 1;
2994 else if (find_thread (not_stopped) != NULL)
2995 any_resumed = 1;
2996 else
2997 any_resumed = 0;
2998
2999 if (step_over_bkpt == null_ptid)
3000 pid = wait_for_event (ptid, &w, options);
3001 else
3002 {
3003 if (debug_threads)
3004 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3005 target_pid_to_str (step_over_bkpt));
3006 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3007 }
3008
3009 if (pid == 0 || (pid == -1 && !any_resumed))
3010 {
3011 gdb_assert (target_options & TARGET_WNOHANG);
3012
3013 if (debug_threads)
3014 {
3015 debug_printf ("wait_1 ret = null_ptid, "
3016 "TARGET_WAITKIND_IGNORE\n");
3017 debug_exit ();
3018 }
3019
3020 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3021 return null_ptid;
3022 }
3023 else if (pid == -1)
3024 {
3025 if (debug_threads)
3026 {
3027 debug_printf ("wait_1 ret = null_ptid, "
3028 "TARGET_WAITKIND_NO_RESUMED\n");
3029 debug_exit ();
3030 }
3031
3032 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3033 return null_ptid;
3034 }
3035
3036 event_child = get_thread_lwp (current_thread);
3037
3038 /* wait_for_event only returns an exit status for the last
3039 child of a process. Report it. */
3040 if (WIFEXITED (w) || WIFSIGNALED (w))
3041 {
3042 if (WIFEXITED (w))
3043 {
3044 ourstatus->kind = TARGET_WAITKIND_EXITED;
3045 ourstatus->value.integer = WEXITSTATUS (w);
3046
3047 if (debug_threads)
3048 {
3049 debug_printf ("wait_1 ret = %s, exited with "
3050 "retcode %d\n",
3051 target_pid_to_str (ptid_of (current_thread)),
3052 WEXITSTATUS (w));
3053 debug_exit ();
3054 }
3055 }
3056 else
3057 {
3058 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3059 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3060
3061 if (debug_threads)
3062 {
3063 debug_printf ("wait_1 ret = %s, terminated with "
3064 "signal %d\n",
3065 target_pid_to_str (ptid_of (current_thread)),
3066 WTERMSIG (w));
3067 debug_exit ();
3068 }
3069 }
3070
3071 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3072 return filter_exit_event (event_child, ourstatus);
3073
3074 return ptid_of (current_thread);
3075 }
3076
3077 /* If a step-over executed a breakpoint instruction, then in the
3078 hardware single step case it means a gdb/gdbserver breakpoint had
3079 been planted on top of a permanent breakpoint; in the software
3080 single step case it may just mean that gdbserver hit the reinsert
3081 breakpoint. The PC has been adjusted by save_stop_reason to point
3082 at the breakpoint address.
3083 So in the hardware single step case, advance the PC manually past
3084 the breakpoint; in the software single step case, advance it only
3085 if it's not the single_step_breakpoint we are hitting.
3086 This avoids the program trapping on a permanent breakpoint
3087 forever. */
3088 if (step_over_bkpt != null_ptid
3089 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3090 && (event_child->stepping
3091 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3092 {
3093 int increment_pc = 0;
3094 int breakpoint_kind = 0;
3095 CORE_ADDR stop_pc = event_child->stop_pc;
3096
3097 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3098 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3099
3100 if (debug_threads)
3101 {
3102 debug_printf ("step-over for %s executed software breakpoint\n",
3103 target_pid_to_str (ptid_of (current_thread)));
3104 }
3105
3106 if (increment_pc != 0)
3107 {
3108 struct regcache *regcache
3109 = get_thread_regcache (current_thread, 1);
3110
3111 event_child->stop_pc += increment_pc;
3112 low_set_pc (regcache, event_child->stop_pc);
3113
3114 if (!low_breakpoint_at (event_child->stop_pc))
3115 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3116 }
3117 }
3118
3119 /* If this event was not handled before, and is not a SIGTRAP, we
3120 report it. SIGILL and SIGSEGV are also treated as traps in case
3121 a breakpoint is inserted at the current PC. If this target does
3122 not support internal breakpoints at all, we also report the
3123 SIGTRAP without further processing; it's of no concern to us. */
3124 maybe_internal_trap
3125 = (low_supports_breakpoints ()
3126 && (WSTOPSIG (w) == SIGTRAP
3127 || ((WSTOPSIG (w) == SIGILL
3128 || WSTOPSIG (w) == SIGSEGV)
3129 && low_breakpoint_at (event_child->stop_pc))));
3130
3131 if (maybe_internal_trap)
3132 {
3133 /* Handle anything that requires bookkeeping before deciding to
3134 report the event or continue waiting. */
3135
3136 /* First check if we can explain the SIGTRAP with an internal
3137 breakpoint, or if we should possibly report the event to GDB.
3138 Do this before anything that may remove or insert a
3139 breakpoint. */
3140 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3141
3142 /* We have a SIGTRAP, possibly a step-over dance has just
3143 finished. If so, tweak the state machine accordingly,
3144 reinsert breakpoints and delete any single-step
3145 breakpoints. */
3146 step_over_finished = finish_step_over (event_child);
3147
3148 /* Now invoke the callbacks of any internal breakpoints there. */
3149 check_breakpoints (event_child->stop_pc);
3150
3151 /* Handle tracepoint data collecting. This may overflow the
3152 trace buffer, and cause a tracing stop, removing
3153 breakpoints. */
3154 trace_event = handle_tracepoints (event_child);
3155
3156 if (bp_explains_trap)
3157 {
3158 if (debug_threads)
3159 debug_printf ("Hit a gdbserver breakpoint.\n");
3160 }
3161 }
3162 else
3163 {
3164 /* We have some other signal, possibly a step-over dance was in
3165 progress, and it should be cancelled too. */
3166 step_over_finished = finish_step_over (event_child);
3167 }
3168
3169 /* We have all the data we need. Either report the event to GDB, or
3170 resume threads and keep waiting for more. */
3171
3172 /* If we're collecting a fast tracepoint, finish the collection and
3173 move out of the jump pad before delivering a signal. See
3174 linux_stabilize_threads. */
3175
3176 if (WIFSTOPPED (w)
3177 && WSTOPSIG (w) != SIGTRAP
3178 && supports_fast_tracepoints ()
3179 && agent_loaded_p ())
3180 {
3181 if (debug_threads)
3182 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3183 "to defer or adjust it.\n",
3184 WSTOPSIG (w), lwpid_of (current_thread));
3185
3186 /* Allow debugging the jump pad itself. */
3187 if (current_thread->last_resume_kind != resume_step
3188 && maybe_move_out_of_jump_pad (event_child, &w))
3189 {
3190 enqueue_one_deferred_signal (event_child, &w);
3191
3192 if (debug_threads)
3193 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3194 WSTOPSIG (w), lwpid_of (current_thread));
3195
3196 resume_one_lwp (event_child, 0, 0, NULL);
3197
3198 if (debug_threads)
3199 debug_exit ();
3200 return ignore_event (ourstatus);
3201 }
3202 }
3203
3204 if (event_child->collecting_fast_tracepoint
3205 != fast_tpoint_collect_result::not_collecting)
3206 {
3207 if (debug_threads)
3208 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3209 "Check if we're already there.\n",
3210 lwpid_of (current_thread),
3211 (int) event_child->collecting_fast_tracepoint);
3212
3213 trace_event = 1;
3214
3215 event_child->collecting_fast_tracepoint
3216 = linux_fast_tracepoint_collecting (event_child, NULL);
3217
3218 if (event_child->collecting_fast_tracepoint
3219 != fast_tpoint_collect_result::before_insn)
3220 {
3221 /* No longer need this breakpoint. */
3222 if (event_child->exit_jump_pad_bkpt != NULL)
3223 {
3224 if (debug_threads)
3225 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3226 "stopping all threads momentarily.\n");
3227
3228 /* Other running threads could hit this breakpoint.
3229 We don't handle moribund locations like GDB does,
3230 instead we always pause all threads when removing
3231 breakpoints, so that any step-over or
3232 decr_pc_after_break adjustment is always taken
3233 care of while the breakpoint is still
3234 inserted. */
3235 stop_all_lwps (1, event_child);
3236
3237 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3238 event_child->exit_jump_pad_bkpt = NULL;
3239
3240 unstop_all_lwps (1, event_child);
3241
3242 gdb_assert (event_child->suspended >= 0);
3243 }
3244 }
3245
3246 if (event_child->collecting_fast_tracepoint
3247 == fast_tpoint_collect_result::not_collecting)
3248 {
3249 if (debug_threads)
3250 debug_printf ("fast tracepoint finished "
3251 "collecting successfully.\n");
3252
3253 /* We may have a deferred signal to report. */
3254 if (dequeue_one_deferred_signal (event_child, &w))
3255 {
3256 if (debug_threads)
3257 debug_printf ("dequeued one signal.\n");
3258 }
3259 else
3260 {
3261 if (debug_threads)
3262 debug_printf ("no deferred signals.\n");
3263
3264 if (stabilizing_threads)
3265 {
3266 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3267 ourstatus->value.sig = GDB_SIGNAL_0;
3268
3269 if (debug_threads)
3270 {
3271 debug_printf ("wait_1 ret = %s, stopped "
3272 "while stabilizing threads\n",
3273 target_pid_to_str (ptid_of (current_thread)));
3274 debug_exit ();
3275 }
3276
3277 return ptid_of (current_thread);
3278 }
3279 }
3280 }
3281 }
3282
3283 /* Check whether GDB would be interested in this event. */
3284
3285 /* Check if GDB is interested in this syscall. */
3286 if (WIFSTOPPED (w)
3287 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3288 && !gdb_catch_this_syscall (event_child))
3289 {
3290 if (debug_threads)
3291 {
3292 debug_printf ("Ignored syscall for LWP %ld.\n",
3293 lwpid_of (current_thread));
3294 }
3295
3296 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3297
3298 if (debug_threads)
3299 debug_exit ();
3300 return ignore_event (ourstatus);
3301 }
3302
3303 /* If GDB is not interested in this signal, don't stop other
3304 threads, and don't report it to GDB. Just resume the inferior
3305 right away. We do this for threading-related signals as well as
3306 any that GDB specifically requested we ignore. But never ignore
3307 SIGSTOP if we sent it ourselves, and do not ignore signals when
3308 stepping - they may require special handling to skip the signal
3309 handler. Also never ignore signals that could be caused by a
3310 breakpoint. */
3311 if (WIFSTOPPED (w)
3312 && current_thread->last_resume_kind != resume_step
3313 && (
3314 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3315 (current_process ()->priv->thread_db != NULL
3316 && (WSTOPSIG (w) == __SIGRTMIN
3317 || WSTOPSIG (w) == __SIGRTMIN + 1))
3318 ||
3319 #endif
3320 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3321 && !(WSTOPSIG (w) == SIGSTOP
3322 && current_thread->last_resume_kind == resume_stop)
3323 && !linux_wstatus_maybe_breakpoint (w))))
3324 {
3325 siginfo_t info, *info_p;
3326
3327 if (debug_threads)
3328 debug_printf ("Ignored signal %d for LWP %ld.\n",
3329 WSTOPSIG (w), lwpid_of (current_thread));
3330
3331 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3332 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3333 info_p = &info;
3334 else
3335 info_p = NULL;
3336
3337 if (step_over_finished)
3338 {
3339 /* We cancelled this thread's step-over above. We still
3340 need to unsuspend all other LWPs, and set them back
3341 running again while the signal handler runs. */
3342 unsuspend_all_lwps (event_child);
3343
3344 /* Enqueue the pending signal info so that proceed_all_lwps
3345 doesn't lose it. */
3346 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3347
3348 proceed_all_lwps ();
3349 }
3350 else
3351 {
3352 resume_one_lwp (event_child, event_child->stepping,
3353 WSTOPSIG (w), info_p);
3354 }
3355
3356 if (debug_threads)
3357 debug_exit ();
3358
3359 return ignore_event (ourstatus);
3360 }
3361
3362 /* Note that all addresses are always "out of the step range" when
3363 there's no range to begin with. */
3364 in_step_range = lwp_in_step_range (event_child);
3365
3366 /* If GDB wanted this thread to single step, and the thread is out
3367 of the step range, we always want to report the SIGTRAP, and let
3368 GDB handle it. Watchpoints should always be reported. So should
3369 signals we can't explain. A SIGTRAP we can't explain could be a
3370 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3371 we do, we'll be able to handle GDB breakpoints on top of internal
3372 breakpoints, by handling the internal breakpoint and still
3373 reporting the event to GDB. If we don't, we're out of luck, GDB
3374 won't see the breakpoint hit. If we see a single-step event but
3375 the thread should be continuing, don't pass the trap to gdb.
3376 That indicates that we had previously finished a single-step but
3377 left the single-step pending -- see
3378 complete_ongoing_step_over. */
3379 report_to_gdb = (!maybe_internal_trap
3380 || (current_thread->last_resume_kind == resume_step
3381 && !in_step_range)
3382 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3383 || (!in_step_range
3384 && !bp_explains_trap
3385 && !trace_event
3386 && !step_over_finished
3387 && !(current_thread->last_resume_kind == resume_continue
3388 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3389 || (gdb_breakpoint_here (event_child->stop_pc)
3390 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3391 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3392 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3393
3394 run_breakpoint_commands (event_child->stop_pc);
3395
3396 /* We found no reason GDB would want us to stop. We either hit one
3397 of our own breakpoints, or finished an internal step GDB
3398 shouldn't know about. */
3399 if (!report_to_gdb)
3400 {
3401 if (debug_threads)
3402 {
3403 if (bp_explains_trap)
3404 debug_printf ("Hit a gdbserver breakpoint.\n");
3405 if (step_over_finished)
3406 debug_printf ("Step-over finished.\n");
3407 if (trace_event)
3408 debug_printf ("Tracepoint event.\n");
3409 if (lwp_in_step_range (event_child))
3410 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3411 paddress (event_child->stop_pc),
3412 paddress (event_child->step_range_start),
3413 paddress (event_child->step_range_end));
3414 }
3415
3416 /* We're not reporting this breakpoint to GDB, so apply the
3417 decr_pc_after_break adjustment to the inferior's regcache
3418 ourselves. */
3419
3420 if (low_supports_breakpoints ())
3421 {
3422 struct regcache *regcache
3423 = get_thread_regcache (current_thread, 1);
3424 low_set_pc (regcache, event_child->stop_pc);
3425 }
3426
3427 if (step_over_finished)
3428 {
3429 /* If we have finished stepping over a breakpoint, we've
3430 stopped and suspended all LWPs momentarily except the
3431 stepping one. This is where we resume them all again.
3432 We're going to keep waiting, so use proceed, which
3433 handles stepping over the next breakpoint. */
3434 unsuspend_all_lwps (event_child);
3435 }
3436 else
3437 {
3438 /* Remove the single-step breakpoints, if any. Note that
3439 there is no single-step breakpoint left if we just finished
3440 stepping over. */
3441 if (supports_software_single_step ()
3442 && has_single_step_breakpoints (current_thread))
3443 {
3444 stop_all_lwps (0, event_child);
3445 delete_single_step_breakpoints (current_thread);
3446 unstop_all_lwps (0, event_child);
3447 }
3448 }
3449
3450 if (debug_threads)
3451 debug_printf ("proceeding all threads.\n");
3452 proceed_all_lwps ();
3453
3454 if (debug_threads)
3455 debug_exit ();
3456
3457 return ignore_event (ourstatus);
3458 }
3459
3460 if (debug_threads)
3461 {
3462 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3463 {
3464 std::string str
3465 = target_waitstatus_to_string (&event_child->waitstatus);
3466
3467 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3468 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3469 }
3470 if (current_thread->last_resume_kind == resume_step)
3471 {
3472 if (event_child->step_range_start == event_child->step_range_end)
3473 debug_printf ("GDB wanted to single-step, reporting event.\n");
3474 else if (!lwp_in_step_range (event_child))
3475 debug_printf ("Out of step range, reporting event.\n");
3476 }
3477 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3478 debug_printf ("Stopped by watchpoint.\n");
3479 else if (gdb_breakpoint_here (event_child->stop_pc))
3480 debug_printf ("Stopped by GDB breakpoint.\n");
3481 debug_printf ("Hit a non-gdbserver trap event.\n");
3483 }
3484
3485 /* Alright, we're going to report a stop. */
3486
3487 /* Remove single-step breakpoints. */
3488 if (supports_software_single_step ())
3489 {
3490 /* Whether to remove the single-step breakpoints. If true, stop
3491 all lwps, so that other threads won't hit a breakpoint left in
3492 stale memory. */
3493 int remove_single_step_breakpoints_p = 0;
3494
3495 if (non_stop)
3496 {
3497 remove_single_step_breakpoints_p
3498 = has_single_step_breakpoints (current_thread);
3499 }
3500 else
3501 {
3502 /* In all-stop, a stop reply cancels all previous resume
3503 requests. Delete all single-step breakpoints. */
3504
3505 find_thread ([&] (thread_info *thread) {
3506 if (has_single_step_breakpoints (thread))
3507 {
3508 remove_single_step_breakpoints_p = 1;
3509 return true;
3510 }
3511
3512 return false;
3513 });
3514 }
3515
3516 if (remove_single_step_breakpoints_p)
3517 {
3518 /* If we remove single-step breakpoints from memory, stop all lwps,
3519 so that other threads won't hit the breakpoint in the stale
3520 memory. */
3521 stop_all_lwps (0, event_child);
3522
3523 if (non_stop)
3524 {
3525 gdb_assert (has_single_step_breakpoints (current_thread));
3526 delete_single_step_breakpoints (current_thread);
3527 }
3528 else
3529 {
3530 for_each_thread ([] (thread_info *thread){
3531 if (has_single_step_breakpoints (thread))
3532 delete_single_step_breakpoints (thread);
3533 });
3534 }
3535
3536 unstop_all_lwps (0, event_child);
3537 }
3538 }
3539
3540 if (!stabilizing_threads)
3541 {
3542 /* In all-stop, stop all threads. */
3543 if (!non_stop)
3544 stop_all_lwps (0, NULL);
3545
3546 if (step_over_finished)
3547 {
3548 if (!non_stop)
3549 {
3550 /* If we were doing a step-over, all other threads but
3551 the stepping one had been paused in start_step_over,
3552 with their suspend counts incremented. We don't want
3553 to do a full unstop/unpause, because we're in
3554 all-stop mode (so we want threads stopped), but we
3555 still need to unsuspend the other threads, to
3556 decrement their `suspended' count back. */
3557 unsuspend_all_lwps (event_child);
3558 }
3559 else
3560 {
3561 /* If we just finished a step-over, then all threads had
3562 been momentarily paused. In all-stop, that's fine,
3563 we want threads stopped by now anyway. In non-stop,
3564 we need to re-resume threads that GDB wanted to be
3565 running. */
3566 unstop_all_lwps (1, event_child);
3567 }
3568 }
3569
3570 /* If we're not waiting for a specific LWP, choose an event LWP
3571 from among those that have had events. Giving equal priority
3572 to all LWPs that have had events helps prevent
3573 starvation. */
3574 if (ptid == minus_one_ptid)
3575 {
3576 event_child->status_pending_p = 1;
3577 event_child->status_pending = w;
3578
3579 select_event_lwp (&event_child);
3580
3581 /* current_thread and event_child must stay in sync. */
3582 current_thread = get_lwp_thread (event_child);
3583
3584 event_child->status_pending_p = 0;
3585 w = event_child->status_pending;
3586 }
3587
3588
3589 /* Stabilize threads (move out of jump pads). */
3590 if (!non_stop)
3591 target_stabilize_threads ();
3592 }
3593 else
3594 {
3595 /* If we just finished a step-over, then all threads had been
3596 momentarily paused. In all-stop, that's fine, we want
3597 threads stopped by now anyway. In non-stop, we need to
3598 re-resume threads that GDB wanted to be running. */
3599 if (step_over_finished)
3600 unstop_all_lwps (1, event_child);
3601 }
3602
3603 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3604 {
3605 /* If the reported event is an exit, fork, vfork or exec, let
3606 GDB know. */
3607
3608 /* Break the unreported fork relationship chain. */
3609 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3610 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3611 {
3612 event_child->fork_relative->fork_relative = NULL;
3613 event_child->fork_relative = NULL;
3614 }
3615
3616 *ourstatus = event_child->waitstatus;
3617 /* Clear the event lwp's waitstatus since we handled it already. */
3618 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3619 }
3620 else
3621 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3622
3623 /* Now that we've selected our final event LWP, un-adjust its PC if
3624 it was a software breakpoint, and the client doesn't know we can
3625 adjust the breakpoint ourselves. */
3626 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3627 && !cs.swbreak_feature)
3628 {
3629 int decr_pc = low_decr_pc_after_break ();
3630
3631 if (decr_pc != 0)
3632 {
3633 struct regcache *regcache
3634 = get_thread_regcache (current_thread, 1);
3635 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3636 }
3637 }
3638
3639 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3640 {
3641 get_syscall_trapinfo (event_child,
3642 &ourstatus->value.syscall_number);
3643 ourstatus->kind = event_child->syscall_state;
3644 }
3645 else if (current_thread->last_resume_kind == resume_stop
3646 && WSTOPSIG (w) == SIGSTOP)
3647 {
3648 /* A thread that has been requested to stop by GDB with vCont;t
3649 stopped cleanly, so report it as GDB_SIGNAL_0. The use of
3650 SIGSTOP is an implementation detail. */
3651 ourstatus->value.sig = GDB_SIGNAL_0;
3652 }
3653 else if (current_thread->last_resume_kind == resume_stop
3654 && WSTOPSIG (w) != SIGSTOP)
3655 {
3656 /* A thread that has been requested to stop by GDB with vCont;t,
3657 but it stopped for other reasons. */
3658 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3659 }
3660 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3661 {
3662 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3663 }
3664
3665 gdb_assert (step_over_bkpt == null_ptid);
3666
3667 if (debug_threads)
3668 {
3669 debug_printf ("wait_1 ret = %s, %d, %d\n",
3670 target_pid_to_str (ptid_of (current_thread)),
3671 ourstatus->kind, ourstatus->value.sig);
3672 debug_exit ();
3673 }
3674
3675 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3676 return filter_exit_event (event_child, ourstatus);
3677
3678 return ptid_of (current_thread);
3679 }
3680
3681 /* Get rid of any pending event in the pipe. */
3682 static void
3683 async_file_flush (void)
3684 {
3685 int ret;
3686 char buf;
3687
3688 do
3689 ret = read (linux_event_pipe[0], &buf, 1);
3690 while (ret >= 0 || (ret == -1 && errno == EINTR));
3691 }
3692
3693 /* Put something in the pipe, so the event loop wakes up. */
3694 static void
3695 async_file_mark (void)
3696 {
3697 int ret;
3698
3699 async_file_flush ();
3700
3701 do
3702 ret = write (linux_event_pipe[1], "+", 1);
3703 while (ret == 0 || (ret == -1 && errno == EINTR));
3704
3705 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3706 be awakened anyway. */
3707 }
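
/* For illustration: linux_event_pipe is the classic self-pipe. It
   is created elsewhere in this file when async mode is switched on,
   along these lines -- a sketch; both ends must be non-blocking so
   the flush/mark helpers above can loop without stalling:

     if (pipe (linux_event_pipe) != 0)
       error ("creating event pipe");

     fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
     fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);  */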
3708
3709 ptid_t
3710 linux_process_target::wait (ptid_t ptid,
3711 target_waitstatus *ourstatus,
3712 target_wait_flags target_options)
3713 {
3714 ptid_t event_ptid;
3715
3716 /* Flush the async file first. */
3717 if (target_is_async_p ())
3718 async_file_flush ();
3719
3720 do
3721 {
3722 event_ptid = wait_1 (ptid, ourstatus, target_options);
3723 }
3724 while ((target_options & TARGET_WNOHANG) == 0
3725 && event_ptid == null_ptid
3726 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3727
3728 /* If at least one stop was reported, there may be more. A single
3729 SIGCHLD can signal more than one child stop. */
3730 if (target_is_async_p ()
3731 && (target_options & TARGET_WNOHANG) != 0
3732 && event_ptid != null_ptid)
3733 async_file_mark ();
3734
3735 return event_ptid;
3736 }
3737
3738 /* Send a signal to an LWP. */
3739
3740 static int
3741 kill_lwp (unsigned long lwpid, int signo)
3742 {
3743 int ret;
3744
3745 errno = 0;
3746 ret = syscall (__NR_tkill, lwpid, signo);
3747 if (errno == ENOSYS)
3748 {
3749 /* If tkill fails, then we are not using nptl threads, a
3750 configuration we no longer support. */
3751 perror_with_name (("tkill"));
3752 }
3753 return ret;
3754 }
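
/* For illustration only: tkill directs a signal at one specific
   thread (LWP), unlike kill, which targets the whole thread group
   and lets the kernel pick a thread.  A minimal stand-alone sketch,
   assuming a Linux kernel that provides __NR_tkill:

     #include <signal.h>
     #include <sys/syscall.h>
     #include <unistd.h>

     static int
     stop_one_thread (pid_t lwpid)
     {
       return syscall (__NR_tkill, lwpid, SIGSTOP);
     }
*/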
3755
3756 void
3757 linux_stop_lwp (struct lwp_info *lwp)
3758 {
3759 send_sigstop (lwp);
3760 }
3761
3762 static void
3763 send_sigstop (struct lwp_info *lwp)
3764 {
3765 int pid;
3766
3767 pid = lwpid_of (get_lwp_thread (lwp));
3768
3769 /* If we already have a pending stop signal for this process, don't
3770 send another. */
3771 if (lwp->stop_expected)
3772 {
3773 if (debug_threads)
3774 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3775
3776 return;
3777 }
3778
3779 if (debug_threads)
3780 debug_printf ("Sending sigstop to lwp %d\n", pid);
3781
3782 lwp->stop_expected = 1;
3783 kill_lwp (pid, SIGSTOP);
3784 }
3785
3786 static void
3787 send_sigstop (thread_info *thread, lwp_info *except)
3788 {
3789 struct lwp_info *lwp = get_thread_lwp (thread);
3790
3791 /* Ignore EXCEPT. */
3792 if (lwp == except)
3793 return;
3794
3795 if (lwp->stopped)
3796 return;
3797
3798 send_sigstop (lwp);
3799 }
3800
3801 /* Increment the suspend count of an LWP, and stop it, if not stopped
3802 yet. */
3803 static void
3804 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3805 {
3806 struct lwp_info *lwp = get_thread_lwp (thread);
3807
3808 /* Ignore EXCEPT. */
3809 if (lwp == except)
3810 return;
3811
3812 lwp_suspended_inc (lwp);
3813
3814 send_sigstop (thread, except);
3815 }
3816
3817 static void
3818 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3819 {
3820 /* Store the exit status for later. */
3821 lwp->status_pending_p = 1;
3822 lwp->status_pending = wstat;
3823
3824 /* Store in waitstatus as well, as there's nothing else to process
3825 for this event. */
3826 if (WIFEXITED (wstat))
3827 {
3828 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3829 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3830 }
3831 else if (WIFSIGNALED (wstat))
3832 {
3833 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3834 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3835 }
3836
3837 /* Prevent trying to stop it. */
3838 lwp->stopped = 1;
3839
3840 /* No further stops are expected from a dead lwp. */
3841 lwp->stop_expected = 0;
3842 }
3843
3844 /* Return true if LWP has exited already, and has a pending exit event
3845 to report to GDB. */
3846
3847 static int
3848 lwp_is_marked_dead (struct lwp_info *lwp)
3849 {
3850 return (lwp->status_pending_p
3851 && (WIFEXITED (lwp->status_pending)
3852 || WIFSIGNALED (lwp->status_pending)));
3853 }
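
/* Illustrative sketch: the wait-status macros used by mark_lwp_dead
   and lwp_is_marked_dead come from <sys/wait.h> and decode the raw
   int filled in by waitpid, e.g.:

     #include <stdio.h>
     #include <sys/wait.h>

     static void
     describe_wstat (int wstat)
     {
       if (WIFEXITED (wstat))
         printf ("exited, code %d\n", WEXITSTATUS (wstat));
       else if (WIFSIGNALED (wstat))
         printf ("killed by signal %d\n", WTERMSIG (wstat));
       else if (WIFSTOPPED (wstat))
         printf ("stopped by signal %d\n", WSTOPSIG (wstat));
     }
*/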
3854
3855 void
3856 linux_process_target::wait_for_sigstop ()
3857 {
3858 struct thread_info *saved_thread;
3859 ptid_t saved_tid;
3860 int wstat;
3861 int ret;
3862
3863 saved_thread = current_thread;
3864 if (saved_thread != NULL)
3865 saved_tid = saved_thread->id;
3866 else
3867 saved_tid = null_ptid; /* avoid bogus unused warning */
3868
3869 if (debug_threads)
3870 debug_printf ("wait_for_sigstop: pulling events\n");
3871
3872 /* Passing NULL_PTID as filter indicates we want all events to be
3873 left pending. Eventually this returns when there are no
3874 unwaited-for children left. */
3875 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3876 gdb_assert (ret == -1);
3877
3878 if (saved_thread == NULL || mythread_alive (saved_tid))
3879 current_thread = saved_thread;
3880 else
3881 {
3882 if (debug_threads)
3883 debug_printf ("Previously current thread died.\n");
3884
3885 /* We can't change the current inferior behind GDB's back,
3886 otherwise, a subsequent command may apply to the wrong
3887 process. */
3888 current_thread = NULL;
3889 }
3890 }
3891
3892 bool
3893 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3894 {
3895 struct lwp_info *lwp = get_thread_lwp (thread);
3896
3897 if (lwp->suspended != 0)
3898 {
3899 internal_error (__FILE__, __LINE__,
3900 "LWP %ld is suspended, suspended=%d\n",
3901 lwpid_of (thread), lwp->suspended);
3902 }
3903 gdb_assert (lwp->stopped);
3904
3905 /* Allow debugging the jump pad, gdb_collect, etc. */
3906 return (supports_fast_tracepoints ()
3907 && agent_loaded_p ()
3908 && (gdb_breakpoint_here (lwp->stop_pc)
3909 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3910 || thread->last_resume_kind == resume_step)
3911 && (linux_fast_tracepoint_collecting (lwp, NULL)
3912 != fast_tpoint_collect_result::not_collecting));
3913 }
3914
3915 void
3916 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3917 {
3918 struct thread_info *saved_thread;
3919 struct lwp_info *lwp = get_thread_lwp (thread);
3920 int *wstat;
3921
3922 if (lwp->suspended != 0)
3923 {
3924 internal_error (__FILE__, __LINE__,
3925 "LWP %ld is suspended, suspended=%d\n",
3926 lwpid_of (thread), lwp->suspended);
3927 }
3928 gdb_assert (lwp->stopped);
3929
3930 /* For gdb_breakpoint_here. */
3931 saved_thread = current_thread;
3932 current_thread = thread;
3933
3934 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3935
3936 /* Allow debugging the jump pad, gdb_collect, etc. */
3937 if (!gdb_breakpoint_here (lwp->stop_pc)
3938 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3939 && thread->last_resume_kind != resume_step
3940 && maybe_move_out_of_jump_pad (lwp, wstat))
3941 {
3942 if (debug_threads)
3943 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3944 lwpid_of (thread));
3945
3946 if (wstat)
3947 {
3948 lwp->status_pending_p = 0;
3949 enqueue_one_deferred_signal (lwp, wstat);
3950
3951 if (debug_threads)
3952 debug_printf ("Signal %d for LWP %ld deferred "
3953 "(in jump pad)\n",
3954 WSTOPSIG (*wstat), lwpid_of (thread));
3955 }
3956
3957 resume_one_lwp (lwp, 0, 0, NULL);
3958 }
3959 else
3960 lwp_suspended_inc (lwp);
3961
3962 current_thread = saved_thread;
3963 }
3964
3965 static bool
3966 lwp_running (thread_info *thread)
3967 {
3968 struct lwp_info *lwp = get_thread_lwp (thread);
3969
3970 if (lwp_is_marked_dead (lwp))
3971 return false;
3972
3973 return !lwp->stopped;
3974 }
3975
3976 void
3977 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3978 {
3979 /* Should not be called recursively. */
3980 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3981
3982 if (debug_threads)
3983 {
3984 debug_enter ();
3985 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3986 suspend ? "stop-and-suspend" : "stop",
3987 except != NULL
3988 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3989 : "none");
3990 }
3991
3992 stopping_threads = (suspend
3993 ? STOPPING_AND_SUSPENDING_THREADS
3994 : STOPPING_THREADS);
3995
3996 if (suspend)
3997 for_each_thread ([&] (thread_info *thread)
3998 {
3999 suspend_and_send_sigstop (thread, except);
4000 });
4001 else
4002 for_each_thread ([&] (thread_info *thread)
4003 {
4004 send_sigstop (thread, except);
4005 });
4006
4007 wait_for_sigstop ();
4008 stopping_threads = NOT_STOPPING_THREADS;
4009
4010 if (debug_threads)
4011 {
4012 debug_printf ("stop_all_lwps done, setting stopping_threads "
4013 "back to !stopping\n");
4014 debug_exit ();
4015 }
4016 }
4017
4018 /* Enqueue one signal in the chain of signals which need to be
4019 delivered to this process on next resume. */
4020
4021 static void
4022 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4023 {
4024 lwp->pending_signals.emplace_back (signal);
4025 if (info == nullptr)
4026 memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
4027 else
4028 lwp->pending_signals.back ().info = *info;
4029 }
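
/* For illustration: keeping the siginfo_t next to the signal number
   matters because the info is later re-injected with
   PTRACE_SETSIGINFO just before the signal is delivered on resume
   (see resume_one_lwp_throw).  A hedged sketch of that round trip,
   assuming LWPID is ptrace-stopped on a signal:

     siginfo_t si;
     if (ptrace (PTRACE_GETSIGINFO, lwpid, 0, &si) == 0)
       {
         // ... queue SI while the thread stays stopped ...
         ptrace (PTRACE_SETSIGINFO, lwpid, 0, &si);
         ptrace (PTRACE_CONT, lwpid, 0, (void *) (uintptr_t) si.si_signo);
       }
*/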
4030
4031 void
4032 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4033 {
4034 struct thread_info *thread = get_lwp_thread (lwp);
4035 struct regcache *regcache = get_thread_regcache (thread, 1);
4036
4037 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4038
4039 current_thread = thread;
4040 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4041
4042 for (CORE_ADDR pc : next_pcs)
4043 set_single_step_breakpoint (pc, current_ptid);
4044 }
4045
4046 int
4047 linux_process_target::single_step (lwp_info* lwp)
4048 {
4049 int step = 0;
4050
4051 if (supports_hardware_single_step ())
4052 {
4053 step = 1;
4054 }
4055 else if (supports_software_single_step ())
4056 {
4057 install_software_single_step_breakpoints (lwp);
4058 step = 0;
4059 }
4060 else
4061 {
4062 if (debug_threads)
4063 debug_printf ("stepping is not implemented on this target\n");
4064 }
4065
4066 return step;
4067 }
4068
4069 /* The signal can be delivered to the inferior if we are not trying to
4070 finish a fast tracepoint collect. Since the signal can be delivered
4071 during the step-over, the program may enter the signal handler and
4072 trap again after returning from it. We can live with the spurious
4073 double traps. */
4074
4075 static int
4076 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4077 {
4078 return (lwp->collecting_fast_tracepoint
4079 == fast_tpoint_collect_result::not_collecting);
4080 }
4081
4082 void
4083 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4084 int signal, siginfo_t *info)
4085 {
4086 struct thread_info *thread = get_lwp_thread (lwp);
4087 struct thread_info *saved_thread;
4088 int ptrace_request;
4089 struct process_info *proc = get_thread_process (thread);
4090
4091 /* Note that target description may not be initialised
4092 (proc->tdesc == NULL) at this point because the program hasn't
4093 stopped at the first instruction yet. It means GDBserver skips
4094 the extra traps from the wrapper program (see option --wrapper).
4095 Code in this function that requires register access should be
4096 guarded by a check that proc->tdesc is not NULL, or similar. */
4097
4098 if (lwp->stopped == 0)
4099 return;
4100
4101 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4102
4103 fast_tpoint_collect_result fast_tp_collecting
4104 = lwp->collecting_fast_tracepoint;
4105
4106 gdb_assert (!stabilizing_threads
4107 || (fast_tp_collecting
4108 != fast_tpoint_collect_result::not_collecting));
4109
4110 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4111 user used the "jump" command, or "set $pc = foo"). */
4112 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4113 {
4114 /* Collecting 'while-stepping' actions doesn't make sense
4115 anymore. */
4116 release_while_stepping_state_list (thread);
4117 }
4118
4119 /* If we have pending signals or status, and a new signal, enqueue the
4120 signal. Also enqueue the signal if it can't be delivered to the
4121 inferior right now. */
4122 if (signal != 0
4123 && (lwp->status_pending_p
4124 || !lwp->pending_signals.empty ()
4125 || !lwp_signal_can_be_delivered (lwp)))
4126 {
4127 enqueue_pending_signal (lwp, signal, info);
4128
4129 /* Postpone any pending signal. It was enqueued above. */
4130 signal = 0;
4131 }
4132
4133 if (lwp->status_pending_p)
4134 {
4135 if (debug_threads)
4136 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4137 " has pending status\n",
4138 lwpid_of (thread), step ? "step" : "continue",
4139 lwp->stop_expected ? "expected" : "not expected");
4140 return;
4141 }
4142
4143 saved_thread = current_thread;
4144 current_thread = thread;
4145
4146 /* This bit needs some thinking about. If we get a signal that
4147 we must report while a single-step reinsert is still pending,
4148 we often end up resuming the thread. It might be better to
4149 (ew) allow a stack of pending events; then we could be sure that
4150 the reinsert happened right away and not lose any signals.
4151
4152 Making this stack would also shrink the window in which breakpoints are
4153 uninserted (see comment in linux_wait_for_lwp) but not enough for
4154 complete correctness, so it won't solve that problem. It may be
4155 worthwhile just to solve this one, however. */
4156 if (lwp->bp_reinsert != 0)
4157 {
4158 if (debug_threads)
4159 debug_printf (" pending reinsert at 0x%s\n",
4160 paddress (lwp->bp_reinsert));
4161
4162 if (supports_hardware_single_step ())
4163 {
4164 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4165 {
4166 if (step == 0)
4167 warning ("BAD - reinserting but not stepping.");
4168 if (lwp->suspended)
4169 warning ("BAD - reinserting and suspended(%d).",
4170 lwp->suspended);
4171 }
4172 }
4173
4174 step = maybe_hw_step (thread);
4175 }
4176
4177 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4178 {
4179 if (debug_threads)
4180 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4181 " (exit-jump-pad-bkpt)\n",
4182 lwpid_of (thread));
4183 }
4184 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4185 {
4186 if (debug_threads)
4187 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4188 " single-stepping\n",
4189 lwpid_of (thread));
4190
4191 if (supports_hardware_single_step ())
4192 step = 1;
4193 else
4194 {
4195 internal_error (__FILE__, __LINE__,
4196 "moving out of jump pad single-stepping"
4197 " not implemented on this target");
4198 }
4199 }
4200
4201 /* If we have while-stepping actions in this thread, set it stepping.
4202 If we have a signal to deliver, it may or may not be set to
4203 SIG_IGN, we don't know. Assume so, and allow collecting
4204 while-stepping into a signal handler. A possible smart thing to
4205 do would be to set an internal breakpoint at the signal return
4206 address, continue, and carry on catching this while-stepping
4207 action only when that breakpoint is hit. A future
4208 enhancement. */
4209 if (thread->while_stepping != NULL)
4210 {
4211 if (debug_threads)
4212 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4213 lwpid_of (thread));
4214
4215 step = single_step (lwp);
4216 }
4217
4218 if (proc->tdesc != NULL && low_supports_breakpoints ())
4219 {
4220 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4221
4222 lwp->stop_pc = low_get_pc (regcache);
4223
4224 if (debug_threads)
4225 {
4226 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4227 (long) lwp->stop_pc);
4228 }
4229 }
4230
4231 /* If we have pending signals, consume one if it can be delivered to
4232 the inferior. */
4233 if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4234 {
4235 const pending_signal &p_sig = lwp->pending_signals.front ();
4236
4237 signal = p_sig.signal;
4238 if (p_sig.info.si_signo != 0)
4239 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4240 &p_sig.info);
4241
4242 lwp->pending_signals.pop_front ();
4243 }
4244
4245 if (debug_threads)
4246 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4247 lwpid_of (thread), step ? "step" : "continue", signal,
4248 lwp->stop_expected ? "expected" : "not expected");
4249
4250 low_prepare_to_resume (lwp);
4251
4252 regcache_invalidate_thread (thread);
4253 errno = 0;
4254 lwp->stepping = step;
4255 if (step)
4256 ptrace_request = PTRACE_SINGLESTEP;
4257 else if (gdb_catching_syscalls_p (lwp))
4258 ptrace_request = PTRACE_SYSCALL;
4259 else
4260 ptrace_request = PTRACE_CONT;
4261 ptrace (ptrace_request,
4262 lwpid_of (thread),
4263 (PTRACE_TYPE_ARG3) 0,
4264 /* Coerce to a uintptr_t first to avoid potential gcc warning
4265 of coercing an 8 byte integer to a 4 byte pointer. */
4266 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4267
4268 current_thread = saved_thread;
4269 if (errno)
4270 perror_with_name ("resuming thread");
4271
4272 /* Successfully resumed. Clear state that no longer makes sense,
4273 and mark the LWP as running. Must not do this before resuming
4274 otherwise if that fails other code will be confused. E.g., we'd
4275 later try to stop the LWP and hang forever waiting for a stop
4276 status. Note that we must not throw after this is cleared,
4277 otherwise handle_zombie_lwp_error would get confused. */
4278 lwp->stopped = 0;
4279 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4280 }
4281
4282 void
4283 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4284 {
4285 /* Nop. */
4286 }
4287
4288 /* Called when we try to resume a stopped LWP and that errors out. If
4289 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4290 or about to become), discard the error, clear any pending status
4291 the LWP may have, and return true (we'll collect the exit status
4292 soon enough). Otherwise, return false. */
4293
4294 static int
4295 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4296 {
4297 struct thread_info *thread = get_lwp_thread (lp);
4298
4299 /* If we get an error after resuming the LWP successfully, we'd
4300 confuse !T state for the LWP being gone. */
4301 gdb_assert (lp->stopped);
4302
4303 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4304 because even if ptrace failed with ESRCH, the tracee may be "not
4305 yet fully dead", but already refusing ptrace requests. In that
4306 case the tracee has 'R (Running)' state for a little bit
4307 (observed in Linux 3.18). See also the note on ESRCH in the
4308 ptrace(2) man page. Instead, check whether the LWP has any state
4309 other than ptrace-stopped. */
4310
4311 /* Don't assume anything if /proc/PID/status can't be read. */
4312 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4313 {
4314 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4315 lp->status_pending_p = 0;
4316 return 1;
4317 }
4318 return 0;
4319 }
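
/* Illustrative sketch (assumptions flagged): a predicate like
   linux_proc_pid_is_trace_stopped_nowarn boils down to reading the
   "State:" line of /proc/PID/status and checking for 't'/'T'
   (tracing stop; the exact letter varies across kernel versions):

     #include <stdio.h>

     static int
     pid_is_trace_stopped (int pid)
     {
       char path[64], line[256], state = '\0';
       snprintf (path, sizeof path, "/proc/%d/status", pid);
       FILE *f = fopen (path, "r");
       if (f == NULL)
         return -1;   // Unknown; the caller must not assume.
       while (fgets (line, sizeof line, f) != NULL)
         if (sscanf (line, "State: %c", &state) == 1)
           break;
       fclose (f);
       return state == 't' || state == 'T';
     }
*/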
4320
4321 void
4322 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4323 siginfo_t *info)
4324 {
4325 try
4326 {
4327 resume_one_lwp_throw (lwp, step, signal, info);
4328 }
4329 catch (const gdb_exception_error &ex)
4330 {
4331 if (!check_ptrace_stopped_lwp_gone (lwp))
4332 throw;
4333 }
4334 }
4335
4336 /* This function is called once per thread via for_each_thread.
4337 We look up which resume request applies to THREAD and mark it with a
4338 pointer to the appropriate resume request.
4339
4340 This algorithm is O(threads * resume elements), but resume elements
4341 is small (and will remain small at least until GDB supports thread
4342 suspension). */
4343
4344 static void
4345 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4346 {
4347 struct lwp_info *lwp = get_thread_lwp (thread);
4348
4349 for (int ndx = 0; ndx < n; ndx++)
4350 {
4351 ptid_t ptid = resume[ndx].thread;
4352 if (ptid == minus_one_ptid
4353 || ptid == thread->id
4354 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4355 of PID'. */
4356 || (ptid.pid () == pid_of (thread)
4357 && (ptid.is_pid ()
4358 || ptid.lwp () == -1)))
4359 {
4360 if (resume[ndx].kind == resume_stop
4361 && thread->last_resume_kind == resume_stop)
4362 {
4363 if (debug_threads)
4364 debug_printf ("already %s LWP %ld at GDB's request\n",
4365 (thread->last_status.kind
4366 == TARGET_WAITKIND_STOPPED)
4367 ? "stopped"
4368 : "stopping",
4369 lwpid_of (thread));
4370
4371 continue;
4372 }
4373
4374 /* Ignore (wildcard) resume requests for already-resumed
4375 threads. */
4376 if (resume[ndx].kind != resume_stop
4377 && thread->last_resume_kind != resume_stop)
4378 {
4379 if (debug_threads)
4380 debug_printf ("already %s LWP %ld at GDB's request\n",
4381 (thread->last_resume_kind
4382 == resume_step)
4383 ? "stepping"
4384 : "continuing",
4385 lwpid_of (thread));
4386 continue;
4387 }
4388
4389 /* Don't let wildcard resumes resume fork children that GDB
4390 does not yet know are new fork children. */
4391 if (lwp->fork_relative != NULL)
4392 {
4393 struct lwp_info *rel = lwp->fork_relative;
4394
4395 if (rel->status_pending_p
4396 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4397 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4398 {
4399 if (debug_threads)
4400 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4401 lwpid_of (thread));
4402 continue;
4403 }
4404 }
4405
4406 /* If the thread has a pending event that has already been
4407 reported to GDBserver core, but GDB has not pulled the
4408 event out of the vStopped queue yet, likewise, ignore the
4409 (wildcard) resume request. */
4410 if (in_queued_stop_replies (thread->id))
4411 {
4412 if (debug_threads)
4413 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4414 lwpid_of (thread));
4415 continue;
4416 }
4417
4418 lwp->resume = &resume[ndx];
4419 thread->last_resume_kind = lwp->resume->kind;
4420
4421 lwp->step_range_start = lwp->resume->step_range_start;
4422 lwp->step_range_end = lwp->resume->step_range_end;
4423
4424 /* If we had a deferred signal to report, dequeue one now.
4425 This can happen if LWP gets more than one signal while
4426 trying to get out of a jump pad. */
4427 if (lwp->stopped
4428 && !lwp->status_pending_p
4429 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4430 {
4431 lwp->status_pending_p = 1;
4432
4433 if (debug_threads)
4434 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4435 "leaving status pending.\n",
4436 WSTOPSIG (lwp->status_pending),
4437 lwpid_of (thread));
4438 }
4439
4440 return;
4441 }
4442 }
4443
4444 /* No resume action for this thread. */
4445 lwp->resume = NULL;
4446 }
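
/* For illustration, the ptid matching above accepts three request
   forms from the remote protocol: an exact thread, a whole process
   ('pPID' or 'pPID.-1'), and the global wildcard.  A condensed
   restatement using the same ptid_t API:

     static bool
     resume_applies_to (ptid_t req, ptid_t thread_id)
     {
       return (req == minus_one_ptid        // vCont wildcard: all threads
               || req == thread_id          // exact pPID.TID
               || (req.pid () == thread_id.pid ()
                   && (req.is_pid ()        // pPID
                       || req.lwp () == -1)));  // pPID.-1
     }
*/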
4447
4448 bool
4449 linux_process_target::resume_status_pending (thread_info *thread)
4450 {
4451 struct lwp_info *lwp = get_thread_lwp (thread);
4452
4453 /* LWPs which will not be resumed are not interesting, because
4454 we might not wait for them next time through linux_wait. */
4455 if (lwp->resume == NULL)
4456 return false;
4457
4458 return thread_still_has_status_pending (thread);
4459 }
4460
4461 bool
4462 linux_process_target::thread_needs_step_over (thread_info *thread)
4463 {
4464 struct lwp_info *lwp = get_thread_lwp (thread);
4465 struct thread_info *saved_thread;
4466 CORE_ADDR pc;
4467 struct process_info *proc = get_thread_process (thread);
4468
4469 /* GDBserver is skipping the extra traps from the wrapper program,
4470 don't have to do step over. */
4471 if (proc->tdesc == NULL)
4472 return false;
4473
4474 /* LWPs which will not be resumed are not interesting, because we
4475 might not wait for them next time through linux_wait. */
4476
4477 if (!lwp->stopped)
4478 {
4479 if (debug_threads)
4480 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4481 lwpid_of (thread));
4482 return false;
4483 }
4484
4485 if (thread->last_resume_kind == resume_stop)
4486 {
4487 if (debug_threads)
4488 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4489 " stopped\n",
4490 lwpid_of (thread));
4491 return false;
4492 }
4493
4494 gdb_assert (lwp->suspended >= 0);
4495
4496 if (lwp->suspended)
4497 {
4498 if (debug_threads)
4499 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4500 lwpid_of (thread));
4501 return false;
4502 }
4503
4504 if (lwp->status_pending_p)
4505 {
4506 if (debug_threads)
4507 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4508 " status.\n",
4509 lwpid_of (thread));
4510 return false;
4511 }
4512
4513 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4514 or we have. */
4515 pc = get_pc (lwp);
4516
4517 /* If the PC has changed since we stopped, then don't do anything,
4518 and let the breakpoint/tracepoint be hit. This happens if, for
4519 instance, GDB handled the decr_pc_after_break subtraction itself,
4520 GDB is OOL stepping this thread, or the user has issued a "jump"
4521 command, or poked thread's registers herself. */
4522 if (pc != lwp->stop_pc)
4523 {
4524 if (debug_threads)
4525 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4526 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4527 lwpid_of (thread),
4528 paddress (lwp->stop_pc), paddress (pc));
4529 return false;
4530 }
4531
4532 /* On software single step target, resume the inferior with signal
4533 rather than stepping over. */
4534 if (supports_software_single_step ()
4535 && !lwp->pending_signals.empty ()
4536 && lwp_signal_can_be_delivered (lwp))
4537 {
4538 if (debug_threads)
4539 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4540 " signals.\n",
4541 lwpid_of (thread));
4542
4543 return false;
4544 }
4545
4546 saved_thread = current_thread;
4547 current_thread = thread;
4548
4549 /* We can only step over breakpoints we know about. */
4550 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4551 {
4552 /* Don't step over a breakpoint that GDB expects to hit
4553 though. If the condition is being evaluated on the target's side
4554 and it evaluates to false, step over this breakpoint as well. */
4555 if (gdb_breakpoint_here (pc)
4556 && gdb_condition_true_at_breakpoint (pc)
4557 && gdb_no_commands_at_breakpoint (pc))
4558 {
4559 if (debug_threads)
4560 debug_printf ("Need step over [LWP %ld]? yes, but found"
4561 " GDB breakpoint at 0x%s; skipping step over\n",
4562 lwpid_of (thread), paddress (pc));
4563
4564 current_thread = saved_thread;
4565 return false;
4566 }
4567 else
4568 {
4569 if (debug_threads)
4570 debug_printf ("Need step over [LWP %ld]? yes, "
4571 "found breakpoint at 0x%s\n",
4572 lwpid_of (thread), paddress (pc));
4573
4574 /* We've found an lwp that needs stepping over --- return true so
4575 that find_thread stops looking. */
4576 current_thread = saved_thread;
4577
4578 return true;
4579 }
4580 }
4581
4582 current_thread = saved_thread;
4583
4584 if (debug_threads)
4585 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4586 " at 0x%s\n",
4587 lwpid_of (thread), paddress (pc));
4588
4589 return false;
4590 }
4591
4592 void
4593 linux_process_target::start_step_over (lwp_info *lwp)
4594 {
4595 struct thread_info *thread = get_lwp_thread (lwp);
4596 struct thread_info *saved_thread;
4597 CORE_ADDR pc;
4598 int step;
4599
4600 if (debug_threads)
4601 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4602 lwpid_of (thread));
4603
4604 stop_all_lwps (1, lwp);
4605
4606 if (lwp->suspended != 0)
4607 {
4608 internal_error (__FILE__, __LINE__,
4609 "LWP %ld suspended=%d\n", lwpid_of (thread),
4610 lwp->suspended);
4611 }
4612
4613 if (debug_threads)
4614 debug_printf ("Done stopping all threads for step-over.\n");
4615
4616 /* Note, we should always reach here with an already adjusted PC,
4617 either by GDB (if we're resuming due to GDB's request), or by our
4618 caller, if we just finished handling an internal breakpoint GDB
4619 shouldn't care about. */
4620 pc = get_pc (lwp);
4621
4622 saved_thread = current_thread;
4623 current_thread = thread;
4624
4625 lwp->bp_reinsert = pc;
4626 uninsert_breakpoints_at (pc);
4627 uninsert_fast_tracepoint_jumps_at (pc);
4628
4629 step = single_step (lwp);
4630
4631 current_thread = saved_thread;
4632
4633 resume_one_lwp (lwp, step, 0, NULL);
4634
4635 /* Require next event from this LWP. */
4636 step_over_bkpt = thread->id;
4637 }
4638
4639 bool
4640 linux_process_target::finish_step_over (lwp_info *lwp)
4641 {
4642 if (lwp->bp_reinsert != 0)
4643 {
4644 struct thread_info *saved_thread = current_thread;
4645
4646 if (debug_threads)
4647 debug_printf ("Finished step over.\n");
4648
4649 current_thread = get_lwp_thread (lwp);
4650
4651 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4652 may be no breakpoint to reinsert there by now. */
4653 reinsert_breakpoints_at (lwp->bp_reinsert);
4654 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4655
4656 lwp->bp_reinsert = 0;
4657
4658 /* Delete any single-step breakpoints. No longer needed. We
4659 don't have to worry about other threads hitting this trap,
4660 and later not being able to explain it, because we were
4661 stepping over a breakpoint, and we hold all threads but
4662 LWP stopped while doing that. */
4663 if (!supports_hardware_single_step ())
4664 {
4665 gdb_assert (has_single_step_breakpoints (current_thread));
4666 delete_single_step_breakpoints (current_thread);
4667 }
4668
4669 step_over_bkpt = null_ptid;
4670 current_thread = saved_thread;
4671 return true;
4672 }
4673 else
4674 return false;
4675 }
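
/* A condensed sketch of the step-over dance implemented by
   start_step_over/finish_step_over above (illustrative summary, not
   a substitute for the real code):

     stop_all_lwps (1, lwp);            // freeze every other thread
     lwp->bp_reinsert = pc;             // remember the breakpoint PC
     uninsert_breakpoints_at (pc);      // restore the original insn
     resume_one_lwp (lwp, single_step (lwp), 0, NULL);
     // ... wait for this LWP's trap, then, in finish_step_over: ...
     reinsert_breakpoints_at (lwp->bp_reinsert);
     lwp->bp_reinsert = 0;
     unstop_all_lwps (1, lwp);          // let the world run again

   Holding all other threads stopped is what makes temporarily
   removing the breakpoint safe.  */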
4676
4677 void
4678 linux_process_target::complete_ongoing_step_over ()
4679 {
4680 if (step_over_bkpt != null_ptid)
4681 {
4682 struct lwp_info *lwp;
4683 int wstat;
4684 int ret;
4685
4686 if (debug_threads)
4687 debug_printf ("detach: step over in progress, finish it first\n");
4688
4689 /* Passing NULL_PTID as filter indicates we want all events to
4690 be left pending. Eventually this returns when there are no
4691 unwaited-for children left. */
4692 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4693 __WALL);
4694 gdb_assert (ret == -1);
4695
4696 lwp = find_lwp_pid (step_over_bkpt);
4697 if (lwp != NULL)
4698 {
4699 finish_step_over (lwp);
4700
4701 /* If we got our step SIGTRAP, don't leave it pending,
4702 otherwise we would report it to GDB as a spurious
4703 SIGTRAP. */
4704 gdb_assert (lwp->status_pending_p);
4705 if (WIFSTOPPED (lwp->status_pending)
4706 && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4707 {
4708 thread_info *thread = get_lwp_thread (lwp);
4709 if (thread->last_resume_kind != resume_step)
4710 {
4711 if (debug_threads)
4712 debug_printf ("detach: discard step-over SIGTRAP\n");
4713
4714 lwp->status_pending_p = 0;
4715 lwp->status_pending = 0;
4716 resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4717 }
4718 else
4719 {
4720 if (debug_threads)
4721 debug_printf ("detach: resume_step, "
4722 "not discarding step-over SIGTRAP\n");
4723 }
4724 }
4725 }
4726 step_over_bkpt = null_ptid;
4727 unsuspend_all_lwps (lwp);
4728 }
4729 }
4730
4731 void
4732 linux_process_target::resume_one_thread (thread_info *thread,
4733 bool leave_all_stopped)
4734 {
4735 struct lwp_info *lwp = get_thread_lwp (thread);
4736 int leave_pending;
4737
4738 if (lwp->resume == NULL)
4739 return;
4740
4741 if (lwp->resume->kind == resume_stop)
4742 {
4743 if (debug_threads)
4744 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4745
4746 if (!lwp->stopped)
4747 {
4748 if (debug_threads)
4749 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4750
4751 /* Stop the thread, and wait for the event asynchronously,
4752 through the event loop. */
4753 send_sigstop (lwp);
4754 }
4755 else
4756 {
4757 if (debug_threads)
4758 debug_printf ("already stopped LWP %ld\n",
4759 lwpid_of (thread));
4760
4761 /* The LWP may have been stopped in an internal event that
4762 was not meant to be notified back to GDB (e.g., gdbserver
4763 breakpoint), so we should be reporting a stop event in
4764 this case too. */
4765
4766 /* If the thread already has a pending SIGSTOP, this is a
4767 no-op. Otherwise, something later will presumably resume
4768 the thread and this will cause it to cancel any pending
4769 operation, due to last_resume_kind == resume_stop. If
4770 the thread already has a pending status to report, we
4771 will still report it the next time we wait - see
4772 status_pending_p_callback. */
4773
4774 /* If we already have a pending signal to report, then
4775 there's no need to queue a SIGSTOP, as this means we're
4776 midway through moving the LWP out of the jumppad, and we
4777 will report the pending signal as soon as that is
4778 finished. */
4779 if (lwp->pending_signals_to_report.empty ())
4780 send_sigstop (lwp);
4781 }
4782
4783 /* For stop requests, we're done. */
4784 lwp->resume = NULL;
4785 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4786 return;
4787 }
4788
4789 /* If this thread which is about to be resumed has a pending status,
4790 then don't resume it - we can just report the pending status.
4791 Likewise if it is suspended, because e.g., another thread is
4792 stepping past a breakpoint. Make sure to queue any signals that
4793 would otherwise be sent. In all-stop mode, we make this decision
4794 based on whether *any* thread has a pending status. If there's a
4795 thread that needs the step-over-breakpoint dance, then don't
4796 resume any other thread but that particular one. */
4797 leave_pending = (lwp->suspended
4798 || lwp->status_pending_p
4799 || leave_all_stopped);
4800
4801 /* If we have a new signal, enqueue the signal. */
4802 if (lwp->resume->sig != 0)
4803 {
4804 siginfo_t info, *info_p;
4805
4806 /* If this is the same signal we were previously stopped by,
4807 make sure to queue its siginfo. */
4808 if (WIFSTOPPED (lwp->last_status)
4809 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4810 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4811 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4812 info_p = &info;
4813 else
4814 info_p = NULL;
4815
4816 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4817 }
4818
4819 if (!leave_pending)
4820 {
4821 if (debug_threads)
4822 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4823
4824 proceed_one_lwp (thread, NULL);
4825 }
4826 else
4827 {
4828 if (debug_threads)
4829 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4830 }
4831
4832 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4833 lwp->resume = NULL;
4834 }
4835
4836 void
4837 linux_process_target::resume (thread_resume *resume_info, size_t n)
4838 {
4839 struct thread_info *need_step_over = NULL;
4840
4841 if (debug_threads)
4842 {
4843 debug_enter ();
4844 debug_printf ("linux_resume:\n");
4845 }
4846
4847 for_each_thread ([&] (thread_info *thread)
4848 {
4849 linux_set_resume_request (thread, resume_info, n);
4850 });
4851
4852 /* If there is a thread which would otherwise be resumed, which has
4853 a pending status, then don't resume any threads - we can just
4854 report the pending status. Make sure to queue any signals that
4855 would otherwise be sent. In non-stop mode, we'll apply this
4856 logic to each thread individually. We consume all pending events
4857 before considering to start a step-over (in all-stop). */
4858 bool any_pending = false;
4859 if (!non_stop)
4860 any_pending = find_thread ([this] (thread_info *thread)
4861 {
4862 return resume_status_pending (thread);
4863 }) != nullptr;
4864
4865 /* If there is a thread which would otherwise be resumed, which is
4866 stopped at a breakpoint that needs stepping over, then don't
4867 resume any threads - have it step over the breakpoint with all
4868 other threads stopped, then resume all threads again. Make sure
4869 to queue any signals that would otherwise be delivered or
4870 queued. */
4871 if (!any_pending && low_supports_breakpoints ())
4872 need_step_over = find_thread ([this] (thread_info *thread)
4873 {
4874 return thread_needs_step_over (thread);
4875 });
4876
4877 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4878
4879 if (debug_threads)
4880 {
4881 if (need_step_over != NULL)
4882 debug_printf ("Not resuming all, need step over\n");
4883 else if (any_pending)
4884 debug_printf ("Not resuming, all-stop and found "
4885 "an LWP with pending status\n");
4886 else
4887 debug_printf ("Resuming, no pending status or step over needed\n");
4888 }
4889
4890 /* Even if we're leaving threads stopped, queue all signals we'd
4891 otherwise deliver. */
4892 for_each_thread ([&] (thread_info *thread)
4893 {
4894 resume_one_thread (thread, leave_all_stopped);
4895 });
4896
4897 if (need_step_over)
4898 start_step_over (get_thread_lwp (need_step_over));
4899
4900 if (debug_threads)
4901 {
4902 debug_printf ("linux_resume done\n");
4903 debug_exit ();
4904 }
4905
4906 /* We may have events that were pending that can/should be sent to
4907 the client now. Trigger a linux_wait call. */
4908 if (target_is_async_p ())
4909 async_file_mark ();
4910 }
4911
4912 void
4913 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4914 {
4915 struct lwp_info *lwp = get_thread_lwp (thread);
4916 int step;
4917
4918 if (lwp == except)
4919 return;
4920
4921 if (debug_threads)
4922 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4923
4924 if (!lwp->stopped)
4925 {
4926 if (debug_threads)
4927 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4928 return;
4929 }
4930
4931 if (thread->last_resume_kind == resume_stop
4932 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4933 {
4934 if (debug_threads)
4935 debug_printf (" client wants LWP %ld to remain stopped\n",
4936 lwpid_of (thread));
4937 return;
4938 }
4939
4940 if (lwp->status_pending_p)
4941 {
4942 if (debug_threads)
4943 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4944 lwpid_of (thread));
4945 return;
4946 }
4947
4948 gdb_assert (lwp->suspended >= 0);
4949
4950 if (lwp->suspended)
4951 {
4952 if (debug_threads)
4953 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4954 return;
4955 }
4956
4957 if (thread->last_resume_kind == resume_stop
4958 && lwp->pending_signals_to_report.empty ()
4959 && (lwp->collecting_fast_tracepoint
4960 == fast_tpoint_collect_result::not_collecting))
4961 {
4962 /* We haven't reported this LWP as stopped yet (otherwise, the
4963 last_status.kind check above would catch it, and we wouldn't
4964 reach here). This LWP may have been momentarily paused by a
4965 stop_all_lwps call while handling, for example, another LWP's
4966 step-over. In that case, the pending expected SIGSTOP signal
4967 that was queued at vCont;t handling time will have already
4968 been consumed by wait_for_sigstop, and so we need to requeue
4969 another one here. Note that if the LWP already has a SIGSTOP
4970 pending, this is a no-op. */
4971
4972 if (debug_threads)
4973 debug_printf ("Client wants LWP %ld to stop. "
4974 "Making sure it has a SIGSTOP pending\n",
4975 lwpid_of (thread));
4976
4977 send_sigstop (lwp);
4978 }
4979
4980 if (thread->last_resume_kind == resume_step)
4981 {
4982 if (debug_threads)
4983 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4984 lwpid_of (thread));
4985
4986 /* If resume_step is requested by GDB, install single-step
4987 breakpoints when the thread is about to be actually resumed if
4988 the single-step breakpoints weren't removed. */
4989 if (supports_software_single_step ()
4990 && !has_single_step_breakpoints (thread))
4991 install_software_single_step_breakpoints (lwp);
4992
4993 step = maybe_hw_step (thread);
4994 }
4995 else if (lwp->bp_reinsert != 0)
4996 {
4997 if (debug_threads)
4998 debug_printf (" stepping LWP %ld, reinsert set\n",
4999 lwpid_of (thread));
5000
5001 step = maybe_hw_step (thread);
5002 }
5003 else
5004 step = 0;
5005
5006 resume_one_lwp (lwp, step, 0, NULL);
5007 }
5008
5009 void
5010 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5011 lwp_info *except)
5012 {
5013 struct lwp_info *lwp = get_thread_lwp (thread);
5014
5015 if (lwp == except)
5016 return;
5017
5018 lwp_suspended_decr (lwp);
5019
5020 proceed_one_lwp (thread, except);
5021 }
5022
5023 void
5024 linux_process_target::proceed_all_lwps ()
5025 {
5026 struct thread_info *need_step_over;
5027
5028 /* If there is a thread which would otherwise be resumed, which is
5029 stopped at a breakpoint that needs stepping over, then don't
5030 resume any threads - have it step over the breakpoint with all
5031 other threads stopped, then resume all threads again. */
5032
5033 if (low_supports_breakpoints ())
5034 {
5035 need_step_over = find_thread ([this] (thread_info *thread)
5036 {
5037 return thread_needs_step_over (thread);
5038 });
5039
5040 if (need_step_over != NULL)
5041 {
5042 if (debug_threads)
5043 debug_printf ("proceed_all_lwps: found "
5044 "thread %ld needing a step-over\n",
5045 lwpid_of (need_step_over));
5046
5047 start_step_over (get_thread_lwp (need_step_over));
5048 return;
5049 }
5050 }
5051
5052 if (debug_threads)
5053 debug_printf ("Proceeding, no step-over needed\n");
5054
5055 for_each_thread ([this] (thread_info *thread)
5056 {
5057 proceed_one_lwp (thread, NULL);
5058 });
5059 }
5060
5061 void
5062 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5063 {
5064 if (debug_threads)
5065 {
5066 debug_enter ();
5067 if (except)
5068 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5069 lwpid_of (get_lwp_thread (except)));
5070 else
5071 debug_printf ("unstopping all lwps\n");
5072 }
5073
5074 if (unsuspend)
5075 for_each_thread ([&] (thread_info *thread)
5076 {
5077 unsuspend_and_proceed_one_lwp (thread, except);
5078 });
5079 else
5080 for_each_thread ([&] (thread_info *thread)
5081 {
5082 proceed_one_lwp (thread, except);
5083 });
5084
5085 if (debug_threads)
5086 {
5087 debug_printf ("unstop_all_lwps done\n");
5088 debug_exit ();
5089 }
5090 }
5091
5092
5093 #ifdef HAVE_LINUX_REGSETS
5094
5095 #define use_linux_regsets 1
5096
5097 /* Returns true if REGSET has been disabled. */
5098
5099 static int
5100 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5101 {
5102 return (info->disabled_regsets != NULL
5103 && info->disabled_regsets[regset - info->regsets]);
5104 }
5105
5106 /* Disable REGSET. */
5107
5108 static void
5109 disable_regset (struct regsets_info *info, struct regset_info *regset)
5110 {
5111 int dr_offset;
5112
5113 dr_offset = regset - info->regsets;
5114 if (info->disabled_regsets == NULL)
5115 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5116 info->disabled_regsets[dr_offset] = 1;
5117 }
5118
5119 static int
5120 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5121 struct regcache *regcache)
5122 {
5123 struct regset_info *regset;
5124 int saw_general_regs = 0;
5125 int pid;
5126 struct iovec iov;
5127
5128 pid = lwpid_of (current_thread);
5129 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5130 {
5131 void *buf, *data;
5132 int nt_type, res;
5133
5134 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5135 continue;
5136
5137 buf = xmalloc (regset->size);
5138
5139 nt_type = regset->nt_type;
5140 if (nt_type)
5141 {
5142 iov.iov_base = buf;
5143 iov.iov_len = regset->size;
5144 data = (void *) &iov;
5145 }
5146 else
5147 data = buf;
5148
5149 #ifndef __sparc__
5150 res = ptrace (regset->get_request, pid,
5151 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5152 #else
5153 res = ptrace (regset->get_request, pid, data, nt_type);
5154 #endif
5155 if (res < 0)
5156 {
5157 if (errno == EIO
5158 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5159 {
5160 /* If we get EIO on a regset, or an EINVAL and the regset is
5161 optional, do not try it again for this process mode. */
5162 disable_regset (regsets_info, regset);
5163 }
5164 else if (errno == ENODATA)
5165 {
5166 /* ENODATA may be returned if the regset is currently
5167 not "active". This can happen in normal operation,
5168 so suppress the warning in this case. */
5169 }
5170 else if (errno == ESRCH)
5171 {
5172 /* At this point, ESRCH should mean the process is
5173 already gone, in which case we simply ignore attempts
5174 to read its registers. */
5175 }
5176 else
5177 {
5178 char s[256];
5179 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5180 pid);
5181 perror (s);
5182 }
5183 }
5184 else
5185 {
5186 if (regset->type == GENERAL_REGS)
5187 saw_general_regs = 1;
5188 regset->store_function (regcache, buf);
5189 }
5190 free (buf);
5191 }
5192 if (saw_general_regs)
5193 return 0;
5194 else
5195 return 1;
5196 }
5197
5198 static int
5199 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5200 struct regcache *regcache)
5201 {
5202 struct regset_info *regset;
5203 int saw_general_regs = 0;
5204 int pid;
5205 struct iovec iov;
5206
5207 pid = lwpid_of (current_thread);
5208 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5209 {
5210 void *buf, *data;
5211 int nt_type, res;
5212
5213 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5214 || regset->fill_function == NULL)
5215 continue;
5216
5217 buf = xmalloc (regset->size);
5218
5219 /* First fill the buffer with the current register set contents,
5220 in case there are any items in the kernel's regset that are
5221 not in gdbserver's regcache. */
5222
5223 nt_type = regset->nt_type;
5224 if (nt_type)
5225 {
5226 iov.iov_base = buf;
5227 iov.iov_len = regset->size;
5228 data = (void *) &iov;
5229 }
5230 else
5231 data = buf;
5232
5233 #ifndef __sparc__
5234 res = ptrace (regset->get_request, pid,
5235 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5236 #else
5237 res = ptrace (regset->get_request, pid, data, nt_type);
5238 #endif
5239
5240 if (res == 0)
5241 {
5242 /* Then overlay our cached registers on that. */
5243 regset->fill_function (regcache, buf);
5244
5245 /* Only now do we write the register set. */
5246 #ifndef __sparc__
5247 res = ptrace (regset->set_request, pid,
5248 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5249 #else
5250 res = ptrace (regset->set_request, pid, data, nt_type);
5251 #endif
5252 }
5253
5254 if (res < 0)
5255 {
5256 if (errno == EIO
5257 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5258 {
5259 /* If we get EIO on a regset, or an EINVAL and the regset is
5260 optional, do not try it again for this process mode. */
5261 disable_regset (regsets_info, regset);
5262 }
5263 else if (errno == ESRCH)
5264 {
5265 /* At this point, ESRCH should mean the process is
5266 already gone, in which case we simply ignore attempts
5267 to change its registers. See also the related
5268 comment in resume_one_lwp. */
5269 free (buf);
5270 return 0;
5271 }
5272 else
5273 {
5274 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5275 }
5276 }
5277 else if (regset->type == GENERAL_REGS)
5278 saw_general_regs = 1;
5279 free (buf);
5280 }
5281 if (saw_general_regs)
5282 return 0;
5283 else
5284 return 1;
5285 }
5286
5287 #else /* !HAVE_LINUX_REGSETS */
5288
5289 #define use_linux_regsets 0
5290 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5291 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5292
5293 #endif
5294
5295 /* Return 1 if register REGNO is supported by one of the regset ptrace
5296 calls or 0 if it has to be transferred individually. */
5297
5298 static int
5299 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5300 {
5301 unsigned char mask = 1 << (regno % 8);
5302 size_t index = regno / 8;
5303
5304 return (use_linux_regsets
5305 && (regs_info->regset_bitmap == NULL
5306 || (regs_info->regset_bitmap[index] & mask) != 0));
5307 }
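
/* Worked example for the bitmap test above: for regno == 11 the byte
   index is 11 / 8 == 1 and the mask is 1 << (11 % 8) == 0x08, so
   register 11 is handled by regsets iff bit 3 of regset_bitmap[1] is
   set.  A NULL bitmap means every register is in some regset.  */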
5308
5309 #ifdef HAVE_LINUX_USRREGS
5310
5311 static int
5312 register_addr (const struct usrregs_info *usrregs, int regnum)
5313 {
5314 int addr;
5315
5316 if (regnum < 0 || regnum >= usrregs->num_regs)
5317 error ("Invalid register number %d.", regnum);
5318
5319 addr = usrregs->regmap[regnum];
5320
5321 return addr;
5322 }
5323
5324
5325 void
5326 linux_process_target::fetch_register (const usrregs_info *usrregs,
5327 regcache *regcache, int regno)
5328 {
5329 CORE_ADDR regaddr;
5330 int i, size;
5331 char *buf;
5332 int pid;
5333
5334 if (regno >= usrregs->num_regs)
5335 return;
5336 if (low_cannot_fetch_register (regno))
5337 return;
5338
5339 regaddr = register_addr (usrregs, regno);
5340 if (regaddr == -1)
5341 return;
5342
5343 size = ((register_size (regcache->tdesc, regno)
5344 + sizeof (PTRACE_XFER_TYPE) - 1)
5345 & -sizeof (PTRACE_XFER_TYPE));
5346 buf = (char *) alloca (size);
5347
5348 pid = lwpid_of (current_thread);
5349 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5350 {
5351 errno = 0;
5352 *(PTRACE_XFER_TYPE *) (buf + i) =
5353 ptrace (PTRACE_PEEKUSER, pid,
5354 /* Coerce to a uintptr_t first to avoid potential gcc warning
5355 of coercing an 8 byte integer to a 4 byte pointer. */
5356 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5357 regaddr += sizeof (PTRACE_XFER_TYPE);
5358 if (errno != 0)
5359 {
5360 /* Mark register REGNO unavailable. */
5361 supply_register (regcache, regno, NULL);
5362 return;
5363 }
5364 }
5365
5366 low_supply_ptrace_register (regcache, regno, buf);
5367 }
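
/* For illustration: the size rounding above widens the transfer to
   whole ptrace words.  E.g., with sizeof (PTRACE_XFER_TYPE) == 8, a
   10-byte register yields (10 + 8 - 1) & -8 == 16 bytes, i.e., two
   PTRACE_PEEKUSER round trips.  */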
5368
5369 void
5370 linux_process_target::store_register (const usrregs_info *usrregs,
5371 regcache *regcache, int regno)
5372 {
5373 CORE_ADDR regaddr;
5374 int i, size;
5375 char *buf;
5376 int pid;
5377
5378 if (regno >= usrregs->num_regs)
5379 return;
5380 if (low_cannot_store_register (regno))
5381 return;
5382
5383 regaddr = register_addr (usrregs, regno);
5384 if (regaddr == -1)
5385 return;
5386
5387 size = ((register_size (regcache->tdesc, regno)
5388 + sizeof (PTRACE_XFER_TYPE) - 1)
5389 & -sizeof (PTRACE_XFER_TYPE));
5390 buf = (char *) alloca (size);
5391 memset (buf, 0, size);
5392
5393 low_collect_ptrace_register (regcache, regno, buf);
5394
5395 pid = lwpid_of (current_thread);
5396 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5397 {
5398 errno = 0;
5399 ptrace (PTRACE_POKEUSER, pid,
5400 /* Coerce to a uintptr_t first to avoid potential gcc warning
5401 about coercing an 8 byte integer to a 4 byte pointer. */
5402 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5403 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5404 if (errno != 0)
5405 {
5406 /* At this point, ESRCH should mean the process is
5407 already gone, in which case we simply ignore attempts
5408 to change its registers. See also the related
5409 comment in resume_one_lwp. */
5410 if (errno == ESRCH)
5411 return;
5412
5414 if (!low_cannot_store_register (regno))
5415 error ("writing register %d: %s", regno, safe_strerror (errno));
5416 }
5417 regaddr += sizeof (PTRACE_XFER_TYPE);
5418 }
5419 }
5420 #endif /* HAVE_LINUX_USRREGS */
5421
5422 void
5423 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5424 int regno, char *buf)
5425 {
5426 collect_register (regcache, regno, buf);
5427 }
5428
5429 void
5430 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5431 int regno, const char *buf)
5432 {
5433 supply_register (regcache, regno, buf);
5434 }
5435
5436 void
5437 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5438 regcache *regcache,
5439 int regno, int all)
5440 {
5441 #ifdef HAVE_LINUX_USRREGS
5442 struct usrregs_info *usr = regs_info->usrregs;
5443
5444 if (regno == -1)
5445 {
5446 for (regno = 0; regno < usr->num_regs; regno++)
5447 if (all || !linux_register_in_regsets (regs_info, regno))
5448 fetch_register (usr, regcache, regno);
5449 }
5450 else
5451 fetch_register (usr, regcache, regno);
5452 #endif
5453 }
5454
5455 void
5456 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5457 regcache *regcache,
5458 int regno, int all)
5459 {
5460 #ifdef HAVE_LINUX_USRREGS
5461 struct usrregs_info *usr = regs_info->usrregs;
5462
5463 if (regno == -1)
5464 {
5465 for (regno = 0; regno < usr->num_regs; regno++)
5466 if (all || !linux_register_in_regsets (regs_info, regno))
5467 store_register (usr, regcache, regno);
5468 }
5469 else
5470 store_register (usr, regcache, regno);
5471 #endif
5472 }
5473
5474 void
5475 linux_process_target::fetch_registers (regcache *regcache, int regno)
5476 {
5477 int use_regsets;
5478 int all = 0;
5479 const regs_info *regs_info = get_regs_info ();
5480
5481 if (regno == -1)
5482 {
5483 if (regs_info->usrregs != NULL)
5484 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5485 low_fetch_register (regcache, regno);
5486
5487 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5488 if (regs_info->usrregs != NULL)
5489 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5490 }
5491 else
5492 {
5493 if (low_fetch_register (regcache, regno))
5494 return;
5495
5496 use_regsets = linux_register_in_regsets (regs_info, regno);
5497 if (use_regsets)
5498 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5499 regcache);
5500 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5501 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5502 }
5503 }
5504
5505 void
5506 linux_process_target::store_registers (regcache *regcache, int regno)
5507 {
5508 int use_regsets;
5509 int all = 0;
5510 const regs_info *regs_info = get_regs_info ();
5511
5512 if (regno == -1)
5513 {
5514 all = regsets_store_inferior_registers (regs_info->regsets_info,
5515 regcache);
5516 if (regs_info->usrregs != NULL)
5517 usr_store_inferior_registers (regs_info, regcache, regno, all);
5518 }
5519 else
5520 {
5521 use_regsets = linux_register_in_regsets (regs_info, regno);
5522 if (use_regsets)
5523 all = regsets_store_inferior_registers (regs_info->regsets_info,
5524 regcache);
5525 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5526 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5527 }
5528 }
5529
5530 bool
5531 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5532 {
5533 return false;
5534 }
5535
5536 /* A wrapper for the read_memory target op. */
5537
5538 static int
5539 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5540 {
5541 return the_target->read_memory (memaddr, myaddr, len);
5542 }
5543
5544 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5545 to debugger memory starting at MYADDR. */
5546
5547 int
5548 linux_process_target::read_memory (CORE_ADDR memaddr,
5549 unsigned char *myaddr, int len)
5550 {
5551 int pid = lwpid_of (current_thread);
5552 PTRACE_XFER_TYPE *buffer;
5553 CORE_ADDR addr;
5554 int count;
5555 char filename[64];
5556 int i;
5557 int ret;
5558 int fd;
5559
5560 /* Try using /proc. Don't bother for one word. */
5561 if (len >= 3 * sizeof (long))
5562 {
5563 int bytes;
5564
5565 /* We could keep this file open and cache it - possibly one per
5566 thread. That requires some juggling, but is even faster. */
5567 sprintf (filename, "/proc/%d/mem", pid);
5568 fd = open (filename, O_RDONLY | O_LARGEFILE);
5569 if (fd == -1)
5570 goto no_proc;
5571
5572 /* If pread64 is available, use it. It's faster if the kernel
5573 supports it (only one syscall), and it's 64-bit safe even on
5574 32-bit platforms (for instance, SPARC debugging a SPARC64
5575 application). */
5576 #ifdef HAVE_PREAD64
5577 bytes = pread64 (fd, myaddr, len, memaddr);
5578 #else
5579 bytes = -1;
5580 if (lseek (fd, memaddr, SEEK_SET) != -1)
5581 bytes = read (fd, myaddr, len);
5582 #endif
5583
5584 close (fd);
5585 if (bytes == len)
5586 return 0;
5587
5588 /* Some data was read; we'll try to get the rest with ptrace. */
5589 if (bytes > 0)
5590 {
5591 memaddr += bytes;
5592 myaddr += bytes;
5593 len -= bytes;
5594 }
5595 }
5596
5597 no_proc:
5598 /* Round starting address down to longword boundary. */
5599 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5600 /* Round ending address up; get number of longwords that makes. */
5601 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5602 / sizeof (PTRACE_XFER_TYPE));
5603 /* Allocate buffer of that many longwords. */
5604 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5605
5606 /* Read all the longwords. */
5607 errno = 0;
5608 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5609 {
5610 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5611 about coercing an 8 byte integer to a 4 byte pointer. */
5612 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5613 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5614 (PTRACE_TYPE_ARG4) 0);
5615 if (errno)
5616 break;
5617 }
5618 ret = errno;
5619
5620 /* Copy appropriate bytes out of the buffer. */
5621 if (i > 0)
5622 {
5623 i *= sizeof (PTRACE_XFER_TYPE);
5624 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5625 memcpy (myaddr,
5626 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5627 i < len ? i : len);
5628 }
5629
5630 return ret;
5631 }
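
/* Stand-alone sketch of the fast path above (illustrative; error
   handling elided, pread64 assumed available, as the #ifdef above
   already checks):

     #include <fcntl.h>
     #include <stdio.h>
     #include <unistd.h>

     static ssize_t
     read_inferior_mem (int pid, unsigned long addr, void *buf,
                        size_t len)
     {
       char path[64];
       snprintf (path, sizeof path, "/proc/%d/mem", pid);
       int fd = open (path, O_RDONLY);
       if (fd == -1)
         return -1;
       ssize_t n = pread64 (fd, buf, len, addr);
       close (fd);
       return n;
     }
*/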
5632
5633 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5634 memory at MEMADDR. On failure (cannot write to the inferior)
5635 returns the value of errno. Always succeeds if LEN is zero. */
5636
5637 int
5638 linux_process_target::write_memory (CORE_ADDR memaddr,
5639 const unsigned char *myaddr, int len)
5640 {
5641 int i;
5642 /* Round starting address down to longword boundary. */
5643 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5644 /* Round ending address up; get number of longwords that makes. */
5645 int count
5646 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5647 / sizeof (PTRACE_XFER_TYPE);
5648
5649 /* Allocate buffer of that many longwords. */
5650 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5651
5652 int pid = lwpid_of (current_thread);
5653
5654 if (len == 0)
5655 {
5656 /* Zero length write always succeeds. */
5657 return 0;
5658 }
5659
5660 if (debug_threads)
5661 {
5662 /* Dump up to four bytes. */
5663 char str[4 * 2 + 1];
5664 char *p = str;
5665 int dump = len < 4 ? len : 4;
5666
5667 for (i = 0; i < dump; i++)
5668 {
5669 sprintf (p, "%02x", myaddr[i]);
5670 p += 2;
5671 }
5672 *p = '\0';
5673
5674 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5675 str, (long) memaddr, pid);
5676 }
5677
5678 /* Fill start and end extra bytes of buffer with existing memory data. */
5679
5680 errno = 0;
5681 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5682 about coercing an 8 byte integer to a 4 byte pointer. */
5683 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5684 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5685 (PTRACE_TYPE_ARG4) 0);
5686 if (errno)
5687 return errno;
5688
5689 if (count > 1)
5690 {
5691 errno = 0;
5692 buffer[count - 1]
5693 = ptrace (PTRACE_PEEKTEXT, pid,
5694 /* Coerce to a uintptr_t first to avoid potential gcc warning
5695 about coercing an 8 byte integer to a 4 byte pointer. */
5696 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5697 * sizeof (PTRACE_XFER_TYPE)),
5698 (PTRACE_TYPE_ARG4) 0);
5699 if (errno)
5700 return errno;
5701 }
5702
5703 /* Copy data to be written over corresponding part of buffer. */
5704
5705 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5706 myaddr, len);
5707
5708 /* Write the entire buffer. */
5709
5710 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5711 {
5712 errno = 0;
5713 ptrace (PTRACE_POKETEXT, pid,
5714 /* Coerce to a uintptr_t first to avoid potential gcc warning
5715 about coercing an 8 byte integer to a 4 byte pointer. */
5716 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5717 (PTRACE_TYPE_ARG4) buffer[i]);
5718 if (errno)
5719 return errno;
5720 }
5721
5722 return 0;
5723 }
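
/* A minimal standalone sketch of the read-modify-write pattern used
above for writes that do not cover whole words; all names are
hypothetical and the block is not built. Unlike the code above, which
only peeks the first and last words, this version peeks every word for
simplicity. */
#if 0
#include <errno.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
example_poke_bytes (pid_t pid, uintptr_t memaddr,
                    const unsigned char *myaddr, size_t len)
{
  /* Round the start down and the length up to whole words; ptrace
     only transfers one word at a time.  */
  uintptr_t addr = memaddr & -(uintptr_t) sizeof (long);
  size_t count = (memaddr + len - addr + sizeof (long) - 1) / sizeof (long);

  for (size_t i = 0; i < count; i++, addr += sizeof (long))
    {
      /* Fetch the existing word so bytes outside [memaddr,
         memaddr + len) are preserved.  */
      errno = 0;
      long word = ptrace (PTRACE_PEEKTEXT, pid, (void *) addr, NULL);
      if (errno != 0)
        return errno;

      /* Overlay the caller's bytes onto the word.  */
      unsigned char *dst = (unsigned char *) &word;
      for (size_t j = 0; j < sizeof (long); j++)
        {
          uintptr_t byte_addr = addr + j;
          if (byte_addr >= memaddr && byte_addr < memaddr + len)
            dst[j] = myaddr[byte_addr - memaddr];
        }

      errno = 0;
      ptrace (PTRACE_POKETEXT, pid, (void *) addr, (void *) word);
      if (errno != 0)
        return errno;
    }

  return 0;
}
#endif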
5724
5725 void
5726 linux_process_target::look_up_symbols ()
5727 {
5728 #ifdef USE_THREAD_DB
5729 struct process_info *proc = current_process ();
5730
5731 if (proc->priv->thread_db != NULL)
5732 return;
5733
5734 thread_db_init ();
5735 #endif
5736 }
5737
5738 void
5739 linux_process_target::request_interrupt ()
5740 {
5741 /* Send a SIGINT to the process group. This acts just like the user
5742 typed a ^C on the controlling terminal. */
5743 ::kill (-signal_pid, SIGINT);
5744 }
5745
5746 bool
5747 linux_process_target::supports_read_auxv ()
5748 {
5749 return true;
5750 }
5751
5752 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5753 to debugger memory starting at MYADDR. */
5754
5755 int
5756 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5757 unsigned int len)
5758 {
5759 char filename[PATH_MAX];
5760 int fd, n;
5761 int pid = lwpid_of (current_thread);
5762
5763 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5764
5765 fd = open (filename, O_RDONLY);
5766 if (fd < 0)
5767 return -1;
5768
5769 if (offset != (CORE_ADDR) 0
5770 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5771 n = -1;
5772 else
5773 n = read (fd, myaddr, len);
5774
5775 close (fd);
5776
5777 return n;
5778 }
5779
5780 int
5781 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5782 int size, raw_breakpoint *bp)
5783 {
5784 if (type == raw_bkpt_type_sw)
5785 return insert_memory_breakpoint (bp);
5786 else
5787 return low_insert_point (type, addr, size, bp);
5788 }
5789
5790 int
5791 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5792 int size, raw_breakpoint *bp)
5793 {
5794 /* Unsupported (see target.h). */
5795 return 1;
5796 }
5797
5798 int
5799 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5800 int size, raw_breakpoint *bp)
5801 {
5802 if (type == raw_bkpt_type_sw)
5803 return remove_memory_breakpoint (bp);
5804 else
5805 return low_remove_point (type, addr, size, bp);
5806 }
5807
5808 int
5809 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5810 int size, raw_breakpoint *bp)
5811 {
5812 /* Unsupported (see target.h). */
5813 return 1;
5814 }
5815
5816 /* Implement the stopped_by_sw_breakpoint target_ops
5817 method. */
5818
5819 bool
5820 linux_process_target::stopped_by_sw_breakpoint ()
5821 {
5822 struct lwp_info *lwp = get_thread_lwp (current_thread);
5823
5824 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5825 }
5826
5827 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5828 method. */
5829
5830 bool
5831 linux_process_target::supports_stopped_by_sw_breakpoint ()
5832 {
5833 return USE_SIGTRAP_SIGINFO;
5834 }
5835
5836 /* Implement the stopped_by_hw_breakpoint target_ops
5837 method. */
5838
5839 bool
5840 linux_process_target::stopped_by_hw_breakpoint ()
5841 {
5842 struct lwp_info *lwp = get_thread_lwp (current_thread);
5843
5844 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5845 }
5846
5847 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5848 method. */
5849
5850 bool
5851 linux_process_target::supports_stopped_by_hw_breakpoint ()
5852 {
5853 return USE_SIGTRAP_SIGINFO;
5854 }
5855
5856 /* Implement the supports_hardware_single_step target_ops method. */
5857
5858 bool
5859 linux_process_target::supports_hardware_single_step ()
5860 {
5861 return true;
5862 }
5863
5864 bool
5865 linux_process_target::stopped_by_watchpoint ()
5866 {
5867 struct lwp_info *lwp = get_thread_lwp (current_thread);
5868
5869 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5870 }
5871
5872 CORE_ADDR
5873 linux_process_target::stopped_data_address ()
5874 {
5875 struct lwp_info *lwp = get_thread_lwp (current_thread);
5876
5877 return lwp->stopped_data_address;
5878 }
5879
5880 /* This is only used for targets that define PT_TEXT_ADDR,
5881 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5882 target presumably has other ways of acquiring this information,
5883 such as loadmaps. */
5884
5885 bool
5886 linux_process_target::supports_read_offsets ()
5887 {
5888 #ifdef SUPPORTS_READ_OFFSETS
5889 return true;
5890 #else
5891 return false;
5892 #endif
5893 }
5894
5895 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5896 to tell gdb about. */
5897
5898 int
5899 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5900 {
5901 #ifdef SUPPORTS_READ_OFFSETS
5902 unsigned long text, text_end, data;
5903 int pid = lwpid_of (current_thread);
5904
5905 errno = 0;
5906
5907 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5908 (PTRACE_TYPE_ARG4) 0);
5909 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5910 (PTRACE_TYPE_ARG4) 0);
5911 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5912 (PTRACE_TYPE_ARG4) 0);
5913
5914 if (errno == 0)
5915 {
5916 /* Both text and data offsets produced at compile-time (and so
5917 used by gdb) are relative to the beginning of the program,
5918 with the data segment immediately following the text segment.
5919 However, the actual runtime layout in memory may put the data
5920 somewhere else, so when we send gdb a data base-address, we
5921 use the real data base address and subtract the compile-time
5922 data base-address from it (which is just the length of the
5923 text segment). BSS immediately follows data in both
5924 cases. */
5925 *text_p = text;
5926 *data_p = data - (text_end - text);
5927
5928 return 1;
5929 }
5930 return 0;
5931 #else
5932 gdb_assert_not_reached ("target op read_offsets not supported");
5933 #endif
5934 }
5935
5936 bool
5937 linux_process_target::supports_get_tls_address ()
5938 {
5939 #ifdef USE_THREAD_DB
5940 return true;
5941 #else
5942 return false;
5943 #endif
5944 }
5945
5946 int
5947 linux_process_target::get_tls_address (thread_info *thread,
5948 CORE_ADDR offset,
5949 CORE_ADDR load_module,
5950 CORE_ADDR *address)
5951 {
5952 #ifdef USE_THREAD_DB
5953 return thread_db_get_tls_address (thread, offset, load_module, address);
5954 #else
5955 return -1;
5956 #endif
5957 }
5958
5959 bool
5960 linux_process_target::supports_qxfer_osdata ()
5961 {
5962 return true;
5963 }
5964
5965 int
5966 linux_process_target::qxfer_osdata (const char *annex,
5967 unsigned char *readbuf,
5968 unsigned const char *writebuf,
5969 CORE_ADDR offset, int len)
5970 {
5971 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5972 }
5973
5974 void
5975 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5976 gdb_byte *inf_siginfo, int direction)
5977 {
5978 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5979
5980 /* If there was no callback, or the callback didn't do anything,
5981 then just do a straight memcpy. */
5982 if (!done)
5983 {
5984 if (direction == 1)
5985 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5986 else
5987 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5988 }
5989 }
5990
5991 bool
5992 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5993 int direction)
5994 {
5995 return false;
5996 }
5997
5998 bool
5999 linux_process_target::supports_qxfer_siginfo ()
6000 {
6001 return true;
6002 }
6003
6004 int
6005 linux_process_target::qxfer_siginfo (const char *annex,
6006 unsigned char *readbuf,
6007 unsigned const char *writebuf,
6008 CORE_ADDR offset, int len)
6009 {
6010 int pid;
6011 siginfo_t siginfo;
6012 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6013
6014 if (current_thread == NULL)
6015 return -1;
6016
6017 pid = lwpid_of (current_thread);
6018
6019 if (debug_threads)
6020 debug_printf ("%s siginfo for lwp %d.\n",
6021 readbuf != NULL ? "Reading" : "Writing",
6022 pid);
6023
6024 if (offset >= sizeof (siginfo))
6025 return -1;
6026
6027 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6028 return -1;
6029
6030 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6031 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6032 inferior with a 64-bit GDBSERVER should look the same as debugging it
6033 with a 32-bit GDBSERVER, we need to convert it. */
6034 siginfo_fixup (&siginfo, inf_siginfo, 0);
6035
6036 if (offset + len > sizeof (siginfo))
6037 len = sizeof (siginfo) - offset;
6038
6039 if (readbuf != NULL)
6040 memcpy (readbuf, inf_siginfo + offset, len);
6041 else
6042 {
6043 memcpy (inf_siginfo + offset, writebuf, len);
6044
6045 /* Convert back to ptrace layout before flushing it out. */
6046 siginfo_fixup (&siginfo, inf_siginfo, 1);
6047
6048 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6049 return -1;
6050 }
6051
6052 return len;
6053 }
6054
6055 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6056 it lets us notice when children change state; and it acts as the
6057 handler for the sigsuspend in my_waitpid. */
6058
6059 static void
6060 sigchld_handler (int signo)
6061 {
6062 int old_errno = errno;
6063
6064 if (debug_threads)
6065 {
6066 do
6067 {
6068 /* Use the async signal safe debug function. */
6069 if (debug_write ("sigchld_handler\n",
6070 sizeof ("sigchld_handler\n") - 1) < 0)
6071 break; /* just ignore */
6072 } while (0);
6073 }
6074
6075 if (target_is_async_p ())
6076 async_file_mark (); /* trigger a linux_wait */
6077
6078 errno = old_errno;
6079 }
6080
6081 bool
6082 linux_process_target::supports_non_stop ()
6083 {
6084 return true;
6085 }
6086
6087 bool
6088 linux_process_target::async (bool enable)
6089 {
6090 bool previous = target_is_async_p ();
6091
6092 if (debug_threads)
6093 debug_printf ("linux_async (%d), previous=%d\n",
6094 enable, previous);
6095
6096 if (previous != enable)
6097 {
6098 sigset_t mask;
6099 sigemptyset (&mask);
6100 sigaddset (&mask, SIGCHLD);
6101
6102 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6103
6104 if (enable)
6105 {
6106 if (pipe (linux_event_pipe) == -1)
6107 {
6108 linux_event_pipe[0] = -1;
6109 linux_event_pipe[1] = -1;
6110 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6111
6112 warning ("creating event pipe failed.");
6113 return previous;
6114 }
6115
6116 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6117 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6118
6119 /* Register the event loop handler. */
6120 add_file_handler (linux_event_pipe[0],
6121 handle_target_event, NULL,
6122 "linux-low");
6123
6124 /* Always trigger a linux_wait. */
6125 async_file_mark ();
6126 }
6127 else
6128 {
6129 delete_file_handler (linux_event_pipe[0]);
6130
6131 close (linux_event_pipe[0]);
6132 close (linux_event_pipe[1]);
6133 linux_event_pipe[0] = -1;
6134 linux_event_pipe[1] = -1;
6135 }
6136
6137 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6138 }
6139
6140 return previous;
6141 }
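
/* A minimal standalone sketch of the self-pipe pattern set up above:
the SIGCHLD handler writes to a non-blocking pipe and the event loop
watches the read end, turning an asynchronous signal into an ordinary
file-descriptor event. Names are hypothetical; the block is not
built. */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int example_pipe[2];

static void
example_sigchld (int signo)
{
  int old_errno = errno;
  /* write is async-signal-safe.  A full pipe just means an event is
     already pending, so a failed write can be ignored.  */
  (void) write (example_pipe[1], "+", 1);
  errno = old_errno;
}

static void
example_setup (void)
{
  pipe (example_pipe);
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, example_sigchld);
  /* An event loop would now poll example_pipe[0] for readability and
     drain it before calling waitpid.  */
}
#endif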
6142
6143 int
6144 linux_process_target::start_non_stop (bool nonstop)
6145 {
6146 /* Register or unregister from event-loop accordingly. */
6147 target_async (nonstop);
6148
6149 if (target_is_async_p () != (nonstop != false))
6150 return -1;
6151
6152 return 0;
6153 }
6154
6155 bool
6156 linux_process_target::supports_multi_process ()
6157 {
6158 return true;
6159 }
6160
6161 /* Check if fork events are supported. */
6162
6163 bool
6164 linux_process_target::supports_fork_events ()
6165 {
6166 return linux_supports_tracefork ();
6167 }
6168
6169 /* Check if vfork events are supported. */
6170
6171 bool
6172 linux_process_target::supports_vfork_events ()
6173 {
6174 return linux_supports_tracefork ();
6175 }
6176
6177 /* Check if exec events are supported. */
6178
6179 bool
6180 linux_process_target::supports_exec_events ()
6181 {
6182 return linux_supports_traceexec ();
6183 }
6184
6185 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6186 ptrace flags for all inferiors. This is in case the new GDB connection
6187 doesn't support the same set of events that the previous one did. */
6188
6189 void
6190 linux_process_target::handle_new_gdb_connection ()
6191 {
6192 /* Request that all the lwps reset their ptrace options. */
6193 for_each_thread ([] (thread_info *thread)
6194 {
6195 struct lwp_info *lwp = get_thread_lwp (thread);
6196
6197 if (!lwp->stopped)
6198 {
6199 /* Stop the lwp so we can modify its ptrace options. */
6200 lwp->must_set_ptrace_flags = 1;
6201 linux_stop_lwp (lwp);
6202 }
6203 else
6204 {
6205 /* Already stopped; go ahead and set the ptrace options. */
6206 struct process_info *proc = find_process_pid (pid_of (thread));
6207 int options = linux_low_ptrace_options (proc->attached);
6208
6209 linux_enable_event_reporting (lwpid_of (thread), options);
6210 lwp->must_set_ptrace_flags = 0;
6211 }
6212 });
6213 }
6214
6215 int
6216 linux_process_target::handle_monitor_command (char *mon)
6217 {
6218 #ifdef USE_THREAD_DB
6219 return thread_db_handle_monitor_command (mon);
6220 #else
6221 return 0;
6222 #endif
6223 }
6224
6225 int
6226 linux_process_target::core_of_thread (ptid_t ptid)
6227 {
6228 return linux_common_core_of_thread (ptid);
6229 }
6230
6231 bool
6232 linux_process_target::supports_disable_randomization ()
6233 {
6234 #ifdef HAVE_PERSONALITY
6235 return true;
6236 #else
6237 return false;
6238 #endif
6239 }
6240
6241 bool
6242 linux_process_target::supports_agent ()
6243 {
6244 return true;
6245 }
6246
6247 bool
6248 linux_process_target::supports_range_stepping ()
6249 {
6250 if (supports_software_single_step ())
6251 return true;
6252
6253 return low_supports_range_stepping ();
6254 }
6255
6256 bool
6257 linux_process_target::low_supports_range_stepping ()
6258 {
6259 return false;
6260 }
6261
6262 bool
6263 linux_process_target::supports_pid_to_exec_file ()
6264 {
6265 return true;
6266 }
6267
6268 const char *
6269 linux_process_target::pid_to_exec_file (int pid)
6270 {
6271 return linux_proc_pid_to_exec_file (pid);
6272 }
6273
6274 bool
6275 linux_process_target::supports_multifs ()
6276 {
6277 return true;
6278 }
6279
6280 int
6281 linux_process_target::multifs_open (int pid, const char *filename,
6282 int flags, mode_t mode)
6283 {
6284 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6285 }
6286
6287 int
6288 linux_process_target::multifs_unlink (int pid, const char *filename)
6289 {
6290 return linux_mntns_unlink (pid, filename);
6291 }
6292
6293 ssize_t
6294 linux_process_target::multifs_readlink (int pid, const char *filename,
6295 char *buf, size_t bufsiz)
6296 {
6297 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6298 }
6299
6300 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6301 struct target_loadseg
6302 {
6303 /* Core address to which the segment is mapped. */
6304 Elf32_Addr addr;
6305 /* VMA recorded in the program header. */
6306 Elf32_Addr p_vaddr;
6307 /* Size of this segment in memory. */
6308 Elf32_Word p_memsz;
6309 };
6310
6311 # if defined PT_GETDSBT
6312 struct target_loadmap
6313 {
6314 /* Protocol version number, must be zero. */
6315 Elf32_Word version;
6316 /* Pointer to the DSBT table, its size, and the DSBT index. */
6317 unsigned *dsbt_table;
6318 unsigned dsbt_size, dsbt_index;
6319 /* Number of segments in this map. */
6320 Elf32_Word nsegs;
6321 /* The actual memory map. */
6322 struct target_loadseg segs[/*nsegs*/];
6323 };
6324 # define LINUX_LOADMAP PT_GETDSBT
6325 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6326 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6327 # else
6328 struct target_loadmap
6329 {
6330 /* Protocol version number, must be zero. */
6331 Elf32_Half version;
6332 /* Number of segments in this map. */
6333 Elf32_Half nsegs;
6334 /* The actual memory map. */
6335 struct target_loadseg segs[/*nsegs*/];
6336 };
6337 # define LINUX_LOADMAP PTRACE_GETFDPIC
6338 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6339 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6340 # endif
6341
6342 bool
6343 linux_process_target::supports_read_loadmap ()
6344 {
6345 return true;
6346 }
6347
6348 int
6349 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6350 unsigned char *myaddr, unsigned int len)
6351 {
6352 int pid = lwpid_of (current_thread);
6353 int addr = -1;
6354 struct target_loadmap *data = NULL;
6355 unsigned int actual_length, copy_length;
6356
6357 if (strcmp (annex, "exec") == 0)
6358 addr = (int) LINUX_LOADMAP_EXEC;
6359 else if (strcmp (annex, "interp") == 0)
6360 addr = (int) LINUX_LOADMAP_INTERP;
6361 else
6362 return -1;
6363
6364 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6365 return -1;
6366
6367 if (data == NULL)
6368 return -1;
6369
6370 actual_length = sizeof (struct target_loadmap)
6371 + sizeof (struct target_loadseg) * data->nsegs;
6372
6373 if (offset < 0 || offset > actual_length)
6374 return -1;
6375
6376 copy_length = actual_length - offset < len ? actual_length - offset : len;
6377 memcpy (myaddr, (char *) data + offset, copy_length);
6378 return copy_length;
6379 }
6380 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6381
6382 bool
6383 linux_process_target::supports_catch_syscall ()
6384 {
6385 return (low_supports_catch_syscall ()
6386 && linux_supports_tracesysgood ());
6387 }
6388
6389 bool
6390 linux_process_target::low_supports_catch_syscall ()
6391 {
6392 return false;
6393 }
6394
6395 CORE_ADDR
6396 linux_process_target::read_pc (regcache *regcache)
6397 {
6398 if (!low_supports_breakpoints ())
6399 return 0;
6400
6401 return low_get_pc (regcache);
6402 }
6403
6404 void
6405 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6406 {
6407 gdb_assert (low_supports_breakpoints ());
6408
6409 low_set_pc (regcache, pc);
6410 }
6411
6412 bool
6413 linux_process_target::supports_thread_stopped ()
6414 {
6415 return true;
6416 }
6417
6418 bool
6419 linux_process_target::thread_stopped (thread_info *thread)
6420 {
6421 return get_thread_lwp (thread)->stopped;
6422 }
6423
6424 /* This exposes stop-all-threads functionality to other modules. */
6425
6426 void
6427 linux_process_target::pause_all (bool freeze)
6428 {
6429 stop_all_lwps (freeze, NULL);
6430 }
6431
6432 /* This exposes unstop-all-threads functionality to other gdbserver
6433 modules. */
6434
6435 void
6436 linux_process_target::unpause_all (bool unfreeze)
6437 {
6438 unstop_all_lwps (unfreeze, NULL);
6439 }
6440
6441 int
6442 linux_process_target::prepare_to_access_memory ()
6443 {
6444 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6445 running LWP. */
6446 if (non_stop)
6447 target_pause_all (true);
6448 return 0;
6449 }
6450
6451 void
6452 linux_process_target::done_accessing_memory ()
6453 {
6454 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6455 running LWP. */
6456 if (non_stop)
6457 target_unpause_all (true);
6458 }
6459
6460 /* Extract &phdr and num_phdr from the inferior. Return 0 on success. */
6461
6462 static int
6463 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6464 CORE_ADDR *phdr_memaddr, int *num_phdr)
6465 {
6466 char filename[PATH_MAX];
6467 int fd;
6468 const int auxv_size = is_elf64
6469 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6470 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6471
6472 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6473
6474 fd = open (filename, O_RDONLY);
6475 if (fd < 0)
6476 return 1;
6477
6478 *phdr_memaddr = 0;
6479 *num_phdr = 0;
6480 while (read (fd, buf, auxv_size) == auxv_size
6481 && (*phdr_memaddr == 0 || *num_phdr == 0))
6482 {
6483 if (is_elf64)
6484 {
6485 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6486
6487 switch (aux->a_type)
6488 {
6489 case AT_PHDR:
6490 *phdr_memaddr = aux->a_un.a_val;
6491 break;
6492 case AT_PHNUM:
6493 *num_phdr = aux->a_un.a_val;
6494 break;
6495 }
6496 }
6497 else
6498 {
6499 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6500
6501 switch (aux->a_type)
6502 {
6503 case AT_PHDR:
6504 *phdr_memaddr = aux->a_un.a_val;
6505 break;
6506 case AT_PHNUM:
6507 *num_phdr = aux->a_un.a_val;
6508 break;
6509 }
6510 }
6511 }
6512
6513 close (fd);
6514
6515 if (*phdr_memaddr == 0 || *num_phdr == 0)
6516 {
6517 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6518 "phdr_memaddr = %ld, phdr_num = %d",
6519 (long) *phdr_memaddr, *num_phdr);
6520 return 2;
6521 }
6522
6523 return 0;
6524 }
6525
6526 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6527
6528 static CORE_ADDR
6529 get_dynamic (const int pid, const int is_elf64)
6530 {
6531 CORE_ADDR phdr_memaddr, relocation;
6532 int num_phdr, i;
6533 unsigned char *phdr_buf;
6534 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6535
6536 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6537 return 0;
6538
6539 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6540 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6541
6542 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6543 return 0;
6544
6545 /* Compute relocation: it is expected to be 0 for "regular" executables,
6546 non-zero for PIE ones. */
6547 relocation = -1;
6548 for (i = 0; relocation == -1 && i < num_phdr; i++)
6549 if (is_elf64)
6550 {
6551 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6552
6553 if (p->p_type == PT_PHDR)
6554 relocation = phdr_memaddr - p->p_vaddr;
6555 }
6556 else
6557 {
6558 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6559
6560 if (p->p_type == PT_PHDR)
6561 relocation = phdr_memaddr - p->p_vaddr;
6562 }
6563
6564 if (relocation == -1)
6565 {
6566 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6567 all real-world executables, including PIE executables, always have
6568 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6569 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
6570 provides DT_DEBUG anyway (fpc binaries are statically linked).
6571
6572 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6573
6574 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6575
6576 return 0;
6577 }
6578
6579 for (i = 0; i < num_phdr; i++)
6580 {
6581 if (is_elf64)
6582 {
6583 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6584
6585 if (p->p_type == PT_DYNAMIC)
6586 return p->p_vaddr + relocation;
6587 }
6588 else
6589 {
6590 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6591
6592 if (p->p_type == PT_DYNAMIC)
6593 return p->p_vaddr + relocation;
6594 }
6595 }
6596
6597 return 0;
6598 }
6599
6600 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6601 can be 0 if the inferior does not yet have the library list initialized.
6602 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6603 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6604
6605 static CORE_ADDR
6606 get_r_debug (const int pid, const int is_elf64)
6607 {
6608 CORE_ADDR dynamic_memaddr;
6609 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6610 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6611 CORE_ADDR map = -1;
6612
6613 dynamic_memaddr = get_dynamic (pid, is_elf64);
6614 if (dynamic_memaddr == 0)
6615 return map;
6616
6617 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6618 {
6619 if (is_elf64)
6620 {
6621 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6622 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6623 union
6624 {
6625 Elf64_Xword map;
6626 unsigned char buf[sizeof (Elf64_Xword)];
6627 }
6628 rld_map;
6629 #endif
6630 #ifdef DT_MIPS_RLD_MAP
6631 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6632 {
6633 if (linux_read_memory (dyn->d_un.d_val,
6634 rld_map.buf, sizeof (rld_map.buf)) == 0)
6635 return rld_map.map;
6636 else
6637 break;
6638 }
6639 #endif /* DT_MIPS_RLD_MAP */
6640 #ifdef DT_MIPS_RLD_MAP_REL
6641 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6642 {
6643 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6644 rld_map.buf, sizeof (rld_map.buf)) == 0)
6645 return rld_map.map;
6646 else
6647 break;
6648 }
6649 #endif /* DT_MIPS_RLD_MAP_REL */
6650
6651 if (dyn->d_tag == DT_DEBUG && map == -1)
6652 map = dyn->d_un.d_val;
6653
6654 if (dyn->d_tag == DT_NULL)
6655 break;
6656 }
6657 else
6658 {
6659 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6660 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6661 union
6662 {
6663 Elf32_Word map;
6664 unsigned char buf[sizeof (Elf32_Word)];
6665 }
6666 rld_map;
6667 #endif
6668 #ifdef DT_MIPS_RLD_MAP
6669 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6670 {
6671 if (linux_read_memory (dyn->d_un.d_val,
6672 rld_map.buf, sizeof (rld_map.buf)) == 0)
6673 return rld_map.map;
6674 else
6675 break;
6676 }
6677 #endif /* DT_MIPS_RLD_MAP */
6678 #ifdef DT_MIPS_RLD_MAP_REL
6679 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6680 {
6681 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6682 rld_map.buf, sizeof (rld_map.buf)) == 0)
6683 return rld_map.map;
6684 else
6685 break;
6686 }
6687 #endif /* DT_MIPS_RLD_MAP_REL */
6688
6689 if (dyn->d_tag == DT_DEBUG && map == -1)
6690 map = dyn->d_un.d_val;
6691
6692 if (dyn->d_tag == DT_NULL)
6693 break;
6694 }
6695
6696 dynamic_memaddr += dyn_size;
6697 }
6698
6699 return map;
6700 }
6701
6702 /* Read one pointer from MEMADDR in the inferior. */
6703
6704 static int
6705 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6706 {
6707 int ret;
6708
6709 /* Go through a union so this works on either big or little endian
6710 hosts, when the inferior's pointer size is smaller than the size
6711 of CORE_ADDR. It is assumed the inferior's endianness is the
6712 same as the superior's. */
6713 union
6714 {
6715 CORE_ADDR core_addr;
6716 unsigned int ui;
6717 unsigned char uc;
6718 } addr;
6719
6720 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6721 if (ret == 0)
6722 {
6723 if (ptr_size == sizeof (CORE_ADDR))
6724 *ptr = addr.core_addr;
6725 else if (ptr_size == sizeof (unsigned int))
6726 *ptr = addr.ui;
6727 else
6728 gdb_assert_not_reached ("unhandled pointer size");
6729 }
6730 return ret;
6731 }
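
/* A short sketch of why the union above is endianness-safe: exactly
PTR_SIZE bytes are read into the start of the union, so the member
whose size equals PTR_SIZE sees a complete value in the host's native
byte order, on both big- and little-endian hosts (given the assumption
that inferior and host share endianness). Hypothetical standalone
form; not built. */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint64_t
example_widen_ptr (const unsigned char *bytes, int ptr_size)
{
  union
  {
    uint64_t u64;
    uint32_t u32;
    unsigned char uc;
  } addr;

  /* The smaller member overlays the first bytes of the larger one, so
     the PTR_SIZE bytes land exactly where the matching member reads
     them.  */
  memcpy (&addr.uc, bytes, ptr_size);

  if (ptr_size == 8)
    return addr.u64;
  else if (ptr_size == 4)
    return addr.u32;
  abort ();
}
#endif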
6732
6733 bool
6734 linux_process_target::supports_qxfer_libraries_svr4 ()
6735 {
6736 return true;
6737 }
6738
6739 struct link_map_offsets
6740 {
6741 /* Offset and size of r_debug.r_version. */
6742 int r_version_offset;
6743
6744 /* Offset and size of r_debug.r_map. */
6745 int r_map_offset;
6746
6747 /* Offset to l_addr field in struct link_map. */
6748 int l_addr_offset;
6749
6750 /* Offset to l_name field in struct link_map. */
6751 int l_name_offset;
6752
6753 /* Offset to l_ld field in struct link_map. */
6754 int l_ld_offset;
6755
6756 /* Offset to l_next field in struct link_map. */
6757 int l_next_offset;
6758
6759 /* Offset to l_prev field in struct link_map. */
6760 int l_prev_offset;
6761 };
6762
6763 /* Construct qXfer:libraries-svr4:read reply. */
6764
6765 int
6766 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6767 unsigned char *readbuf,
6768 unsigned const char *writebuf,
6769 CORE_ADDR offset, int len)
6770 {
6771 struct process_info_private *const priv = current_process ()->priv;
6772 char filename[PATH_MAX];
6773 int pid, is_elf64;
6774
6775 static const struct link_map_offsets lmo_32bit_offsets =
6776 {
6777 0, /* r_version offset. */
6778 4, /* r_debug.r_map offset. */
6779 0, /* l_addr offset in link_map. */
6780 4, /* l_name offset in link_map. */
6781 8, /* l_ld offset in link_map. */
6782 12, /* l_next offset in link_map. */
6783 16 /* l_prev offset in link_map. */
6784 };
6785
6786 static const struct link_map_offsets lmo_64bit_offsets =
6787 {
6788 0, /* r_version offset. */
6789 8, /* r_debug.r_map offset. */
6790 0, /* l_addr offset in link_map. */
6791 8, /* l_name offset in link_map. */
6792 16, /* l_ld offset in link_map. */
6793 24, /* l_next offset in link_map. */
6794 32 /* l_prev offset in link_map. */
6795 };
6796 const struct link_map_offsets *lmo;
6797 unsigned int machine;
6798 int ptr_size;
6799 CORE_ADDR lm_addr = 0, lm_prev = 0;
6800 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6801 int header_done = 0;
6802
6803 if (writebuf != NULL)
6804 return -2;
6805 if (readbuf == NULL)
6806 return -1;
6807
6808 pid = lwpid_of (current_thread);
6809 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6810 is_elf64 = elf_64_file_p (filename, &machine);
6811 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6812 ptr_size = is_elf64 ? 8 : 4;
6813
6814 while (annex[0] != '\0')
6815 {
6816 const char *sep;
6817 CORE_ADDR *addrp;
6818 int name_len;
6819
6820 sep = strchr (annex, '=');
6821 if (sep == NULL)
6822 break;
6823
6824 name_len = sep - annex;
6825 if (name_len == 5 && startswith (annex, "start"))
6826 addrp = &lm_addr;
6827 else if (name_len == 4 && startswith (annex, "prev"))
6828 addrp = &lm_prev;
6829 else
6830 {
6831 annex = strchr (sep, ';');
6832 if (annex == NULL)
6833 break;
6834 annex++;
6835 continue;
6836 }
6837
6838 annex = decode_address_to_semicolon (addrp, sep + 1);
6839 }
6840
6841 if (lm_addr == 0)
6842 {
6843 int r_version = 0;
6844
6845 if (priv->r_debug == 0)
6846 priv->r_debug = get_r_debug (pid, is_elf64);
6847
6848 /* We failed to find DT_DEBUG. This situation will not change
6849 for this inferior, so do not retry. Report it to GDB as E01;
6850 see GDB's solib-svr4.c for the reasons. */
6851 if (priv->r_debug == (CORE_ADDR) -1)
6852 return -1;
6853
6854 if (priv->r_debug != 0)
6855 {
6856 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6857 (unsigned char *) &r_version,
6858 sizeof (r_version)) != 0
6859 || r_version != 1)
6860 {
6861 warning ("unexpected r_debug version %d", r_version);
6862 }
6863 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6864 &lm_addr, ptr_size) != 0)
6865 {
6866 warning ("unable to read r_map from 0x%lx",
6867 (long) priv->r_debug + lmo->r_map_offset);
6868 }
6869 }
6870 }
6871
6872 std::string document = "<library-list-svr4 version=\"1.0\"";
6873
6874 while (lm_addr
6875 && read_one_ptr (lm_addr + lmo->l_name_offset,
6876 &l_name, ptr_size) == 0
6877 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6878 &l_addr, ptr_size) == 0
6879 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6880 &l_ld, ptr_size) == 0
6881 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6882 &l_prev, ptr_size) == 0
6883 && read_one_ptr (lm_addr + lmo->l_next_offset,
6884 &l_next, ptr_size) == 0)
6885 {
6886 unsigned char libname[PATH_MAX];
6887
6888 if (lm_prev != l_prev)
6889 {
6890 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6891 (long) lm_prev, (long) l_prev);
6892 break;
6893 }
6894
6895 /* Ignore the first entry even if it has a valid name, as the first
6896 entry corresponds to the main executable. The first entry should not
6897 be skipped if the dynamic loader was loaded late by a static executable
6898 (see solib-svr4.c parameter ignore_first). But in that case the main
6899 executable does not have PT_DYNAMIC present, and this function has
6900 already exited above due to a failed get_r_debug. */
6901 if (lm_prev == 0)
6902 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6903 else
6904 {
6905 /* Not checking for error because reading may stop before
6906 we've got PATH_MAX worth of characters. */
6907 libname[0] = '\0';
6908 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6909 libname[sizeof (libname) - 1] = '\0';
6910 if (libname[0] != '\0')
6911 {
6912 if (!header_done)
6913 {
6914 /* Terminate `<library-list-svr4'. */
6915 document += '>';
6916 header_done = 1;
6917 }
6918
6919 string_appendf (document, "<library name=\"");
6920 xml_escape_text_append (&document, (char *) libname);
6921 string_appendf (document, "\" lm=\"0x%lx\" "
6922 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6923 (unsigned long) lm_addr, (unsigned long) l_addr,
6924 (unsigned long) l_ld);
6925 }
6926 }
6927
6928 lm_prev = lm_addr;
6929 lm_addr = l_next;
6930 }
6931
6932 if (!header_done)
6933 {
6934 /* Empty list; terminate `<library-list-svr4'. */
6935 document += "/>";
6936 }
6937 else
6938 document += "</library-list-svr4>";
6939
6940 int document_len = document.length ();
6941 if (offset < document_len)
6942 document_len -= offset;
6943 else
6944 document_len = 0;
6945 if (len > document_len)
6946 len = document_len;
6947
6948 memcpy (readbuf, document.data () + offset, len);
6949
6950 return len;
6951 }
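
/* For reference, a request/reply pair for the handler above might look
roughly as follows; the addresses, offset/length and library names are
illustrative, the exact annex contents come from GDB's solib-svr4.c,
and the leading 'l' in the reply is the RSP marker for the final chunk
of a qXfer transfer:

  -> qXfer:libraries-svr4:read::0,ffff
  <- l<library-list-svr4 version="1.0" main-lm="0x...">
       <library name="/lib/libc.so.6" lm="0x..." l_addr="0x..."
                l_ld="0x..."/>
     </library-list-svr4>  */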
6952
6953 #ifdef HAVE_LINUX_BTRACE
6954
6955 btrace_target_info *
6956 linux_process_target::enable_btrace (ptid_t ptid,
6957 const btrace_config *conf)
6958 {
6959 return linux_enable_btrace (ptid, conf);
6960 }
6961
6962 /* See to_disable_btrace target method. */
6963
6964 int
6965 linux_process_target::disable_btrace (btrace_target_info *tinfo)
6966 {
6967 enum btrace_error err;
6968
6969 err = linux_disable_btrace (tinfo);
6970 return (err == BTRACE_ERR_NONE ? 0 : -1);
6971 }
6972
6973 /* Encode an Intel Processor Trace configuration. */
6974
6975 static void
6976 linux_low_encode_pt_config (struct buffer *buffer,
6977 const struct btrace_data_pt_config *config)
6978 {
6979 buffer_grow_str (buffer, "<pt-config>\n");
6980
6981 switch (config->cpu.vendor)
6982 {
6983 case CV_INTEL:
6984 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6985 "model=\"%u\" stepping=\"%u\"/>\n",
6986 config->cpu.family, config->cpu.model,
6987 config->cpu.stepping);
6988 break;
6989
6990 default:
6991 break;
6992 }
6993
6994 buffer_grow_str (buffer, "</pt-config>\n");
6995 }
6996
6997 /* Encode a raw buffer. */
6998
6999 static void
7000 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7001 unsigned int size)
7002 {
7003 if (size == 0)
7004 return;
7005
7006 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7007 buffer_grow_str (buffer, "<raw>\n");
7008
7009 while (size-- > 0)
7010 {
7011 char elem[2];
7012
7013 elem[0] = tohex ((*data >> 4) & 0xf);
7014 elem[1] = tohex (*data++ & 0xf);
7015
7016 buffer_grow (buffer, elem, 2);
7017 }
7018
7019 buffer_grow_str (buffer, "</raw>\n");
7020 }
7021
7022 /* See to_read_btrace target method. */
7023
7024 int
7025 linux_process_target::read_btrace (btrace_target_info *tinfo,
7026 buffer *buffer,
7027 enum btrace_read_type type)
7028 {
7029 struct btrace_data btrace;
7030 enum btrace_error err;
7031
7032 err = linux_read_btrace (&btrace, tinfo, type);
7033 if (err != BTRACE_ERR_NONE)
7034 {
7035 if (err == BTRACE_ERR_OVERFLOW)
7036 buffer_grow_str0 (buffer, "E.Overflow.");
7037 else
7038 buffer_grow_str0 (buffer, "E.Generic Error.");
7039
7040 return -1;
7041 }
7042
7043 switch (btrace.format)
7044 {
7045 case BTRACE_FORMAT_NONE:
7046 buffer_grow_str0 (buffer, "E.No Trace.");
7047 return -1;
7048
7049 case BTRACE_FORMAT_BTS:
7050 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7051 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7052
7053 for (const btrace_block &block : *btrace.variant.bts.blocks)
7054 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7055 paddress (block.begin), paddress (block.end));
7056
7057 buffer_grow_str0 (buffer, "</btrace>\n");
7058 break;
7059
7060 case BTRACE_FORMAT_PT:
7061 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7062 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7063 buffer_grow_str (buffer, "<pt>\n");
7064
7065 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7066
7067 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7068 btrace.variant.pt.size);
7069
7070 buffer_grow_str (buffer, "</pt>\n");
7071 buffer_grow_str0 (buffer, "</btrace>\n");
7072 break;
7073
7074 default:
7075 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7076 return -1;
7077 }
7078
7079 return 0;
7080 }
7081
7082 /* See to_btrace_conf target method. */
7083
7084 int
7085 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7086 buffer *buffer)
7087 {
7088 const struct btrace_config *conf;
7089
7090 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7091 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7092
7093 conf = linux_btrace_conf (tinfo);
7094 if (conf != NULL)
7095 {
7096 switch (conf->format)
7097 {
7098 case BTRACE_FORMAT_NONE:
7099 break;
7100
7101 case BTRACE_FORMAT_BTS:
7102 buffer_xml_printf (buffer, "<bts");
7103 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7104 buffer_xml_printf (buffer, " />\n");
7105 break;
7106
7107 case BTRACE_FORMAT_PT:
7108 buffer_xml_printf (buffer, "<pt");
7109 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7110 buffer_xml_printf (buffer, "/>\n");
7111 break;
7112 }
7113 }
7114
7115 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7116 return 0;
7117 }
7118 #endif /* HAVE_LINUX_BTRACE */
7119
7120 /* See nat/linux-nat.h. */
7121
7122 ptid_t
7123 current_lwp_ptid (void)
7124 {
7125 return ptid_of (current_thread);
7126 }
7127
7128 const char *
7129 linux_process_target::thread_name (ptid_t thread)
7130 {
7131 return linux_proc_tid_get_name (thread);
7132 }
7133
7134 #if USE_THREAD_DB
7135 bool
7136 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7137 int *handle_len)
7138 {
7139 return thread_db_thread_handle (ptid, handle, handle_len);
7140 }
7141 #endif
7142
7143 /* Default implementation of linux_target_ops method "set_pc" for a
7144 32-bit pc register that is literally named "pc". */
7145
7146 void
7147 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7148 {
7149 uint32_t newpc = pc;
7150
7151 supply_register_by_name (regcache, "pc", &newpc);
7152 }
7153
7154 /* Default implementation of linux_target_ops method "get_pc" for a
7155 32-bit pc register that is literally named "pc". */
7156
7157 CORE_ADDR
7158 linux_get_pc_32bit (struct regcache *regcache)
7159 {
7160 uint32_t pc;
7161
7162 collect_register_by_name (regcache, "pc", &pc);
7163 if (debug_threads)
7164 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7165 return pc;
7166 }
7167
7168 /* Default implementation of linux_target_ops method "set_pc" for a
7169 64-bit pc register that is literally named "pc". */
7170
7171 void
7172 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7173 {
7174 uint64_t newpc = pc;
7175
7176 supply_register_by_name (regcache, "pc", &newpc);
7177 }
7178
7179 /* Default implementation of linux_target_ops method "get_pc" for a
7180 64-bit pc register that is literally named "pc". */
7181
7182 CORE_ADDR
7183 linux_get_pc_64bit (struct regcache *regcache)
7184 {
7185 uint64_t pc;
7186
7187 collect_register_by_name (regcache, "pc", &pc);
7188 if (debug_threads)
7189 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7190 return pc;
7191 }
7192
7193 /* See linux-low.h. */
7194
7195 int
7196 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7197 {
7198 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7199 int offset = 0;
7200
7201 gdb_assert (wordsize == 4 || wordsize == 8);
7202
7203 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7204 {
7205 if (wordsize == 4)
7206 {
7207 uint32_t *data_p = (uint32_t *) data;
7208 if (data_p[0] == match)
7209 {
7210 *valp = data_p[1];
7211 return 1;
7212 }
7213 }
7214 else
7215 {
7216 uint64_t *data_p = (uint64_t *) data;
7217 if (data_p[0] == match)
7218 {
7219 *valp = data_p[1];
7220 return 1;
7221 }
7222 }
7223
7224 offset += 2 * wordsize;
7225 }
7226
7227 return 0;
7228 }
7229
7230 /* See linux-low.h. */
7231
7232 CORE_ADDR
7233 linux_get_hwcap (int wordsize)
7234 {
7235 CORE_ADDR hwcap = 0;
7236 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7237 return hwcap;
7238 }
7239
7240 /* See linux-low.h. */
7241
7242 CORE_ADDR
7243 linux_get_hwcap2 (int wordsize)
7244 {
7245 CORE_ADDR hwcap2 = 0;
7246 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7247 return hwcap2;
7248 }
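
/* A minimal sketch of how an architecture backend might use the
helpers above; the feature bit below is hypothetical (real bits come
from <asm/hwcap.h>), and the block is not built. */
#if 0
static bool
example_has_feature (int wordsize)
{
  /* AT_HWCAP value from the inferior's auxv, or 0 if absent.  */
  CORE_ADDR hwcap = linux_get_hwcap (wordsize);

  /* Stand-in for an arch-specific capability bit.  */
  const CORE_ADDR EXAMPLE_HWCAP_FEATURE = 1 << 5;

  return (hwcap & EXAMPLE_HWCAP_FEATURE) != 0;
}
#endif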
7249
7250 #ifdef HAVE_LINUX_REGSETS
7251 void
7252 initialize_regsets_info (struct regsets_info *info)
7253 {
7254 for (info->num_regsets = 0;
7255 info->regsets[info->num_regsets].size >= 0;
7256 info->num_regsets++)
7257 ;
7258 }
7259 #endif
7260
7261 void
7262 initialize_low (void)
7263 {
7264 struct sigaction sigchld_action;
7265
7266 memset (&sigchld_action, 0, sizeof (sigchld_action));
7267 set_target_ops (the_linux_target);
7268
7269 linux_ptrace_init_warnings ();
7270 linux_proc_init_warnings ();
7271
7272 sigchld_action.sa_handler = sigchld_handler;
7273 sigemptyset (&sigchld_action.sa_mask);
7274 sigchld_action.sa_flags = SA_RESTART;
7275 sigaction (SIGCHLD, &sigchld_action, NULL);
7276
7277 initialize_low_arch ();
7278
7279 linux_check_ptrace_features ();
7280 }