gdbserver/linux-low: turn 'cannot_{fetch/store}_register' into methods
gdbserver/linux-low.cc
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
48 #include <inttypes.h>
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
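/* Older libc headers may not define AT_HWCAP2; 26 is its value in
   the Linux kernel's auxv ABI. */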
74 #ifndef AT_HWCAP2
75 #define AT_HWCAP2 26
76 #endif
77
78 /* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81 #if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84 #if defined(__mcoldfire__)
85 /* These are still undefined in 3.10 kernels. */
86 #define PT_TEXT_ADDR 49*4
87 #define PT_DATA_ADDR 50*4
88 #define PT_TEXT_END_ADDR 51*4
89 /* BFIN already defines these since at least 2.6.32 kernels. */
90 #elif defined(BFIN)
91 #define PT_TEXT_ADDR 220
92 #define PT_TEXT_END_ADDR 224
93 #define PT_DATA_ADDR 228
94 /* These are still undefined in 3.10 kernels. */
95 #elif defined(__TMS320C6X__)
96 #define PT_TEXT_ADDR (0x10000*4)
97 #define PT_DATA_ADDR (0x10004*4)
98 #define PT_TEXT_END_ADDR (0x10008*4)
99 #endif
100 #endif
101
102 #if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107 #define SUPPORTS_READ_OFFSETS
108 #endif
109
110 #ifdef HAVE_LINUX_BTRACE
111 # include "nat/linux-btrace.h"
112 # include "gdbsupport/btrace-common.h"
113 #endif
114
115 #ifndef HAVE_ELF32_AUXV_T
116 /* Copied from glibc's elf.h. */
117 typedef struct
118 {
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We used to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127 } Elf32_auxv_t;
128 #endif
129
130 #ifndef HAVE_ELF64_AUXV_T
131 /* Copied from glibc's elf.h. */
132 typedef struct
133 {
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We used to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142 } Elf64_auxv_t;
143 #endif
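
/* A minimal sketch (illustrative only, not used here) of how such
   fixed-layout entries are consumed once an auxv image has been read
   from /proc/PID/auxv; FD and HWCAP are hypothetical locals:

     Elf64_auxv_t entry;
     while (read (fd, &entry, sizeof (entry)) == sizeof (entry)
            && entry.a_type != AT_NULL)
       if (entry.a_type == AT_HWCAP)
         hwcap = entry.a_un.a_val;
*/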
144
145 /* Does the current host support PTRACE_GETREGSET? */
146 int have_ptrace_getregset = -1;
147
148 /* LWP accessors. */
149
150 /* See nat/linux-nat.h. */
151
152 ptid_t
153 ptid_of_lwp (struct lwp_info *lwp)
154 {
155 return ptid_of (get_lwp_thread (lwp));
156 }
157
158 /* See nat/linux-nat.h. */
159
160 void
161 lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163 {
164 lwp->arch_private = info;
165 }
166
167 /* See nat/linux-nat.h. */
168
169 struct arch_lwp_info *
170 lwp_arch_private_info (struct lwp_info *lwp)
171 {
172 return lwp->arch_private;
173 }
174
175 /* See nat/linux-nat.h. */
176
177 int
178 lwp_is_stopped (struct lwp_info *lwp)
179 {
180 return lwp->stopped;
181 }
182
183 /* See nat/linux-nat.h. */
184
185 enum target_stop_reason
186 lwp_stop_reason (struct lwp_info *lwp)
187 {
188 return lwp->stop_reason;
189 }
190
191 /* See nat/linux-nat.h. */
192
193 int
194 lwp_is_stepping (struct lwp_info *lwp)
195 {
196 return lwp->stepping;
197 }
198
199 /* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
202
203 struct simple_pid_list
204 {
205 /* The process ID. */
206 int pid;
207
208 /* The status as reported by waitpid. */
209 int status;
210
211 /* Next in chain. */
212 struct simple_pid_list *next;
213 };
214 struct simple_pid_list *stopped_pids;
215
216 /* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219 static void
220 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221 {
222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228 }
229
230 static int
231 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232 {
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246 }
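
/* Usage sketch (with a hypothetical PID): if an as-yet-unknown LWP
   1234 reports its initial stop before the parent's fork/clone event
   has been processed, the wait code stashes it with

     add_to_pid_list (&stopped_pids, 1234, status);

   and handle_extended_wait later claims it with

     pull_pid_from_list (&stopped_pids, 1234, &status);

   so that the initial SIGSTOP is consumed exactly once. */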
247
248 enum stopping_threads_kind
249 {
250 /* Not stopping threads presently. */
251 NOT_STOPPING_THREADS,
252
253 /* Stopping threads. */
254 STOPPING_THREADS,
255
256 /* Stopping and suspending threads. */
257 STOPPING_AND_SUSPENDING_THREADS
258 };
259
260 /* This is set while stop_all_lwps is in effect. */
261 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
262
263 /* FIXME make into a target method? */
264 int using_threads = 1;
265
266 /* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268 static int stabilizing_threads;
269
270 static void linux_resume_one_lwp (struct lwp_info *lwp,
271 int step, int signal, siginfo_t *info);
272 static void unsuspend_all_lwps (struct lwp_info *except);
273 static struct lwp_info *add_lwp (ptid_t ptid);
274 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
275 static int lwp_is_marked_dead (struct lwp_info *lwp);
276 static int finish_step_over (struct lwp_info *lwp);
277 static int kill_lwp (unsigned long lwpid, int signo);
278 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
279 static int linux_low_ptrace_options (int attached);
280 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
281 static void proceed_one_lwp (thread_info *thread, lwp_info *except);
282
283 /* When the event-loop is doing a step-over, this points at the thread
284 being stepped. */
285 ptid_t step_over_bkpt;
286
287 /* True if the low target can hardware single-step. */
288
289 static int
290 can_hardware_single_step (void)
291 {
292 if (the_low_target.supports_hardware_single_step != NULL)
293 return the_low_target.supports_hardware_single_step ();
294 else
295 return 0;
296 }
297
298 /* True if the low target can software single-step. Such targets
299 implement the GET_NEXT_PCS callback. */
300
301 static int
302 can_software_single_step (void)
303 {
304 return (the_low_target.get_next_pcs != NULL);
305 }
306
307 /* True if the low target supports memory breakpoints. If so, we'll
308 have a GET_PC implementation. */
309
310 static int
311 supports_breakpoints (void)
312 {
313 return (the_low_target.get_pc != NULL);
314 }
315
316 /* Returns true if this target can support fast tracepoints. This
317 does not mean that the in-process agent has been loaded in the
318 inferior. */
319
320 static int
321 supports_fast_tracepoints (void)
322 {
323 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
324 }
325
326 /* True if LWP is stopped in its stepping range. */
327
328 static int
329 lwp_in_step_range (struct lwp_info *lwp)
330 {
331 CORE_ADDR pc = lwp->stop_pc;
332
333 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
334 }
335
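/* A signal reported for an LWP that could not be delivered to it
   immediately; kept on a per-LWP list chained through PREV until the
   LWP is next resumed. */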
336 struct pending_signals
337 {
338 int signal;
339 siginfo_t info;
340 struct pending_signals *prev;
341 };
342
343 /* The read/write ends of the pipe registered as a waitable file in the
344 event loop. */
345 static int linux_event_pipe[2] = { -1, -1 };
346
347 /* True if we're currently in async mode. */
348 #define target_is_async_p() (linux_event_pipe[0] != -1)
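
/* This is the usual self-pipe trick: marking an event writes a byte
   to linux_event_pipe[1] (see async_file_mark, declared below), which
   wakes up the event loop waiting on linux_event_pipe[0], turning an
   asynchronous SIGCHLD into an ordinary file event. */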
349
350 static void send_sigstop (struct lwp_info *lwp);
351
352 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit ELF, and -1 if it is not ELF at all; sets *MACHINE from the header (EM_NONE if not ELF). */
353
354 static int
355 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
356 {
357 if (header->e_ident[EI_MAG0] == ELFMAG0
358 && header->e_ident[EI_MAG1] == ELFMAG1
359 && header->e_ident[EI_MAG2] == ELFMAG2
360 && header->e_ident[EI_MAG3] == ELFMAG3)
361 {
362 *machine = header->e_machine;
363 return header->e_ident[EI_CLASS] == ELFCLASS64;
364
365 }
366 *machine = EM_NONE;
367 return -1;
368 }
369
370 /* Return 1 if FILE is a 64-bit ELF file, 0 if it is a 32-bit ELF
371 file (or its header cannot be read), and -1 if it cannot be
372 opened or is not ELF at all. */
373
374 static int
375 elf_64_file_p (const char *file, unsigned int *machine)
376 {
377 Elf64_Ehdr header;
378 int fd;
379
380 fd = open (file, O_RDONLY);
381 if (fd < 0)
382 return -1;
383
384 if (read (fd, &header, sizeof (header)) != sizeof (header))
385 {
386 close (fd);
387 return 0;
388 }
389 close (fd);
390
391 return elf_64_header_p (&header, machine);
392 }
393
394 /* Accepts an integer PID; returns true if the executable that PID
395 is running is a 64-bit ELF file. */
396
397 int
398 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
399 {
400 char file[PATH_MAX];
401
402 sprintf (file, "/proc/%d/exe", pid);
403 return elf_64_file_p (file, machine);
404 }
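
/* Note that /proc/PID/exe is resolved by the kernel to the file the
   process is actually executing, so this classifies the running
   binary even when its file system path is not otherwise known. */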
405
406 static void
407 delete_lwp (struct lwp_info *lwp)
408 {
409 struct thread_info *thr = get_lwp_thread (lwp);
410
411 if (debug_threads)
412 debug_printf ("deleting %ld\n", lwpid_of (thr));
413
414 remove_thread (thr);
415
416 if (the_low_target.delete_thread != NULL)
417 the_low_target.delete_thread (lwp->arch_private);
418 else
419 gdb_assert (lwp->arch_private == NULL);
420
421 free (lwp);
422 }
423
424 /* Add a process to the common process list, and set its private
425 data. */
426
427 static struct process_info *
428 linux_add_process (int pid, int attached)
429 {
430 struct process_info *proc;
431
432 proc = add_process (pid, attached);
433 proc->priv = XCNEW (struct process_info_private);
434
435 if (the_low_target.new_process != NULL)
436 proc->priv->arch_private = the_low_target.new_process ();
437
438 return proc;
439 }
440
441 static CORE_ADDR get_pc (struct lwp_info *lwp);
442
443 void
444 linux_process_target::arch_setup_thread (thread_info *thread)
445 {
446 struct thread_info *saved_thread;
447
448 saved_thread = current_thread;
449 current_thread = thread;
450
451 low_arch_setup ();
452
453 current_thread = saved_thread;
454 }
455
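/* Handle an extended ptrace event (fork/vfork/clone/vfork-done/exec)
   reported for *ORIG_EVENT_LWP with wait status WSTAT. Return 0 if
   the event should be reported to GDB, or 1 if it was handled
   internally and should be suppressed. */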
456 int
457 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
458 int wstat)
459 {
460 client_state &cs = get_client_state ();
461 struct lwp_info *event_lwp = *orig_event_lwp;
462 int event = linux_ptrace_get_extended_event (wstat);
463 struct thread_info *event_thr = get_lwp_thread (event_lwp);
464 struct lwp_info *new_lwp;
465
466 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
467
468 /* All extended events we currently use are mid-syscall. Only
469 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
470 you have to be using PTRACE_SEIZE to get that. */
471 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
472
473 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
474 || (event == PTRACE_EVENT_CLONE))
475 {
476 ptid_t ptid;
477 unsigned long new_pid;
478 int ret, status;
479
480 /* Get the pid of the new lwp. */
481 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
482 &new_pid);
483
484 /* If we haven't already seen the new PID stop, wait for it now. */
485 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
486 {
487 /* The new child has a pending SIGSTOP. We can't affect it until it
488 hits the SIGSTOP, but we're already attached. */
489
490 ret = my_waitpid (new_pid, &status, __WALL);
491
492 if (ret == -1)
493 perror_with_name ("waiting for new child");
494 else if (ret != new_pid)
495 warning ("wait returned unexpected PID %d", ret);
496 else if (!WIFSTOPPED (status))
497 warning ("wait returned unexpected status 0x%x", status);
498 }
499
500 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
501 {
502 struct process_info *parent_proc;
503 struct process_info *child_proc;
504 struct lwp_info *child_lwp;
505 struct thread_info *child_thr;
506 struct target_desc *tdesc;
507
508 ptid = ptid_t (new_pid, new_pid, 0);
509
510 if (debug_threads)
511 {
512 debug_printf ("HEW: Got fork event from LWP %ld, "
513 "new child is %d\n",
514 ptid_of (event_thr).lwp (),
515 ptid.pid ());
516 }
517
518 /* Add the new process to the tables and clone the breakpoint
519 lists of the parent. We need to do this even if the new process
520 will be detached, since we will need the process object and the
521 breakpoints to remove any breakpoints from memory when we
522 detach, and the client side will access registers. */
523 child_proc = linux_add_process (new_pid, 0);
524 gdb_assert (child_proc != NULL);
525 child_lwp = add_lwp (ptid);
526 gdb_assert (child_lwp != NULL);
527 child_lwp->stopped = 1;
528 child_lwp->must_set_ptrace_flags = 1;
529 child_lwp->status_pending_p = 0;
530 child_thr = get_lwp_thread (child_lwp);
531 child_thr->last_resume_kind = resume_stop;
532 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
533
534 /* If we're suspending all threads, leave this one suspended
535 too. If the fork/clone parent is stepping over a breakpoint,
536 all other threads have been suspended already. Leave the
537 child suspended too. */
538 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
539 || event_lwp->bp_reinsert != 0)
540 {
541 if (debug_threads)
542 debug_printf ("HEW: leaving child suspended\n");
543 child_lwp->suspended = 1;
544 }
545
546 parent_proc = get_thread_process (event_thr);
547 child_proc->attached = parent_proc->attached;
548
549 if (event_lwp->bp_reinsert != 0
550 && can_software_single_step ()
551 && event == PTRACE_EVENT_VFORK)
552 {
553 /* If we leave single-step breakpoints there, child will
554 hit it, so uninsert single-step breakpoints from parent
555 (and child). Once vfork child is done, reinsert
556 them back to parent. */
557 uninsert_single_step_breakpoints (event_thr);
558 }
559
560 clone_all_breakpoints (child_thr, event_thr);
561
562 tdesc = allocate_target_description ();
563 copy_target_description (tdesc, parent_proc->tdesc);
564 child_proc->tdesc = tdesc;
565
566 /* Clone arch-specific process data. */
567 if (the_low_target.new_fork != NULL)
568 the_low_target.new_fork (parent_proc, child_proc);
569
570 /* Save fork info in the parent thread. */
571 if (event == PTRACE_EVENT_FORK)
572 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
573 else if (event == PTRACE_EVENT_VFORK)
574 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
575
576 event_lwp->waitstatus.value.related_pid = ptid;
577
578 /* The status_pending field contains bits denoting the
579 extended event, so when the pending event is handled,
580 the handler will look at lwp->waitstatus. */
581 event_lwp->status_pending_p = 1;
582 event_lwp->status_pending = wstat;
583
584 /* Link the threads until the parent event is passed on to
585 higher layers. */
586 event_lwp->fork_relative = child_lwp;
587 child_lwp->fork_relative = event_lwp;
588
589 /* If the parent thread is doing step-over with single-step
590 breakpoints, the list of single-step breakpoints is cloned
591 from the parent's. Remove them from the child process.
592 In case of vfork, we'll reinsert them back once vforked
593 child is done. */
594 if (event_lwp->bp_reinsert != 0
595 && can_software_single_step ())
596 {
597 /* The child process is forked and stopped, so it is safe
598 to access its memory without stopping all other threads
599 from other processes. */
600 delete_single_step_breakpoints (child_thr);
601
602 gdb_assert (has_single_step_breakpoints (event_thr));
603 gdb_assert (!has_single_step_breakpoints (child_thr));
604 }
605
606 /* Report the event. */
607 return 0;
608 }
609
610 if (debug_threads)
611 debug_printf ("HEW: Got clone event "
612 "from LWP %ld, new child is LWP %ld\n",
613 lwpid_of (event_thr), new_pid);
614
615 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
616 new_lwp = add_lwp (ptid);
617
618 /* Either we're going to immediately resume the new thread
619 or leave it stopped. linux_resume_one_lwp is a nop if it
620 thinks the thread is currently running, so set this first
621 before calling linux_resume_one_lwp. */
622 new_lwp->stopped = 1;
623
624 /* If we're suspending all threads, leave this one suspended
625 too. If the fork/clone parent is stepping over a breakpoint,
626 all other threads have been suspended already. Leave the
627 child suspended too. */
628 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
629 || event_lwp->bp_reinsert != 0)
630 new_lwp->suspended = 1;
631
632 /* Normally we will get the pending SIGSTOP. But in some cases
633 we might get another signal delivered to the group first.
634 If we do get another signal, be sure not to lose it. */
635 if (WSTOPSIG (status) != SIGSTOP)
636 {
637 new_lwp->stop_expected = 1;
638 new_lwp->status_pending_p = 1;
639 new_lwp->status_pending = status;
640 }
641 else if (cs.report_thread_events)
642 {
643 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
644 new_lwp->status_pending_p = 1;
645 new_lwp->status_pending = status;
646 }
647
648 #ifdef USE_THREAD_DB
649 thread_db_notice_clone (event_thr, ptid);
650 #endif
651
652 /* Don't report the event. */
653 return 1;
654 }
655 else if (event == PTRACE_EVENT_VFORK_DONE)
656 {
657 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
658
659 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
660 {
661 reinsert_single_step_breakpoints (event_thr);
662
663 gdb_assert (has_single_step_breakpoints (event_thr));
664 }
665
666 /* Report the event. */
667 return 0;
668 }
669 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
670 {
671 struct process_info *proc;
672 std::vector<int> syscalls_to_catch;
673 ptid_t event_ptid;
674 pid_t event_pid;
675
676 if (debug_threads)
677 {
678 debug_printf ("HEW: Got exec event from LWP %ld\n",
679 lwpid_of (event_thr));
680 }
681
682 /* Get the event ptid. */
683 event_ptid = ptid_of (event_thr);
684 event_pid = event_ptid.pid ();
685
686 /* Save the syscall list from the execing process. */
687 proc = get_thread_process (event_thr);
688 syscalls_to_catch = std::move (proc->syscalls_to_catch);
689
690 /* Delete the execing process and all its threads. */
691 mourn (proc);
692 current_thread = NULL;
693
694 /* Create a new process/lwp/thread. */
695 proc = linux_add_process (event_pid, 0);
696 event_lwp = add_lwp (event_ptid);
697 event_thr = get_lwp_thread (event_lwp);
698 gdb_assert (current_thread == event_thr);
699 arch_setup_thread (event_thr);
700
701 /* Set the event status. */
702 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
703 event_lwp->waitstatus.value.execd_pathname
704 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
705
706 /* Mark the exec status as pending. */
707 event_lwp->stopped = 1;
708 event_lwp->status_pending_p = 1;
709 event_lwp->status_pending = wstat;
710 event_thr->last_resume_kind = resume_continue;
711 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
712
713 /* Update syscall state in the new lwp, effectively mid-syscall too. */
714 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
715
716 /* Restore the list to catch. Don't rely on the client, which is free
717 to avoid sending a new list when the architecture doesn't change.
718 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
719 proc->syscalls_to_catch = std::move (syscalls_to_catch);
720
721 /* Report the event. */
722 *orig_event_lwp = event_lwp;
723 return 0;
724 }
725
726 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
727 }
728
729 /* Return the PC as read from the regcache of LWP, without any
730 adjustment. */
731
732 static CORE_ADDR
733 get_pc (struct lwp_info *lwp)
734 {
735 struct thread_info *saved_thread;
736 struct regcache *regcache;
737 CORE_ADDR pc;
738
739 if (the_low_target.get_pc == NULL)
740 return 0;
741
742 saved_thread = current_thread;
743 current_thread = get_lwp_thread (lwp);
744
745 regcache = get_thread_regcache (current_thread, 1);
746 pc = (*the_low_target.get_pc) (regcache);
747
748 if (debug_threads)
749 debug_printf ("pc is 0x%lx\n", (long) pc);
750
751 current_thread = saved_thread;
752 return pc;
753 }
754
755 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
756 Fill *SYSNO with the syscall nr trapped. */
757
758 static void
759 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
760 {
761 struct thread_info *saved_thread;
762 struct regcache *regcache;
763
764 if (the_low_target.get_syscall_trapinfo == NULL)
765 {
766 /* If we cannot get the syscall trapinfo, report an unknown
767 system call number. */
768 *sysno = UNKNOWN_SYSCALL;
769 return;
770 }
771
772 saved_thread = current_thread;
773 current_thread = get_lwp_thread (lwp);
774
775 regcache = get_thread_regcache (current_thread, 1);
776 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
777
778 if (debug_threads)
779 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
780
781 current_thread = saved_thread;
782 }
783
784 static int check_stopped_by_watchpoint (struct lwp_info *child);
785
786 /* Called when the LWP stopped for a signal/trap. If it stopped for a
787 trap check what caused it (breakpoint, watchpoint, trace, etc.),
788 and save the result in the LWP's stop_reason field. If it stopped
789 for a breakpoint, decrement the PC if necessary on the lwp's
790 architecture. Returns true if we now have the LWP's stop PC. */
791
792 static int
793 save_stop_reason (struct lwp_info *lwp)
794 {
795 CORE_ADDR pc;
796 CORE_ADDR sw_breakpoint_pc;
797 struct thread_info *saved_thread;
798 #if USE_SIGTRAP_SIGINFO
799 siginfo_t siginfo;
800 #endif
801
802 if (the_low_target.get_pc == NULL)
803 return 0;
804
805 pc = get_pc (lwp);
806 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
807
808 /* breakpoint_at reads from the current thread. */
809 saved_thread = current_thread;
810 current_thread = get_lwp_thread (lwp);
811
812 #if USE_SIGTRAP_SIGINFO
813 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
814 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
815 {
816 if (siginfo.si_signo == SIGTRAP)
817 {
818 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
819 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
820 {
821 /* The si_code is ambiguous on this arch -- check debug
822 registers. */
823 if (!check_stopped_by_watchpoint (lwp))
824 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
825 }
826 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
827 {
828 /* If we determine the LWP stopped for a SW breakpoint,
829 trust it. Particularly don't check watchpoint
830 registers, because at least on s390, we'd find
831 stopped-by-watchpoint as long as there's a watchpoint
832 set. */
833 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
834 }
835 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
836 {
837 /* This can indicate either a hardware breakpoint or
838 hardware watchpoint. Check debug registers. */
839 if (!check_stopped_by_watchpoint (lwp))
840 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
841 }
842 else if (siginfo.si_code == TRAP_TRACE)
843 {
844 /* We may have single stepped an instruction that
845 triggered a watchpoint. In that case, on some
846 architectures (such as x86), instead of TRAP_HWBKPT,
847 si_code indicates TRAP_TRACE, and we need to check
848 the debug registers separately. */
849 if (!check_stopped_by_watchpoint (lwp))
850 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
851 }
852 }
853 }
854 #else
855 /* We may have just stepped a breakpoint instruction. E.g., in
856 non-stop mode, GDB first tells the thread A to step a range, and
857 then the user inserts a breakpoint inside the range. In that
858 case we need to report the breakpoint PC. */
859 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
860 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
861 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
862
863 if (hardware_breakpoint_inserted_here (pc))
864 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
865
866 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
867 check_stopped_by_watchpoint (lwp);
868 #endif
869
870 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
871 {
872 if (debug_threads)
873 {
874 struct thread_info *thr = get_lwp_thread (lwp);
875
876 debug_printf ("CSBB: %s stopped by software breakpoint\n",
877 target_pid_to_str (ptid_of (thr)));
878 }
879
880 /* Back up the PC if necessary. */
881 if (pc != sw_breakpoint_pc)
882 {
883 struct regcache *regcache
884 = get_thread_regcache (current_thread, 1);
885 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
886 }
887
888 /* Update this so we record the correct stop PC below. */
889 pc = sw_breakpoint_pc;
890 }
891 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
892 {
893 if (debug_threads)
894 {
895 struct thread_info *thr = get_lwp_thread (lwp);
896
897 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
898 target_pid_to_str (ptid_of (thr)));
899 }
900 }
901 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
902 {
903 if (debug_threads)
904 {
905 struct thread_info *thr = get_lwp_thread (lwp);
906
907 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
908 target_pid_to_str (ptid_of (thr)));
909 }
910 }
911 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
912 {
913 if (debug_threads)
914 {
915 struct thread_info *thr = get_lwp_thread (lwp);
916
917 debug_printf ("CSBB: %s stopped by trace\n",
918 target_pid_to_str (ptid_of (thr)));
919 }
920 }
921
922 lwp->stop_pc = pc;
923 current_thread = saved_thread;
924 return 1;
925 }
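
/* For example, on x86 a software breakpoint is the one-byte int3
   instruction, and the resulting SIGTRAP reports a PC one byte past
   the breakpoint address; decr_pc_after_break is 1 there, so the
   rewind above puts the PC back on the breakpoint proper. */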
926
927 static struct lwp_info *
928 add_lwp (ptid_t ptid)
929 {
930 struct lwp_info *lwp;
931
932 lwp = XCNEW (struct lwp_info);
933
934 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
935
936 lwp->thread = add_thread (ptid, lwp);
937
938 if (the_low_target.new_thread != NULL)
939 the_low_target.new_thread (lwp);
940
941 return lwp;
942 }
943
944 /* Callback to be used when calling fork_inferior, responsible for
945 actually initiating the tracing of the inferior. */
946
947 static void
948 linux_ptrace_fun ()
949 {
950 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
951 (PTRACE_TYPE_ARG4) 0) < 0)
952 trace_start_error_with_name ("ptrace");
953
954 if (setpgid (0, 0) < 0)
955 trace_start_error_with_name ("setpgid");
956
957 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
958 stdout to stderr so that inferior i/o doesn't corrupt the connection.
959 Also, redirect stdin to /dev/null. */
960 if (remote_connection_is_stdio ())
961 {
962 if (close (0) < 0)
963 trace_start_error_with_name ("close");
964 if (open ("/dev/null", O_RDONLY) < 0)
965 trace_start_error_with_name ("open");
966 if (dup2 (2, 1) < 0)
967 trace_start_error_with_name ("dup2");
968 if (write (2, "stdin/stdout redirected\n",
969 sizeof ("stdin/stdout redirected\n") - 1) < 0)
970 {
971 /* Errors ignored. */;
972 }
973 }
974 }
975
976 /* Start an inferior process and return its pid.
977 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
978 are its arguments. */
979
980 int
981 linux_process_target::create_inferior (const char *program,
982 const std::vector<char *> &program_args)
983 {
984 client_state &cs = get_client_state ();
985 struct lwp_info *new_lwp;
986 int pid;
987 ptid_t ptid;
988
989 {
990 maybe_disable_address_space_randomization restore_personality
991 (cs.disable_randomization);
992 std::string str_program_args = stringify_argv (program_args);
993
994 pid = fork_inferior (program,
995 str_program_args.c_str (),
996 get_environ ()->envp (), linux_ptrace_fun,
997 NULL, NULL, NULL, NULL);
998 }
999
1000 linux_add_process (pid, 0);
1001
1002 ptid = ptid_t (pid, pid, 0);
1003 new_lwp = add_lwp (ptid);
1004 new_lwp->must_set_ptrace_flags = 1;
1005
1006 post_fork_inferior (pid, program);
1007
1008 return pid;
1009 }
1010
1011 /* Implement the post_create_inferior target_ops method. */
1012
1013 void
1014 linux_process_target::post_create_inferior ()
1015 {
1016 struct lwp_info *lwp = get_thread_lwp (current_thread);
1017
1018 low_arch_setup ();
1019
1020 if (lwp->must_set_ptrace_flags)
1021 {
1022 struct process_info *proc = current_process ();
1023 int options = linux_low_ptrace_options (proc->attached);
1024
1025 linux_enable_event_reporting (lwpid_of (current_thread), options);
1026 lwp->must_set_ptrace_flags = 0;
1027 }
1028 }
1029
1030 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1031 error. */
1032
1033 int
1034 linux_attach_lwp (ptid_t ptid)
1035 {
1036 struct lwp_info *new_lwp;
1037 int lwpid = ptid.lwp ();
1038
1039 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1040 != 0)
1041 return errno;
1042
1043 new_lwp = add_lwp (ptid);
1044
1045 /* We need to wait for SIGSTOP before being able to make the next
1046 ptrace call on this LWP. */
1047 new_lwp->must_set_ptrace_flags = 1;
1048
1049 if (linux_proc_pid_is_stopped (lwpid))
1050 {
1051 if (debug_threads)
1052 debug_printf ("Attached to a stopped process\n");
1053
1054 /* The process is definitely stopped. It is in a job control
1055 stop, unless the kernel predates the TASK_STOPPED /
1056 TASK_TRACED distinction, in which case it might be in a
1057 ptrace stop. Make sure it is in a ptrace stop; from there we
1058 can kill it, signal it, et cetera.
1059
1060 First make sure there is a pending SIGSTOP. Since we are
1061 already attached, the process can not transition from stopped
1062 to running without a PTRACE_CONT; so we know this signal will
1063 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1064 probably already in the queue (unless this kernel is old
1065 enough to use TASK_STOPPED for ptrace stops); but since
1066 SIGSTOP is not an RT signal, it can only be queued once. */
1067 kill_lwp (lwpid, SIGSTOP);
1068
1069 /* Finally, resume the stopped process. This will deliver the
1070 SIGSTOP (or a higher priority signal, just like normal
1071 PTRACE_ATTACH), which we'll catch later on. */
1072 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1073 }
1074
1075 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1076 brings it to a halt.
1077
1078 There are several cases to consider here:
1079
1080 1) gdbserver has already attached to the process and is being notified
1081 of a new thread that is being created.
1082 In this case we should ignore that SIGSTOP and resume the
1083 process. This is handled below by setting stop_expected = 1,
1084 and the fact that add_thread sets last_resume_kind ==
1085 resume_continue.
1086
1087 2) This is the first thread (the process thread), and we're attaching
1088 to it via attach_inferior.
1089 In this case we want the process thread to stop.
1090 This is handled by having linux_attach set last_resume_kind ==
1091 resume_stop after we return.
1092
1093 If the pid we are attaching to is also the tgid, we attach to and
1094 stop all the existing threads. Otherwise, we attach to pid and
1095 ignore any other threads in the same group as this pid.
1096
1097 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1098 existing threads.
1099 In this case we want the thread to stop.
1100 FIXME: This case is currently not properly handled.
1101 We should wait for the SIGSTOP but don't. Things apparently work
1102 because enough time passes between when we ptrace (ATTACH) and when
1103 gdb makes the next ptrace call on the thread.
1104
1105 On the other hand, if we are currently trying to stop all threads, we
1106 should treat the new thread as if we had sent it a SIGSTOP. This works
1107 because we are guaranteed that the add_lwp call above added us to the
1108 end of the list, and so the new thread has not yet reached
1109 wait_for_sigstop (but will). */
1110 new_lwp->stop_expected = 1;
1111
1112 return 0;
1113 }
1114
1115 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1116 already attached. Returns true if a new LWP is found, false
1117 otherwise. */
1118
1119 static int
1120 attach_proc_task_lwp_callback (ptid_t ptid)
1121 {
1122 /* Is this a new thread? */
1123 if (find_thread_ptid (ptid) == NULL)
1124 {
1125 int lwpid = ptid.lwp ();
1126 int err;
1127
1128 if (debug_threads)
1129 debug_printf ("Found new lwp %d\n", lwpid);
1130
1131 err = linux_attach_lwp (ptid);
1132
1133 /* Be quiet if we simply raced with the thread exiting. EPERM
1134 is returned if the thread's task still exists, and is marked
1135 as exited or zombie, as well as other conditions, so in that
1136 case, confirm the status in /proc/PID/status. */
1137 if (err == ESRCH
1138 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1139 {
1140 if (debug_threads)
1141 {
1142 debug_printf ("Cannot attach to lwp %d: "
1143 "thread is gone (%d: %s)\n",
1144 lwpid, err, safe_strerror (err));
1145 }
1146 }
1147 else if (err != 0)
1148 {
1149 std::string reason
1150 = linux_ptrace_attach_fail_reason_string (ptid, err);
1151
1152 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1153 }
1154
1155 return 1;
1156 }
1157 return 0;
1158 }
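
/* linux_proc_attach_tgid_threads invokes the callback above while
   re-listing /proc/PID/task until repeated passes find no new LWPs,
   which closes the race with threads being created mid-attach. */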
1159
1160 static void async_file_mark (void);
1161
1162 /* Attach to PID. If PID is the tgid, attach to it and all
1163 of its threads. */
1164
1165 int
1166 linux_process_target::attach (unsigned long pid)
1167 {
1168 struct process_info *proc;
1169 struct thread_info *initial_thread;
1170 ptid_t ptid = ptid_t (pid, pid, 0);
1171 int err;
1172
1173 proc = linux_add_process (pid, 1);
1174
1175 /* Attach to PID. We will check for other threads
1176 soon. */
1177 err = linux_attach_lwp (ptid);
1178 if (err != 0)
1179 {
1180 remove_process (proc);
1181
1182 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1183 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1184 }
1185
1186 /* Don't ignore the initial SIGSTOP if we just attached to this
1187 process. It will be collected by wait shortly. */
1188 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
1189 initial_thread->last_resume_kind = resume_stop;
1190
1191 /* We must attach to every LWP. If /proc is mounted, use that to
1192 find them now. On the one hand, the inferior may be using raw
1193 clone instead of using pthreads. On the other hand, even if it
1194 is using pthreads, GDB may not be connected yet (thread_db needs
1195 to do symbol lookups, through qSymbol). Also, thread_db walks
1196 structures in the inferior's address space to find the list of
1197 threads/LWPs, and those structures may well be corrupted. Note
1198 that once thread_db is loaded, we'll still use it to list threads
1199 and associate pthread info with each LWP. */
1200 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1201
1202 /* GDB will shortly read the xml target description for this
1203 process, to figure out the process' architecture. But the target
1204 description is only filled in when the first process/thread in
1205 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1206 that now, otherwise, if GDB is fast enough, it could read the
1207 target description _before_ that initial stop. */
1208 if (non_stop)
1209 {
1210 struct lwp_info *lwp;
1211 int wstat, lwpid;
1212 ptid_t pid_ptid = ptid_t (pid);
1213
1214 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1215 gdb_assert (lwpid > 0);
1216
1217 lwp = find_lwp_pid (ptid_t (lwpid));
1218
1219 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1220 {
1221 lwp->status_pending_p = 1;
1222 lwp->status_pending = wstat;
1223 }
1224
1225 initial_thread->last_resume_kind = resume_continue;
1226
1227 async_file_mark ();
1228
1229 gdb_assert (proc->tdesc != NULL);
1230 }
1231
1232 return 0;
1233 }
1234
1235 static int
1236 last_thread_of_process_p (int pid)
1237 {
1238 bool seen_one = false;
1239
1240 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1241 {
1242 if (!seen_one)
1243 {
1244 /* This is the first thread of this process we see. */
1245 seen_one = true;
1246 return false;
1247 }
1248 else
1249 {
1250 /* This is the second thread of this process we see. */
1251 return true;
1252 }
1253 });
1254
1255 return thread == NULL;
1256 }
1257
1258 /* Kill LWP. */
1259
1260 static void
1261 linux_kill_one_lwp (struct lwp_info *lwp)
1262 {
1263 struct thread_info *thr = get_lwp_thread (lwp);
1264 int pid = lwpid_of (thr);
1265
1266 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1267 there is no signal context, and ptrace(PTRACE_KILL) (or
1268 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1269 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1270 alternative is to kill with SIGKILL. We only need one SIGKILL
1271 per process, not one for each thread. But since we still support
1272 debugging programs using raw clone without CLONE_THREAD,
1273 we send one for each thread. For years, we used PTRACE_KILL
1274 only, so we're being a bit paranoid about some old kernels where
1275 PTRACE_KILL might work better (dubious if there are any such, but
1276 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1277 second, and so we're fine everywhere. */
1278
1279 errno = 0;
1280 kill_lwp (pid, SIGKILL);
1281 if (debug_threads)
1282 {
1283 int save_errno = errno;
1284
1285 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1286 target_pid_to_str (ptid_of (thr)),
1287 save_errno ? safe_strerror (save_errno) : "OK");
1288 }
1289
1290 errno = 0;
1291 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1292 if (debug_threads)
1293 {
1294 int save_errno = errno;
1295
1296 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1297 target_pid_to_str (ptid_of (thr)),
1298 save_errno ? safe_strerror (save_errno) : "OK");
1299 }
1300 }
1301
1302 /* Kill LWP and wait for it to die. */
1303
1304 static void
1305 kill_wait_lwp (struct lwp_info *lwp)
1306 {
1307 struct thread_info *thr = get_lwp_thread (lwp);
1308 int pid = ptid_of (thr).pid ();
1309 int lwpid = ptid_of (thr).lwp ();
1310 int wstat;
1311 int res;
1312
1313 if (debug_threads)
1314 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1315
1316 do
1317 {
1318 linux_kill_one_lwp (lwp);
1319
1320 /* Make sure it died. Notes:
1321
1322 - The loop is most likely unnecessary.
1323
1324 - We don't use wait_for_event as that could delete lwps
1325 while we're iterating over them. We're not interested in
1326 any pending status at this point, only in making sure all
1327 wait status on the kernel side are collected until the
1328 process is reaped.
1329
1330 - We don't use __WALL here as the __WALL emulation relies on
1331 SIGCHLD, and killing a stopped process doesn't generate
1332 one, nor an exit status.
1333 */
1334 res = my_waitpid (lwpid, &wstat, 0);
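/* A plain wait can fail with ECHILD for a non-leader clone LWP,
   which (on older kernels) is only reported with __WCLONE; retry
   with that flag if so. */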
1335 if (res == -1 && errno == ECHILD)
1336 res = my_waitpid (lwpid, &wstat, __WCLONE);
1337 } while (res > 0 && WIFSTOPPED (wstat));
1338
1339 /* Even if it was stopped, the child may have already disappeared.
1340 E.g., if it was killed by SIGKILL. */
1341 if (res < 0 && errno != ECHILD)
1342 perror_with_name ("kill_wait_lwp");
1343 }
1344
1345 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1346 except the leader. */
1347
1348 static void
1349 kill_one_lwp_callback (thread_info *thread, int pid)
1350 {
1351 struct lwp_info *lwp = get_thread_lwp (thread);
1352
1353 /* We avoid killing the first thread here, because of a Linux kernel (at
1354 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1355 the children get a chance to be reaped, it will remain a zombie
1356 forever. */
1357
1358 if (lwpid_of (thread) == pid)
1359 {
1360 if (debug_threads)
1361 debug_printf ("lkop: is last of process %s\n",
1362 target_pid_to_str (thread->id));
1363 return;
1364 }
1365
1366 kill_wait_lwp (lwp);
1367 }
1368
1369 int
1370 linux_process_target::kill (process_info *process)
1371 {
1372 int pid = process->pid;
1373
1374 /* If we're killing a running inferior, make sure it is stopped
1375 first, as PTRACE_KILL will not work otherwise. */
1376 stop_all_lwps (0, NULL);
1377
1378 for_each_thread (pid, [&] (thread_info *thread)
1379 {
1380 kill_one_lwp_callback (thread, pid);
1381 });
1382
1383 /* See the comment in linux_kill_one_lwp. We did not kill the first
1384 thread in the list, so do so now. */
1385 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1386
1387 if (lwp == NULL)
1388 {
1389 if (debug_threads)
1390 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1391 pid);
1392 }
1393 else
1394 kill_wait_lwp (lwp);
1395
1396 mourn (process);
1397
1398 /* Since we presently can only stop all lwps of all processes, we
1399 need to unstop lwps of other processes. */
1400 unstop_all_lwps (0, NULL);
1401 return 0;
1402 }
1403
1404 /* Get pending signal of THREAD, for detaching purposes. This is the
1405 signal the thread last stopped for, which we need to deliver to the
1406 thread when detaching, otherwise, it'd be suppressed/lost. */
1407
1408 static int
1409 get_detach_signal (struct thread_info *thread)
1410 {
1411 client_state &cs = get_client_state ();
1412 enum gdb_signal signo = GDB_SIGNAL_0;
1413 int status;
1414 struct lwp_info *lp = get_thread_lwp (thread);
1415
1416 if (lp->status_pending_p)
1417 status = lp->status_pending;
1418 else
1419 {
1420 /* If the thread had been suspended by gdbserver, and it stopped
1421 cleanly, then it'll have stopped with SIGSTOP. But we don't
1422 want to deliver that SIGSTOP. */
1423 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1424 || thread->last_status.value.sig == GDB_SIGNAL_0)
1425 return 0;
1426
1427 /* Otherwise, we may need to deliver the signal we
1428 intercepted. */
1429 status = lp->last_status;
1430 }
1431
1432 if (!WIFSTOPPED (status))
1433 {
1434 if (debug_threads)
1435 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1436 target_pid_to_str (ptid_of (thread)));
1437 return 0;
1438 }
1439
1440 /* Extended wait statuses aren't real SIGTRAPs. */
1441 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1442 {
1443 if (debug_threads)
1444 debug_printf ("GPS: lwp %s had stopped with extended "
1445 "status: no pending signal\n",
1446 target_pid_to_str (ptid_of (thread)));
1447 return 0;
1448 }
1449
1450 signo = gdb_signal_from_host (WSTOPSIG (status));
1451
1452 if (cs.program_signals_p && !cs.program_signals[signo])
1453 {
1454 if (debug_threads)
1455 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1456 target_pid_to_str (ptid_of (thread)),
1457 gdb_signal_to_string (signo));
1458 return 0;
1459 }
1460 else if (!cs.program_signals_p
1461 /* If we have no way to know which signals GDB does not
1462 want to have passed to the program, assume
1463 SIGTRAP/SIGINT, which is GDB's default. */
1464 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1465 {
1466 if (debug_threads)
1467 debug_printf ("GPS: lwp %s had signal %s, "
1468 "but we don't know if we should pass it. "
1469 "Default to not.\n",
1470 target_pid_to_str (ptid_of (thread)),
1471 gdb_signal_to_string (signo));
1472 return 0;
1473 }
1474 else
1475 {
1476 if (debug_threads)
1477 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1478 target_pid_to_str (ptid_of (thread)),
1479 gdb_signal_to_string (signo));
1480
1481 return WSTOPSIG (status);
1482 }
1483 }
1484
1485 /* Detach from LWP. */
1486
1487 static void
1488 linux_detach_one_lwp (struct lwp_info *lwp)
1489 {
1490 struct thread_info *thread = get_lwp_thread (lwp);
1491 int sig;
1492 int lwpid;
1493
1494 /* If there is a pending SIGSTOP, get rid of it. */
1495 if (lwp->stop_expected)
1496 {
1497 if (debug_threads)
1498 debug_printf ("Sending SIGCONT to %s\n",
1499 target_pid_to_str (ptid_of (thread)));
1500
1501 kill_lwp (lwpid_of (thread), SIGCONT);
1502 lwp->stop_expected = 0;
1503 }
1504
1505 /* Pass on any pending signal for this thread. */
1506 sig = get_detach_signal (thread);
1507
1508 /* Preparing to resume may try to write registers, and fail if the
1509 lwp is zombie. If that happens, ignore the error. We'll handle
1510 it below, when detach fails with ESRCH. */
1511 try
1512 {
1513 /* Flush any pending changes to the process's registers. */
1514 regcache_invalidate_thread (thread);
1515
1516 /* Finally, let it resume. */
1517 if (the_low_target.prepare_to_resume != NULL)
1518 the_low_target.prepare_to_resume (lwp);
1519 }
1520 catch (const gdb_exception_error &ex)
1521 {
1522 if (!check_ptrace_stopped_lwp_gone (lwp))
1523 throw;
1524 }
1525
1526 lwpid = lwpid_of (thread);
1527 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1528 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1529 {
1530 int save_errno = errno;
1531
1532 /* We know the thread exists, so ESRCH must mean the lwp is
1533 zombie. This can happen if one of the already-detached
1534 threads exits the whole thread group. In that case we're
1535 still attached, and must reap the lwp. */
1536 if (save_errno == ESRCH)
1537 {
1538 int ret, status;
1539
1540 ret = my_waitpid (lwpid, &status, __WALL);
1541 if (ret == -1)
1542 {
1543 warning (_("Couldn't reap LWP %d while detaching: %s"),
1544 lwpid, safe_strerror (errno));
1545 }
1546 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1547 {
1548 warning (_("Reaping LWP %d while detaching "
1549 "returned unexpected status 0x%x"),
1550 lwpid, status);
1551 }
1552 }
1553 else
1554 {
1555 error (_("Can't detach %s: %s"),
1556 target_pid_to_str (ptid_of (thread)),
1557 safe_strerror (save_errno));
1558 }
1559 }
1560 else if (debug_threads)
1561 {
1562 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1563 target_pid_to_str (ptid_of (thread)),
1564 strsignal (sig));
1565 }
1566
1567 delete_lwp (lwp);
1568 }
1569
1570 /* Callback for for_each_thread. Detaches from non-leader threads of a
1571 given process. */
1572
1573 static void
1574 linux_detach_lwp_callback (thread_info *thread)
1575 {
1576 /* We don't actually detach from the thread group leader just yet.
1577 If the thread group exits, we must reap the zombie clone lwps
1578 before we're able to reap the leader. */
1579 if (thread->id.pid () == thread->id.lwp ())
1580 return;
1581
1582 lwp_info *lwp = get_thread_lwp (thread);
1583 linux_detach_one_lwp (lwp);
1584 }
1585
1586 int
1587 linux_process_target::detach (process_info *process)
1588 {
1589 struct lwp_info *main_lwp;
1590
1591 /* If a step over is already in progress, let it finish first,
1592 otherwise nesting a stabilize_threads operation on top gets real
1593 messy. */
1594 complete_ongoing_step_over ();
1595
1596 /* Stop all threads before detaching. First, ptrace requires that
1597 the thread is stopped to successfully detach. Second, thread_db
1598 may need to uninstall thread event breakpoints from memory, which
1599 only works with a stopped process anyway. */
1600 stop_all_lwps (0, NULL);
1601
1602 #ifdef USE_THREAD_DB
1603 thread_db_detach (process);
1604 #endif
1605
1606 /* Stabilize threads (move out of jump pads). */
1607 target_stabilize_threads ();
1608
1609 /* Detach from the clone lwps first. If the thread group exits just
1610 while we're detaching, we must reap the clone lwps before we're
1611 able to reap the leader. */
1612 for_each_thread (process->pid, linux_detach_lwp_callback);
1613
1614 main_lwp = find_lwp_pid (ptid_t (process->pid));
1615 linux_detach_one_lwp (main_lwp);
1616
1617 mourn (process);
1618
1619 /* Since we presently can only stop all lwps of all processes, we
1620 need to unstop lwps of other processes. */
1621 unstop_all_lwps (0, NULL);
1622 return 0;
1623 }
1624
1625 /* Remove all LWPs that belong to PROCESS from the lwp list, and free the process's private data. */
1626
1627 void
1628 linux_process_target::mourn (process_info *process)
1629 {
1630 struct process_info_private *priv;
1631
1632 #ifdef USE_THREAD_DB
1633 thread_db_mourn (process);
1634 #endif
1635
1636 for_each_thread (process->pid, [] (thread_info *thread)
1637 {
1638 delete_lwp (get_thread_lwp (thread));
1639 });
1640
1641 /* Free all private data. */
1642 priv = process->priv;
1643 if (the_low_target.delete_process != NULL)
1644 the_low_target.delete_process (priv->arch_private);
1645 else
1646 gdb_assert (priv->arch_private == NULL);
1647 free (priv);
1648 process->priv = NULL;
1649
1650 remove_process (process);
1651 }
1652
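/* Wait until process PID is gone, draining any intermediate stop
   statuses, so the zombie is fully reaped before we return. */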
1653 void
1654 linux_process_target::join (int pid)
1655 {
1656 int status, ret;
1657
1658 do {
1659 ret = my_waitpid (pid, &status, 0);
1660 if (WIFEXITED (status) || WIFSIGNALED (status))
1661 break;
1662 } while (ret != -1 || errno != ECHILD);
1663 }
1664
1665 /* Return true if the given thread is still alive. */
1666
1667 bool
1668 linux_process_target::thread_alive (ptid_t ptid)
1669 {
1670 struct lwp_info *lwp = find_lwp_pid (ptid);
1671
1672 /* We assume we always know if a thread exits. If a whole process
1673 exited but we still haven't been able to report it to GDB, we'll
1674 hold on to the last lwp of the dead process. */
1675 if (lwp != NULL)
1676 return !lwp_is_marked_dead (lwp);
1677 else
1678 return 0;
1679 }
1680
1681 /* Return 1 if this lwp still has an interesting status pending. If
1682 not (e.g., it had stopped for a breakpoint that is gone), return
1683 0. */
1684
1685 static int
1686 thread_still_has_status_pending_p (struct thread_info *thread)
1687 {
1688 struct lwp_info *lp = get_thread_lwp (thread);
1689
1690 if (!lp->status_pending_p)
1691 return 0;
1692
1693 if (thread->last_resume_kind != resume_stop
1694 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1695 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1696 {
1697 struct thread_info *saved_thread;
1698 CORE_ADDR pc;
1699 int discard = 0;
1700
1701 gdb_assert (lp->last_status != 0);
1702
1703 pc = get_pc (lp);
1704
1705 saved_thread = current_thread;
1706 current_thread = thread;
1707
1708 if (pc != lp->stop_pc)
1709 {
1710 if (debug_threads)
1711 debug_printf ("PC of %ld changed\n",
1712 lwpid_of (thread));
1713 discard = 1;
1714 }
1715
1716 #if !USE_SIGTRAP_SIGINFO
1717 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1718 && !(*the_low_target.breakpoint_at) (pc))
1719 {
1720 if (debug_threads)
1721 debug_printf ("previous SW breakpoint of %ld gone\n",
1722 lwpid_of (thread));
1723 discard = 1;
1724 }
1725 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1726 && !hardware_breakpoint_inserted_here (pc))
1727 {
1728 if (debug_threads)
1729 debug_printf ("previous HW breakpoint of %ld gone\n",
1730 lwpid_of (thread));
1731 discard = 1;
1732 }
1733 #endif
1734
1735 current_thread = saved_thread;
1736
1737 if (discard)
1738 {
1739 if (debug_threads)
1740 debug_printf ("discarding pending breakpoint status\n");
1741 lp->status_pending_p = 0;
1742 return 0;
1743 }
1744 }
1745
1746 return 1;
1747 }
1748
1749 /* Returns true if LWP is resumed from the client's perspective. */
1750
1751 static int
1752 lwp_resumed (struct lwp_info *lwp)
1753 {
1754 struct thread_info *thread = get_lwp_thread (lwp);
1755
1756 if (thread->last_resume_kind != resume_stop)
1757 return 1;
1758
1759 /* Did gdb send us a `vCont;t', but we haven't reported the
1760 corresponding stop to gdb yet? If so, the thread is still
1761 resumed/running from gdb's perspective. */
1762 if (thread->last_resume_kind == resume_stop
1763 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1764 return 1;
1765
1766 return 0;
1767 }
1768
1769 /* Return true if this lwp has an interesting status pending. */
1770 static bool
1771 status_pending_p_callback (thread_info *thread, ptid_t ptid)
1772 {
1773 struct lwp_info *lp = get_thread_lwp (thread);
1774
1775 /* Check if we're only interested in events from a specific process
1776 or a specific LWP. */
1777 if (!thread->id.matches (ptid))
1778 return 0;
1779
1780 if (!lwp_resumed (lp))
1781 return 0;
1782
1783 if (lp->status_pending_p
1784 && !thread_still_has_status_pending_p (thread))
1785 {
1786 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1787 return 0;
1788 }
1789
1790 return lp->status_pending_p;
1791 }
1792
1793 struct lwp_info *
1794 find_lwp_pid (ptid_t ptid)
1795 {
1796 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1797 {
1798 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1799 return thr_arg->id.lwp () == lwp;
1800 });
1801
1802 if (thread == NULL)
1803 return NULL;
1804
1805 return get_thread_lwp (thread);
1806 }
1807
1808 /* Return the number of known LWPs in the tgid given by PID. */
1809
1810 static int
1811 num_lwps (int pid)
1812 {
1813 int count = 0;
1814
1815 for_each_thread (pid, [&] (thread_info *thread)
1816 {
1817 count++;
1818 });
1819
1820 return count;
1821 }
1822
1823 /* See nat/linux-nat.h. */
1824
1825 struct lwp_info *
1826 iterate_over_lwps (ptid_t filter,
1827 gdb::function_view<iterate_over_lwps_ftype> callback)
1828 {
1829 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1830 {
1831 lwp_info *lwp = get_thread_lwp (thr_arg);
1832
1833 return callback (lwp);
1834 });
1835
1836 if (thread == NULL)
1837 return NULL;
1838
1839 return get_thread_lwp (thread);
1840 }
1841
1842 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1843 their exits until all other threads in the group have exited. */
1844
1845 static void
1846 check_zombie_leaders (void)
1847 {
1848 for_each_process ([] (process_info *proc) {
1849 pid_t leader_pid = pid_of (proc);
1850 struct lwp_info *leader_lp;
1851
1852 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1853
1854 if (debug_threads)
1855 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1856 "num_lwps=%d, zombie=%d\n",
1857 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1858 linux_proc_pid_is_zombie (leader_pid));
1859
1860 if (leader_lp != NULL && !leader_lp->stopped
1861 /* Check if there are other threads in the group, as we may
1862 have raced with the inferior simply exiting. */
1863 && !last_thread_of_process_p (leader_pid)
1864 && linux_proc_pid_is_zombie (leader_pid))
1865 {
1866 /* A leader zombie can mean one of two things:
1867
1868 - It exited, and there's an exit status pending to be
1869 collected, or only the leader exited (not the whole
1870 program). In the latter case, we can't waitpid the
1871 leader's exit status until all other threads are gone.
1872
1873 - There are 3 or more threads in the group, and a thread
1874 other than the leader exec'd. On an exec, the Linux
1875 kernel destroys all other threads (except the execing
1876 one) in the thread group, and resets the execing thread's
1877 tid to the tgid. No exit notification is sent for the
1878 execing thread -- from the ptracer's perspective, it
1879 appears as though the execing thread just vanishes.
1880 Until we reap all other threads except the leader and the
1881 execing thread, the leader will be zombie, and the
1882 execing thread will be in `D (disc sleep)'. As soon as
1883 all other threads are reaped, the execing thread changes
1884 	 its tid to the tgid, and the previous (zombie) leader
1885 vanishes, giving place to the "new" leader. We could try
1886 distinguishing the exit and exec cases, by waiting once
1887 more, and seeing if something comes out, but it doesn't
1888 sound useful. The previous leader _does_ go away, and
1889 we'll re-add the new one once we see the exec event
1890 (which is just the same as what would happen if the
1891 previous leader did exit voluntarily before some other
1892 thread execs). */
1893
1894 if (debug_threads)
1895 debug_printf ("CZL: Thread group leader %d zombie "
1896 "(it exited, or another thread execd).\n",
1897 leader_pid);
1898
1899 delete_lwp (leader_lp);
1900 }
1901 });
1902 }
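
/* A minimal sketch of the kind of check linux_proc_pid_is_zombie
   performs -- the real implementation lives in nat/linux-procfs.c and
   is more careful -- parse the "State:" line of /proc/PID/status and
   test for 'Z':

     static int
     example_pid_is_zombie (pid_t pid)
     {
       char path[64], buf[256];
       int zombie = 0;
       FILE *f;

       snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
       f = fopen (path, "r");
       if (f == NULL)
	 return 0;
       while (fgets (buf, sizeof (buf), f) != NULL)
	 if (strncmp (buf, "State:", 6) == 0)
	   {
	     zombie = (strchr (buf, 'Z') != NULL);
	     break;
	   }
       fclose (f);
       return zombie;
     }
*/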
1903
1904 /* Callback for `find_thread'. Returns the first LWP that is not
1905 stopped. */
1906
1907 static bool
1908 not_stopped_callback (thread_info *thread, ptid_t filter)
1909 {
1910 if (!thread->id.matches (filter))
1911 return false;
1912
1913 lwp_info *lwp = get_thread_lwp (thread);
1914
1915 return !lwp->stopped;
1916 }
1917
1918 /* Increment LWP's suspend count. */
1919
1920 static void
1921 lwp_suspended_inc (struct lwp_info *lwp)
1922 {
1923 lwp->suspended++;
1924
1925 if (debug_threads && lwp->suspended > 4)
1926 {
1927 struct thread_info *thread = get_lwp_thread (lwp);
1928
1929 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1930 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1931 }
1932 }
1933
1934 /* Decrement LWP's suspend count. */
1935
1936 static void
1937 lwp_suspended_decr (struct lwp_info *lwp)
1938 {
1939 lwp->suspended--;
1940
1941 if (lwp->suspended < 0)
1942 {
1943 struct thread_info *thread = get_lwp_thread (lwp);
1944
1945 internal_error (__FILE__, __LINE__,
1946 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1947 lwp->suspended);
1948 }
1949 }
1950
1951 /* This function should only be called if the LWP got a SIGTRAP.
1952
1953    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1954 event was handled, 0 otherwise. */
1955
1956 static int
1957 handle_tracepoints (struct lwp_info *lwp)
1958 {
1959 struct thread_info *tinfo = get_lwp_thread (lwp);
1960 int tpoint_related_event = 0;
1961
1962 gdb_assert (lwp->suspended == 0);
1963
1964 /* If this tracepoint hit causes a tracing stop, we'll immediately
1965 uninsert tracepoints. To do this, we temporarily pause all
1966 threads, unpatch away, and then unpause threads. We need to make
1967 sure the unpausing doesn't resume LWP too. */
1968 lwp_suspended_inc (lwp);
1969
1970 /* And we need to be sure that any all-threads-stopping doesn't try
1971 to move threads out of the jump pads, as it could deadlock the
1972 inferior (LWP could be in the jump pad, maybe even holding the
1973      lock).  */
1974
1975 /* Do any necessary step collect actions. */
1976 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1977
1978 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1979
1980 /* See if we just hit a tracepoint and do its main collect
1981 actions. */
1982 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1983
1984 lwp_suspended_decr (lwp);
1985
1986 gdb_assert (lwp->suspended == 0);
1987 gdb_assert (!stabilizing_threads
1988 || (lwp->collecting_fast_tracepoint
1989 != fast_tpoint_collect_result::not_collecting));
1990
1991 if (tpoint_related_event)
1992 {
1993 if (debug_threads)
1994 debug_printf ("got a tracepoint event\n");
1995 return 1;
1996 }
1997
1998 return 0;
1999 }
2000
2001 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2002 collection status. */
2003
2004 static fast_tpoint_collect_result
2005 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2006 struct fast_tpoint_collect_status *status)
2007 {
2008 CORE_ADDR thread_area;
2009 struct thread_info *thread = get_lwp_thread (lwp);
2010
2011 if (the_low_target.get_thread_area == NULL)
2012 return fast_tpoint_collect_result::not_collecting;
2013
2014 /* Get the thread area address. This is used to recognize which
2015 thread is which when tracing with the in-process agent library.
2016 We don't read anything from the address, and treat it as opaque;
2017 it's the address itself that we assume is unique per-thread. */
2018 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2019 return fast_tpoint_collect_result::not_collecting;
2020
2021 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2022 }
2023
2024 bool
2025 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2026 {
2027 struct thread_info *saved_thread;
2028
2029 saved_thread = current_thread;
2030 current_thread = get_lwp_thread (lwp);
2031
2032 if ((wstat == NULL
2033 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2034 && supports_fast_tracepoints ()
2035 && agent_loaded_p ())
2036 {
2037 struct fast_tpoint_collect_status status;
2038
2039 if (debug_threads)
2040 debug_printf ("Checking whether LWP %ld needs to move out of the "
2041 "jump pad.\n",
2042 lwpid_of (current_thread));
2043
2044 fast_tpoint_collect_result r
2045 = linux_fast_tracepoint_collecting (lwp, &status);
2046
2047 if (wstat == NULL
2048 || (WSTOPSIG (*wstat) != SIGILL
2049 && WSTOPSIG (*wstat) != SIGFPE
2050 && WSTOPSIG (*wstat) != SIGSEGV
2051 && WSTOPSIG (*wstat) != SIGBUS))
2052 {
2053 lwp->collecting_fast_tracepoint = r;
2054
2055 if (r != fast_tpoint_collect_result::not_collecting)
2056 {
2057 if (r == fast_tpoint_collect_result::before_insn
2058 && lwp->exit_jump_pad_bkpt == NULL)
2059 {
2060 /* Haven't executed the original instruction yet.
2061 Set breakpoint there, and wait till it's hit,
2062 then single-step until exiting the jump pad. */
2063 lwp->exit_jump_pad_bkpt
2064 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2065 }
2066
2067 if (debug_threads)
2068 debug_printf ("Checking whether LWP %ld needs to move out of "
2069 "the jump pad...it does\n",
2070 lwpid_of (current_thread));
2071 current_thread = saved_thread;
2072
2073 return true;
2074 }
2075 }
2076 else
2077 {
2078 /* If we get a synchronous signal while collecting, *and*
2079 while executing the (relocated) original instruction,
2080 reset the PC to point at the tpoint address, before
2081 reporting to GDB. Otherwise, it's an IPA lib bug: just
2082 report the signal to GDB, and pray for the best. */
2083
2084 lwp->collecting_fast_tracepoint
2085 = fast_tpoint_collect_result::not_collecting;
2086
2087 if (r != fast_tpoint_collect_result::not_collecting
2088 && (status.adjusted_insn_addr <= lwp->stop_pc
2089 && lwp->stop_pc < status.adjusted_insn_addr_end))
2090 {
2091 siginfo_t info;
2092 struct regcache *regcache;
2093
2094 /* The si_addr on a few signals references the address
2095 of the faulting instruction. Adjust that as
2096 well. */
2097 if ((WSTOPSIG (*wstat) == SIGILL
2098 || WSTOPSIG (*wstat) == SIGFPE
2099 || WSTOPSIG (*wstat) == SIGBUS
2100 || WSTOPSIG (*wstat) == SIGSEGV)
2101 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2102 (PTRACE_TYPE_ARG3) 0, &info) == 0
2103 /* Final check just to make sure we don't clobber
2104 the siginfo of non-kernel-sent signals. */
2105 && (uintptr_t) info.si_addr == lwp->stop_pc)
2106 {
2107 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2108 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2109 (PTRACE_TYPE_ARG3) 0, &info);
2110 }
2111
2112 regcache = get_thread_regcache (current_thread, 1);
2113 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2114 lwp->stop_pc = status.tpoint_addr;
2115
2116 /* Cancel any fast tracepoint lock this thread was
2117 holding. */
2118 force_unlock_trace_buffer ();
2119 }
2120
2121 if (lwp->exit_jump_pad_bkpt != NULL)
2122 {
2123 if (debug_threads)
2124 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2125 "stopping all threads momentarily.\n");
2126
2127 stop_all_lwps (1, lwp);
2128
2129 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2130 lwp->exit_jump_pad_bkpt = NULL;
2131
2132 unstop_all_lwps (1, lwp);
2133
2134 gdb_assert (lwp->suspended >= 0);
2135 }
2136 }
2137 }
2138
2139 if (debug_threads)
2140 debug_printf ("Checking whether LWP %ld needs to move out of the "
2141 "jump pad...no\n",
2142 lwpid_of (current_thread));
2143
2144 current_thread = saved_thread;
2145 return false;
2146 }
2147
2148 /* Enqueue one signal in the "signals to report later when out of the
2149 jump pad" list. */
2150
2151 static void
2152 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2153 {
2154 struct pending_signals *p_sig;
2155 struct thread_info *thread = get_lwp_thread (lwp);
2156
2157 if (debug_threads)
2158 debug_printf ("Deferring signal %d for LWP %ld.\n",
2159 WSTOPSIG (*wstat), lwpid_of (thread));
2160
2161 if (debug_threads)
2162 {
2163 struct pending_signals *sig;
2164
2165 for (sig = lwp->pending_signals_to_report;
2166 sig != NULL;
2167 sig = sig->prev)
2168 debug_printf (" Already queued %d\n",
2169 sig->signal);
2170
2171 debug_printf (" (no more currently queued signals)\n");
2172 }
2173
2174 /* Don't enqueue non-RT signals if they are already in the deferred
2175 queue. (SIGSTOP being the easiest signal to see ending up here
2176 twice) */
2177 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2178 {
2179 struct pending_signals *sig;
2180
2181 for (sig = lwp->pending_signals_to_report;
2182 sig != NULL;
2183 sig = sig->prev)
2184 {
2185 if (sig->signal == WSTOPSIG (*wstat))
2186 {
2187 if (debug_threads)
2188 debug_printf ("Not requeuing already queued non-RT signal %d"
2189 " for LWP %ld\n",
2190 sig->signal,
2191 lwpid_of (thread));
2192 return;
2193 }
2194 }
2195 }
2196
2197 p_sig = XCNEW (struct pending_signals);
2198 p_sig->prev = lwp->pending_signals_to_report;
2199 p_sig->signal = WSTOPSIG (*wstat);
2200
2201 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2202 &p_sig->info);
2203
2204 lwp->pending_signals_to_report = p_sig;
2205 }
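
/* Note the queue discipline implied by the PREV links: the list head
   is always the most recently deferred signal, and
   dequeue_one_deferred_signal below walks PREV to the tail, so
   deferred signals are reported in FIFO order.  */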
2206
2207 /* Dequeue one signal from the "signals to report later when out of
2208 the jump pad" list. */
2209
2210 static int
2211 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2212 {
2213 struct thread_info *thread = get_lwp_thread (lwp);
2214
2215 if (lwp->pending_signals_to_report != NULL)
2216 {
2217 struct pending_signals **p_sig;
2218
2219 p_sig = &lwp->pending_signals_to_report;
2220 while ((*p_sig)->prev != NULL)
2221 p_sig = &(*p_sig)->prev;
2222
2223 *wstat = W_STOPCODE ((*p_sig)->signal);
2224 if ((*p_sig)->info.si_signo != 0)
2225 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2226 &(*p_sig)->info);
2227 free (*p_sig);
2228 *p_sig = NULL;
2229
2230 if (debug_threads)
2231 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2232 WSTOPSIG (*wstat), lwpid_of (thread));
2233
2234 if (debug_threads)
2235 {
2236 struct pending_signals *sig;
2237
2238 for (sig = lwp->pending_signals_to_report;
2239 sig != NULL;
2240 sig = sig->prev)
2241 debug_printf (" Still queued %d\n",
2242 sig->signal);
2243
2244 debug_printf (" (no more queued signals)\n");
2245 }
2246
2247 return 1;
2248 }
2249
2250 return 0;
2251 }
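
/* For reference: W_STOPCODE builds a wait status from a signal number
   such that the usual macros invert it (gdbsupport/gdb_wait.h provides
   a definition when the libc lacks one).  A sketch of the round trip:

     int wstat = W_STOPCODE (SIGUSR1);
     gdb_assert (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGUSR1);
*/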
2252
2253 /* Fetch the possibly triggered data watchpoint info and store it in
2254 CHILD.
2255
2256 On some archs, like x86, that use debug registers to set
2257 watchpoints, it's possible that the way to know which watched
2258    address trapped is to check the register that is used to select
2259 which address to watch. Problem is, between setting the watchpoint
2260 and reading back which data address trapped, the user may change
2261 the set of watchpoints, and, as a consequence, GDB changes the
2262 debug registers in the inferior. To avoid reading back a stale
2263 stopped-data-address when that happens, we cache in LP the fact
2264 that a watchpoint trapped, and the corresponding data address, as
2265 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2266 registers meanwhile, we have the cached data we can rely on. */
2267
2268 static int
2269 check_stopped_by_watchpoint (struct lwp_info *child)
2270 {
2271 if (the_low_target.stopped_by_watchpoint != NULL)
2272 {
2273 struct thread_info *saved_thread;
2274
2275 saved_thread = current_thread;
2276 current_thread = get_lwp_thread (child);
2277
2278 if (the_low_target.stopped_by_watchpoint ())
2279 {
2280 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2281
2282 if (the_low_target.stopped_data_address != NULL)
2283 child->stopped_data_address
2284 = the_low_target.stopped_data_address ();
2285 else
2286 child->stopped_data_address = 0;
2287 }
2288
2289 current_thread = saved_thread;
2290 }
2291
2292 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2293 }
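
/* As an illustration of the hooks consulted above: on x86 the low
   target's stopped_by_watchpoint reads the DR6 debug-status register
   to tell whether a hardware watchpoint fired, and
   stopped_data_address returns the watched address that triggered.
   (See the x86 low-target code for the real logic; this note only
   sketches the hooks' contract.)  */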
2294
2295 /* Return the ptrace options that we want to try to enable. */
2296
2297 static int
2298 linux_low_ptrace_options (int attached)
2299 {
2300 client_state &cs = get_client_state ();
2301 int options = 0;
2302
2303 if (!attached)
2304 options |= PTRACE_O_EXITKILL;
2305
2306 if (cs.report_fork_events)
2307 options |= PTRACE_O_TRACEFORK;
2308
2309 if (cs.report_vfork_events)
2310 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2311
2312 if (cs.report_exec_events)
2313 options |= PTRACE_O_TRACEEXEC;
2314
2315 options |= PTRACE_O_TRACESYSGOOD;
2316
2317 return options;
2318 }
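
/* The options computed above are ultimately applied with
   PTRACE_SETOPTIONS.  A minimal sketch of what
   linux_enable_event_reporting (nat/linux-ptrace.c) boils down to;
   the real helper also masks OPTIONS against what the running kernel
   supports:

     ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (uintptr_t) options);
*/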
2319
2320 lwp_info *
2321 linux_process_target::filter_event (int lwpid, int wstat)
2322 {
2323 client_state &cs = get_client_state ();
2324 struct lwp_info *child;
2325 struct thread_info *thread;
2326 int have_stop_pc = 0;
2327
2328 child = find_lwp_pid (ptid_t (lwpid));
2329
2330 /* Check for stop events reported by a process we didn't already
2331 know about - anything not already in our LWP list.
2332
2333 If we're expecting to receive stopped processes after
2334 fork, vfork, and clone events, then we'll just add the
2335 new one to our list and go back to waiting for the event
2336 to be reported - the stopped process might be returned
2337 from waitpid before or after the event is.
2338
2339 But note the case of a non-leader thread exec'ing after the
2340 leader having exited, and gone from our lists (because
2341 check_zombie_leaders deleted it). The non-leader thread
2342 changes its tid to the tgid. */
2343
2344 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2345 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2346 {
2347 ptid_t child_ptid;
2348
2349 /* A multi-thread exec after we had seen the leader exiting. */
2350 if (debug_threads)
2351 {
2352 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2353 "after exec.\n", lwpid);
2354 }
2355
2356 child_ptid = ptid_t (lwpid, lwpid, 0);
2357 child = add_lwp (child_ptid);
2358 child->stopped = 1;
2359 current_thread = child->thread;
2360 }
2361
2362 /* If we didn't find a process, one of two things presumably happened:
2363 - A process we started and then detached from has exited. Ignore it.
2364 - A process we are controlling has forked and the new child's stop
2365 was reported to us by the kernel. Save its PID. */
2366 if (child == NULL && WIFSTOPPED (wstat))
2367 {
2368 add_to_pid_list (&stopped_pids, lwpid, wstat);
2369 return NULL;
2370 }
2371 else if (child == NULL)
2372 return NULL;
2373
2374 thread = get_lwp_thread (child);
2375
2376 child->stopped = 1;
2377
2378 child->last_status = wstat;
2379
2380 /* Check if the thread has exited. */
2381 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2382 {
2383 if (debug_threads)
2384 debug_printf ("LLFE: %d exited.\n", lwpid);
2385
2386 if (finish_step_over (child))
2387 {
2388 /* Unsuspend all other LWPs, and set them back running again. */
2389 unsuspend_all_lwps (child);
2390 }
2391
2392 /* If there is at least one more LWP, then the exit signal was
2393 not the end of the debugged application and should be
2394 ignored, unless GDB wants to hear about thread exits. */
2395 if (cs.report_thread_events
2396 || last_thread_of_process_p (pid_of (thread)))
2397 {
2398 	  /* Since events are serialized to GDB core, we can't report
2399 	     this one right now.  Leave the status pending for
2400 the next time we're able to report it. */
2401 mark_lwp_dead (child, wstat);
2402 return child;
2403 }
2404 else
2405 {
2406 delete_lwp (child);
2407 return NULL;
2408 }
2409 }
2410
2411 gdb_assert (WIFSTOPPED (wstat));
2412
2413 if (WIFSTOPPED (wstat))
2414 {
2415 struct process_info *proc;
2416
2417 /* Architecture-specific setup after inferior is running. */
2418 proc = find_process_pid (pid_of (thread));
2419 if (proc->tdesc == NULL)
2420 {
2421 if (proc->attached)
2422 {
2423 /* This needs to happen after we have attached to the
2424 inferior and it is stopped for the first time, but
2425 before we access any inferior registers. */
2426 arch_setup_thread (thread);
2427 }
2428 else
2429 {
2430 /* The process is started, but GDBserver will do
2431 architecture-specific setup after the program stops at
2432 the first instruction. */
2433 child->status_pending_p = 1;
2434 child->status_pending = wstat;
2435 return child;
2436 }
2437 }
2438 }
2439
2440 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2441 {
2442 struct process_info *proc = find_process_pid (pid_of (thread));
2443 int options = linux_low_ptrace_options (proc->attached);
2444
2445 linux_enable_event_reporting (lwpid, options);
2446 child->must_set_ptrace_flags = 0;
2447 }
2448
2449 /* Always update syscall_state, even if it will be filtered later. */
2450 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2451 {
2452 child->syscall_state
2453 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2454 ? TARGET_WAITKIND_SYSCALL_RETURN
2455 : TARGET_WAITKIND_SYSCALL_ENTRY);
2456 }
2457 else
2458 {
2459 /* Almost all other ptrace-stops are known to be outside of system
2460 calls, with further exceptions in handle_extended_wait. */
2461 child->syscall_state = TARGET_WAITKIND_IGNORE;
2462 }
2463
2464 /* Be careful to not overwrite stop_pc until save_stop_reason is
2465 called. */
2466 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2467 && linux_is_extended_waitstatus (wstat))
2468 {
2469 child->stop_pc = get_pc (child);
2470 if (handle_extended_wait (&child, wstat))
2471 {
2472 /* The event has been handled, so just return without
2473 reporting it. */
2474 return NULL;
2475 }
2476 }
2477
2478 if (linux_wstatus_maybe_breakpoint (wstat))
2479 {
2480 if (save_stop_reason (child))
2481 have_stop_pc = 1;
2482 }
2483
2484 if (!have_stop_pc)
2485 child->stop_pc = get_pc (child);
2486
2487 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2488 && child->stop_expected)
2489 {
2490 if (debug_threads)
2491 debug_printf ("Expected stop.\n");
2492 child->stop_expected = 0;
2493
2494 if (thread->last_resume_kind == resume_stop)
2495 {
2496 /* We want to report the stop to the core. Treat the
2497 SIGSTOP as a normal event. */
2498 if (debug_threads)
2499 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2500 target_pid_to_str (ptid_of (thread)));
2501 }
2502 else if (stopping_threads != NOT_STOPPING_THREADS)
2503 {
2504 /* Stopping threads. We don't want this SIGSTOP to end up
2505 pending. */
2506 if (debug_threads)
2507 debug_printf ("LLW: SIGSTOP caught for %s "
2508 "while stopping threads.\n",
2509 target_pid_to_str (ptid_of (thread)));
2510 return NULL;
2511 }
2512 else
2513 {
2514 /* This is a delayed SIGSTOP. Filter out the event. */
2515 if (debug_threads)
2516 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2517 child->stepping ? "step" : "continue",
2518 target_pid_to_str (ptid_of (thread)));
2519
2520 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2521 return NULL;
2522 }
2523 }
2524
2525 child->status_pending_p = 1;
2526 child->status_pending = wstat;
2527 return child;
2528 }
2529
2530 /* Return true if THREAD is doing hardware single step. */
2531
2532 static int
2533 maybe_hw_step (struct thread_info *thread)
2534 {
2535 if (can_hardware_single_step ())
2536 return 1;
2537 else
2538 {
2539 /* GDBserver must insert single-step breakpoint for software
2540 single step. */
2541 gdb_assert (has_single_step_breakpoints (thread));
2542 return 0;
2543 }
2544 }
2545
2546 /* Resume LWPs that are currently stopped without any pending status
2547 to report, but are resumed from the core's perspective. */
2548
2549 static void
2550 resume_stopped_resumed_lwps (thread_info *thread)
2551 {
2552 struct lwp_info *lp = get_thread_lwp (thread);
2553
2554 if (lp->stopped
2555 && !lp->suspended
2556 && !lp->status_pending_p
2557 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2558 {
2559 int step = 0;
2560
2561 if (thread->last_resume_kind == resume_step)
2562 step = maybe_hw_step (thread);
2563
2564 if (debug_threads)
2565 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2566 target_pid_to_str (ptid_of (thread)),
2567 paddress (lp->stop_pc),
2568 step);
2569
2570 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2571 }
2572 }
2573
2574 int
2575 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2576 ptid_t filter_ptid,
2577 int *wstatp, int options)
2578 {
2579 struct thread_info *event_thread;
2580 struct lwp_info *event_child, *requested_child;
2581 sigset_t block_mask, prev_mask;
2582
2583 retry:
2584 /* N.B. event_thread points to the thread_info struct that contains
2585 event_child. Keep them in sync. */
2586 event_thread = NULL;
2587 event_child = NULL;
2588 requested_child = NULL;
2589
2590 /* Check for a lwp with a pending status. */
2591
2592 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2593 {
2594 event_thread = find_thread_in_random ([&] (thread_info *thread)
2595 {
2596 return status_pending_p_callback (thread, filter_ptid);
2597 });
2598
2599 if (event_thread != NULL)
2600 event_child = get_thread_lwp (event_thread);
2601 if (debug_threads && event_thread)
2602 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2603 }
2604 else if (filter_ptid != null_ptid)
2605 {
2606 requested_child = find_lwp_pid (filter_ptid);
2607
2608 if (stopping_threads == NOT_STOPPING_THREADS
2609 && requested_child->status_pending_p
2610 && (requested_child->collecting_fast_tracepoint
2611 != fast_tpoint_collect_result::not_collecting))
2612 {
2613 enqueue_one_deferred_signal (requested_child,
2614 &requested_child->status_pending);
2615 requested_child->status_pending_p = 0;
2616 requested_child->status_pending = 0;
2617 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2618 }
2619
2620 if (requested_child->suspended
2621 && requested_child->status_pending_p)
2622 {
2623 internal_error (__FILE__, __LINE__,
2624 "requesting an event out of a"
2625 " suspended child?");
2626 }
2627
2628 if (requested_child->status_pending_p)
2629 {
2630 event_child = requested_child;
2631 event_thread = get_lwp_thread (event_child);
2632 }
2633 }
2634
2635 if (event_child != NULL)
2636 {
2637 if (debug_threads)
2638 debug_printf ("Got an event from pending child %ld (%04x)\n",
2639 lwpid_of (event_thread), event_child->status_pending);
2640 *wstatp = event_child->status_pending;
2641 event_child->status_pending_p = 0;
2642 event_child->status_pending = 0;
2643 current_thread = event_thread;
2644 return lwpid_of (event_thread);
2645 }
2646
2647 /* But if we don't find a pending event, we'll have to wait.
2648
2649 We only enter this loop if no process has a pending wait status.
2650 Thus any action taken in response to a wait status inside this
2651 loop is responding as soon as we detect the status, not after any
2652 pending events. */
2653
2654 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2655 all signals while here. */
2656 sigfillset (&block_mask);
2657 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2658
2659 /* Always pull all events out of the kernel. We'll randomly select
2660 an event LWP out of all that have events, to prevent
2661 starvation. */
2662 while (event_child == NULL)
2663 {
2664 pid_t ret = 0;
2665
2666       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2667 quirks:
2668
2669 - If the thread group leader exits while other threads in the
2670 thread group still exist, waitpid(TGID, ...) hangs. That
2671 waitpid won't return an exit status until the other threads
2672 in the group are reaped.
2673
2674 - When a non-leader thread execs, that thread just vanishes
2675 without reporting an exit (so we'd hang if we waited for it
2676 explicitly in that case). The exec event is reported to
2677 the TGID pid. */
2678 errno = 0;
2679 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2680
2681 if (debug_threads)
2682 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2683 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2684
2685 if (ret > 0)
2686 {
2687 if (debug_threads)
2688 {
2689 debug_printf ("LLW: waitpid %ld received %s\n",
2690 (long) ret, status_to_str (*wstatp));
2691 }
2692
2693 /* Filter all events. IOW, leave all events pending. We'll
2694 randomly select an event LWP out of all that have events
2695 below. */
2696 filter_event (ret, *wstatp);
2697 /* Retry until nothing comes out of waitpid. A single
2698 SIGCHLD can indicate more than one child stopped. */
2699 continue;
2700 }
2701
2702 /* Now that we've pulled all events out of the kernel, resume
2703 LWPs that don't have an interesting event to report. */
2704 if (stopping_threads == NOT_STOPPING_THREADS)
2705 for_each_thread (resume_stopped_resumed_lwps);
2706
2707 /* ... and find an LWP with a status to report to the core, if
2708 any. */
2709 event_thread = find_thread_in_random ([&] (thread_info *thread)
2710 {
2711 return status_pending_p_callback (thread, filter_ptid);
2712 });
2713
2714 if (event_thread != NULL)
2715 {
2716 event_child = get_thread_lwp (event_thread);
2717 *wstatp = event_child->status_pending;
2718 event_child->status_pending_p = 0;
2719 event_child->status_pending = 0;
2720 break;
2721 }
2722
2723 /* Check for zombie thread group leaders. Those can't be reaped
2724 until all other threads in the thread group are. */
2725 check_zombie_leaders ();
2726
2727 auto not_stopped = [&] (thread_info *thread)
2728 {
2729 return not_stopped_callback (thread, wait_ptid);
2730 };
2731
2732 /* If there are no resumed children left in the set of LWPs we
2733 want to wait for, bail. We can't just block in
2734 waitpid/sigsuspend, because lwps might have been left stopped
2735 in trace-stop state, and we'd be stuck forever waiting for
2736 their status to change (which would only happen if we resumed
2737 them). Even if WNOHANG is set, this return code is preferred
2738 over 0 (below), as it is more detailed. */
2739 if (find_thread (not_stopped) == NULL)
2740 {
2741 if (debug_threads)
2742 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2743 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2744 return -1;
2745 }
2746
2747 /* No interesting event to report to the caller. */
2748 if ((options & WNOHANG))
2749 {
2750 if (debug_threads)
2751 debug_printf ("WNOHANG set, no event found\n");
2752
2753 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2754 return 0;
2755 }
2756
2757 /* Block until we get an event reported with SIGCHLD. */
2758 if (debug_threads)
2759 debug_printf ("sigsuspend'ing\n");
2760
2761 sigsuspend (&prev_mask);
2762 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2763 goto retry;
2764 }
2765
2766 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2767
2768 current_thread = event_thread;
2769
2770 return lwpid_of (event_thread);
2771 }
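
/* To summarize the return convention of wait_for_event_filtered, as
   relied on by the callers below: a positive value is the LWP id of
   the event thread (with current_thread set to match), 0 means WNOHANG
   was given and no event was found, and -1 means no unwaited-for LWPs
   remain in WAIT_PTID's set.  */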
2772
2773 int
2774 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2775 {
2776 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2777 }
2778
2779 /* Select one LWP out of those that have events pending. */
2780
2781 static void
2782 select_event_lwp (struct lwp_info **orig_lp)
2783 {
2784 struct thread_info *event_thread = NULL;
2785
2786 /* In all-stop, give preference to the LWP that is being
2787 single-stepped. There will be at most one, and it's the LWP that
2788 the core is most interested in. If we didn't do this, then we'd
2789 have to handle pending step SIGTRAPs somehow in case the core
2790 later continues the previously-stepped thread, otherwise we'd
2791 report the pending SIGTRAP, and the core, not having stepped the
2792 thread, wouldn't understand what the trap was for, and therefore
2793 would report it to the user as a random signal. */
2794 if (!non_stop)
2795 {
2796 event_thread = find_thread ([] (thread_info *thread)
2797 {
2798 lwp_info *lp = get_thread_lwp (thread);
2799
2800 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2801 && thread->last_resume_kind == resume_step
2802 && lp->status_pending_p);
2803 });
2804
2805 if (event_thread != NULL)
2806 {
2807 if (debug_threads)
2808 debug_printf ("SEL: Select single-step %s\n",
2809 target_pid_to_str (ptid_of (event_thread)));
2810 }
2811 }
2812 if (event_thread == NULL)
2813 {
2814 /* No single-stepping LWP. Select one at random, out of those
2815 which have had events. */
2816
2817 event_thread = find_thread_in_random ([&] (thread_info *thread)
2818 {
2819 lwp_info *lp = get_thread_lwp (thread);
2820
2821 /* Only resumed LWPs that have an event pending. */
2822 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2823 && lp->status_pending_p);
2824 });
2825 }
2826
2827 if (event_thread != NULL)
2828 {
2829 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2830
2831 /* Switch the event LWP. */
2832 *orig_lp = event_lp;
2833 }
2834 }
2835
2836 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2837    non-NULL.  */
2838
2839 static void
2840 unsuspend_all_lwps (struct lwp_info *except)
2841 {
2842 for_each_thread ([&] (thread_info *thread)
2843 {
2844 lwp_info *lwp = get_thread_lwp (thread);
2845
2846 if (lwp != except)
2847 lwp_suspended_decr (lwp);
2848 });
2849 }
2850
2851 static bool stuck_in_jump_pad_callback (thread_info *thread);
2852 static bool lwp_running (thread_info *thread);
2853
2854 /* Stabilize threads (move out of jump pads).
2855
2856 If a thread is midway collecting a fast tracepoint, we need to
2857 finish the collection and move it out of the jump pad before
2858 reporting the signal.
2859
2860 This avoids recursion while collecting (when a signal arrives
2861 midway, and the signal handler itself collects), which would trash
2862 the trace buffer. In case the user set a breakpoint in a signal
2863    handler, this avoids the backtrace showing the jump pad, etc.
2864 Most importantly, there are certain things we can't do safely if
2865    threads are stopped in a jump pad (or in its callees).  For
2866 example:
2867
2868    - starting a new trace run.  A thread still collecting the
2869    previous run could trash the trace buffer when resumed.  The trace
2870    buffer control structures would have been reset but the thread had
2871    no way to tell.  The thread could even be midway through memcpy'ing
2872    to the buffer, which would mean that when resumed, it would clobber
2873    the trace buffer that had been set up for a new run.
2874
2875 - we can't rewrite/reuse the jump pads for new tracepoints
2876    safely.  Say you do tstart while a thread is stopped midway through
2877 collecting. When the thread is later resumed, it finishes the
2878 collection, and returns to the jump pad, to execute the original
2879 instruction that was under the tracepoint jump at the time the
2880 older run had been started. If the jump pad had been rewritten
2881 since for something else in the new run, the thread would now
2882 execute the wrong / random instructions. */
2883
2884 void
2885 linux_process_target::stabilize_threads ()
2886 {
2887 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2888
2889 if (thread_stuck != NULL)
2890 {
2891 if (debug_threads)
2892 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2893 lwpid_of (thread_stuck));
2894 return;
2895 }
2896
2897 thread_info *saved_thread = current_thread;
2898
2899 stabilizing_threads = 1;
2900
2901 /* Kick 'em all. */
2902 for_each_thread ([this] (thread_info *thread)
2903 {
2904 move_out_of_jump_pad (thread);
2905 });
2906
2907 /* Loop until all are stopped out of the jump pads. */
2908 while (find_thread (lwp_running) != NULL)
2909 {
2910 struct target_waitstatus ourstatus;
2911 struct lwp_info *lwp;
2912 int wstat;
2913
2914       /* Note that we go through the full wait event loop.  While
2915 	 moving threads out of the jump pad, we need to be able to step
2916 over internal breakpoints and such. */
2917 wait_1 (minus_one_ptid, &ourstatus, 0);
2918
2919 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2920 {
2921 lwp = get_thread_lwp (current_thread);
2922
2923 /* Lock it. */
2924 lwp_suspended_inc (lwp);
2925
2926 if (ourstatus.value.sig != GDB_SIGNAL_0
2927 || current_thread->last_resume_kind == resume_stop)
2928 {
2929 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2930 enqueue_one_deferred_signal (lwp, &wstat);
2931 }
2932 }
2933 }
2934
2935 unsuspend_all_lwps (NULL);
2936
2937 stabilizing_threads = 0;
2938
2939 current_thread = saved_thread;
2940
2941 if (debug_threads)
2942 {
2943 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2944
2945 if (thread_stuck != NULL)
2946 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2947 lwpid_of (thread_stuck));
2948 }
2949 }
2950
2951 /* Convenience function that is called when the kernel reports an
2952 event that is not passed out to GDB. */
2953
2954 static ptid_t
2955 ignore_event (struct target_waitstatus *ourstatus)
2956 {
2957 /* If we got an event, there may still be others, as a single
2958 SIGCHLD can indicate more than one child stopped. This forces
2959 another target_wait call. */
2960 async_file_mark ();
2961
2962 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2963 return null_ptid;
2964 }
2965
2966 /* Convenience function that is called when the kernel reports an exit
2967 event. This decides whether to report the event to GDB as a
2968 process exit event, a thread exit event, or to suppress the
2969 event. */
2970
2971 static ptid_t
2972 filter_exit_event (struct lwp_info *event_child,
2973 struct target_waitstatus *ourstatus)
2974 {
2975 client_state &cs = get_client_state ();
2976 struct thread_info *thread = get_lwp_thread (event_child);
2977 ptid_t ptid = ptid_of (thread);
2978
2979 if (!last_thread_of_process_p (pid_of (thread)))
2980 {
2981 if (cs.report_thread_events)
2982 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2983 else
2984 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2985
2986 delete_lwp (event_child);
2987 }
2988 return ptid;
2989 }
2990
2991 /* Returns 1 if GDB is interested in any event_child syscalls. */
2992
2993 static int
2994 gdb_catching_syscalls_p (struct lwp_info *event_child)
2995 {
2996 struct thread_info *thread = get_lwp_thread (event_child);
2997 struct process_info *proc = get_thread_process (thread);
2998
2999 return !proc->syscalls_to_catch.empty ();
3000 }
3001
3002 /* Returns 1 if GDB is interested in the event_child syscall.
3003 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3004
3005 static int
3006 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3007 {
3008 int sysno;
3009 struct thread_info *thread = get_lwp_thread (event_child);
3010 struct process_info *proc = get_thread_process (thread);
3011
3012 if (proc->syscalls_to_catch.empty ())
3013 return 0;
3014
3015 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3016 return 1;
3017
3018 get_syscall_trapinfo (event_child, &sysno);
3019
3020 for (int iter : proc->syscalls_to_catch)
3021 if (iter == sysno)
3022 return 1;
3023
3024 return 0;
3025 }
3026
3027 ptid_t
3028 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3029 int target_options)
3030 {
3031 client_state &cs = get_client_state ();
3032 int w;
3033 struct lwp_info *event_child;
3034 int options;
3035 int pid;
3036 int step_over_finished;
3037 int bp_explains_trap;
3038 int maybe_internal_trap;
3039 int report_to_gdb;
3040 int trace_event;
3041 int in_step_range;
3042 int any_resumed;
3043
3044 if (debug_threads)
3045 {
3046 debug_enter ();
3047 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3048 }
3049
3050 /* Translate generic target options into linux options. */
3051 options = __WALL;
3052 if (target_options & TARGET_WNOHANG)
3053 options |= WNOHANG;
3054
3055 bp_explains_trap = 0;
3056 trace_event = 0;
3057 in_step_range = 0;
3058 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3059
3060 auto status_pending_p_any = [&] (thread_info *thread)
3061 {
3062 return status_pending_p_callback (thread, minus_one_ptid);
3063 };
3064
3065 auto not_stopped = [&] (thread_info *thread)
3066 {
3067 return not_stopped_callback (thread, minus_one_ptid);
3068 };
3069
3070 /* Find a resumed LWP, if any. */
3071 if (find_thread (status_pending_p_any) != NULL)
3072 any_resumed = 1;
3073 else if (find_thread (not_stopped) != NULL)
3074 any_resumed = 1;
3075 else
3076 any_resumed = 0;
3077
3078 if (step_over_bkpt == null_ptid)
3079 pid = wait_for_event (ptid, &w, options);
3080 else
3081 {
3082 if (debug_threads)
3083 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3084 target_pid_to_str (step_over_bkpt));
3085 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3086 }
3087
3088 if (pid == 0 || (pid == -1 && !any_resumed))
3089 {
3090 gdb_assert (target_options & TARGET_WNOHANG);
3091
3092 if (debug_threads)
3093 {
3094 debug_printf ("wait_1 ret = null_ptid, "
3095 "TARGET_WAITKIND_IGNORE\n");
3096 debug_exit ();
3097 }
3098
3099 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3100 return null_ptid;
3101 }
3102 else if (pid == -1)
3103 {
3104 if (debug_threads)
3105 {
3106 debug_printf ("wait_1 ret = null_ptid, "
3107 "TARGET_WAITKIND_NO_RESUMED\n");
3108 debug_exit ();
3109 }
3110
3111 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3112 return null_ptid;
3113 }
3114
3115 event_child = get_thread_lwp (current_thread);
3116
3117 /* wait_for_event only returns an exit status for the last
3118 child of a process. Report it. */
3119 if (WIFEXITED (w) || WIFSIGNALED (w))
3120 {
3121 if (WIFEXITED (w))
3122 {
3123 ourstatus->kind = TARGET_WAITKIND_EXITED;
3124 ourstatus->value.integer = WEXITSTATUS (w);
3125
3126 if (debug_threads)
3127 {
3128 debug_printf ("wait_1 ret = %s, exited with "
3129 "retcode %d\n",
3130 target_pid_to_str (ptid_of (current_thread)),
3131 WEXITSTATUS (w));
3132 debug_exit ();
3133 }
3134 }
3135 else
3136 {
3137 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3138 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3139
3140 if (debug_threads)
3141 {
3142 debug_printf ("wait_1 ret = %s, terminated with "
3143 "signal %d\n",
3144 target_pid_to_str (ptid_of (current_thread)),
3145 WTERMSIG (w));
3146 debug_exit ();
3147 }
3148 }
3149
3150 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3151 return filter_exit_event (event_child, ourstatus);
3152
3153 return ptid_of (current_thread);
3154 }
3155
3156 /* If step-over executes a breakpoint instruction, in the case of a
3157 hardware single step it means a gdb/gdbserver breakpoint had been
3158      planted on top of a permanent breakpoint; in the case of a software
3159 single step it may just mean that gdbserver hit the reinsert breakpoint.
3160 The PC has been adjusted by save_stop_reason to point at
3161 the breakpoint address.
3162      So, in the case of hardware single step, advance the PC manually
3163      past the breakpoint, and in the case of software single step, advance
3164      only if it's not the single_step_breakpoint we are hitting.
3165      This prevents the program from trapping on a permanent breakpoint
3166      forever.  */
3167 if (step_over_bkpt != null_ptid
3168 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3169 && (event_child->stepping
3170 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3171 {
3172 int increment_pc = 0;
3173 int breakpoint_kind = 0;
3174 CORE_ADDR stop_pc = event_child->stop_pc;
3175
3176 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3177 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3178
3179 if (debug_threads)
3180 {
3181 debug_printf ("step-over for %s executed software breakpoint\n",
3182 target_pid_to_str (ptid_of (current_thread)));
3183 }
3184
3185 if (increment_pc != 0)
3186 {
3187 struct regcache *regcache
3188 = get_thread_regcache (current_thread, 1);
3189
3190 event_child->stop_pc += increment_pc;
3191 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3192
3193 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3194 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3195 }
3196 }
3197
3198 /* If this event was not handled before, and is not a SIGTRAP, we
3199 report it. SIGILL and SIGSEGV are also treated as traps in case
3200 a breakpoint is inserted at the current PC. If this target does
3201 not support internal breakpoints at all, we also report the
3202 SIGTRAP without further processing; it's of no concern to us. */
3203 maybe_internal_trap
3204 = (supports_breakpoints ()
3205 && (WSTOPSIG (w) == SIGTRAP
3206 || ((WSTOPSIG (w) == SIGILL
3207 || WSTOPSIG (w) == SIGSEGV)
3208 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3209
3210 if (maybe_internal_trap)
3211 {
3212 /* Handle anything that requires bookkeeping before deciding to
3213 report the event or continue waiting. */
3214
3215 /* First check if we can explain the SIGTRAP with an internal
3216 breakpoint, or if we should possibly report the event to GDB.
3217 Do this before anything that may remove or insert a
3218 breakpoint. */
3219 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3220
3221 /* We have a SIGTRAP, possibly a step-over dance has just
3222 finished. If so, tweak the state machine accordingly,
3223 reinsert breakpoints and delete any single-step
3224 breakpoints. */
3225 step_over_finished = finish_step_over (event_child);
3226
3227 /* Now invoke the callbacks of any internal breakpoints there. */
3228 check_breakpoints (event_child->stop_pc);
3229
3230 /* Handle tracepoint data collecting. This may overflow the
3231 trace buffer, and cause a tracing stop, removing
3232 breakpoints. */
3233 trace_event = handle_tracepoints (event_child);
3234
3235 if (bp_explains_trap)
3236 {
3237 if (debug_threads)
3238 debug_printf ("Hit a gdbserver breakpoint.\n");
3239 }
3240 }
3241 else
3242 {
3243 /* We have some other signal, possibly a step-over dance was in
3244 progress, and it should be cancelled too. */
3245 step_over_finished = finish_step_over (event_child);
3246 }
3247
3248 /* We have all the data we need. Either report the event to GDB, or
3249 resume threads and keep waiting for more. */
3250
3251 /* If we're collecting a fast tracepoint, finish the collection and
3252 move out of the jump pad before delivering a signal. See
3253 linux_stabilize_threads. */
3254
3255 if (WIFSTOPPED (w)
3256 && WSTOPSIG (w) != SIGTRAP
3257 && supports_fast_tracepoints ()
3258 && agent_loaded_p ())
3259 {
3260 if (debug_threads)
3261 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3262 "to defer or adjust it.\n",
3263 WSTOPSIG (w), lwpid_of (current_thread));
3264
3265 /* Allow debugging the jump pad itself. */
3266 if (current_thread->last_resume_kind != resume_step
3267 && maybe_move_out_of_jump_pad (event_child, &w))
3268 {
3269 enqueue_one_deferred_signal (event_child, &w);
3270
3271 if (debug_threads)
3272 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3273 WSTOPSIG (w), lwpid_of (current_thread));
3274
3275 linux_resume_one_lwp (event_child, 0, 0, NULL);
3276
3277 if (debug_threads)
3278 debug_exit ();
3279 return ignore_event (ourstatus);
3280 }
3281 }
3282
3283 if (event_child->collecting_fast_tracepoint
3284 != fast_tpoint_collect_result::not_collecting)
3285 {
3286 if (debug_threads)
3287 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3288 "Check if we're already there.\n",
3289 lwpid_of (current_thread),
3290 (int) event_child->collecting_fast_tracepoint);
3291
3292 trace_event = 1;
3293
3294 event_child->collecting_fast_tracepoint
3295 = linux_fast_tracepoint_collecting (event_child, NULL);
3296
3297 if (event_child->collecting_fast_tracepoint
3298 != fast_tpoint_collect_result::before_insn)
3299 {
3300 /* No longer need this breakpoint. */
3301 if (event_child->exit_jump_pad_bkpt != NULL)
3302 {
3303 if (debug_threads)
3304 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3305 "stopping all threads momentarily.\n");
3306
3307 /* Other running threads could hit this breakpoint.
3308 We don't handle moribund locations like GDB does,
3309 instead we always pause all threads when removing
3310 breakpoints, so that any step-over or
3311 decr_pc_after_break adjustment is always taken
3312 care of while the breakpoint is still
3313 inserted. */
3314 stop_all_lwps (1, event_child);
3315
3316 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3317 event_child->exit_jump_pad_bkpt = NULL;
3318
3319 unstop_all_lwps (1, event_child);
3320
3321 gdb_assert (event_child->suspended >= 0);
3322 }
3323 }
3324
3325 if (event_child->collecting_fast_tracepoint
3326 == fast_tpoint_collect_result::not_collecting)
3327 {
3328 if (debug_threads)
3329 debug_printf ("fast tracepoint finished "
3330 "collecting successfully.\n");
3331
3332 /* We may have a deferred signal to report. */
3333 if (dequeue_one_deferred_signal (event_child, &w))
3334 {
3335 if (debug_threads)
3336 debug_printf ("dequeued one signal.\n");
3337 }
3338 else
3339 {
3340 if (debug_threads)
3341 debug_printf ("no deferred signals.\n");
3342
3343 if (stabilizing_threads)
3344 {
3345 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3346 ourstatus->value.sig = GDB_SIGNAL_0;
3347
3348 if (debug_threads)
3349 {
3350 debug_printf ("wait_1 ret = %s, stopped "
3351 "while stabilizing threads\n",
3352 target_pid_to_str (ptid_of (current_thread)));
3353 debug_exit ();
3354 }
3355
3356 return ptid_of (current_thread);
3357 }
3358 }
3359 }
3360 }
3361
3362 /* Check whether GDB would be interested in this event. */
3363
3364 /* Check if GDB is interested in this syscall. */
3365 if (WIFSTOPPED (w)
3366 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3367 && !gdb_catch_this_syscall_p (event_child))
3368 {
3369 if (debug_threads)
3370 {
3371 debug_printf ("Ignored syscall for LWP %ld.\n",
3372 lwpid_of (current_thread));
3373 }
3374
3375 linux_resume_one_lwp (event_child, event_child->stepping,
3376 0, NULL);
3377
3378 if (debug_threads)
3379 debug_exit ();
3380 return ignore_event (ourstatus);
3381 }
3382
3383 /* If GDB is not interested in this signal, don't stop other
3384 threads, and don't report it to GDB. Just resume the inferior
3385 right away. We do this for threading-related signals as well as
3386 any that GDB specifically requested we ignore. But never ignore
3387 SIGSTOP if we sent it ourselves, and do not ignore signals when
3388 stepping - they may require special handling to skip the signal
3389 handler. Also never ignore signals that could be caused by a
3390 breakpoint. */
3391 if (WIFSTOPPED (w)
3392 && current_thread->last_resume_kind != resume_step
3393 && (
3394 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3395 (current_process ()->priv->thread_db != NULL
3396 && (WSTOPSIG (w) == __SIGRTMIN
3397 || WSTOPSIG (w) == __SIGRTMIN + 1))
3398 ||
3399 #endif
3400 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3401 && !(WSTOPSIG (w) == SIGSTOP
3402 && current_thread->last_resume_kind == resume_stop)
3403 && !linux_wstatus_maybe_breakpoint (w))))
3404 {
3405 siginfo_t info, *info_p;
3406
3407 if (debug_threads)
3408 debug_printf ("Ignored signal %d for LWP %ld.\n",
3409 WSTOPSIG (w), lwpid_of (current_thread));
3410
3411 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3412 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3413 info_p = &info;
3414 else
3415 info_p = NULL;
3416
3417 if (step_over_finished)
3418 {
3419 /* We cancelled this thread's step-over above. We still
3420 need to unsuspend all other LWPs, and set them back
3421 running again while the signal handler runs. */
3422 unsuspend_all_lwps (event_child);
3423
3424 /* Enqueue the pending signal info so that proceed_all_lwps
3425 doesn't lose it. */
3426 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3427
3428 proceed_all_lwps ();
3429 }
3430 else
3431 {
3432 linux_resume_one_lwp (event_child, event_child->stepping,
3433 WSTOPSIG (w), info_p);
3434 }
3435
3436 if (debug_threads)
3437 debug_exit ();
3438
3439 return ignore_event (ourstatus);
3440 }
3441
3442 /* Note that all addresses are always "out of the step range" when
3443 there's no range to begin with. */
3444 in_step_range = lwp_in_step_range (event_child);
3445
3446 /* If GDB wanted this thread to single step, and the thread is out
3447 of the step range, we always want to report the SIGTRAP, and let
3448 GDB handle it. Watchpoints should always be reported. So should
3449 signals we can't explain. A SIGTRAP we can't explain could be a
3450      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3451      we do, we'll be able to handle GDB breakpoints on top of internal
3452 breakpoints, by handling the internal breakpoint and still
3453 reporting the event to GDB. If we don't, we're out of luck, GDB
3454 won't see the breakpoint hit. If we see a single-step event but
3455 the thread should be continuing, don't pass the trap to gdb.
3456 That indicates that we had previously finished a single-step but
3457 left the single-step pending -- see
3458 complete_ongoing_step_over. */
3459 report_to_gdb = (!maybe_internal_trap
3460 || (current_thread->last_resume_kind == resume_step
3461 && !in_step_range)
3462 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3463 || (!in_step_range
3464 && !bp_explains_trap
3465 && !trace_event
3466 && !step_over_finished
3467 && !(current_thread->last_resume_kind == resume_continue
3468 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3469 || (gdb_breakpoint_here (event_child->stop_pc)
3470 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3471 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3472 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3473
3474 run_breakpoint_commands (event_child->stop_pc);
3475
3476 /* We found no reason GDB would want us to stop. We either hit one
3477 of our own breakpoints, or finished an internal step GDB
3478 shouldn't know about. */
3479 if (!report_to_gdb)
3480 {
3481 if (debug_threads)
3482 {
3483 if (bp_explains_trap)
3484 debug_printf ("Hit a gdbserver breakpoint.\n");
3485 if (step_over_finished)
3486 debug_printf ("Step-over finished.\n");
3487 if (trace_event)
3488 debug_printf ("Tracepoint event.\n");
3489 if (lwp_in_step_range (event_child))
3490 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3491 paddress (event_child->stop_pc),
3492 paddress (event_child->step_range_start),
3493 paddress (event_child->step_range_end));
3494 }
3495
3496 /* We're not reporting this breakpoint to GDB, so apply the
3497 decr_pc_after_break adjustment to the inferior's regcache
3498 ourselves. */
3499
3500 if (the_low_target.set_pc != NULL)
3501 {
3502 struct regcache *regcache
3503 = get_thread_regcache (current_thread, 1);
3504 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3505 }
3506
3507 if (step_over_finished)
3508 {
3509 /* If we have finished stepping over a breakpoint, we've
3510 stopped and suspended all LWPs momentarily except the
3511 stepping one. This is where we resume them all again.
3512 We're going to keep waiting, so use proceed, which
3513 handles stepping over the next breakpoint. */
3514 unsuspend_all_lwps (event_child);
3515 }
3516 else
3517 {
3518 	  /* Remove the single-step breakpoints if any.  Note that
3519 	     there is no single-step breakpoint if we finished stepping
3520 	     over.  */
3521 if (can_software_single_step ()
3522 && has_single_step_breakpoints (current_thread))
3523 {
3524 stop_all_lwps (0, event_child);
3525 delete_single_step_breakpoints (current_thread);
3526 unstop_all_lwps (0, event_child);
3527 }
3528 }
3529
3530 if (debug_threads)
3531 debug_printf ("proceeding all threads.\n");
3532 proceed_all_lwps ();
3533
3534 if (debug_threads)
3535 debug_exit ();
3536
3537 return ignore_event (ourstatus);
3538 }
3539
3540 if (debug_threads)
3541 {
3542 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3543 {
3544 std::string str
3545 = target_waitstatus_to_string (&event_child->waitstatus);
3546
3547 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3548 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3549 }
3550 if (current_thread->last_resume_kind == resume_step)
3551 {
3552 if (event_child->step_range_start == event_child->step_range_end)
3553 debug_printf ("GDB wanted to single-step, reporting event.\n");
3554 else if (!lwp_in_step_range (event_child))
3555 debug_printf ("Out of step range, reporting event.\n");
3556 }
3557 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3558 debug_printf ("Stopped by watchpoint.\n");
3559 else if (gdb_breakpoint_here (event_child->stop_pc))
3560 debug_printf ("Stopped by GDB breakpoint.\n");
3561 if (debug_threads)
3562 debug_printf ("Hit a non-gdbserver trap event.\n");
3563 }
3564
3565 /* Alright, we're going to report a stop. */
3566
3567 /* Remove single-step breakpoints. */
3568 if (can_software_single_step ())
3569 {
3570       /* Decide whether to remove single-step breakpoints: if we do,
3571 	 stop all lwps, so that other threads won't hit the breakpoint
3572 	 left in the stale memory.  */
3573 int remove_single_step_breakpoints_p = 0;
3574
3575 if (non_stop)
3576 {
3577 remove_single_step_breakpoints_p
3578 = has_single_step_breakpoints (current_thread);
3579 }
3580 else
3581 {
3582 /* In all-stop, a stop reply cancels all previous resume
3583 requests. Delete all single-step breakpoints. */
3584
3585 find_thread ([&] (thread_info *thread) {
3586 if (has_single_step_breakpoints (thread))
3587 {
3588 remove_single_step_breakpoints_p = 1;
3589 return true;
3590 }
3591
3592 return false;
3593 });
3594 }
3595
3596 if (remove_single_step_breakpoints_p)
3597 {
3598 /* If we remove single-step breakpoints from memory, stop all lwps,
3599 	 so that other threads won't hit the breakpoint in the stale
3600 memory. */
3601 stop_all_lwps (0, event_child);
3602
3603 if (non_stop)
3604 {
3605 gdb_assert (has_single_step_breakpoints (current_thread));
3606 delete_single_step_breakpoints (current_thread);
3607 }
3608 else
3609 {
3610 for_each_thread ([] (thread_info *thread){
3611 if (has_single_step_breakpoints (thread))
3612 delete_single_step_breakpoints (thread);
3613 });
3614 }
3615
3616 unstop_all_lwps (0, event_child);
3617 }
3618 }
3619
3620 if (!stabilizing_threads)
3621 {
3622 /* In all-stop, stop all threads. */
3623 if (!non_stop)
3624 stop_all_lwps (0, NULL);
3625
3626 if (step_over_finished)
3627 {
3628 if (!non_stop)
3629 {
3630 /* If we were doing a step-over, all other threads but
3631 the stepping one had been paused in start_step_over,
3632 with their suspend counts incremented. We don't want
3633 to do a full unstop/unpause, because we're in
3634 all-stop mode (so we want threads stopped), but we
3635 still need to unsuspend the other threads, to
3636 decrement their `suspended' count back. */
3637 unsuspend_all_lwps (event_child);
3638 }
3639 else
3640 {
3641 /* If we just finished a step-over, then all threads had
3642 been momentarily paused. In all-stop, that's fine,
3643 we want threads stopped by now anyway. In non-stop,
3644 we need to re-resume threads that GDB wanted to be
3645 running. */
3646 unstop_all_lwps (1, event_child);
3647 }
3648 }
3649
3650 /* If we're not waiting for a specific LWP, choose an event LWP
3651 from among those that have had events. Giving equal priority
3652 to all LWPs that have had events helps prevent
3653 starvation. */
3654 if (ptid == minus_one_ptid)
3655 {
3656 event_child->status_pending_p = 1;
3657 event_child->status_pending = w;
3658
3659 select_event_lwp (&event_child);
3660
3661 /* current_thread and event_child must stay in sync. */
3662 current_thread = get_lwp_thread (event_child);
3663
3664 event_child->status_pending_p = 0;
3665 w = event_child->status_pending;
3666 }
3667
3668
3669 /* Stabilize threads (move out of jump pads). */
3670 if (!non_stop)
3671 target_stabilize_threads ();
3672 }
3673 else
3674 {
3675 /* If we just finished a step-over, then all threads had been
3676 momentarily paused. In all-stop, that's fine, we want
3677 threads stopped by now anyway. In non-stop, we need to
3678 re-resume threads that GDB wanted to be running. */
3679 if (step_over_finished)
3680 unstop_all_lwps (1, event_child);
3681 }
3682
3683 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3684 {
3685 /* If the reported event is an exit, fork, vfork or exec, let
3686 GDB know. */
3687
3688 /* Break the unreported fork relationship chain. */
3689 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3690 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3691 {
3692 event_child->fork_relative->fork_relative = NULL;
3693 event_child->fork_relative = NULL;
3694 }
3695
3696 *ourstatus = event_child->waitstatus;
3697 /* Clear the event lwp's waitstatus since we handled it already. */
3698 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3699 }
3700 else
3701 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3702
3703 /* Now that we've selected our final event LWP, un-adjust its PC if
3704 it was a software breakpoint, and the client doesn't know we can
3705 adjust the breakpoint ourselves. */
3706 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3707 && !cs.swbreak_feature)
3708 {
3709 int decr_pc = the_low_target.decr_pc_after_break;
3710
3711 if (decr_pc != 0)
3712 {
3713 struct regcache *regcache
3714 = get_thread_regcache (current_thread, 1);
3715 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3716 }
3717 }
3718
3719 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3720 {
3721 get_syscall_trapinfo (event_child,
3722 &ourstatus->value.syscall_number);
3723 ourstatus->kind = event_child->syscall_state;
3724 }
3725 else if (current_thread->last_resume_kind == resume_stop
3726 && WSTOPSIG (w) == SIGSTOP)
3727 {
3728 /* The thread was requested to stop by GDB with vCont;t and it
3729 stopped cleanly, so report it as SIG0. The use of
3730 SIGSTOP is an implementation detail. */
3731 ourstatus->value.sig = GDB_SIGNAL_0;
3732 }
3733 else if (current_thread->last_resume_kind == resume_stop
3734 && WSTOPSIG (w) != SIGSTOP)
3735 {
3736 /* The thread was requested to stop by GDB with vCont;t, but it
3737 stopped for some other reason. */
3738 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3739 }
3740 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3741 {
3742 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3743 }
3744
3745 gdb_assert (step_over_bkpt == null_ptid);
3746
3747 if (debug_threads)
3748 {
3749 debug_printf ("wait_1 ret = %s, %d, %d\n",
3750 target_pid_to_str (ptid_of (current_thread)),
3751 ourstatus->kind, ourstatus->value.sig);
3752 debug_exit ();
3753 }
3754
3755 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3756 return filter_exit_event (event_child, ourstatus);
3757
3758 return ptid_of (current_thread);
3759 }
3760
3761 /* Get rid of any pending event in the pipe. */
3762 static void
3763 async_file_flush (void)
3764 {
3765 int ret;
3766 char buf;
3767
3768 do
3769 ret = read (linux_event_pipe[0], &buf, 1);
3770 while (ret >= 0 || (ret == -1 && errno == EINTR));
3771 }
3772
3773 /* Put something in the pipe, so the event loop wakes up. */
3774 static void
3775 async_file_mark (void)
3776 {
3777 int ret;
3778
3779 async_file_flush ();
3780
3781 do
3782 ret = write (linux_event_pipe[1], "+", 1);
3783 while (ret == 0 || (ret == -1 && errno == EINTR));
3784
3785 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3786 be awakened anyway. */
3787 }
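/* Illustrative sketch (not part of the original file): the two helpers
   above implement the classic self-pipe wake-up trick.  This assumes
   both pipe ends were made O_NONBLOCK when the pipe was created, which
   is why the read/write loops above can spin until EAGAIN without ever
   blocking.  The names below are hypothetical.  */
#if 0
static int demo_event_pipe[2];

static void
demo_event_pipe_init (void)
{
  if (pipe (demo_event_pipe) != 0)
    perror_with_name ("pipe");

  /* Non-blocking on both ends, so a flush can drain until EAGAIN and
     a mark can ignore a full pipe.  */
  fcntl (demo_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (demo_event_pipe[1], F_SETFL, O_NONBLOCK);
}
#endif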
3788
3789 ptid_t
3790 linux_process_target::wait (ptid_t ptid,
3791 target_waitstatus *ourstatus,
3792 int target_options)
3793 {
3794 ptid_t event_ptid;
3795
3796 /* Flush the async file first. */
3797 if (target_is_async_p ())
3798 async_file_flush ();
3799
3800 do
3801 {
3802 event_ptid = wait_1 (ptid, ourstatus, target_options);
3803 }
3804 while ((target_options & TARGET_WNOHANG) == 0
3805 && event_ptid == null_ptid
3806 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3807
3808 /* If at least one stop was reported, there may be more. A single
3809 SIGCHLD can signal more than one child stop. */
3810 if (target_is_async_p ()
3811 && (target_options & TARGET_WNOHANG) != 0
3812 && event_ptid != null_ptid)
3813 async_file_mark ();
3814
3815 return event_ptid;
3816 }
3817
3818 /* Send a signal to an LWP. */
3819
3820 static int
3821 kill_lwp (unsigned long lwpid, int signo)
3822 {
3823 int ret;
3824
3825 errno = 0;
3826 ret = syscall (__NR_tkill, lwpid, signo);
3827 if (errno == ENOSYS)
3828 {
3829 /* If tkill fails, then we are not using nptl threads, a
3830 configuration we no longer support. */
3831 perror_with_name (("tkill"));
3832 }
3833 return ret;
3834 }
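/* Illustrative sketch (not part of the original file): unlike kill(2),
   which addresses a whole thread group, tkill(2) delivers a signal to
   a single kernel thread, which is exactly what stopping one LWP
   needs.  Glibc provides no wrapper, hence the raw syscall above.  The
   helper name below is hypothetical.  */
#if 0
static void
demo_sigstop_one_lwp (unsigned long lwpid)
{
  /* SIGSTOP one specific LWP, leaving its siblings running.  */
  if (syscall (__NR_tkill, lwpid, SIGSTOP) == -1)
    perror_with_name (("tkill"));
}
#endif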
3835
3836 void
3837 linux_stop_lwp (struct lwp_info *lwp)
3838 {
3839 send_sigstop (lwp);
3840 }
3841
3842 static void
3843 send_sigstop (struct lwp_info *lwp)
3844 {
3845 int pid;
3846
3847 pid = lwpid_of (get_lwp_thread (lwp));
3848
3849 /* If we already have a pending stop signal for this process, don't
3850 send another. */
3851 if (lwp->stop_expected)
3852 {
3853 if (debug_threads)
3854 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3855
3856 return;
3857 }
3858
3859 if (debug_threads)
3860 debug_printf ("Sending sigstop to lwp %d\n", pid);
3861
3862 lwp->stop_expected = 1;
3863 kill_lwp (pid, SIGSTOP);
3864 }
3865
3866 static void
3867 send_sigstop (thread_info *thread, lwp_info *except)
3868 {
3869 struct lwp_info *lwp = get_thread_lwp (thread);
3870
3871 /* Ignore EXCEPT. */
3872 if (lwp == except)
3873 return;
3874
3875 if (lwp->stopped)
3876 return;
3877
3878 send_sigstop (lwp);
3879 }
3880
3881 /* Increment the suspend count of an LWP, and stop it, if not stopped
3882 yet. */
3883 static void
3884 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3885 {
3886 struct lwp_info *lwp = get_thread_lwp (thread);
3887
3888 /* Ignore EXCEPT. */
3889 if (lwp == except)
3890 return;
3891
3892 lwp_suspended_inc (lwp);
3893
3894 send_sigstop (thread, except);
3895 }
3896
3897 static void
3898 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3899 {
3900 /* Store the exit status for later. */
3901 lwp->status_pending_p = 1;
3902 lwp->status_pending = wstat;
3903
3904 /* Store in waitstatus as well, as there's nothing else to process
3905 for this event. */
3906 if (WIFEXITED (wstat))
3907 {
3908 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3909 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3910 }
3911 else if (WIFSIGNALED (wstat))
3912 {
3913 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3914 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3915 }
3916
3917 /* Prevent trying to stop it. */
3918 lwp->stopped = 1;
3919
3920 /* No further stops are expected from a dead lwp. */
3921 lwp->stop_expected = 0;
3922 }
3923
3924 /* Return true if LWP has exited already, and has a pending exit event
3925 to report to GDB. */
3926
3927 static int
3928 lwp_is_marked_dead (struct lwp_info *lwp)
3929 {
3930 return (lwp->status_pending_p
3931 && (WIFEXITED (lwp->status_pending)
3932 || WIFSIGNALED (lwp->status_pending)));
3933 }
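/* Illustrative sketch (not part of the original file): how a raw wait
   status like the WSTAT stored above is decoded with the standard
   macros from <sys/wait.h> (via gdbsupport/gdb_wait.h here).  The
   function name is hypothetical.  */
#if 0
static void
demo_decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    /* Normal exit; WEXITSTATUS is the code passed to exit(3).  */
    debug_printf ("exited, status=%d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    /* Terminated by a signal; WTERMSIG identifies which one.  */
    debug_printf ("killed, signal=%d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    /* Ptrace-stopped; WSTOPSIG is the stopping signal.  */
    debug_printf ("stopped, signal=%d\n", WSTOPSIG (wstat));
}
#endif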
3934
3935 void
3936 linux_process_target::wait_for_sigstop ()
3937 {
3938 struct thread_info *saved_thread;
3939 ptid_t saved_tid;
3940 int wstat;
3941 int ret;
3942
3943 saved_thread = current_thread;
3944 if (saved_thread != NULL)
3945 saved_tid = saved_thread->id;
3946 else
3947 saved_tid = null_ptid; /* avoid bogus unused warning */
3948
3949 if (debug_threads)
3950 debug_printf ("wait_for_sigstop: pulling events\n");
3951
3952 /* Passing NULL_PTID as filter indicates we want all events to be
3953 left pending. Eventually this returns when there are no
3954 unwaited-for children left. */
3955 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3956 gdb_assert (ret == -1);
3957
3958 if (saved_thread == NULL || mythread_alive (saved_tid))
3959 current_thread = saved_thread;
3960 else
3961 {
3962 if (debug_threads)
3963 debug_printf ("Previously current thread died.\n");
3964
3965 /* We can't change the current inferior behind GDB's back,
3966 otherwise, a subsequent command may apply to the wrong
3967 process. */
3968 current_thread = NULL;
3969 }
3970 }
3971
3972 /* Returns true if THREAD is stopped in a jump pad, and we can't
3973 move it out, because we need to report the stop event to GDB. For
3974 example, if the user puts a breakpoint in the jump pad, it's
3975 because she wants to debug it. */
3976
3977 static bool
3978 stuck_in_jump_pad_callback (thread_info *thread)
3979 {
3980 struct lwp_info *lwp = get_thread_lwp (thread);
3981
3982 if (lwp->suspended != 0)
3983 {
3984 internal_error (__FILE__, __LINE__,
3985 "LWP %ld is suspended, suspended=%d\n",
3986 lwpid_of (thread), lwp->suspended);
3987 }
3988 gdb_assert (lwp->stopped);
3989
3990 /* Allow debugging the jump pad, gdb_collect, etc.. */
3991 return (supports_fast_tracepoints ()
3992 && agent_loaded_p ()
3993 && (gdb_breakpoint_here (lwp->stop_pc)
3994 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3995 || thread->last_resume_kind == resume_step)
3996 && (linux_fast_tracepoint_collecting (lwp, NULL)
3997 != fast_tpoint_collect_result::not_collecting));
3998 }
3999
4000 void
4001 linux_process_target::move_out_of_jump_pad (thread_info *thread)
4002 {
4003 struct thread_info *saved_thread;
4004 struct lwp_info *lwp = get_thread_lwp (thread);
4005 int *wstat;
4006
4007 if (lwp->suspended != 0)
4008 {
4009 internal_error (__FILE__, __LINE__,
4010 "LWP %ld is suspended, suspended=%d\n",
4011 lwpid_of (thread), lwp->suspended);
4012 }
4013 gdb_assert (lwp->stopped);
4014
4015 /* For gdb_breakpoint_here. */
4016 saved_thread = current_thread;
4017 current_thread = thread;
4018
4019 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4020
4021 /* Allow debugging the jump pad, gdb_collect, etc. */
4022 if (!gdb_breakpoint_here (lwp->stop_pc)
4023 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4024 && thread->last_resume_kind != resume_step
4025 && maybe_move_out_of_jump_pad (lwp, wstat))
4026 {
4027 if (debug_threads)
4028 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4029 lwpid_of (thread));
4030
4031 if (wstat)
4032 {
4033 lwp->status_pending_p = 0;
4034 enqueue_one_deferred_signal (lwp, wstat);
4035
4036 if (debug_threads)
4037 debug_printf ("Signal %d for LWP %ld deferred "
4038 "(in jump pad)\n",
4039 WSTOPSIG (*wstat), lwpid_of (thread));
4040 }
4041
4042 linux_resume_one_lwp (lwp, 0, 0, NULL);
4043 }
4044 else
4045 lwp_suspended_inc (lwp);
4046
4047 current_thread = saved_thread;
4048 }
4049
4050 static bool
4051 lwp_running (thread_info *thread)
4052 {
4053 struct lwp_info *lwp = get_thread_lwp (thread);
4054
4055 if (lwp_is_marked_dead (lwp))
4056 return false;
4057
4058 return !lwp->stopped;
4059 }
4060
4061 void
4062 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4063 {
4064 /* Should not be called recursively. */
4065 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4066
4067 if (debug_threads)
4068 {
4069 debug_enter ();
4070 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4071 suspend ? "stop-and-suspend" : "stop",
4072 except != NULL
4073 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4074 : "none");
4075 }
4076
4077 stopping_threads = (suspend
4078 ? STOPPING_AND_SUSPENDING_THREADS
4079 : STOPPING_THREADS);
4080
4081 if (suspend)
4082 for_each_thread ([&] (thread_info *thread)
4083 {
4084 suspend_and_send_sigstop (thread, except);
4085 });
4086 else
4087 for_each_thread ([&] (thread_info *thread)
4088 {
4089 send_sigstop (thread, except);
4090 });
4091
4092 wait_for_sigstop ();
4093 stopping_threads = NOT_STOPPING_THREADS;
4094
4095 if (debug_threads)
4096 {
4097 debug_printf ("stop_all_lwps done, setting stopping_threads "
4098 "back to !stopping\n");
4099 debug_exit ();
4100 }
4101 }
4102
4103 /* Enqueue one signal in the chain of signals which need to be
4104 delivered to this process on next resume. */
4105
4106 static void
4107 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4108 {
4109 struct pending_signals *p_sig = XNEW (struct pending_signals);
4110
4111 p_sig->prev = lwp->pending_signals;
4112 p_sig->signal = signal;
4113 if (info == NULL)
4114 memset (&p_sig->info, 0, sizeof (siginfo_t));
4115 else
4116 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4117 lwp->pending_signals = p_sig;
4118 }
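/* Note (added commentary, not in the original file): the list built
   above is stored newest-first, linked through the PREV pointers.
   Consumers walk to the tail and take that entry, so signals are
   delivered in FIFO order; see the dequeue loop in
   linux_resume_one_lwp_throw below.  A hypothetical sketch:  */
#if 0
static int
demo_dequeue_oldest_signal (struct lwp_info *lwp)
{
  struct pending_signals **p = &lwp->pending_signals;
  int sig;

  /* Walk to the tail of the PREV chain: the oldest entry.  */
  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
#endif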
4119
4120 /* Install breakpoints for software single stepping. */
4121
4122 static void
4123 install_software_single_step_breakpoints (struct lwp_info *lwp)
4124 {
4125 struct thread_info *thread = get_lwp_thread (lwp);
4126 struct regcache *regcache = get_thread_regcache (thread, 1);
4127
4128 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4129
4130 current_thread = thread;
4131 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4132
4133 for (CORE_ADDR pc : next_pcs)
4134 set_single_step_breakpoint (pc, current_ptid);
4135 }
4136
4137 /* Single step via hardware or software single step.
4138 Return 1 if hardware single stepping, 0 if software single stepping
4139 or if single-stepping is not supported. */
4140
4141 static int
4142 single_step (struct lwp_info* lwp)
4143 {
4144 int step = 0;
4145
4146 if (can_hardware_single_step ())
4147 {
4148 step = 1;
4149 }
4150 else if (can_software_single_step ())
4151 {
4152 install_software_single_step_breakpoints (lwp);
4153 step = 0;
4154 }
4155 else
4156 {
4157 if (debug_threads)
4158 debug_printf ("stepping is not implemented on this target\n");
4159 }
4160
4161 return step;
4162 }
4163
4164 /* The signal can be delivered to the inferior if we are not trying to
4165 finish a fast tracepoint collect. Since a signal can be delivered
4166 during a step-over, the program may enter the signal handler and
4167 trap again after returning from it. We can live with such
4168 spurious double traps. */
4169
4170 static int
4171 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4172 {
4173 return (lwp->collecting_fast_tracepoint
4174 == fast_tpoint_collect_result::not_collecting);
4175 }
4176
4177 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4178 SIGNAL is nonzero, give it that signal. */
4179
4180 static void
4181 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4182 int step, int signal, siginfo_t *info)
4183 {
4184 struct thread_info *thread = get_lwp_thread (lwp);
4185 struct thread_info *saved_thread;
4186 int ptrace_request;
4187 struct process_info *proc = get_thread_process (thread);
4188
4189 /* Note that target description may not be initialised
4190 (proc->tdesc == NULL) at this point because the program hasn't
4191 stopped at the first instruction yet. It means GDBserver skips
4192 the extra traps from the wrapper program (see option --wrapper).
4193 Code in this function that requires register access should be
4194 guarded by a check on proc->tdesc, or something equivalent. */
4195
4196 if (lwp->stopped == 0)
4197 return;
4198
4199 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4200
4201 fast_tpoint_collect_result fast_tp_collecting
4202 = lwp->collecting_fast_tracepoint;
4203
4204 gdb_assert (!stabilizing_threads
4205 || (fast_tp_collecting
4206 != fast_tpoint_collect_result::not_collecting));
4207
4208 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4209 user used the "jump" command, or "set $pc = foo"). */
4210 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4211 {
4212 /* Collecting 'while-stepping' actions doesn't make sense
4213 anymore. */
4214 release_while_stepping_state_list (thread);
4215 }
4216
4217 /* If we have pending signals or status, and a new signal, enqueue the
4218 signal. Also enqueue the signal if it can't be delivered to the
4219 inferior right now. */
4220 if (signal != 0
4221 && (lwp->status_pending_p
4222 || lwp->pending_signals != NULL
4223 || !lwp_signal_can_be_delivered (lwp)))
4224 {
4225 enqueue_pending_signal (lwp, signal, info);
4226
4227 /* Postpone any pending signal. It was enqueued above. */
4228 signal = 0;
4229 }
4230
4231 if (lwp->status_pending_p)
4232 {
4233 if (debug_threads)
4234 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4235 " has pending status\n",
4236 lwpid_of (thread), step ? "step" : "continue",
4237 lwp->stop_expected ? "expected" : "not expected");
4238 return;
4239 }
4240
4241 saved_thread = current_thread;
4242 current_thread = thread;
4243
4244 /* This bit needs some thinking about. If we get a signal that
4245 we must report while a single-step reinsert is still pending,
4246 we often end up resuming the thread. It might be better to
4247 (ew) allow a stack of pending events; then we could be sure that
4248 the reinsert happened right away and not lose any signals.
4249
4250 Making this stack would also shrink the window in which breakpoints are
4251 uninserted (see comment in linux_wait_for_lwp) but not enough for
4252 complete correctness, so it won't solve that problem. It may be
4253 worthwhile just to solve this one, however. */
4254 if (lwp->bp_reinsert != 0)
4255 {
4256 if (debug_threads)
4257 debug_printf (" pending reinsert at 0x%s\n",
4258 paddress (lwp->bp_reinsert));
4259
4260 if (can_hardware_single_step ())
4261 {
4262 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4263 {
4264 if (step == 0)
4265 warning ("BAD - reinserting but not stepping.");
4266 if (lwp->suspended)
4267 warning ("BAD - reinserting and suspended(%d).",
4268 lwp->suspended);
4269 }
4270 }
4271
4272 step = maybe_hw_step (thread);
4273 }
4274
4275 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4276 {
4277 if (debug_threads)
4278 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4279 " (exit-jump-pad-bkpt)\n",
4280 lwpid_of (thread));
4281 }
4282 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4283 {
4284 if (debug_threads)
4285 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4286 " single-stepping\n",
4287 lwpid_of (thread));
4288
4289 if (can_hardware_single_step ())
4290 step = 1;
4291 else
4292 {
4293 internal_error (__FILE__, __LINE__,
4294 "moving out of jump pad single-stepping"
4295 " not implemented on this target");
4296 }
4297 }
4298
4299 /* If we have while-stepping actions in this thread, set it stepping.
4300 If we have a signal to deliver, it may or may not be set to
4301 SIG_IGN, we don't know. Assume so, and allow collecting
4302 while-stepping into a signal handler. A possible smart thing to
4303 do would be to set an internal breakpoint at the signal return
4304 address, continue, and carry on catching this while-stepping
4305 action only when that breakpoint is hit. A future
4306 enhancement. */
4307 if (thread->while_stepping != NULL)
4308 {
4309 if (debug_threads)
4310 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4311 lwpid_of (thread));
4312
4313 step = single_step (lwp);
4314 }
4315
4316 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4317 {
4318 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4319
4320 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4321
4322 if (debug_threads)
4323 {
4324 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4325 (long) lwp->stop_pc);
4326 }
4327 }
4328
4329 /* If we have pending signals, consume one if it can be delivered to
4330 the inferior. */
4331 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4332 {
4333 struct pending_signals **p_sig;
4334
4335 p_sig = &lwp->pending_signals;
4336 while ((*p_sig)->prev != NULL)
4337 p_sig = &(*p_sig)->prev;
4338
4339 signal = (*p_sig)->signal;
4340 if ((*p_sig)->info.si_signo != 0)
4341 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4342 &(*p_sig)->info);
4343
4344 free (*p_sig);
4345 *p_sig = NULL;
4346 }
4347
4348 if (debug_threads)
4349 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4350 lwpid_of (thread), step ? "step" : "continue", signal,
4351 lwp->stop_expected ? "expected" : "not expected");
4352
4353 if (the_low_target.prepare_to_resume != NULL)
4354 the_low_target.prepare_to_resume (lwp);
4355
4356 regcache_invalidate_thread (thread);
4357 errno = 0;
4358 lwp->stepping = step;
4359 if (step)
4360 ptrace_request = PTRACE_SINGLESTEP;
4361 else if (gdb_catching_syscalls_p (lwp))
4362 ptrace_request = PTRACE_SYSCALL;
4363 else
4364 ptrace_request = PTRACE_CONT;
4365 ptrace (ptrace_request,
4366 lwpid_of (thread),
4367 (PTRACE_TYPE_ARG3) 0,
4368 /* Coerce to a uintptr_t first to avoid potential gcc warning
4369 of coercing an 8 byte integer to a 4 byte pointer. */
4370 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4371
4372 current_thread = saved_thread;
4373 if (errno)
4374 perror_with_name ("resuming thread");
4375
4376 /* Successfully resumed. Clear state that no longer makes sense,
4377 and mark the LWP as running. Must not do this before resuming
4378 otherwise if that fails other code will be confused. E.g., we'd
4379 later try to stop the LWP and hang forever waiting for a stop
4380 status. Note that we must not throw after this is cleared,
4381 otherwise handle_zombie_lwp_error would get confused. */
4382 lwp->stopped = 0;
4383 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4384 }
4385
4386 /* Called when we try to resume a stopped LWP and that errors out. If
4387 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4388 or about to become), discard the error, clear any pending status
4389 the LWP may have, and return true (we'll collect the exit status
4390 soon enough). Otherwise, return false. */
4391
4392 static int
4393 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4394 {
4395 struct thread_info *thread = get_lwp_thread (lp);
4396
4397 /* If we get an error after resuming the LWP successfully, we'd
4398 confuse !T state for the LWP being gone. */
4399 gdb_assert (lp->stopped);
4400
4401 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4402 because even if ptrace failed with ESRCH, the tracee may be "not
4403 yet fully dead", but already refusing ptrace requests. In that
4404 case the tracee has 'R (Running)' state for a little bit
4405 (observed in Linux 3.18). See also the note on ESRCH in the
4406 ptrace(2) man page. Instead, check whether the LWP has any state
4407 other than ptrace-stopped. */
4408
4409 /* Don't assume anything if /proc/PID/status can't be read. */
4410 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4411 {
4412 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4413 lp->status_pending_p = 0;
4414 return 1;
4415 }
4416 return 0;
4417 }
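/* Illustrative sketch (not part of the original file) of the kind of
   check linux_proc_pid_is_trace_stopped_nowarn performs: read the
   "State:" line of /proc/PID/status and test for 't' (tracing stop;
   older kernels report 'T').  The helper below is hypothetical and
   simplified.  */
#if 0
static int
demo_lwp_is_trace_stopped (long lwpid)
{
  char filename[64];
  char line[128];
  FILE *f;
  int stopped = 0;

  sprintf (filename, "/proc/%ld/status", lwpid);
  f = fopen (filename, "r");
  if (f == NULL)
    return -1;		/* Can't read it; don't assume anything.  */

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	stopped = (*p == 't' || *p == 'T');
	break;
      }

  fclose (f);
  return stopped;
}
#endif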
4418
4419 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4420 disappears while we try to resume it. */
4421
4422 static void
4423 linux_resume_one_lwp (struct lwp_info *lwp,
4424 int step, int signal, siginfo_t *info)
4425 {
4426 try
4427 {
4428 linux_resume_one_lwp_throw (lwp, step, signal, info);
4429 }
4430 catch (const gdb_exception_error &ex)
4431 {
4432 if (!check_ptrace_stopped_lwp_gone (lwp))
4433 throw;
4434 }
4435 }
4436
4437 /* This function is called once per thread via for_each_thread.
4438 We look up which resume request applies to THREAD and mark it with a
4439 pointer to the appropriate resume request.
4440
4441 This algorithm is O(threads * resume elements), but resume elements
4442 is small (and will remain small at least until GDB supports thread
4443 suspension). */
4444
4445 static void
4446 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4447 {
4448 struct lwp_info *lwp = get_thread_lwp (thread);
4449
4450 for (int ndx = 0; ndx < n; ndx++)
4451 {
4452 ptid_t ptid = resume[ndx].thread;
4453 if (ptid == minus_one_ptid
4454 || ptid == thread->id
4455 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4456 of PID'. */
4457 || (ptid.pid () == pid_of (thread)
4458 && (ptid.is_pid ()
4459 || ptid.lwp () == -1)))
4460 {
4461 if (resume[ndx].kind == resume_stop
4462 && thread->last_resume_kind == resume_stop)
4463 {
4464 if (debug_threads)
4465 debug_printf ("already %s LWP %ld at GDB's request\n",
4466 (thread->last_status.kind
4467 == TARGET_WAITKIND_STOPPED)
4468 ? "stopped"
4469 : "stopping",
4470 lwpid_of (thread));
4471
4472 continue;
4473 }
4474
4475 /* Ignore (wildcard) resume requests for already-resumed
4476 threads. */
4477 if (resume[ndx].kind != resume_stop
4478 && thread->last_resume_kind != resume_stop)
4479 {
4480 if (debug_threads)
4481 debug_printf ("already %s LWP %ld at GDB's request\n",
4482 (thread->last_resume_kind
4483 == resume_step)
4484 ? "stepping"
4485 : "continuing",
4486 lwpid_of (thread));
4487 continue;
4488 }
4489
4490 /* Don't let wildcard resumes resume fork children that GDB
4491 does not yet know are new fork children. */
4492 if (lwp->fork_relative != NULL)
4493 {
4494 struct lwp_info *rel = lwp->fork_relative;
4495
4496 if (rel->status_pending_p
4497 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4498 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4499 {
4500 if (debug_threads)
4501 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4502 lwpid_of (thread));
4503 continue;
4504 }
4505 }
4506
4507 /* If the thread has a pending event that has already been
4508 reported to GDBserver core, but GDB has not pulled the
4509 event out of the vStopped queue yet, likewise, ignore the
4510 (wildcard) resume request. */
4511 if (in_queued_stop_replies (thread->id))
4512 {
4513 if (debug_threads)
4514 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4515 lwpid_of (thread));
4516 continue;
4517 }
4518
4519 lwp->resume = &resume[ndx];
4520 thread->last_resume_kind = lwp->resume->kind;
4521
4522 lwp->step_range_start = lwp->resume->step_range_start;
4523 lwp->step_range_end = lwp->resume->step_range_end;
4524
4525 /* If we had a deferred signal to report, dequeue one now.
4526 This can happen if LWP gets more than one signal while
4527 trying to get out of a jump pad. */
4528 if (lwp->stopped
4529 && !lwp->status_pending_p
4530 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4531 {
4532 lwp->status_pending_p = 1;
4533
4534 if (debug_threads)
4535 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4536 "leaving status pending.\n",
4537 WSTOPSIG (lwp->status_pending),
4538 lwpid_of (thread));
4539 }
4540
4541 return;
4542 }
4543 }
4544
4545 /* No resume action for this thread. */
4546 lwp->resume = NULL;
4547 }
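/* Worked example (added commentary, not in the original file) of the
   ptid matching above, for a thread whose id is (pid 1234, lwp 5678):
     - minus_one_ptid              matches -- resume everything;
     - (1234, 5678)                matches -- exact thread;
     - (1234, 0), i.e. "p1234"     matches -- all threads of pid 1234;
     - (1234, -1), i.e. "p1234.-1" matches -- same meaning;
     - (4321, 5678)                does not match.  */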
4548
4549 /* find_thread callback for linux_resume. Return true if this lwp has an
4550 interesting status pending. */
4551
4552 static bool
4553 resume_status_pending_p (thread_info *thread)
4554 {
4555 struct lwp_info *lwp = get_thread_lwp (thread);
4556
4557 /* LWPs which will not be resumed are not interesting, because
4558 we might not wait for them next time through linux_wait. */
4559 if (lwp->resume == NULL)
4560 return false;
4561
4562 return thread_still_has_status_pending_p (thread);
4563 }
4564
4565 /* Return true if this lwp that GDB wants running is stopped at an
4566 internal breakpoint that we need to step over. It assumes that any
4567 required STOP_PC adjustment has already been propagated to the
4568 inferior's regcache. */
4569
4570 static bool
4571 need_step_over_p (thread_info *thread)
4572 {
4573 struct lwp_info *lwp = get_thread_lwp (thread);
4574 struct thread_info *saved_thread;
4575 CORE_ADDR pc;
4576 struct process_info *proc = get_thread_process (thread);
4577
4578 /* If GDBserver is skipping the extra traps from the wrapper program,
4579 we don't have to do a step-over. */
4580 if (proc->tdesc == NULL)
4581 return false;
4582
4583 /* LWPs which will not be resumed are not interesting, because we
4584 might not wait for them next time through linux_wait. */
4585
4586 if (!lwp->stopped)
4587 {
4588 if (debug_threads)
4589 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4590 lwpid_of (thread));
4591 return false;
4592 }
4593
4594 if (thread->last_resume_kind == resume_stop)
4595 {
4596 if (debug_threads)
4597 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4598 " stopped\n",
4599 lwpid_of (thread));
4600 return false;
4601 }
4602
4603 gdb_assert (lwp->suspended >= 0);
4604
4605 if (lwp->suspended)
4606 {
4607 if (debug_threads)
4608 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4609 lwpid_of (thread));
4610 return false;
4611 }
4612
4613 if (lwp->status_pending_p)
4614 {
4615 if (debug_threads)
4616 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4617 " status.\n",
4618 lwpid_of (thread));
4619 return false;
4620 }
4621
4622 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4623 or we have. */
4624 pc = get_pc (lwp);
4625
4626 /* If the PC has changed since we stopped, then don't do anything,
4627 and let the breakpoint/tracepoint be hit. This happens if, for
4628 instance, GDB handled the decr_pc_after_break subtraction itself,
4629 GDB is OOL stepping this thread, or the user has issued a "jump"
4630 command, or poked thread's registers herself. */
4631 if (pc != lwp->stop_pc)
4632 {
4633 if (debug_threads)
4634 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4635 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4636 lwpid_of (thread),
4637 paddress (lwp->stop_pc), paddress (pc));
4638 return false;
4639 }
4640
4641 /* On software single step target, resume the inferior with signal
4642 rather than stepping over. */
4643 if (can_software_single_step ()
4644 && lwp->pending_signals != NULL
4645 && lwp_signal_can_be_delivered (lwp))
4646 {
4647 if (debug_threads)
4648 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4649 " signals.\n",
4650 lwpid_of (thread));
4651
4652 return false;
4653 }
4654
4655 saved_thread = current_thread;
4656 current_thread = thread;
4657
4658 /* We can only step over breakpoints we know about. */
4659 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4660 {
4661 /* Don't step over a breakpoint that GDB expects to hit
4662 though. If the condition is being evaluated on the target's side
4663 and it evaluates to false, step over this breakpoint as well. */
4664 if (gdb_breakpoint_here (pc)
4665 && gdb_condition_true_at_breakpoint (pc)
4666 && gdb_no_commands_at_breakpoint (pc))
4667 {
4668 if (debug_threads)
4669 debug_printf ("Need step over [LWP %ld]? yes, but found"
4670 " GDB breakpoint at 0x%s; skipping step over\n",
4671 lwpid_of (thread), paddress (pc));
4672
4673 current_thread = saved_thread;
4674 return false;
4675 }
4676 else
4677 {
4678 if (debug_threads)
4679 debug_printf ("Need step over [LWP %ld]? yes, "
4680 "found breakpoint at 0x%s\n",
4681 lwpid_of (thread), paddress (pc));
4682
4683 /* We've found an lwp that needs stepping over --- return true so
4684 that find_thread stops looking. */
4685 current_thread = saved_thread;
4686
4687 return true;
4688 }
4689 }
4690
4691 current_thread = saved_thread;
4692
4693 if (debug_threads)
4694 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4695 " at 0x%s\n",
4696 lwpid_of (thread), paddress (pc));
4697
4698 return false;
4699 }
4700
4701 void
4702 linux_process_target::start_step_over (lwp_info *lwp)
4703 {
4704 struct thread_info *thread = get_lwp_thread (lwp);
4705 struct thread_info *saved_thread;
4706 CORE_ADDR pc;
4707 int step;
4708
4709 if (debug_threads)
4710 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4711 lwpid_of (thread));
4712
4713 stop_all_lwps (1, lwp);
4714
4715 if (lwp->suspended != 0)
4716 {
4717 internal_error (__FILE__, __LINE__,
4718 "LWP %ld suspended=%d\n", lwpid_of (thread),
4719 lwp->suspended);
4720 }
4721
4722 if (debug_threads)
4723 debug_printf ("Done stopping all threads for step-over.\n");
4724
4725 /* Note, we should always reach here with an already adjusted PC,
4726 either by GDB (if we're resuming due to GDB's request), or by our
4727 caller, if we just finished handling an internal breakpoint GDB
4728 shouldn't care about. */
4729 pc = get_pc (lwp);
4730
4731 saved_thread = current_thread;
4732 current_thread = thread;
4733
4734 lwp->bp_reinsert = pc;
4735 uninsert_breakpoints_at (pc);
4736 uninsert_fast_tracepoint_jumps_at (pc);
4737
4738 step = single_step (lwp);
4739
4740 current_thread = saved_thread;
4741
4742 linux_resume_one_lwp (lwp, step, 0, NULL);
4743
4744 /* Require next event from this LWP. */
4745 step_over_bkpt = thread->id;
4746 }
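/* Overview (added commentary, not in the original file) of a full
   step-over cycle, combining start_step_over above with
   finish_step_over below:
     1. stop_all_lwps (1, lwp): pause and suspend every other thread;
     2. uninsert the breakpoint at PC, restoring the original insn;
     3. single-step LWP, by hardware step or by single-step breakpoints;
     4. when the step completes, finish_step_over reinserts the
        breakpoint and deletes any single-step breakpoints;
     5. the suspended threads are unsuspended/resumed again.  */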
4747
4748 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4749 start_step_over, if still there, and delete any single-step
4750 breakpoints we've set, on non-hardware single-step targets. */
4751
4752 static int
4753 finish_step_over (struct lwp_info *lwp)
4754 {
4755 if (lwp->bp_reinsert != 0)
4756 {
4757 struct thread_info *saved_thread = current_thread;
4758
4759 if (debug_threads)
4760 debug_printf ("Finished step over.\n");
4761
4762 current_thread = get_lwp_thread (lwp);
4763
4764 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4765 may be no breakpoint to reinsert there by now. */
4766 reinsert_breakpoints_at (lwp->bp_reinsert);
4767 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4768
4769 lwp->bp_reinsert = 0;
4770
4771 /* Delete any single-step breakpoints. No longer needed. We
4772 don't have to worry about other threads hitting this trap,
4773 and later not being able to explain it, because we were
4774 stepping over a breakpoint, and we hold all threads but
4775 LWP stopped while doing that. */
4776 if (!can_hardware_single_step ())
4777 {
4778 gdb_assert (has_single_step_breakpoints (current_thread));
4779 delete_single_step_breakpoints (current_thread);
4780 }
4781
4782 step_over_bkpt = null_ptid;
4783 current_thread = saved_thread;
4784 return 1;
4785 }
4786 else
4787 return 0;
4788 }
4789
4790 void
4791 linux_process_target::complete_ongoing_step_over ()
4792 {
4793 if (step_over_bkpt != null_ptid)
4794 {
4795 struct lwp_info *lwp;
4796 int wstat;
4797 int ret;
4798
4799 if (debug_threads)
4800 debug_printf ("detach: step over in progress, finish it first\n");
4801
4802 /* Passing NULL_PTID as filter indicates we want all events to
4803 be left pending. Eventually this returns when there are no
4804 unwaited-for children left. */
4805 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4806 __WALL);
4807 gdb_assert (ret == -1);
4808
4809 lwp = find_lwp_pid (step_over_bkpt);
4810 if (lwp != NULL)
4811 finish_step_over (lwp);
4812 step_over_bkpt = null_ptid;
4813 unsuspend_all_lwps (lwp);
4814 }
4815 }
4816
4817 /* This function is called once per thread. We check the thread's resume
4818 request, which will tell us whether to resume, step, or leave the thread
4819 stopped; and what signal, if any, it should be sent.
4820
4821 For threads which we aren't explicitly told otherwise, we preserve
4822 the stepping flag; this is used for stepping over gdbserver-placed
4823 breakpoints.
4824
4825 If pending_flags was set in any thread, we queue any needed
4826 signals, since we won't actually resume. We already have a pending
4827 event to report, so we don't need to preserve any step requests;
4828 they should be re-issued if necessary. */
4829
4830 static void
4831 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4832 {
4833 struct lwp_info *lwp = get_thread_lwp (thread);
4834 int leave_pending;
4835
4836 if (lwp->resume == NULL)
4837 return;
4838
4839 if (lwp->resume->kind == resume_stop)
4840 {
4841 if (debug_threads)
4842 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4843
4844 if (!lwp->stopped)
4845 {
4846 if (debug_threads)
4847 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4848
4849 /* Stop the thread, and wait for the event asynchronously,
4850 through the event loop. */
4851 send_sigstop (lwp);
4852 }
4853 else
4854 {
4855 if (debug_threads)
4856 debug_printf ("already stopped LWP %ld\n",
4857 lwpid_of (thread));
4858
4859 /* The LWP may have been stopped in an internal event that
4860 was not meant to be notified back to GDB (e.g., gdbserver
4861 breakpoint), so we should be reporting a stop event in
4862 this case too. */
4863
4864 /* If the thread already has a pending SIGSTOP, this is a
4865 no-op. Otherwise, something later will presumably resume
4866 the thread and this will cause it to cancel any pending
4867 operation, due to last_resume_kind == resume_stop. If
4868 the thread already has a pending status to report, we
4869 will still report it the next time we wait - see
4870 status_pending_p_callback. */
4871
4872 /* If we already have a pending signal to report, then
4873 there's no need to queue a SIGSTOP, as this means we're
4874 midway through moving the LWP out of the jumppad, and we
4875 will report the pending signal as soon as that is
4876 finished. */
4877 if (lwp->pending_signals_to_report == NULL)
4878 send_sigstop (lwp);
4879 }
4880
4881 /* For stop requests, we're done. */
4882 lwp->resume = NULL;
4883 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4884 return;
4885 }
4886
4887 /* If this thread which is about to be resumed has a pending status,
4888 then don't resume it - we can just report the pending status.
4889 Likewise if it is suspended, because e.g., another thread is
4890 stepping past a breakpoint. Make sure to queue any signals that
4891 would otherwise be sent. In all-stop mode, we make this decision
4892 based on whether *any* thread has a pending status. If there's a
4893 thread that needs the step-over-breakpoint dance, then don't
4894 resume any other thread but that particular one. */
4895 leave_pending = (lwp->suspended
4896 || lwp->status_pending_p
4897 || leave_all_stopped);
4898
4899 /* If we have a new signal, enqueue the signal. */
4900 if (lwp->resume->sig != 0)
4901 {
4902 siginfo_t info, *info_p;
4903
4904 /* If this is the same signal we were previously stopped by,
4905 make sure to queue its siginfo. */
4906 if (WIFSTOPPED (lwp->last_status)
4907 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4908 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4909 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4910 info_p = &info;
4911 else
4912 info_p = NULL;
4913
4914 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4915 }
4916
4917 if (!leave_pending)
4918 {
4919 if (debug_threads)
4920 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4921
4922 proceed_one_lwp (thread, NULL);
4923 }
4924 else
4925 {
4926 if (debug_threads)
4927 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4928 }
4929
4930 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4931 lwp->resume = NULL;
4932 }
4933
4934 void
4935 linux_process_target::resume (thread_resume *resume_info, size_t n)
4936 {
4937 struct thread_info *need_step_over = NULL;
4938
4939 if (debug_threads)
4940 {
4941 debug_enter ();
4942 debug_printf ("linux_resume:\n");
4943 }
4944
4945 for_each_thread ([&] (thread_info *thread)
4946 {
4947 linux_set_resume_request (thread, resume_info, n);
4948 });
4949
4950 /* If there is a thread which would otherwise be resumed, which has
4951 a pending status, then don't resume any threads - we can just
4952 report the pending status. Make sure to queue any signals that
4953 would otherwise be sent. In non-stop mode, we'll apply this
4954 logic to each thread individually. We consume all pending events
4955 before considering whether to start a step-over (in all-stop). */
4956 bool any_pending = false;
4957 if (!non_stop)
4958 any_pending = find_thread (resume_status_pending_p) != NULL;
4959
4960 /* If there is a thread which would otherwise be resumed, which is
4961 stopped at a breakpoint that needs stepping over, then don't
4962 resume any threads - have it step over the breakpoint with all
4963 other threads stopped, then resume all threads again. Make sure
4964 to queue any signals that would otherwise be delivered or
4965 queued. */
4966 if (!any_pending && supports_breakpoints ())
4967 need_step_over = find_thread (need_step_over_p);
4968
4969 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4970
4971 if (debug_threads)
4972 {
4973 if (need_step_over != NULL)
4974 debug_printf ("Not resuming all, need step over\n");
4975 else if (any_pending)
4976 debug_printf ("Not resuming, all-stop and found "
4977 "an LWP with pending status\n");
4978 else
4979 debug_printf ("Resuming, no pending status or step over needed\n");
4980 }
4981
4982 /* Even if we're leaving threads stopped, queue all signals we'd
4983 otherwise deliver. */
4984 for_each_thread ([&] (thread_info *thread)
4985 {
4986 linux_resume_one_thread (thread, leave_all_stopped);
4987 });
4988
4989 if (need_step_over)
4990 start_step_over (get_thread_lwp (need_step_over));
4991
4992 if (debug_threads)
4993 {
4994 debug_printf ("linux_resume done\n");
4995 debug_exit ();
4996 }
4997
4998 /* We may have events that were pending that can/should be sent to
4999 the client now. Trigger a linux_wait call. */
5000 if (target_is_async_p ())
5001 async_file_mark ();
5002 }
5003
5004 /* This function is called once per thread. We check the thread's
5005 last resume request, which will tell us whether to resume, step, or
5006 leave the thread stopped. Any signal the client requested to be
5007 delivered has already been enqueued at this point.
5008
5009 If any thread that GDB wants running is stopped at an internal
5010 breakpoint that needs stepping over, we start a step-over operation
5011 on that particular thread, and leave all others stopped. */
5012
5013 static void
5014 proceed_one_lwp (thread_info *thread, lwp_info *except)
5015 {
5016 struct lwp_info *lwp = get_thread_lwp (thread);
5017 int step;
5018
5019 if (lwp == except)
5020 return;
5021
5022 if (debug_threads)
5023 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5024
5025 if (!lwp->stopped)
5026 {
5027 if (debug_threads)
5028 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5029 return;
5030 }
5031
5032 if (thread->last_resume_kind == resume_stop
5033 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5034 {
5035 if (debug_threads)
5036 debug_printf ("   client wants LWP %ld to remain stopped\n",
5037 lwpid_of (thread));
5038 return;
5039 }
5040
5041 if (lwp->status_pending_p)
5042 {
5043 if (debug_threads)
5044 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5045 lwpid_of (thread));
5046 return;
5047 }
5048
5049 gdb_assert (lwp->suspended >= 0);
5050
5051 if (lwp->suspended)
5052 {
5053 if (debug_threads)
5054 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5055 return;
5056 }
5057
5058 if (thread->last_resume_kind == resume_stop
5059 && lwp->pending_signals_to_report == NULL
5060 && (lwp->collecting_fast_tracepoint
5061 == fast_tpoint_collect_result::not_collecting))
5062 {
5063 /* We haven't reported this LWP as stopped yet (otherwise, the
5064 last_status.kind check above would catch it, and we wouldn't
5065 reach here). This LWP may have been momentarily paused by a
5066 stop_all_lwps call while handling, for example, another LWP's
5067 step-over. In that case, the pending expected SIGSTOP signal
5068 that was queued at vCont;t handling time will have already
5069 been consumed by wait_for_sigstop, and so we need to requeue
5070 another one here. Note that if the LWP already has a SIGSTOP
5071 pending, this is a no-op. */
5072
5073 if (debug_threads)
5074 debug_printf ("Client wants LWP %ld to stop. "
5075 "Making sure it has a SIGSTOP pending\n",
5076 lwpid_of (thread));
5077
5078 send_sigstop (lwp);
5079 }
5080
5081 if (thread->last_resume_kind == resume_step)
5082 {
5083 if (debug_threads)
5084 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5085 lwpid_of (thread));
5086
5087 /* If resume_step is requested by GDB, install single-step
5088 breakpoints when the thread is about to be actually resumed if
5089 the single-step breakpoints weren't removed. */
5090 if (can_software_single_step ()
5091 && !has_single_step_breakpoints (thread))
5092 install_software_single_step_breakpoints (lwp);
5093
5094 step = maybe_hw_step (thread);
5095 }
5096 else if (lwp->bp_reinsert != 0)
5097 {
5098 if (debug_threads)
5099 debug_printf (" stepping LWP %ld, reinsert set\n",
5100 lwpid_of (thread));
5101
5102 step = maybe_hw_step (thread);
5103 }
5104 else
5105 step = 0;
5106
5107 linux_resume_one_lwp (lwp, step, 0, NULL);
5108 }
5109
5110 static void
5111 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5112 {
5113 struct lwp_info *lwp = get_thread_lwp (thread);
5114
5115 if (lwp == except)
5116 return;
5117
5118 lwp_suspended_decr (lwp);
5119
5120 proceed_one_lwp (thread, except);
5121 }
5122
5123 void
5124 linux_process_target::proceed_all_lwps ()
5125 {
5126 struct thread_info *need_step_over;
5127
5128 /* If there is a thread which would otherwise be resumed, which is
5129 stopped at a breakpoint that needs stepping over, then don't
5130 resume any threads - have it step over the breakpoint with all
5131 other threads stopped, then resume all threads again. */
5132
5133 if (supports_breakpoints ())
5134 {
5135 need_step_over = find_thread (need_step_over_p);
5136
5137 if (need_step_over != NULL)
5138 {
5139 if (debug_threads)
5140 debug_printf ("proceed_all_lwps: found "
5141 "thread %ld needing a step-over\n",
5142 lwpid_of (need_step_over));
5143
5144 start_step_over (get_thread_lwp (need_step_over));
5145 return;
5146 }
5147 }
5148
5149 if (debug_threads)
5150 debug_printf ("Proceeding, no step-over needed\n");
5151
5152 for_each_thread ([] (thread_info *thread)
5153 {
5154 proceed_one_lwp (thread, NULL);
5155 });
5156 }
5157
5158 void
5159 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5160 {
5161 if (debug_threads)
5162 {
5163 debug_enter ();
5164 if (except)
5165 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5166 lwpid_of (get_lwp_thread (except)));
5167 else
5168 debug_printf ("unstopping all lwps\n");
5169 }
5170
5171 if (unsuspend)
5172 for_each_thread ([&] (thread_info *thread)
5173 {
5174 unsuspend_and_proceed_one_lwp (thread, except);
5175 });
5176 else
5177 for_each_thread ([&] (thread_info *thread)
5178 {
5179 proceed_one_lwp (thread, except);
5180 });
5181
5182 if (debug_threads)
5183 {
5184 debug_printf ("unstop_all_lwps done\n");
5185 debug_exit ();
5186 }
5187 }
5188
5189
5190 #ifdef HAVE_LINUX_REGSETS
5191
5192 #define use_linux_regsets 1
5193
5194 /* Returns true if REGSET has been disabled. */
5195
5196 static int
5197 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5198 {
5199 return (info->disabled_regsets != NULL
5200 && info->disabled_regsets[regset - info->regsets]);
5201 }
5202
5203 /* Disable REGSET. */
5204
5205 static void
5206 disable_regset (struct regsets_info *info, struct regset_info *regset)
5207 {
5208 int dr_offset;
5209
5210 dr_offset = regset - info->regsets;
5211 if (info->disabled_regsets == NULL)
5212 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5213 info->disabled_regsets[dr_offset] = 1;
5214 }
5215
5216 static int
5217 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5218 struct regcache *regcache)
5219 {
5220 struct regset_info *regset;
5221 int saw_general_regs = 0;
5222 int pid;
5223 struct iovec iov;
5224
5225 pid = lwpid_of (current_thread);
5226 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5227 {
5228 void *buf, *data;
5229 int nt_type, res;
5230
5231 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5232 continue;
5233
5234 buf = xmalloc (regset->size);
5235
5236 nt_type = regset->nt_type;
5237 if (nt_type)
5238 {
5239 iov.iov_base = buf;
5240 iov.iov_len = regset->size;
5241 data = (void *) &iov;
5242 }
5243 else
5244 data = buf;
5245
5246 #ifndef __sparc__
5247 res = ptrace (regset->get_request, pid,
5248 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5249 #else
5250 res = ptrace (regset->get_request, pid, data, nt_type);
5251 #endif
5252 if (res < 0)
5253 {
5254 if (errno == EIO
5255 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5256 {
5257 /* If we get EIO on a regset, or an EINVAL and the regset is
5258 optional, do not try it again for this process mode. */
5259 disable_regset (regsets_info, regset);
5260 }
5261 else if (errno == ENODATA)
5262 {
5263 /* ENODATA may be returned if the regset is currently
5264 not "active". This can happen in normal operation,
5265 so suppress the warning in this case. */
5266 }
5267 else if (errno == ESRCH)
5268 {
5269 /* At this point, ESRCH should mean the process is
5270 already gone, in which case we simply ignore attempts
5271 to read its registers. */
5272 }
5273 else
5274 {
5275 char s[256];
5276 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5277 pid);
5278 perror (s);
5279 }
5280 }
5281 else
5282 {
5283 if (regset->type == GENERAL_REGS)
5284 saw_general_regs = 1;
5285 regset->store_function (regcache, buf);
5286 }
5287 free (buf);
5288 }
5289 if (saw_general_regs)
5290 return 0;
5291 else
5292 return 1;
5293 }
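/* Illustrative sketch (not part of the original file): for regsets
   with a nonzero NT_TYPE, the loop above uses the iovec-based
   PTRACE_GETREGSET interface (Linux 2.6.34 and later).  A minimal
   direct use, with a hypothetical helper name, might look like:  */
#if 0
static long
demo_fetch_gregs (int pid, elf_gregset_t *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* NT_PRSTATUS selects the general-purpose register set; the kernel
     updates iov.iov_len to the amount actually written.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif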
5294
5295 static int
5296 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5297 struct regcache *regcache)
5298 {
5299 struct regset_info *regset;
5300 int saw_general_regs = 0;
5301 int pid;
5302 struct iovec iov;
5303
5304 pid = lwpid_of (current_thread);
5305 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5306 {
5307 void *buf, *data;
5308 int nt_type, res;
5309
5310 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5311 || regset->fill_function == NULL)
5312 continue;
5313
5314 buf = xmalloc (regset->size);
5315
5316 /* First fill the buffer with the current register set contents,
5317 in case there are any items in the kernel's regset that are
5318 not in gdbserver's regcache. */
5319
5320 nt_type = regset->nt_type;
5321 if (nt_type)
5322 {
5323 iov.iov_base = buf;
5324 iov.iov_len = regset->size;
5325 data = (void *) &iov;
5326 }
5327 else
5328 data = buf;
5329
5330 #ifndef __sparc__
5331 res = ptrace (regset->get_request, pid,
5332 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5333 #else
5334 res = ptrace (regset->get_request, pid, data, nt_type);
5335 #endif
5336
5337 if (res == 0)
5338 {
5339 /* Then overlay our cached registers on that. */
5340 regset->fill_function (regcache, buf);
5341
5342 /* Only now do we write the register set. */
5343 #ifndef __sparc__
5344 res = ptrace (regset->set_request, pid,
5345 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5346 #else
5347 res = ptrace (regset->set_request, pid, data, nt_type);
5348 #endif
5349 }
5350
5351 if (res < 0)
5352 {
5353 if (errno == EIO
5354 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5355 {
5356 /* If we get EIO on a regset, or an EINVAL and the regset is
5357 optional, do not try it again for this process mode. */
5358 disable_regset (regsets_info, regset);
5359 }
5360 else if (errno == ESRCH)
5361 {
5362 /* At this point, ESRCH should mean the process is
5363 already gone, in which case we simply ignore attempts
5364 to change its registers. See also the related
5365 comment in linux_resume_one_lwp. */
5366 free (buf);
5367 return 0;
5368 }
5369 else
5370 {
5371 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5372 }
5373 }
5374 else if (regset->type == GENERAL_REGS)
5375 saw_general_regs = 1;
5376 free (buf);
5377 }
5378 if (saw_general_regs)
5379 return 0;
5380 else
5381 return 1;
5382 }
5383
5384 #else /* !HAVE_LINUX_REGSETS */
5385
5386 #define use_linux_regsets 0
5387 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5388 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5389
5390 #endif
5391
5392 /* Return 1 if register REGNO is supported by one of the regset ptrace
5393 calls or 0 if it has to be transferred individually. */
5394
5395 static int
5396 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5397 {
5398 unsigned char mask = 1 << (regno % 8);
5399 size_t index = regno / 8;
5400
5401 return (use_linux_regsets
5402 && (regs_info->regset_bitmap == NULL
5403 || (regs_info->regset_bitmap[index] & mask) != 0));
5404 }
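/* Worked example (added commentary, not in the original file): for
   REGNO == 10, INDEX is 1 and MASK is 1 << 2, so the test above reads
   bit 2 of the bitmap's second byte.  A NULL REGSET_BITMAP means every
   register is covered by some regset.  */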
5405
5406 #ifdef HAVE_LINUX_USRREGS
5407
5408 static int
5409 register_addr (const struct usrregs_info *usrregs, int regnum)
5410 {
5411 int addr;
5412
5413 if (regnum < 0 || regnum >= usrregs->num_regs)
5414 error ("Invalid register number %d.", regnum);
5415
5416 addr = usrregs->regmap[regnum];
5417
5418 return addr;
5419 }
5420
5421
5422 void
5423 linux_process_target::fetch_register (const usrregs_info *usrregs,
5424 regcache *regcache, int regno)
5425 {
5426 CORE_ADDR regaddr;
5427 int i, size;
5428 char *buf;
5429 int pid;
5430
5431 if (regno >= usrregs->num_regs)
5432 return;
5433 if (low_cannot_fetch_register (regno))
5434 return;
5435
5436 regaddr = register_addr (usrregs, regno);
5437 if (regaddr == -1)
5438 return;
5439
5440 size = ((register_size (regcache->tdesc, regno)
5441 + sizeof (PTRACE_XFER_TYPE) - 1)
5442 & -sizeof (PTRACE_XFER_TYPE));
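/* Example of the rounding above (added commentary): with an 8-byte
   PTRACE_XFER_TYPE, a 10-byte register rounds up to SIZE == 16, so
   the peek loop below always transfers whole ptrace words.  */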
5443 buf = (char *) alloca (size);
5444
5445 pid = lwpid_of (current_thread);
5446 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5447 {
5448 errno = 0;
5449 *(PTRACE_XFER_TYPE *) (buf + i) =
5450 ptrace (PTRACE_PEEKUSER, pid,
5451 /* Coerce to a uintptr_t first to avoid potential gcc warning
5452 of coercing an 8 byte integer to a 4 byte pointer. */
5453 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5454 regaddr += sizeof (PTRACE_XFER_TYPE);
5455 if (errno != 0)
5456 {
5457 /* Mark register REGNO unavailable. */
5458 supply_register (regcache, regno, NULL);
5459 return;
5460 }
5461 }
5462
5463 if (the_low_target.supply_ptrace_register)
5464 the_low_target.supply_ptrace_register (regcache, regno, buf);
5465 else
5466 supply_register (regcache, regno, buf);
5467 }
5468
5469 void
5470 linux_process_target::store_register (const usrregs_info *usrregs,
5471 regcache *regcache, int regno)
5472 {
5473 CORE_ADDR regaddr;
5474 int i, size;
5475 char *buf;
5476 int pid;
5477
5478 if (regno >= usrregs->num_regs)
5479 return;
5480 if (low_cannot_store_register (regno))
5481 return;
5482
5483 regaddr = register_addr (usrregs, regno);
5484 if (regaddr == -1)
5485 return;
5486
5487 size = ((register_size (regcache->tdesc, regno)
5488 + sizeof (PTRACE_XFER_TYPE) - 1)
5489 & -sizeof (PTRACE_XFER_TYPE));
5490 buf = (char *) alloca (size);
5491 memset (buf, 0, size);
5492
5493 if (the_low_target.collect_ptrace_register)
5494 the_low_target.collect_ptrace_register (regcache, regno, buf);
5495 else
5496 collect_register (regcache, regno, buf);
5497
5498 pid = lwpid_of (current_thread);
5499 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5500 {
5501 errno = 0;
5502 ptrace (PTRACE_POKEUSER, pid,
5503 /* Coerce to a uintptr_t first to avoid potential gcc warning
5504 about coercing an 8 byte integer to a 4 byte pointer. */
5505 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5506 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5507 if (errno != 0)
5508 {
5509 /* At this point, ESRCH should mean the process is
5510 already gone, in which case we simply ignore attempts
5511 to change its registers. See also the related
5512 comment in linux_resume_one_lwp. */
5513 if (errno == ESRCH)
5514 return;
5515
5517 if (!low_cannot_store_register (regno))
5518 error ("writing register %d: %s", regno, safe_strerror (errno));
5519 }
5520 regaddr += sizeof (PTRACE_XFER_TYPE);
5521 }
5522 }
5523 #endif /* HAVE_LINUX_USRREGS */
5524
5525 void
5526 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5527 regcache *regcache,
5528 int regno, int all)
5529 {
5530 #ifdef HAVE_LINUX_USRREGS
5531 struct usrregs_info *usr = regs_info->usrregs;
5532
5533 if (regno == -1)
5534 {
5535 for (regno = 0; regno < usr->num_regs; regno++)
5536 if (all || !linux_register_in_regsets (regs_info, regno))
5537 fetch_register (usr, regcache, regno);
5538 }
5539 else
5540 fetch_register (usr, regcache, regno);
5541 #endif
5542 }
5543
5544 void
5545 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5546 regcache *regcache,
5547 int regno, int all)
5548 {
5549 #ifdef HAVE_LINUX_USRREGS
5550 struct usrregs_info *usr = regs_info->usrregs;
5551
5552 if (regno == -1)
5553 {
5554 for (regno = 0; regno < usr->num_regs; regno++)
5555 if (all || !linux_register_in_regsets (regs_info, regno))
5556 store_register (usr, regcache, regno);
5557 }
5558 else
5559 store_register (usr, regcache, regno);
5560 #endif
5561 }
5562
5563 void
5564 linux_process_target::fetch_registers (regcache *regcache, int regno)
5565 {
5566 int use_regsets;
5567 int all = 0;
5568 const regs_info *regs_info = get_regs_info ();
5569
5570 if (regno == -1)
5571 {
5572 if (the_low_target.fetch_register != NULL
5573 && regs_info->usrregs != NULL)
5574 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5575 (*the_low_target.fetch_register) (regcache, regno);
5576
5577 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5578 if (regs_info->usrregs != NULL)
5579 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5580 }
5581 else
5582 {
5583 if (the_low_target.fetch_register != NULL
5584 && (*the_low_target.fetch_register) (regcache, regno))
5585 return;
5586
5587 use_regsets = linux_register_in_regsets (regs_info, regno);
5588 if (use_regsets)
5589 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5590 regcache);
5591 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5592 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5593 }
5594 }
5595
5596 void
5597 linux_process_target::store_registers (regcache *regcache, int regno)
5598 {
5599 int use_regsets;
5600 int all = 0;
5601 const regs_info *regs_info = get_regs_info ();
5602
5603 if (regno == -1)
5604 {
5605 all = regsets_store_inferior_registers (regs_info->regsets_info,
5606 regcache);
5607 if (regs_info->usrregs != NULL)
5608 usr_store_inferior_registers (regs_info, regcache, regno, all);
5609 }
5610 else
5611 {
5612 use_regsets = linux_register_in_regsets (regs_info, regno);
5613 if (use_regsets)
5614 all = regsets_store_inferior_registers (regs_info->regsets_info,
5615 regcache);
5616 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5617 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5618 }
5619 }
5620
5621
5622 /* A wrapper for the read_memory target op. */
5623
5624 static int
5625 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5626 {
5627 return the_target->read_memory (memaddr, myaddr, len);
5628 }
5629
5630 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5631 to debugger memory starting at MYADDR. */
5632
5633 int
5634 linux_process_target::read_memory (CORE_ADDR memaddr,
5635 unsigned char *myaddr, int len)
5636 {
5637 int pid = lwpid_of (current_thread);
5638 PTRACE_XFER_TYPE *buffer;
5639 CORE_ADDR addr;
5640 int count;
5641 char filename[64];
5642 int i;
5643 int ret;
5644 int fd;
5645
5646 /* Try using /proc. Don't bother for one word. */
5647 if (len >= 3 * sizeof (long))
5648 {
5649 int bytes;
5650
5651 /* We could keep this file open and cache it - possibly one per
5652 thread. That requires some juggling, but is even faster. */
5653 sprintf (filename, "/proc/%d/mem", pid);
5654 fd = open (filename, O_RDONLY | O_LARGEFILE);
5655 if (fd == -1)
5656 goto no_proc;
5657
5658 /* If pread64 is available, use it. It's faster if the kernel
5659 supports it (only one syscall), and it's 64-bit safe even on
5660 32-bit platforms (for instance, SPARC debugging a SPARC64
5661 application). */
5662 #ifdef HAVE_PREAD64
5663 bytes = pread64 (fd, myaddr, len, memaddr);
5664 #else
5665 bytes = -1;
5666 if (lseek (fd, memaddr, SEEK_SET) != -1)
5667 bytes = read (fd, myaddr, len);
5668 #endif
5669
5670 close (fd);
5671 if (bytes == len)
5672 return 0;
5673
5674 /* Some data was read, we'll try to get the rest with ptrace. */
5675 if (bytes > 0)
5676 {
5677 memaddr += bytes;
5678 myaddr += bytes;
5679 len -= bytes;
5680 }
5681 }
5682
5683 no_proc:
5684 /* Round starting address down to longword boundary. */
5685 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5686 /* Round ending address up; get number of longwords that makes. */
5687 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5688 / sizeof (PTRACE_XFER_TYPE));
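/* Illustrative example: with 8-byte transfer words, a LEN 10 read at
MEMADDR 0x1003 yields ADDR 0x1000 and COUNT 2, i.e. 16 bytes covering
0x1000..0x100f; the memcpy below discards the extra bytes at each
end. */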
5689 /* Allocate buffer of that many longwords. */
5690 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5691
5692 /* Read all the longwords. */
5693 errno = 0;
5694 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5695 {
5696 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5697 about coercing an 8 byte integer to a 4 byte pointer. */
5698 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5699 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5700 (PTRACE_TYPE_ARG4) 0);
5701 if (errno)
5702 break;
5703 }
5704 ret = errno;
5705
5706 /* Copy appropriate bytes out of the buffer. */
5707 if (i > 0)
5708 {
5709 i *= sizeof (PTRACE_XFER_TYPE);
5710 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5711 memcpy (myaddr,
5712 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5713 i < len ? i : len);
5714 }
5715
5716 return ret;
5717 }
5718
5719 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5720 memory at MEMADDR. On failure (cannot write to the inferior)
5721 returns the value of errno. Always succeeds if LEN is zero. */
5722
5723 int
5724 linux_process_target::write_memory (CORE_ADDR memaddr,
5725 const unsigned char *myaddr, int len)
5726 {
5727 int i;
5728 /* Round starting address down to longword boundary. */
5729 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5730 /* Round ending address up; get number of longwords that makes. */
5731 int count
5732 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5733 / sizeof (PTRACE_XFER_TYPE);
5734
5735 /* Allocate buffer of that many longwords. */
5736 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5737
5738 int pid = lwpid_of (current_thread);
5739
5740 if (len == 0)
5741 {
5742 /* Zero length write always succeeds. */
5743 return 0;
5744 }
5745
5746 if (debug_threads)
5747 {
5748 /* Dump up to four bytes. */
5749 char str[4 * 2 + 1];
5750 char *p = str;
5751 int dump = len < 4 ? len : 4;
5752
5753 for (i = 0; i < dump; i++)
5754 {
5755 sprintf (p, "%02x", myaddr[i]);
5756 p += 2;
5757 }
5758 *p = '\0';
5759
5760 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5761 str, (long) memaddr, pid);
5762 }
5763
5764 /* Fill start and end extra bytes of buffer with existing memory data. */
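/* Only the first and last words can contain bytes outside
[MEMADDR, MEMADDR + LEN), so those two words are pre-read to avoid
clobbering the neighbouring bytes. */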
5765
5766 errno = 0;
5767 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5768 about coercing an 8 byte integer to a 4 byte pointer. */
5769 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5770 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5771 (PTRACE_TYPE_ARG4) 0);
5772 if (errno)
5773 return errno;
5774
5775 if (count > 1)
5776 {
5777 errno = 0;
5778 buffer[count - 1]
5779 = ptrace (PTRACE_PEEKTEXT, pid,
5780 /* Coerce to a uintptr_t first to avoid potential gcc warning
5781 about coercing an 8 byte integer to a 4 byte pointer. */
5782 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5783 * sizeof (PTRACE_XFER_TYPE)),
5784 (PTRACE_TYPE_ARG4) 0);
5785 if (errno)
5786 return errno;
5787 }
5788
5789 /* Copy data to be written over corresponding part of buffer. */
5790
5791 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5792 myaddr, len);
5793
5794 /* Write the entire buffer. */
5795
5796 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5797 {
5798 errno = 0;
5799 ptrace (PTRACE_POKETEXT, pid,
5800 /* Coerce to a uintptr_t first to avoid potential gcc warning
5801 about coercing an 8 byte integer to a 4 byte pointer. */
5802 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5803 (PTRACE_TYPE_ARG4) buffer[i]);
5804 if (errno)
5805 return errno;
5806 }
5807
5808 return 0;
5809 }
5810
5811 void
5812 linux_process_target::look_up_symbols ()
5813 {
5814 #ifdef USE_THREAD_DB
5815 struct process_info *proc = current_process ();
5816
5817 if (proc->priv->thread_db != NULL)
5818 return;
5819
5820 thread_db_init ();
5821 #endif
5822 }
5823
5824 void
5825 linux_process_target::request_interrupt ()
5826 {
5827 /* Send a SIGINT to the process group. This acts just like the user
5828 typed a ^C on the controlling terminal. */
5829 ::kill (-signal_pid, SIGINT);
5830 }
5831
5832 bool
5833 linux_process_target::supports_read_auxv ()
5834 {
5835 return true;
5836 }
5837
5838 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5839 to debugger memory starting at MYADDR. */
5840
5841 int
5842 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5843 unsigned int len)
5844 {
5845 char filename[PATH_MAX];
5846 int fd, n;
5847 int pid = lwpid_of (current_thread);
5848
5849 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5850
5851 fd = open (filename, O_RDONLY);
5852 if (fd < 0)
5853 return -1;
5854
5855 if (offset != (CORE_ADDR) 0
5856 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5857 n = -1;
5858 else
5859 n = read (fd, myaddr, len);
5860
5861 close (fd);
5862
5863 return n;
5864 }
5865
5866 /* These breakpoint and watchpoint related wrapper functions simply
5867 pass on the function call if the target has registered a
5868 corresponding function. */
5869
5870 bool
5871 linux_process_target::supports_z_point_type (char z_type)
5872 {
5873 return (the_low_target.supports_z_point_type != NULL
5874 && the_low_target.supports_z_point_type (z_type));
5875 }
5876
5877 int
5878 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5879 int size, raw_breakpoint *bp)
5880 {
5881 if (type == raw_bkpt_type_sw)
5882 return insert_memory_breakpoint (bp);
5883 else if (the_low_target.insert_point != NULL)
5884 return the_low_target.insert_point (type, addr, size, bp);
5885 else
5886 /* Unsupported (see target.h). */
5887 return 1;
5888 }
5889
5890 int
5891 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5892 int size, raw_breakpoint *bp)
5893 {
5894 if (type == raw_bkpt_type_sw)
5895 return remove_memory_breakpoint (bp);
5896 else if (the_low_target.remove_point != NULL)
5897 return the_low_target.remove_point (type, addr, size, bp);
5898 else
5899 /* Unsupported (see target.h). */
5900 return 1;
5901 }
5902
5903 /* Implement the stopped_by_sw_breakpoint target_ops
5904 method. */
5905
5906 bool
5907 linux_process_target::stopped_by_sw_breakpoint ()
5908 {
5909 struct lwp_info *lwp = get_thread_lwp (current_thread);
5910
5911 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5912 }
5913
5914 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5915 method. */
5916
5917 bool
5918 linux_process_target::supports_stopped_by_sw_breakpoint ()
5919 {
5920 return USE_SIGTRAP_SIGINFO;
5921 }
5922
5923 /* Implement the stopped_by_hw_breakpoint target_ops
5924 method. */
5925
5926 bool
5927 linux_process_target::stopped_by_hw_breakpoint ()
5928 {
5929 struct lwp_info *lwp = get_thread_lwp (current_thread);
5930
5931 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5932 }
5933
5934 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5935 method. */
5936
5937 bool
5938 linux_process_target::supports_stopped_by_hw_breakpoint ()
5939 {
5940 return USE_SIGTRAP_SIGINFO;
5941 }
5942
5943 /* Implement the supports_hardware_single_step target_ops method. */
5944
5945 bool
5946 linux_process_target::supports_hardware_single_step ()
5947 {
5948 return can_hardware_single_step ();
5949 }
5950
5951 bool
5952 linux_process_target::supports_software_single_step ()
5953 {
5954 return can_software_single_step ();
5955 }
5956
5957 bool
5958 linux_process_target::stopped_by_watchpoint ()
5959 {
5960 struct lwp_info *lwp = get_thread_lwp (current_thread);
5961
5962 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5963 }
5964
5965 CORE_ADDR
5966 linux_process_target::stopped_data_address ()
5967 {
5968 struct lwp_info *lwp = get_thread_lwp (current_thread);
5969
5970 return lwp->stopped_data_address;
5971 }
5972
5973 /* This is only used for targets that define PT_TEXT_ADDR,
5974 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5975 target presumably has some other way of acquiring this information,
5976 such as loadmaps. */
5977
5978 bool
5979 linux_process_target::supports_read_offsets ()
5980 {
5981 #ifdef SUPPORTS_READ_OFFSETS
5982 return true;
5983 #else
5984 return false;
5985 #endif
5986 }
5987
5988 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5989 to tell gdb about. */
5990
5991 int
5992 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5993 {
5994 #ifdef SUPPORTS_READ_OFFSETS
5995 unsigned long text, text_end, data;
5996 int pid = lwpid_of (current_thread);
5997
5998 errno = 0;
5999
6000 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6001 (PTRACE_TYPE_ARG4) 0);
6002 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6003 (PTRACE_TYPE_ARG4) 0);
6004 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6005 (PTRACE_TYPE_ARG4) 0);
6006
6007 if (errno == 0)
6008 {
6009 /* Both text and data offsets produced at compile-time (and so
6010 used by gdb) are relative to the beginning of the program,
6011 with the data segment immediately following the text segment.
6012 However, the actual runtime layout in memory may put the data
6013 somewhere else, so when we send gdb a data base-address, we
6014 use the real data base address and subtract the compile-time
6015 data base-address from it (which is just the length of the
6016 text segment). BSS immediately follows data in both
6017 cases. */
6018 *text_p = text;
6019 *data_p = data - (text_end - text);
6020
6021 return 1;
6022 }
6023 return 0;
6024 #else
6025 gdb_assert_not_reached ("target op read_offsets not supported");
6026 #endif
6027 }
6028
6029 bool
6030 linux_process_target::supports_get_tls_address ()
6031 {
6032 #ifdef USE_THREAD_DB
6033 return true;
6034 #else
6035 return false;
6036 #endif
6037 }
6038
6039 int
6040 linux_process_target::get_tls_address (thread_info *thread,
6041 CORE_ADDR offset,
6042 CORE_ADDR load_module,
6043 CORE_ADDR *address)
6044 {
6045 #ifdef USE_THREAD_DB
6046 return thread_db_get_tls_address (thread, offset, load_module, address);
6047 #else
6048 return -1;
6049 #endif
6050 }
6051
6052 bool
6053 linux_process_target::supports_qxfer_osdata ()
6054 {
6055 return true;
6056 }
6057
6058 int
6059 linux_process_target::qxfer_osdata (const char *annex,
6060 unsigned char *readbuf,
6061 unsigned const char *writebuf,
6062 CORE_ADDR offset, int len)
6063 {
6064 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6065 }
6066
6067 /* Convert a native/host siginfo object into/from the siginfo in the
6068 inferior architecture's layout. DIRECTION 0 is native-to-inferior, 1 the reverse. */
6069
6070 static void
6071 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6072 {
6073 int done = 0;
6074
6075 if (the_low_target.siginfo_fixup != NULL)
6076 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6077
6078 /* If there was no callback, or the callback didn't do anything,
6079 then just do a straight memcpy. */
6080 if (!done)
6081 {
6082 if (direction == 1)
6083 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6084 else
6085 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6086 }
6087 }
6088
6089 bool
6090 linux_process_target::supports_qxfer_siginfo ()
6091 {
6092 return true;
6093 }
6094
6095 int
6096 linux_process_target::qxfer_siginfo (const char *annex,
6097 unsigned char *readbuf,
6098 unsigned const char *writebuf,
6099 CORE_ADDR offset, int len)
6100 {
6101 int pid;
6102 siginfo_t siginfo;
6103 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6104
6105 if (current_thread == NULL)
6106 return -1;
6107
6108 pid = lwpid_of (current_thread);
6109
6110 if (debug_threads)
6111 debug_printf ("%s siginfo for lwp %d.\n",
6112 readbuf != NULL ? "Reading" : "Writing",
6113 pid);
6114
6115 if (offset >= sizeof (siginfo))
6116 return -1;
6117
6118 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6119 return -1;
6120
6121 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6122 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6123 inferior with a 64-bit GDBSERVER should look the same as debugging it
6124 with a 32-bit GDBSERVER, we need to convert it. */
6125 siginfo_fixup (&siginfo, inf_siginfo, 0);
6126
6127 if (offset + len > sizeof (siginfo))
6128 len = sizeof (siginfo) - offset;
6129
6130 if (readbuf != NULL)
6131 memcpy (readbuf, inf_siginfo + offset, len);
6132 else
6133 {
6134 memcpy (inf_siginfo + offset, writebuf, len);
6135
6136 /* Convert back to ptrace layout before flushing it out. */
6137 siginfo_fixup (&siginfo, inf_siginfo, 1);
6138
6139 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6140 return -1;
6141 }
6142
6143 return len;
6144 }
6145
6146 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6147 it lets us notice when children change state; in all modes, it is the
6148 handler for the sigsuspend in my_waitpid. */
6149
6150 static void
6151 sigchld_handler (int signo)
6152 {
6153 int old_errno = errno;
6154
6155 if (debug_threads)
6156 {
6157 do
6158 {
6159 /* Use the async signal safe debug function. */
6160 if (debug_write ("sigchld_handler\n",
6161 sizeof ("sigchld_handler\n") - 1) < 0)
6162 break; /* just ignore */
6163 } while (0);
6164 }
6165
6166 if (target_is_async_p ())
6167 async_file_mark (); /* trigger a linux_wait */
6168
6169 errno = old_errno;
6170 }
6171
6172 bool
6173 linux_process_target::supports_non_stop ()
6174 {
6175 return true;
6176 }
6177
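/* Enable or disable async (non-blocking) mode. This relies on the
self-pipe trick: the SIGCHLD handler marks linux_event_pipe via
async_file_mark, and the event loop watches the pipe's read end, so a
child status change wakes up the main loop. */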
6178 bool
6179 linux_process_target::async (bool enable)
6180 {
6181 bool previous = target_is_async_p ();
6182
6183 if (debug_threads)
6184 debug_printf ("linux_async (%d), previous=%d\n",
6185 enable, previous);
6186
6187 if (previous != enable)
6188 {
6189 sigset_t mask;
6190 sigemptyset (&mask);
6191 sigaddset (&mask, SIGCHLD);
6192
6193 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6194
6195 if (enable)
6196 {
6197 if (pipe (linux_event_pipe) == -1)
6198 {
6199 linux_event_pipe[0] = -1;
6200 linux_event_pipe[1] = -1;
6201 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6202
6203 warning ("creating event pipe failed.");
6204 return previous;
6205 }
6206
6207 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6208 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6209
6210 /* Register the event loop handler. */
6211 add_file_handler (linux_event_pipe[0],
6212 handle_target_event, NULL);
6213
6214 /* Always trigger a linux_wait. */
6215 async_file_mark ();
6216 }
6217 else
6218 {
6219 delete_file_handler (linux_event_pipe[0]);
6220
6221 close (linux_event_pipe[0]);
6222 close (linux_event_pipe[1]);
6223 linux_event_pipe[0] = -1;
6224 linux_event_pipe[1] = -1;
6225 }
6226
6227 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6228 }
6229
6230 return previous;
6231 }
6232
6233 int
6234 linux_process_target::start_non_stop (bool nonstop)
6235 {
6236 /* Register or unregister from event-loop accordingly. */
6237 target_async (nonstop);
6238
6239 if (target_is_async_p () != (nonstop != false))
6240 return -1;
6241
6242 return 0;
6243 }
6244
6245 bool
6246 linux_process_target::supports_multi_process ()
6247 {
6248 return true;
6249 }
6250
6251 /* Check if fork events are supported. */
6252
6253 bool
6254 linux_process_target::supports_fork_events ()
6255 {
6256 return linux_supports_tracefork ();
6257 }
6258
6259 /* Check if vfork events are supported. */
6260
6261 bool
6262 linux_process_target::supports_vfork_events ()
6263 {
6264 return linux_supports_tracefork ();
6265 }
6266
6267 /* Check if exec events are supported. */
6268
6269 bool
6270 linux_process_target::supports_exec_events ()
6271 {
6272 return linux_supports_traceexec ();
6273 }
6274
6275 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6276 ptrace flags for all inferiors. This is in case the new GDB connection
6277 doesn't support the same set of events that the previous one did. */
6278
6279 void
6280 linux_process_target::handle_new_gdb_connection ()
6281 {
6282 /* Request that all the lwps reset their ptrace options. */
6283 for_each_thread ([] (thread_info *thread)
6284 {
6285 struct lwp_info *lwp = get_thread_lwp (thread);
6286
6287 if (!lwp->stopped)
6288 {
6289 /* Stop the lwp so we can modify its ptrace options. */
6290 lwp->must_set_ptrace_flags = 1;
6291 linux_stop_lwp (lwp);
6292 }
6293 else
6294 {
6295 /* Already stopped; go ahead and set the ptrace options. */
6296 struct process_info *proc = find_process_pid (pid_of (thread));
6297 int options = linux_low_ptrace_options (proc->attached);
6298
6299 linux_enable_event_reporting (lwpid_of (thread), options);
6300 lwp->must_set_ptrace_flags = 0;
6301 }
6302 });
6303 }
6304
6305 int
6306 linux_process_target::handle_monitor_command (char *mon)
6307 {
6308 #ifdef USE_THREAD_DB
6309 return thread_db_handle_monitor_command (mon);
6310 #else
6311 return 0;
6312 #endif
6313 }
6314
6315 int
6316 linux_process_target::core_of_thread (ptid_t ptid)
6317 {
6318 return linux_common_core_of_thread (ptid);
6319 }
6320
6321 bool
6322 linux_process_target::supports_disable_randomization ()
6323 {
6324 #ifdef HAVE_PERSONALITY
6325 return true;
6326 #else
6327 return false;
6328 #endif
6329 }
6330
6331 bool
6332 linux_process_target::supports_agent ()
6333 {
6334 return true;
6335 }
6336
6337 bool
6338 linux_process_target::supports_range_stepping ()
6339 {
6340 if (can_software_single_step ())
6341 return true;
6342 if (the_low_target.supports_range_stepping == NULL)
6343 return false;
6344
6345 return (*the_low_target.supports_range_stepping) ();
6346 }
6347
6348 bool
6349 linux_process_target::supports_pid_to_exec_file ()
6350 {
6351 return true;
6352 }
6353
6354 char *
6355 linux_process_target::pid_to_exec_file (int pid)
6356 {
6357 return linux_proc_pid_to_exec_file (pid);
6358 }
6359
6360 bool
6361 linux_process_target::supports_multifs ()
6362 {
6363 return true;
6364 }
6365
6366 int
6367 linux_process_target::multifs_open (int pid, const char *filename,
6368 int flags, mode_t mode)
6369 {
6370 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6371 }
6372
6373 int
6374 linux_process_target::multifs_unlink (int pid, const char *filename)
6375 {
6376 return linux_mntns_unlink (pid, filename);
6377 }
6378
6379 ssize_t
6380 linux_process_target::multifs_readlink (int pid, const char *filename,
6381 char *buf, size_t bufsiz)
6382 {
6383 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6384 }
6385
6386 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6387 struct target_loadseg
6388 {
6389 /* Core address to which the segment is mapped. */
6390 Elf32_Addr addr;
6391 /* VMA recorded in the program header. */
6392 Elf32_Addr p_vaddr;
6393 /* Size of this segment in memory. */
6394 Elf32_Word p_memsz;
6395 };
6396
6397 # if defined PT_GETDSBT
6398 struct target_loadmap
6399 {
6400 /* Protocol version number, must be zero. */
6401 Elf32_Word version;
6402 /* Pointer to the DSBT table, its size, and the DSBT index. */
6403 unsigned *dsbt_table;
6404 unsigned dsbt_size, dsbt_index;
6405 /* Number of segments in this map. */
6406 Elf32_Word nsegs;
6407 /* The actual memory map. */
6408 struct target_loadseg segs[/*nsegs*/];
6409 };
6410 # define LINUX_LOADMAP PT_GETDSBT
6411 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6412 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6413 # else
6414 struct target_loadmap
6415 {
6416 /* Protocol version number, must be zero. */
6417 Elf32_Half version;
6418 /* Number of segments in this map. */
6419 Elf32_Half nsegs;
6420 /* The actual memory map. */
6421 struct target_loadseg segs[/*nsegs*/];
6422 };
6423 # define LINUX_LOADMAP PTRACE_GETFDPIC
6424 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6425 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6426 # endif
6427
6428 bool
6429 linux_process_target::supports_read_loadmap ()
6430 {
6431 return true;
6432 }
6433
6434 int
6435 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6436 unsigned char *myaddr, unsigned int len)
6437 {
6438 int pid = lwpid_of (current_thread);
6439 int addr = -1;
6440 struct target_loadmap *data = NULL;
6441 unsigned int actual_length, copy_length;
6442
6443 if (strcmp (annex, "exec") == 0)
6444 addr = (int) LINUX_LOADMAP_EXEC;
6445 else if (strcmp (annex, "interp") == 0)
6446 addr = (int) LINUX_LOADMAP_INTERP;
6447 else
6448 return -1;
6449
6450 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6451 return -1;
6452
6453 if (data == NULL)
6454 return -1;
6455
6456 actual_length = sizeof (struct target_loadmap)
6457 + sizeof (struct target_loadseg) * data->nsegs;
6458
6459 if (offset < 0 || offset > actual_length)
6460 return -1;
6461
6462 copy_length = actual_length - offset < len ? actual_length - offset : len;
6463 memcpy (myaddr, (char *) data + offset, copy_length);
6464 return copy_length;
6465 }
6466 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6467
6468 void
6469 linux_process_target::process_qsupported (char **features, int count)
6470 {
6471 if (the_low_target.process_qsupported != NULL)
6472 the_low_target.process_qsupported (features, count);
6473 }
6474
6475 bool
6476 linux_process_target::supports_catch_syscall ()
6477 {
6478 return (the_low_target.get_syscall_trapinfo != NULL
6479 && linux_supports_tracesysgood ());
6480 }
6481
6482 int
6483 linux_process_target::get_ipa_tdesc_idx ()
6484 {
6485 if (the_low_target.get_ipa_tdesc_idx == NULL)
6486 return 0;
6487
6488 return (*the_low_target.get_ipa_tdesc_idx) ();
6489 }
6490
6491 bool
6492 linux_process_target::supports_tracepoints ()
6493 {
6494 if (the_low_target.supports_tracepoints == NULL)
6495 return false;
6496
6497 return (*the_low_target.supports_tracepoints) ();
6498 }
6499
6500 CORE_ADDR
6501 linux_process_target::read_pc (regcache *regcache)
6502 {
6503 if (the_low_target.get_pc == NULL)
6504 return 0;
6505
6506 return (*the_low_target.get_pc) (regcache);
6507 }
6508
6509 void
6510 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6511 {
6512 gdb_assert (the_low_target.set_pc != NULL);
6513
6514 (*the_low_target.set_pc) (regcache, pc);
6515 }
6516
6517 bool
6518 linux_process_target::supports_thread_stopped ()
6519 {
6520 return true;
6521 }
6522
6523 bool
6524 linux_process_target::thread_stopped (thread_info *thread)
6525 {
6526 return get_thread_lwp (thread)->stopped;
6527 }
6528
6529 /* This exposes stop-all-threads functionality to other modules. */
6530
6531 void
6532 linux_process_target::pause_all (bool freeze)
6533 {
6534 stop_all_lwps (freeze, NULL);
6535 }
6536
6537 /* This exposes unstop-all-threads functionality to other gdbserver
6538 modules. */
6539
6540 void
6541 linux_process_target::unpause_all (bool unfreeze)
6542 {
6543 unstop_all_lwps (unfreeze, NULL);
6544 }
6545
6546 int
6547 linux_process_target::prepare_to_access_memory ()
6548 {
6549 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6550 running LWP. */
6551 if (non_stop)
6552 target_pause_all (true);
6553 return 0;
6554 }
6555
6556 void
6557 linux_process_target::done_accessing_memory ()
6558 {
6559 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6560 running LWP. */
6561 if (non_stop)
6562 target_unpause_all (true);
6563 }
6564
6565 bool
6566 linux_process_target::supports_fast_tracepoints ()
6567 {
6568 return the_low_target.install_fast_tracepoint_jump_pad != nullptr;
6569 }
6570
6571 int
6572 linux_process_target::install_fast_tracepoint_jump_pad
6573 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
6574 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
6575 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
6576 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
6577 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
6578 char *err)
6579 {
6580 return (*the_low_target.install_fast_tracepoint_jump_pad)
6581 (tpoint, tpaddr, collector, lockaddr, orig_size,
6582 jump_entry, trampoline, trampoline_size,
6583 jjump_pad_insn, jjump_pad_insn_size,
6584 adjusted_insn_addr, adjusted_insn_addr_end,
6585 err);
6586 }
6587
6588 emit_ops *
6589 linux_process_target::emit_ops ()
6590 {
6591 if (the_low_target.emit_ops != NULL)
6592 return (*the_low_target.emit_ops) ();
6593 else
6594 return NULL;
6595 }
6596
6597 int
6598 linux_process_target::get_min_fast_tracepoint_insn_len ()
6599 {
6600 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6601 }
6602
6603 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
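/* Each auxv entry is an (a_type, a_un.a_val) pair; the loop below
scans the vector for the AT_PHDR and AT_PHNUM entries. */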
6604
6605 static int
6606 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6607 CORE_ADDR *phdr_memaddr, int *num_phdr)
6608 {
6609 char filename[PATH_MAX];
6610 int fd;
6611 const int auxv_size = is_elf64
6612 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6613 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6614
6615 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6616
6617 fd = open (filename, O_RDONLY);
6618 if (fd < 0)
6619 return 1;
6620
6621 *phdr_memaddr = 0;
6622 *num_phdr = 0;
6623 while (read (fd, buf, auxv_size) == auxv_size
6624 && (*phdr_memaddr == 0 || *num_phdr == 0))
6625 {
6626 if (is_elf64)
6627 {
6628 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6629
6630 switch (aux->a_type)
6631 {
6632 case AT_PHDR:
6633 *phdr_memaddr = aux->a_un.a_val;
6634 break;
6635 case AT_PHNUM:
6636 *num_phdr = aux->a_un.a_val;
6637 break;
6638 }
6639 }
6640 else
6641 {
6642 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6643
6644 switch (aux->a_type)
6645 {
6646 case AT_PHDR:
6647 *phdr_memaddr = aux->a_un.a_val;
6648 break;
6649 case AT_PHNUM:
6650 *num_phdr = aux->a_un.a_val;
6651 break;
6652 }
6653 }
6654 }
6655
6656 close (fd);
6657
6658 if (*phdr_memaddr == 0 || *num_phdr == 0)
6659 {
6660 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6661 "phdr_memaddr = %ld, phdr_num = %d",
6662 (long) *phdr_memaddr, *num_phdr);
6663 return 2;
6664 }
6665
6666 return 0;
6667 }
6668
6669 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
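/* The load bias ("relocation" below) is the difference between the
runtime address of the program headers (from AT_PHDR) and the p_vaddr
recorded in the PT_PHDR entry; it is 0 for ordinary executables and
non-zero for PIE ones. */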
6670
6671 static CORE_ADDR
6672 get_dynamic (const int pid, const int is_elf64)
6673 {
6674 CORE_ADDR phdr_memaddr, relocation;
6675 int num_phdr, i;
6676 unsigned char *phdr_buf;
6677 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6678
6679 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6680 return 0;
6681
6682 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6683 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6684
6685 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6686 return 0;
6687
6688 /* Compute relocation: it is expected to be 0 for "regular" executables,
6689 non-zero for PIE ones. */
6690 relocation = -1;
6691 for (i = 0; relocation == -1 && i < num_phdr; i++)
6692 if (is_elf64)
6693 {
6694 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6695
6696 if (p->p_type == PT_PHDR)
6697 relocation = phdr_memaddr - p->p_vaddr;
6698 }
6699 else
6700 {
6701 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6702
6703 if (p->p_type == PT_PHDR)
6704 relocation = phdr_memaddr - p->p_vaddr;
6705 }
6706
6707 if (relocation == -1)
6708 {
6709 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6710 all real-world executables, including PIE executables, always have
6711 PT_PHDR present. PT_PHDR is missing from some shared libraries and
6712 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6713 provides DT_DEBUG anyway (fpc binaries are statically linked).
6714
6715 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6716
6717 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6718
6719 return 0;
6720 }
6721
6722 for (i = 0; i < num_phdr; i++)
6723 {
6724 if (is_elf64)
6725 {
6726 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6727
6728 if (p->p_type == PT_DYNAMIC)
6729 return p->p_vaddr + relocation;
6730 }
6731 else
6732 {
6733 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6734
6735 if (p->p_type == PT_DYNAMIC)
6736 return p->p_vaddr + relocation;
6737 }
6738 }
6739
6740 return 0;
6741 }
6742
6743 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6744 can be 0 if the inferior does not yet have the library list initialized.
6745 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6746 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
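/* The dynamic section is scanned one Elf{32,64}_Dyn entry at a time
until DT_NULL. For DT_DEBUG, d_val is &_r_debug itself; for the MIPS
tags, it is the address (or, for DT_MIPS_RLD_MAP_REL, an offset
relative to the dynamic entry) of a word that holds that pointer. */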
6747
6748 static CORE_ADDR
6749 get_r_debug (const int pid, const int is_elf64)
6750 {
6751 CORE_ADDR dynamic_memaddr;
6752 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6753 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6754 CORE_ADDR map = -1;
6755
6756 dynamic_memaddr = get_dynamic (pid, is_elf64);
6757 if (dynamic_memaddr == 0)
6758 return map;
6759
6760 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6761 {
6762 if (is_elf64)
6763 {
6764 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6765 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6766 union
6767 {
6768 Elf64_Xword map;
6769 unsigned char buf[sizeof (Elf64_Xword)];
6770 }
6771 rld_map;
6772 #endif
6773 #ifdef DT_MIPS_RLD_MAP
6774 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6775 {
6776 if (linux_read_memory (dyn->d_un.d_val,
6777 rld_map.buf, sizeof (rld_map.buf)) == 0)
6778 return rld_map.map;
6779 else
6780 break;
6781 }
6782 #endif /* DT_MIPS_RLD_MAP */
6783 #ifdef DT_MIPS_RLD_MAP_REL
6784 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6785 {
6786 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6787 rld_map.buf, sizeof (rld_map.buf)) == 0)
6788 return rld_map.map;
6789 else
6790 break;
6791 }
6792 #endif /* DT_MIPS_RLD_MAP_REL */
6793
6794 if (dyn->d_tag == DT_DEBUG && map == -1)
6795 map = dyn->d_un.d_val;
6796
6797 if (dyn->d_tag == DT_NULL)
6798 break;
6799 }
6800 else
6801 {
6802 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6803 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6804 union
6805 {
6806 Elf32_Word map;
6807 unsigned char buf[sizeof (Elf32_Word)];
6808 }
6809 rld_map;
6810 #endif
6811 #ifdef DT_MIPS_RLD_MAP
6812 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6813 {
6814 if (linux_read_memory (dyn->d_un.d_val,
6815 rld_map.buf, sizeof (rld_map.buf)) == 0)
6816 return rld_map.map;
6817 else
6818 break;
6819 }
6820 #endif /* DT_MIPS_RLD_MAP */
6821 #ifdef DT_MIPS_RLD_MAP_REL
6822 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6823 {
6824 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6825 rld_map.buf, sizeof (rld_map.buf)) == 0)
6826 return rld_map.map;
6827 else
6828 break;
6829 }
6830 #endif /* DT_MIPS_RLD_MAP_REL */
6831
6832 if (dyn->d_tag == DT_DEBUG && map == -1)
6833 map = dyn->d_un.d_val;
6834
6835 if (dyn->d_tag == DT_NULL)
6836 break;
6837 }
6838
6839 dynamic_memaddr += dyn_size;
6840 }
6841
6842 return map;
6843 }
6844
6845 /* Read one pointer from MEMADDR in the inferior. */
6846
6847 static int
6848 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6849 {
6850 int ret;
6851
6852 /* Go through a union so this works on either big or little endian
6853 hosts, when the inferior's pointer size is smaller than the size
6854 of CORE_ADDR. The inferior's endianness is assumed to be the
6855 same as the superior's. */
6856 union
6857 {
6858 CORE_ADDR core_addr;
6859 unsigned int ui;
6860 unsigned char uc;
6861 } addr;
6862
6863 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6864 if (ret == 0)
6865 {
6866 if (ptr_size == sizeof (CORE_ADDR))
6867 *ptr = addr.core_addr;
6868 else if (ptr_size == sizeof (unsigned int))
6869 *ptr = addr.ui;
6870 else
6871 gdb_assert_not_reached ("unhandled pointer size");
6872 }
6873 return ret;
6874 }
6875
6876 bool
6877 linux_process_target::supports_qxfer_libraries_svr4 ()
6878 {
6879 return true;
6880 }
6881
6882 struct link_map_offsets
6883 {
6884 /* Offset and size of r_debug.r_version. */
6885 int r_version_offset;
6886
6887 /* Offset and size of r_debug.r_map. */
6888 int r_map_offset;
6889
6890 /* Offset to l_addr field in struct link_map. */
6891 int l_addr_offset;
6892
6893 /* Offset to l_name field in struct link_map. */
6894 int l_name_offset;
6895
6896 /* Offset to l_ld field in struct link_map. */
6897 int l_ld_offset;
6898
6899 /* Offset to l_next field in struct link_map. */
6900 int l_next_offset;
6901
6902 /* Offset to l_prev field in struct link_map. */
6903 int l_prev_offset;
6904 };
6905
6906 /* Construct qXfer:libraries-svr4:read reply. */
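/* An illustrative reply (addresses are made up) for a process with a
single shared library:

<library-list-svr4 version="1.0" main-lm="0x7ffff7ffd190">
<library name="/lib/libfoo.so" lm="0x7ffff7fc3700" l_addr="0x7ffff7dd5000" l_ld="0x7ffff7fca078"/>
</library-list-svr4> */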
6907
6908 int
6909 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6910 unsigned char *readbuf,
6911 unsigned const char *writebuf,
6912 CORE_ADDR offset, int len)
6913 {
6914 struct process_info_private *const priv = current_process ()->priv;
6915 char filename[PATH_MAX];
6916 int pid, is_elf64;
6917
6918 static const struct link_map_offsets lmo_32bit_offsets =
6919 {
6920 0, /* r_version offset. */
6921 4, /* r_debug.r_map offset. */
6922 0, /* l_addr offset in link_map. */
6923 4, /* l_name offset in link_map. */
6924 8, /* l_ld offset in link_map. */
6925 12, /* l_next offset in link_map. */
6926 16 /* l_prev offset in link_map. */
6927 };
6928
6929 static const struct link_map_offsets lmo_64bit_offsets =
6930 {
6931 0, /* r_version offset. */
6932 8, /* r_debug.r_map offset. */
6933 0, /* l_addr offset in link_map. */
6934 8, /* l_name offset in link_map. */
6935 16, /* l_ld offset in link_map. */
6936 24, /* l_next offset in link_map. */
6937 32 /* l_prev offset in link_map. */
6938 };
6939 const struct link_map_offsets *lmo;
6940 unsigned int machine;
6941 int ptr_size;
6942 CORE_ADDR lm_addr = 0, lm_prev = 0;
6943 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6944 int header_done = 0;
6945
6946 if (writebuf != NULL)
6947 return -2;
6948 if (readbuf == NULL)
6949 return -1;
6950
6951 pid = lwpid_of (current_thread);
6952 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6953 is_elf64 = elf_64_file_p (filename, &machine);
6954 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6955 ptr_size = is_elf64 ? 8 : 4;
6956
6957 while (annex[0] != '\0')
6958 {
6959 const char *sep;
6960 CORE_ADDR *addrp;
6961 int name_len;
6962
6963 sep = strchr (annex, '=');
6964 if (sep == NULL)
6965 break;
6966
6967 name_len = sep - annex;
6968 if (name_len == 5 && startswith (annex, "start"))
6969 addrp = &lm_addr;
6970 else if (name_len == 4 && startswith (annex, "prev"))
6971 addrp = &lm_prev;
6972 else
6973 {
6974 annex = strchr (sep, ';');
6975 if (annex == NULL)
6976 break;
6977 annex++;
6978 continue;
6979 }
6980
6981 annex = decode_address_to_semicolon (addrp, sep + 1);
6982 }
6983
6984 if (lm_addr == 0)
6985 {
6986 int r_version = 0;
6987
6988 if (priv->r_debug == 0)
6989 priv->r_debug = get_r_debug (pid, is_elf64);
6990
6991 /* We failed to find DT_DEBUG. This situation will not change
6992 for this inferior, so do not retry. Report it to GDB as E01;
6993 see GDB's solib-svr4.c for the reasons. */
6994 if (priv->r_debug == (CORE_ADDR) -1)
6995 return -1;
6996
6997 if (priv->r_debug != 0)
6998 {
6999 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7000 (unsigned char *) &r_version,
7001 sizeof (r_version)) != 0
7002 || r_version != 1)
7003 {
7004 warning ("unexpected r_debug version %d", r_version);
7005 }
7006 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7007 &lm_addr, ptr_size) != 0)
7008 {
7009 warning ("unable to read r_map from 0x%lx",
7010 (long) priv->r_debug + lmo->r_map_offset);
7011 }
7012 }
7013 }
7014
7015 std::string document = "<library-list-svr4 version=\"1.0\"";
7016
7017 while (lm_addr
7018 && read_one_ptr (lm_addr + lmo->l_name_offset,
7019 &l_name, ptr_size) == 0
7020 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7021 &l_addr, ptr_size) == 0
7022 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7023 &l_ld, ptr_size) == 0
7024 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7025 &l_prev, ptr_size) == 0
7026 && read_one_ptr (lm_addr + lmo->l_next_offset,
7027 &l_next, ptr_size) == 0)
7028 {
7029 unsigned char libname[PATH_MAX];
7030
7031 if (lm_prev != l_prev)
7032 {
7033 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7034 (long) lm_prev, (long) l_prev);
7035 break;
7036 }
7037
7038 /* Ignore the first entry even if it has a valid name, as the first
7039 entry corresponds to the main executable. The first entry should not
7040 be skipped if the dynamic loader was loaded late by a static executable
7041 (see the solib-svr4.c parameter ignore_first). But in such a case the
7042 main executable does not have PT_DYNAMIC present, and this function
7043 has already exited above due to a failed get_r_debug. */
7044 if (lm_prev == 0)
7045 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7046 else
7047 {
7048 /* Not checking for error because reading may stop before
7049 we've got PATH_MAX worth of characters. */
7050 libname[0] = '\0';
7051 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7052 libname[sizeof (libname) - 1] = '\0';
7053 if (libname[0] != '\0')
7054 {
7055 if (!header_done)
7056 {
7057 /* Terminate `<library-list-svr4'. */
7058 document += '>';
7059 header_done = 1;
7060 }
7061
7062 string_appendf (document, "<library name=\"");
7063 xml_escape_text_append (&document, (char *) libname);
7064 string_appendf (document, "\" lm=\"0x%lx\" "
7065 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7066 (unsigned long) lm_addr, (unsigned long) l_addr,
7067 (unsigned long) l_ld);
7068 }
7069 }
7070
7071 lm_prev = lm_addr;
7072 lm_addr = l_next;
7073 }
7074
7075 if (!header_done)
7076 {
7077 /* Empty list; terminate `<library-list-svr4'. */
7078 document += "/>";
7079 }
7080 else
7081 document += "</library-list-svr4>";
7082
7083 int document_len = document.length ();
7084 if (offset < document_len)
7085 document_len -= offset;
7086 else
7087 document_len = 0;
7088 if (len > document_len)
7089 len = document_len;
7090
7091 memcpy (readbuf, document.data () + offset, len);
7092
7093 return len;
7094 }
7095
7096 #ifdef HAVE_LINUX_BTRACE
7097
7098 btrace_target_info *
7099 linux_process_target::enable_btrace (ptid_t ptid,
7100 const btrace_config *conf)
7101 {
7102 return linux_enable_btrace (ptid, conf);
7103 }
7104
7105 /* See to_disable_btrace target method. */
7106
7107 int
7108 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7109 {
7110 enum btrace_error err;
7111
7112 err = linux_disable_btrace (tinfo);
7113 return (err == BTRACE_ERR_NONE ? 0 : -1);
7114 }
7115
7116 /* Encode an Intel Processor Trace configuration. */
7117
7118 static void
7119 linux_low_encode_pt_config (struct buffer *buffer,
7120 const struct btrace_data_pt_config *config)
7121 {
7122 buffer_grow_str (buffer, "<pt-config>\n");
7123
7124 switch (config->cpu.vendor)
7125 {
7126 case CV_INTEL:
7127 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7128 "model=\"%u\" stepping=\"%u\"/>\n",
7129 config->cpu.family, config->cpu.model,
7130 config->cpu.stepping);
7131 break;
7132
7133 default:
7134 break;
7135 }
7136
7137 buffer_grow_str (buffer, "</pt-config>\n");
7138 }
7139
7140 /* Encode a raw buffer. */
7141
7142 static void
7143 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7144 unsigned int size)
7145 {
7146 if (size == 0)
7147 return;
7148
7149 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7150 buffer_grow_str (buffer, "<raw>\n");
7151
7152 while (size-- > 0)
7153 {
7154 char elem[2];
7155
7156 elem[0] = tohex ((*data >> 4) & 0xf);
7157 elem[1] = tohex (*data++ & 0xf);
7158
7159 buffer_grow (buffer, elem, 2);
7160 }
7161
7162 buffer_grow_str (buffer, "</raw>\n");
7163 }
7164
7165 /* See to_read_btrace target method. */
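/* For BTS, the reply generated below looks along these lines
(illustrative addresses):

<!DOCTYPE btrace SYSTEM "btrace.dtd">
<btrace version="1.0">
<block begin="0x401000" end="0x401034"/>
</btrace> */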
7166
7167 int
7168 linux_process_target::read_btrace (btrace_target_info *tinfo,
7169 buffer *buffer,
7170 enum btrace_read_type type)
7171 {
7172 struct btrace_data btrace;
7173 enum btrace_error err;
7174
7175 err = linux_read_btrace (&btrace, tinfo, type);
7176 if (err != BTRACE_ERR_NONE)
7177 {
7178 if (err == BTRACE_ERR_OVERFLOW)
7179 buffer_grow_str0 (buffer, "E.Overflow.");
7180 else
7181 buffer_grow_str0 (buffer, "E.Generic Error.");
7182
7183 return -1;
7184 }
7185
7186 switch (btrace.format)
7187 {
7188 case BTRACE_FORMAT_NONE:
7189 buffer_grow_str0 (buffer, "E.No Trace.");
7190 return -1;
7191
7192 case BTRACE_FORMAT_BTS:
7193 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7194 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7195
7196 for (const btrace_block &block : *btrace.variant.bts.blocks)
7197 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7198 paddress (block.begin), paddress (block.end));
7199
7200 buffer_grow_str0 (buffer, "</btrace>\n");
7201 break;
7202
7203 case BTRACE_FORMAT_PT:
7204 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7205 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7206 buffer_grow_str (buffer, "<pt>\n");
7207
7208 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7209
7210 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7211 btrace.variant.pt.size);
7212
7213 buffer_grow_str (buffer, "</pt>\n");
7214 buffer_grow_str0 (buffer, "</btrace>\n");
7215 break;
7216
7217 default:
7218 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7219 return -1;
7220 }
7221
7222 return 0;
7223 }
7224
7225 /* See to_btrace_conf target method. */
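/* For example, a BTS configuration with a 64KiB ring buffer would be
encoded as (illustrative size):

<!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
<btrace-conf version="1.0">
<bts size="0x10000" />
</btrace-conf> */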
7226
7227 int
7228 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7229 buffer *buffer)
7230 {
7231 const struct btrace_config *conf;
7232
7233 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7234 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7235
7236 conf = linux_btrace_conf (tinfo);
7237 if (conf != NULL)
7238 {
7239 switch (conf->format)
7240 {
7241 case BTRACE_FORMAT_NONE:
7242 break;
7243
7244 case BTRACE_FORMAT_BTS:
7245 buffer_xml_printf (buffer, "<bts");
7246 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7247 buffer_xml_printf (buffer, " />\n");
7248 break;
7249
7250 case BTRACE_FORMAT_PT:
7251 buffer_xml_printf (buffer, "<pt");
7252 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7253 buffer_xml_printf (buffer, "/>\n");
7254 break;
7255 }
7256 }
7257
7258 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7259 return 0;
7260 }
7261 #endif /* HAVE_LINUX_BTRACE */
7262
7263 /* See nat/linux-nat.h. */
7264
7265 ptid_t
7266 current_lwp_ptid (void)
7267 {
7268 return ptid_of (current_thread);
7269 }
7270
7271 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7272
7273 int
7274 linux_process_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7275 {
7276 if (the_low_target.breakpoint_kind_from_pc != NULL)
7277 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7278 else
7279 return process_stratum_target::breakpoint_kind_from_pc (pcptr);
7280 }
7281
7282 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7283
7284 const gdb_byte *
7285 linux_process_target::sw_breakpoint_from_kind (int kind, int *size)
7286 {
7287 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7288
7289 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7290 }
7291
7292 /* Implementation of the target_ops method
7293 "breakpoint_kind_from_current_state". */
7294
7295 int
7296 linux_process_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7297 {
7298 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7299 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7300 else
7301 return breakpoint_kind_from_pc (pcptr);
7302 }
7303
7304 const char *
7305 linux_process_target::thread_name (ptid_t thread)
7306 {
7307 return linux_proc_tid_get_name (thread);
7308 }
7309
7310 #if USE_THREAD_DB
7311 bool
7312 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7313 int *handle_len)
7314 {
7315 return thread_db_thread_handle (ptid, handle, handle_len);
7316 }
7317 #endif
7318
7319 /* Default implementation of linux_target_ops method "set_pc" for
7320 32-bit pc register which is literally named "pc". */
7321
7322 void
7323 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7324 {
7325 uint32_t newpc = pc;
7326
7327 supply_register_by_name (regcache, "pc", &newpc);
7328 }
7329
7330 /* Default implementation of linux_target_ops method "get_pc" for
7331 32-bit pc register which is literally named "pc". */
7332
7333 CORE_ADDR
7334 linux_get_pc_32bit (struct regcache *regcache)
7335 {
7336 uint32_t pc;
7337
7338 collect_register_by_name (regcache, "pc", &pc);
7339 if (debug_threads)
7340 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7341 return pc;
7342 }
7343
7344 /* Default implementation of linux_target_ops method "set_pc" for
7345 64-bit pc register which is literally named "pc". */
7346
7347 void
7348 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7349 {
7350 uint64_t newpc = pc;
7351
7352 supply_register_by_name (regcache, "pc", &newpc);
7353 }
7354
7355 /* Default implementation of linux_target_ops method "get_pc" for
7356 64-bit pc register which is literally named "pc". */
7357
7358 CORE_ADDR
7359 linux_get_pc_64bit (struct regcache *regcache)
7360 {
7361 uint64_t pc;
7362
7363 collect_register_by_name (regcache, "pc", &pc);
7364 if (debug_threads)
7365 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7366 return pc;
7367 }
7368
7369 /* See linux-low.h. */
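/* Scan the inferior's auxv two words at a time; each entry is an
(a_type, a_val) pair, and the value of the first entry whose type
equals MATCH is stored in *VALP. Returns 1 on a match, 0 otherwise;
linux_get_hwcap below is a typical caller. */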
7370
7371 int
7372 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7373 {
7374 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7375 int offset = 0;
7376
7377 gdb_assert (wordsize == 4 || wordsize == 8);
7378
7379 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7380 {
7381 if (wordsize == 4)
7382 {
7383 uint32_t *data_p = (uint32_t *) data;
7384 if (data_p[0] == match)
7385 {
7386 *valp = data_p[1];
7387 return 1;
7388 }
7389 }
7390 else
7391 {
7392 uint64_t *data_p = (uint64_t *) data;
7393 if (data_p[0] == match)
7394 {
7395 *valp = data_p[1];
7396 return 1;
7397 }
7398 }
7399
7400 offset += 2 * wordsize;
7401 }
7402
7403 return 0;
7404 }
7405
7406 /* See linux-low.h. */
7407
7408 CORE_ADDR
7409 linux_get_hwcap (int wordsize)
7410 {
7411 CORE_ADDR hwcap = 0;
7412 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7413 return hwcap;
7414 }
7415
7416 /* See linux-low.h. */
7417
7418 CORE_ADDR
7419 linux_get_hwcap2 (int wordsize)
7420 {
7421 CORE_ADDR hwcap2 = 0;
7422 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7423 return hwcap2;
7424 }
7425
7426 #ifdef HAVE_LINUX_REGSETS
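/* Count the regsets in INFO; the regsets table is expected to be
terminated by a sentinel entry whose size is negative. */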
7427 void
7428 initialize_regsets_info (struct regsets_info *info)
7429 {
7430 for (info->num_regsets = 0;
7431 info->regsets[info->num_regsets].size >= 0;
7432 info->num_regsets++)
7433 ;
7434 }
7435 #endif
7436
7437 void
7438 initialize_low (void)
7439 {
7440 struct sigaction sigchld_action;
7441
7442 memset (&sigchld_action, 0, sizeof (sigchld_action));
7443 set_target_ops (the_linux_target);
7444
7445 linux_ptrace_init_warnings ();
7446 linux_proc_init_warnings ();
7447
7448 sigchld_action.sa_handler = sigchld_handler;
7449 sigemptyset (&sigchld_action.sa_mask);
7450 sigchld_action.sa_flags = SA_RESTART;
7451 sigaction (SIGCHLD, &sigchld_action, NULL);
7452
7453 initialize_low_arch ();
7454
7455 linux_check_ptrace_features ();
7456 }