gdbserver/linux-low: turn 'supports_range_stepping' into a method
gdbserver/linux-low.cc (binutils-gdb.git)
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2020 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "gdbsupport/agent.h"
23 #include "tdesc.h"
24 #include "gdbsupport/rsp-low.h"
25 #include "gdbsupport/signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdbsupport/gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "gdbsupport/filestuff.h"
47 #include "tracepoint.h"
48 #include <inttypes.h>
49 #include "gdbsupport/common-inferior.h"
50 #include "nat/fork-inferior.h"
51 #include "gdbsupport/environ.h"
52 #include "gdbsupport/gdb-sigmask.h"
53 #include "gdbsupport/scoped_restore.h"
54 #ifndef ELFMAG0
55 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
56 then ELFMAG0 will have been defined. If it didn't get included by
57 gdb_proc_service.h then including it will likely introduce a duplicate
58 definition of elf_fpregset_t. */
59 #include <elf.h>
60 #endif
61 #include "nat/linux-namespaces.h"
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 #ifndef AT_HWCAP2
75 #define AT_HWCAP2 26
76 #endif
77
78 /* Some targets did not define these ptrace constants from the start,
79 so gdbserver defines them locally here. In the future, these may
80 be removed after they are added to asm/ptrace.h. */
81 #if !(defined(PT_TEXT_ADDR) \
82 || defined(PT_DATA_ADDR) \
83 || defined(PT_TEXT_END_ADDR))
84 #if defined(__mcoldfire__)
85 /* These are still undefined in 3.10 kernels. */
86 #define PT_TEXT_ADDR 49*4
87 #define PT_DATA_ADDR 50*4
88 #define PT_TEXT_END_ADDR 51*4
89 /* BFIN already defines these since at least 2.6.32 kernels. */
90 #elif defined(BFIN)
91 #define PT_TEXT_ADDR 220
92 #define PT_TEXT_END_ADDR 224
93 #define PT_DATA_ADDR 228
94 /* These are still undefined in 3.10 kernels. */
95 #elif defined(__TMS320C6X__)
96 #define PT_TEXT_ADDR (0x10000*4)
97 #define PT_DATA_ADDR (0x10004*4)
98 #define PT_TEXT_END_ADDR (0x10008*4)
99 #endif
100 #endif
101
102 #if (defined(__UCLIBC__) \
103 && defined(HAS_NOMMU) \
104 && defined(PT_TEXT_ADDR) \
105 && defined(PT_DATA_ADDR) \
106 && defined(PT_TEXT_END_ADDR))
107 #define SUPPORTS_READ_OFFSETS
108 #endif
109
110 #ifdef HAVE_LINUX_BTRACE
111 # include "nat/linux-btrace.h"
112 # include "gdbsupport/btrace-common.h"
113 #endif
114
115 #ifndef HAVE_ELF32_AUXV_T
116 /* Copied from glibc's elf.h. */
117 typedef struct
118 {
119 uint32_t a_type; /* Entry type */
120 union
121 {
122 uint32_t a_val; /* Integer value */
123 /* We used to have pointer elements added here. We cannot do that,
124 though, since it does not work when using 32-bit definitions
125 on 64-bit platforms and vice versa. */
126 } a_un;
127 } Elf32_auxv_t;
128 #endif
129
130 #ifndef HAVE_ELF64_AUXV_T
131 /* Copied from glibc's elf.h. */
132 typedef struct
133 {
134 uint64_t a_type; /* Entry type */
135 union
136 {
137 uint64_t a_val; /* Integer value */
138 /* We used to have pointer elements added here. We cannot do that,
139 though, since it does not work when using 32-bit definitions
140 on 64-bit platforms and vice versa. */
141 } a_un;
142 } Elf64_auxv_t;
143 #endif
144
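/* For illustration only: gdbserver parses the inferior's
   /proc/<pid>/auxv using the layouts above.  Below is a minimal
   sketch of walking a raw 64-bit auxv image; BUF/LEN are assumed to
   hold the full auxv contents, and the helper name is hypothetical.  */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint64_t
example_auxv_lookup (const unsigned char *buf, size_t len, uint64_t type)
{
  const Elf64_auxv_t *av = (const Elf64_auxv_t *) buf;
  size_t count = len / sizeof (Elf64_auxv_t);

  /* The vector is terminated by an AT_NULL entry.  */
  for (size_t i = 0; i < count && av[i].a_type != AT_NULL; i++)
    if (av[i].a_type == type)
      return av[i].a_un.a_val;
  return 0;
}
#endif
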
145 /* Does the current host support PTRACE_GETREGSET? */
146 int have_ptrace_getregset = -1;
147
148 /* LWP accessors. */
149
150 /* See nat/linux-nat.h. */
151
152 ptid_t
153 ptid_of_lwp (struct lwp_info *lwp)
154 {
155 return ptid_of (get_lwp_thread (lwp));
156 }
157
158 /* See nat/linux-nat.h. */
159
160 void
161 lwp_set_arch_private_info (struct lwp_info *lwp,
162 struct arch_lwp_info *info)
163 {
164 lwp->arch_private = info;
165 }
166
167 /* See nat/linux-nat.h. */
168
169 struct arch_lwp_info *
170 lwp_arch_private_info (struct lwp_info *lwp)
171 {
172 return lwp->arch_private;
173 }
174
175 /* See nat/linux-nat.h. */
176
177 int
178 lwp_is_stopped (struct lwp_info *lwp)
179 {
180 return lwp->stopped;
181 }
182
183 /* See nat/linux-nat.h. */
184
185 enum target_stop_reason
186 lwp_stop_reason (struct lwp_info *lwp)
187 {
188 return lwp->stop_reason;
189 }
190
191 /* See nat/linux-nat.h. */
192
193 int
194 lwp_is_stepping (struct lwp_info *lwp)
195 {
196 return lwp->stepping;
197 }
198
199 /* A list of all unknown processes which receive stop signals. Some
200 other process will presumably claim each of these as forked
201 children momentarily. */
202
203 struct simple_pid_list
204 {
205 /* The process ID. */
206 int pid;
207
208 /* The status as reported by waitpid. */
209 int status;
210
211 /* Next in chain. */
212 struct simple_pid_list *next;
213 };
214 struct simple_pid_list *stopped_pids;
215
216 /* Trivial list manipulation functions to keep track of a list of new
217 stopped processes. */
218
219 static void
220 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
221 {
222 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
223
224 new_pid->pid = pid;
225 new_pid->status = status;
226 new_pid->next = *listp;
227 *listp = new_pid;
228 }
229
230 static int
231 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
232 {
233 struct simple_pid_list **p;
234
235 for (p = listp; *p != NULL; p = &(*p)->next)
236 if ((*p)->pid == pid)
237 {
238 struct simple_pid_list *next = (*p)->next;
239
240 *statusp = (*p)->status;
241 xfree (*p);
242 *p = next;
243 return 1;
244 }
245 return 0;
246 }
247
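/* For illustration: how the two halves of this list cooperate in
   this file.  A stop observed for a not-yet-known pid is parked with
   add_to_pid_list; the fork/clone event handler later claims it,
   falling back to waitpid if nothing was parked yet (cf.
   handle_extended_wait below).  */
#if 0
  int status;

  /* Collector side: park the stop of an unknown new pid.  */
  add_to_pid_list (&stopped_pids, new_pid, wstat);

  /* Consumer side: claim it, or block until the stop arrives.  */
  if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
    my_waitpid (new_pid, &status, __WALL);
#endif
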
248 enum stopping_threads_kind
249 {
250 /* Not stopping threads presently. */
251 NOT_STOPPING_THREADS,
252
253 /* Stopping threads. */
254 STOPPING_THREADS,
255
256 /* Stopping and suspending threads. */
257 STOPPING_AND_SUSPENDING_THREADS
258 };
259
260 /* This is set while stop_all_lwps is in effect. */
261 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
262
263 /* FIXME make into a target method? */
264 int using_threads = 1;
265
266 /* True if we're presently stabilizing threads (moving them out of
267 jump pads). */
268 static int stabilizing_threads;
269
270 static void unsuspend_all_lwps (struct lwp_info *except);
271 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
272 static int lwp_is_marked_dead (struct lwp_info *lwp);
273 static int finish_step_over (struct lwp_info *lwp);
274 static int kill_lwp (unsigned long lwpid, int signo);
275 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
276 static int linux_low_ptrace_options (int attached);
277 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
278
279 /* When the event-loop is doing a step-over, this points at the thread
280 being stepped. */
281 ptid_t step_over_bkpt;
282
283 /* True if the low target can hardware single-step. */
284
285 static int
286 can_hardware_single_step (void)
287 {
288 if (the_low_target.supports_hardware_single_step != NULL)
289 return the_low_target.supports_hardware_single_step ();
290 else
291 return 0;
292 }
293
294 bool
295 linux_process_target::low_supports_breakpoints ()
296 {
297 return false;
298 }
299
300 CORE_ADDR
301 linux_process_target::low_get_pc (regcache *regcache)
302 {
303 return 0;
304 }
305
306 void
307 linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
308 {
309 gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
310 }
311
312 std::vector<CORE_ADDR>
313 linux_process_target::low_get_next_pcs (regcache *regcache)
314 {
315 gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
316 "implemented");
317 }
318
319 int
320 linux_process_target::low_decr_pc_after_break ()
321 {
322 return 0;
323 }
324
325 /* True if LWP is stopped in its stepping range. */
326
327 static int
328 lwp_in_step_range (struct lwp_info *lwp)
329 {
330 CORE_ADDR pc = lwp->stop_pc;
331
332 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
333 }
334
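/* For illustration: the stepping range is half-open,
   [step_range_start, step_range_end), matching the range passed by
   the client's vCont;r request, so a stop exactly at step_range_end
   is already outside the range.  The addresses below are made up.  */
#if 0
  lwp->step_range_start = 0x1000;
  lwp->step_range_end = 0x1010;

  lwp->stop_pc = 0x100c;
  gdb_assert (lwp_in_step_range (lwp));    /* inside the range */
  lwp->stop_pc = 0x1010;
  gdb_assert (!lwp_in_step_range (lwp));   /* end is exclusive */
#endif
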
335 struct pending_signals
336 {
337 int signal;
338 siginfo_t info;
339 struct pending_signals *prev;
340 };
341
342 /* The read/write ends of the pipe registered as waitable file in the
343 event loop. */
344 static int linux_event_pipe[2] = { -1, -1 };
345
346 /* True if we're currently in async mode. */
347 #define target_is_async_p() (linux_event_pipe[0] != -1)
348
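/* For illustration: this pipe implements the usual self-pipe trick.
   Marking makes the read end readable and wakes the event loop;
   flushing drains it.  A sketch of that pair, assuming both ends
   were made O_NONBLOCK when async mode was enabled (cf.
   async_file_mark, declared further down).  */
#if 0
  /* Mark: wake up the event loop.  */
  if (write (linux_event_pipe[1], "+", 1) < 0)
    ; /* A full pipe already counts as marked; errors ignored.  */

  /* Flush: drain all queued wakeups.  */
  char buf[16];
  while (read (linux_event_pipe[0], buf, sizeof (buf)) > 0)
    ;
#endif
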
349 static void send_sigstop (struct lwp_info *lwp);
350
351 /* Return non-zero if HEADER is a 64-bit ELF file. */
352
353 static int
354 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
355 {
356 if (header->e_ident[EI_MAG0] == ELFMAG0
357 && header->e_ident[EI_MAG1] == ELFMAG1
358 && header->e_ident[EI_MAG2] == ELFMAG2
359 && header->e_ident[EI_MAG3] == ELFMAG3)
360 {
361 *machine = header->e_machine;
362 return header->e_ident[EI_CLASS] == ELFCLASS64;
363
364 }
365 *machine = EM_NONE;
366 return 0;
367 }
368
369 /* Return non-zero if FILE is a 64-bit ELF file,
370 zero if the file is not a 64-bit ELF file,
371 and -1 if the file is not accessible or doesn't exist. */
372
373 static int
374 elf_64_file_p (const char *file, unsigned int *machine)
375 {
376 Elf64_Ehdr header;
377 int fd;
378
379 fd = open (file, O_RDONLY);
380 if (fd < 0)
381 return -1;
382
383 if (read (fd, &header, sizeof (header)) != sizeof (header))
384 {
385 close (fd);
386 return 0;
387 }
388 close (fd);
389
390 return elf_64_header_p (&header, machine);
391 }
392
393 /* Accept an integer PID; return true if the executable that PID is
394 running is a 64-bit ELF file. */
395
396 int
397 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
398 {
399 char file[PATH_MAX];
400
401 sprintf (file, "/proc/%d/exe", pid);
402 return elf_64_file_p (file, machine);
403 }
404
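/* For illustration: a typical use of the result, picking the
   inferior's pointer size from its ELF class.  The variable names
   here are made up.  */
#if 0
  unsigned int machine;
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  /* 1 => 64-bit ELF, 0 => 32-bit or unreadable header,
     -1 => /proc/<pid>/exe not accessible.  */
  int ptr_bytes = (is_elf64 == 1 ? 8 : 4);
#endif
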
405 void
406 linux_process_target::delete_lwp (lwp_info *lwp)
407 {
408 struct thread_info *thr = get_lwp_thread (lwp);
409
410 if (debug_threads)
411 debug_printf ("deleting %ld\n", lwpid_of (thr));
412
413 remove_thread (thr);
414
415 low_delete_thread (lwp->arch_private);
416
417 free (lwp);
418 }
419
420 void
421 linux_process_target::low_delete_thread (arch_lwp_info *info)
422 {
423 /* Default implementation should be overridden if architecture-specific
424 info is being used. */
425 gdb_assert (info == nullptr);
426 }
427
428 process_info *
429 linux_process_target::add_linux_process (int pid, int attached)
430 {
431 struct process_info *proc;
432
433 proc = add_process (pid, attached);
434 proc->priv = XCNEW (struct process_info_private);
435
436 proc->priv->arch_private = low_new_process ();
437
438 return proc;
439 }
440
441 arch_process_info *
442 linux_process_target::low_new_process ()
443 {
444 return nullptr;
445 }
446
447 void
448 linux_process_target::low_delete_process (arch_process_info *info)
449 {
450 /* Default implementation must be overridden if architecture-specific
451 info exists. */
452 gdb_assert (info == nullptr);
453 }
454
455 void
456 linux_process_target::low_new_fork (process_info *parent, process_info *child)
457 {
458 /* Nop. */
459 }
460
461 void
462 linux_process_target::arch_setup_thread (thread_info *thread)
463 {
464 struct thread_info *saved_thread;
465
466 saved_thread = current_thread;
467 current_thread = thread;
468
469 low_arch_setup ();
470
471 current_thread = saved_thread;
472 }
473
474 int
475 linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
476 int wstat)
477 {
478 client_state &cs = get_client_state ();
479 struct lwp_info *event_lwp = *orig_event_lwp;
480 int event = linux_ptrace_get_extended_event (wstat);
481 struct thread_info *event_thr = get_lwp_thread (event_lwp);
482 struct lwp_info *new_lwp;
483
484 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
485
486 /* All extended events we currently use are mid-syscall. Only
487 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
488 you have to be using PTRACE_SEIZE to get that. */
489 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
490
491 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
492 || (event == PTRACE_EVENT_CLONE))
493 {
494 ptid_t ptid;
495 unsigned long new_pid;
496 int ret, status;
497
498 /* Get the pid of the new lwp. */
499 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
500 &new_pid);
501
502 /* If we haven't already seen the new PID stop, wait for it now. */
503 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
504 {
505 /* The new child has a pending SIGSTOP. We can't affect it until it
506 hits the SIGSTOP, but we're already attached. */
507
508 ret = my_waitpid (new_pid, &status, __WALL);
509
510 if (ret == -1)
511 perror_with_name ("waiting for new child");
512 else if (ret != new_pid)
513 warning ("wait returned unexpected PID %d", ret);
514 else if (!WIFSTOPPED (status))
515 warning ("wait returned unexpected status 0x%x", status);
516 }
517
518 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
519 {
520 struct process_info *parent_proc;
521 struct process_info *child_proc;
522 struct lwp_info *child_lwp;
523 struct thread_info *child_thr;
524 struct target_desc *tdesc;
525
526 ptid = ptid_t (new_pid, new_pid, 0);
527
528 if (debug_threads)
529 {
530 debug_printf ("HEW: Got fork event from LWP %ld, "
531 "new child is %d\n",
532 ptid_of (event_thr).lwp (),
533 ptid.pid ());
534 }
535
536 /* Add the new process to the tables and clone the breakpoint
537 lists of the parent. We need to do this even if the new process
538 will be detached, since we will need the process object and the
539 breakpoints to remove any breakpoints from memory when we
540 detach, and the client side will access registers. */
541 child_proc = add_linux_process (new_pid, 0);
542 gdb_assert (child_proc != NULL);
543 child_lwp = add_lwp (ptid);
544 gdb_assert (child_lwp != NULL);
545 child_lwp->stopped = 1;
546 child_lwp->must_set_ptrace_flags = 1;
547 child_lwp->status_pending_p = 0;
548 child_thr = get_lwp_thread (child_lwp);
549 child_thr->last_resume_kind = resume_stop;
550 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
551
552 /* If we're suspending all threads, leave this one suspended
553 too. If the fork/clone parent is stepping over a breakpoint,
554 all other threads have been suspended already. Leave the
555 child suspended too. */
556 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
557 || event_lwp->bp_reinsert != 0)
558 {
559 if (debug_threads)
560 debug_printf ("HEW: leaving child suspended\n");
561 child_lwp->suspended = 1;
562 }
563
564 parent_proc = get_thread_process (event_thr);
565 child_proc->attached = parent_proc->attached;
566
567 if (event_lwp->bp_reinsert != 0
568 && supports_software_single_step ()
569 && event == PTRACE_EVENT_VFORK)
570 {
571 /* If we leave single-step breakpoints there, child will
572 hit it, so uninsert single-step breakpoints from parent
573 (and child). Once vfork child is done, reinsert
574 them back to parent. */
575 uninsert_single_step_breakpoints (event_thr);
576 }
577
578 clone_all_breakpoints (child_thr, event_thr);
579
580 tdesc = allocate_target_description ();
581 copy_target_description (tdesc, parent_proc->tdesc);
582 child_proc->tdesc = tdesc;
583
584 /* Clone arch-specific process data. */
585 low_new_fork (parent_proc, child_proc);
586
587 /* Save fork info in the parent thread. */
588 if (event == PTRACE_EVENT_FORK)
589 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
590 else if (event == PTRACE_EVENT_VFORK)
591 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
592
593 event_lwp->waitstatus.value.related_pid = ptid;
594
595 /* The status_pending field contains bits denoting the
596 extended event, so when the pending event is handled,
597 the handler will look at lwp->waitstatus. */
598 event_lwp->status_pending_p = 1;
599 event_lwp->status_pending = wstat;
600
601 /* Link the threads until the parent event is passed on to
602 higher layers. */
603 event_lwp->fork_relative = child_lwp;
604 child_lwp->fork_relative = event_lwp;
605
606 /* If the parent thread is doing step-over with single-step
607 breakpoints, the list of single-step breakpoints are cloned
608 from the parent's. Remove them from the child process.
609 In case of vfork, we'll reinsert them back once vforked
610 child is done. */
611 if (event_lwp->bp_reinsert != 0
612 && supports_software_single_step ())
613 {
614 /* The child process is forked and stopped, so it is safe
615 to access its memory without stopping all other threads
616 from other processes. */
617 delete_single_step_breakpoints (child_thr);
618
619 gdb_assert (has_single_step_breakpoints (event_thr));
620 gdb_assert (!has_single_step_breakpoints (child_thr));
621 }
622
623 /* Report the event. */
624 return 0;
625 }
626
627 if (debug_threads)
628 debug_printf ("HEW: Got clone event "
629 "from LWP %ld, new child is LWP %ld\n",
630 lwpid_of (event_thr), new_pid);
631
632 ptid = ptid_t (pid_of (event_thr), new_pid, 0);
633 new_lwp = add_lwp (ptid);
634
635 /* Either we're going to immediately resume the new thread
636 or leave it stopped. resume_one_lwp is a nop if it
637 thinks the thread is currently running, so set this first
638 before calling resume_one_lwp. */
639 new_lwp->stopped = 1;
640
641 /* If we're suspending all threads, leave this one suspended
642 too. If the fork/clone parent is stepping over a breakpoint,
643 all other threads have been suspended already. Leave the
644 child suspended too. */
645 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
646 || event_lwp->bp_reinsert != 0)
647 new_lwp->suspended = 1;
648
649 /* Normally we will get the pending SIGSTOP. But in some cases
650 we might get another signal delivered to the group first.
651 If we do get another signal, be sure not to lose it. */
652 if (WSTOPSIG (status) != SIGSTOP)
653 {
654 new_lwp->stop_expected = 1;
655 new_lwp->status_pending_p = 1;
656 new_lwp->status_pending = status;
657 }
658 else if (cs.report_thread_events)
659 {
660 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
661 new_lwp->status_pending_p = 1;
662 new_lwp->status_pending = status;
663 }
664
665 #ifdef USE_THREAD_DB
666 thread_db_notice_clone (event_thr, ptid);
667 #endif
668
669 /* Don't report the event. */
670 return 1;
671 }
672 else if (event == PTRACE_EVENT_VFORK_DONE)
673 {
674 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
675
676 if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
677 {
678 reinsert_single_step_breakpoints (event_thr);
679
680 gdb_assert (has_single_step_breakpoints (event_thr));
681 }
682
683 /* Report the event. */
684 return 0;
685 }
686 else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
687 {
688 struct process_info *proc;
689 std::vector<int> syscalls_to_catch;
690 ptid_t event_ptid;
691 pid_t event_pid;
692
693 if (debug_threads)
694 {
695 debug_printf ("HEW: Got exec event from LWP %ld\n",
696 lwpid_of (event_thr));
697 }
698
699 /* Get the event ptid. */
700 event_ptid = ptid_of (event_thr);
701 event_pid = event_ptid.pid ();
702
703 /* Save the syscall list from the execing process. */
704 proc = get_thread_process (event_thr);
705 syscalls_to_catch = std::move (proc->syscalls_to_catch);
706
707 /* Delete the execing process and all its threads. */
708 mourn (proc);
709 current_thread = NULL;
710
711 /* Create a new process/lwp/thread. */
712 proc = add_linux_process (event_pid, 0);
713 event_lwp = add_lwp (event_ptid);
714 event_thr = get_lwp_thread (event_lwp);
715 gdb_assert (current_thread == event_thr);
716 arch_setup_thread (event_thr);
717
718 /* Set the event status. */
719 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
720 event_lwp->waitstatus.value.execd_pathname
721 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
722
723 /* Mark the exec status as pending. */
724 event_lwp->stopped = 1;
725 event_lwp->status_pending_p = 1;
726 event_lwp->status_pending = wstat;
727 event_thr->last_resume_kind = resume_continue;
728 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
729
730 /* Update syscall state in the new lwp, effectively mid-syscall too. */
731 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
732
733 /* Restore the list to catch. Don't rely on the client, which is free
734 to avoid sending a new list when the architecture doesn't change.
735 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
736 proc->syscalls_to_catch = std::move (syscalls_to_catch);
737
738 /* Report the event. */
739 *orig_event_lwp = event_lwp;
740 return 0;
741 }
742
743 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
744 }
745
746 CORE_ADDR
747 linux_process_target::get_pc (lwp_info *lwp)
748 {
749 struct thread_info *saved_thread;
750 struct regcache *regcache;
751 CORE_ADDR pc;
752
753 if (!low_supports_breakpoints ())
754 return 0;
755
756 saved_thread = current_thread;
757 current_thread = get_lwp_thread (lwp);
758
759 regcache = get_thread_regcache (current_thread, 1);
760 pc = low_get_pc (regcache);
761
762 if (debug_threads)
763 debug_printf ("pc is 0x%lx\n", (long) pc);
764
765 current_thread = saved_thread;
766 return pc;
767 }
768
769 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
770 Fill *SYSNO with the syscall nr trapped. */
771
772 static void
773 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
774 {
775 struct thread_info *saved_thread;
776 struct regcache *regcache;
777
778 if (the_low_target.get_syscall_trapinfo == NULL)
779 {
780 /* If we cannot get the syscall trapinfo, report an unknown
781 system call number. */
782 *sysno = UNKNOWN_SYSCALL;
783 return;
784 }
785
786 saved_thread = current_thread;
787 current_thread = get_lwp_thread (lwp);
788
789 regcache = get_thread_regcache (current_thread, 1);
790 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
791
792 if (debug_threads)
793 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
794
795 current_thread = saved_thread;
796 }
797
798 bool
799 linux_process_target::save_stop_reason (lwp_info *lwp)
800 {
801 CORE_ADDR pc;
802 CORE_ADDR sw_breakpoint_pc;
803 struct thread_info *saved_thread;
804 #if USE_SIGTRAP_SIGINFO
805 siginfo_t siginfo;
806 #endif
807
808 if (!low_supports_breakpoints ())
809 return false;
810
811 pc = get_pc (lwp);
812 sw_breakpoint_pc = pc - low_decr_pc_after_break ();
813
814 /* breakpoint_at reads from the current thread. */
815 saved_thread = current_thread;
816 current_thread = get_lwp_thread (lwp);
817
818 #if USE_SIGTRAP_SIGINFO
819 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
820 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
821 {
822 if (siginfo.si_signo == SIGTRAP)
823 {
824 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
825 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
826 {
827 /* The si_code is ambiguous on this arch -- check debug
828 registers. */
829 if (!check_stopped_by_watchpoint (lwp))
830 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
831 }
832 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
833 {
834 /* If we determine the LWP stopped for a SW breakpoint,
835 trust it. Particularly don't check watchpoint
836 registers, because at least on s390, we'd find
837 stopped-by-watchpoint as long as there's a watchpoint
838 set. */
839 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
840 }
841 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
842 {
843 /* This can indicate either a hardware breakpoint or
844 hardware watchpoint. Check debug registers. */
845 if (!check_stopped_by_watchpoint (lwp))
846 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
847 }
848 else if (siginfo.si_code == TRAP_TRACE)
849 {
850 /* We may have single stepped an instruction that
851 triggered a watchpoint. In that case, on some
852 architectures (such as x86), instead of TRAP_HWBKPT,
853 si_code indicates TRAP_TRACE, and we need to check
854 the debug registers separately. */
855 if (!check_stopped_by_watchpoint (lwp))
856 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
857 }
858 }
859 }
860 #else
861 /* We may have just stepped a breakpoint instruction. E.g., in
862 non-stop mode, GDB first tells the thread A to step a range, and
863 then the user inserts a breakpoint inside the range. In that
864 case we need to report the breakpoint PC. */
865 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
866 && low_breakpoint_at (sw_breakpoint_pc))
867 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
868
869 if (hardware_breakpoint_inserted_here (pc))
870 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
871
872 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
873 check_stopped_by_watchpoint (lwp);
874 #endif
875
876 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
877 {
878 if (debug_threads)
879 {
880 struct thread_info *thr = get_lwp_thread (lwp);
881
882 debug_printf ("CSBB: %s stopped by software breakpoint\n",
883 target_pid_to_str (ptid_of (thr)));
884 }
885
886 /* Back up the PC if necessary. */
887 if (pc != sw_breakpoint_pc)
888 {
889 struct regcache *regcache
890 = get_thread_regcache (current_thread, 1);
891 low_set_pc (regcache, sw_breakpoint_pc);
892 }
893
894 /* Update this so we record the correct stop PC below. */
895 pc = sw_breakpoint_pc;
896 }
897 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
898 {
899 if (debug_threads)
900 {
901 struct thread_info *thr = get_lwp_thread (lwp);
902
903 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
904 target_pid_to_str (ptid_of (thr)));
905 }
906 }
907 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
908 {
909 if (debug_threads)
910 {
911 struct thread_info *thr = get_lwp_thread (lwp);
912
913 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
914 target_pid_to_str (ptid_of (thr)));
915 }
916 }
917 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
918 {
919 if (debug_threads)
920 {
921 struct thread_info *thr = get_lwp_thread (lwp);
922
923 debug_printf ("CSBB: %s stopped by trace\n",
924 target_pid_to_str (ptid_of (thr)));
925 }
926 }
927
928 lwp->stop_pc = pc;
929 current_thread = saved_thread;
930 return true;
931 }
932
933 lwp_info *
934 linux_process_target::add_lwp (ptid_t ptid)
935 {
936 struct lwp_info *lwp;
937
938 lwp = XCNEW (struct lwp_info);
939
940 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
941
942 lwp->thread = add_thread (ptid, lwp);
943
944 low_new_thread (lwp);
945
946 return lwp;
947 }
948
949 void
950 linux_process_target::low_new_thread (lwp_info *info)
951 {
952 /* Nop. */
953 }
954
955 /* Callback to be used when calling fork_inferior, responsible for
956 actually initiating the tracing of the inferior. */
957
958 static void
959 linux_ptrace_fun ()
960 {
961 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
962 (PTRACE_TYPE_ARG4) 0) < 0)
963 trace_start_error_with_name ("ptrace");
964
965 if (setpgid (0, 0) < 0)
966 trace_start_error_with_name ("setpgid");
967
968 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
969 stdout to stderr so that inferior i/o doesn't corrupt the connection.
970 Also, redirect stdin to /dev/null. */
971 if (remote_connection_is_stdio ())
972 {
973 if (close (0) < 0)
974 trace_start_error_with_name ("close");
975 if (open ("/dev/null", O_RDONLY) < 0)
976 trace_start_error_with_name ("open");
977 if (dup2 (2, 1) < 0)
978 trace_start_error_with_name ("dup2");
979 if (write (2, "stdin/stdout redirected\n",
980 sizeof ("stdin/stdout redirected\n") - 1) < 0)
981 {
982 /* Errors ignored. */;
983 }
984 }
985 }
986
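/* For illustration: reduced to its essentials, the TRACEME pattern
   used above looks like this standalone sketch -- the child requests
   tracing, execs, and the kernel stops it with SIGTRAP for the
   parent to collect.  Error handling is omitted and /bin/true is
   just an example program.  */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

  pid_t child = fork ();
  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      execlp ("true", "true", (char *) NULL);
      _exit (127);
    }

  int status;
  waitpid (child, &status, 0);  /* Child stops with SIGTRAP at the exec.  */
#endif
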
987 /* Start an inferior process and return its pid.
988 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
989 are its arguments. */
990
991 int
992 linux_process_target::create_inferior (const char *program,
993 const std::vector<char *> &program_args)
994 {
995 client_state &cs = get_client_state ();
996 struct lwp_info *new_lwp;
997 int pid;
998 ptid_t ptid;
999
1000 {
1001 maybe_disable_address_space_randomization restore_personality
1002 (cs.disable_randomization);
1003 std::string str_program_args = stringify_argv (program_args);
1004
1005 pid = fork_inferior (program,
1006 str_program_args.c_str (),
1007 get_environ ()->envp (), linux_ptrace_fun,
1008 NULL, NULL, NULL, NULL);
1009 }
1010
1011 add_linux_process (pid, 0);
1012
1013 ptid = ptid_t (pid, pid, 0);
1014 new_lwp = add_lwp (ptid);
1015 new_lwp->must_set_ptrace_flags = 1;
1016
1017 post_fork_inferior (pid, program);
1018
1019 return pid;
1020 }
1021
1022 /* Implement the post_create_inferior target_ops method. */
1023
1024 void
1025 linux_process_target::post_create_inferior ()
1026 {
1027 struct lwp_info *lwp = get_thread_lwp (current_thread);
1028
1029 low_arch_setup ();
1030
1031 if (lwp->must_set_ptrace_flags)
1032 {
1033 struct process_info *proc = current_process ();
1034 int options = linux_low_ptrace_options (proc->attached);
1035
1036 linux_enable_event_reporting (lwpid_of (current_thread), options);
1037 lwp->must_set_ptrace_flags = 0;
1038 }
1039 }
1040
1041 int
1042 linux_process_target::attach_lwp (ptid_t ptid)
1043 {
1044 struct lwp_info *new_lwp;
1045 int lwpid = ptid.lwp ();
1046
1047 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1048 != 0)
1049 return errno;
1050
1051 new_lwp = add_lwp (ptid);
1052
1053 /* We need to wait for SIGSTOP before being able to make the next
1054 ptrace call on this LWP. */
1055 new_lwp->must_set_ptrace_flags = 1;
1056
1057 if (linux_proc_pid_is_stopped (lwpid))
1058 {
1059 if (debug_threads)
1060 debug_printf ("Attached to a stopped process\n");
1061
1062 /* The process is definitely stopped. It is in a job control
1063 stop, unless the kernel predates the TASK_STOPPED /
1064 TASK_TRACED distinction, in which case it might be in a
1065 ptrace stop. Make sure it is in a ptrace stop; from there we
1066 can kill it, signal it, et cetera.
1067
1068 First make sure there is a pending SIGSTOP. Since we are
1069 already attached, the process can not transition from stopped
1070 to running without a PTRACE_CONT; so we know this signal will
1071 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1072 probably already in the queue (unless this kernel is old
1073 enough to use TASK_STOPPED for ptrace stops); but since
1074 SIGSTOP is not an RT signal, it can only be queued once. */
1075 kill_lwp (lwpid, SIGSTOP);
1076
1077 /* Finally, resume the stopped process. This will deliver the
1078 SIGSTOP (or a higher priority signal, just like normal
1079 PTRACE_ATTACH), which we'll catch later on. */
1080 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1081 }
1082
1083 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1084 brings it to a halt.
1085
1086 There are several cases to consider here:
1087
1088 1) gdbserver has already attached to the process and is being notified
1089 of a new thread that is being created.
1090 In this case we should ignore that SIGSTOP and resume the
1091 process. This is handled below by setting stop_expected = 1,
1092 and the fact that add_thread sets last_resume_kind ==
1093 resume_continue.
1094
1095 2) This is the first thread (the process thread), and we're attaching
1096 to it via attach_inferior.
1097 In this case we want the process thread to stop.
1098 This is handled by having linux_attach set last_resume_kind ==
1099 resume_stop after we return.
1100
1101 If the pid we are attaching to is also the tgid, we attach to and
1102 stop all the existing threads. Otherwise, we attach to pid and
1103 ignore any other threads in the same group as this pid.
1104
1105 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1106 existing threads.
1107 In this case we want the thread to stop.
1108 FIXME: This case is currently not properly handled.
1109 We should wait for the SIGSTOP but don't. Things work apparently
1110 because enough time passes between when we ptrace (ATTACH) and when
1111 gdb makes the next ptrace call on the thread.
1112
1113 On the other hand, if we are currently trying to stop all threads, we
1114 should treat the new thread as if we had sent it a SIGSTOP. This works
1115 because we are guaranteed that the add_lwp call above added us to the
1116 end of the list, and so the new thread has not yet reached
1117 wait_for_sigstop (but will). */
1118 new_lwp->stop_expected = 1;
1119
1120 return 0;
1121 }
1122
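/* For illustration: the essential ptrace sequence performed by
   attach_lwp above, stripped of bookkeeping and error handling.  */
#if 0
  ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
          (PTRACE_TYPE_ARG4) 0);            /* Queues a SIGSTOP.  */
  if (linux_proc_pid_is_stopped (lwpid))
    {
      /* Convert a job-control stop into a ptrace stop: make sure a
         SIGSTOP is queued, then let the process consume it.  */
      kill_lwp (lwpid, SIGSTOP);
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0);
    }
  /* The SIGSTOP itself is collected later by the wait machinery.  */
#endif
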
1123 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1124 already attached. Returns true if a new LWP is found, false
1125 otherwise. */
1126
1127 static int
1128 attach_proc_task_lwp_callback (ptid_t ptid)
1129 {
1130 /* Is this a new thread? */
1131 if (find_thread_ptid (ptid) == NULL)
1132 {
1133 int lwpid = ptid.lwp ();
1134 int err;
1135
1136 if (debug_threads)
1137 debug_printf ("Found new lwp %d\n", lwpid);
1138
1139 err = the_linux_target->attach_lwp (ptid);
1140
1141 /* Be quiet if we simply raced with the thread exiting. EPERM
1142 is returned if the thread's task still exists, and is marked
1143 as exited or zombie, as well as other conditions, so in that
1144 case, confirm the status in /proc/PID/status. */
1145 if (err == ESRCH
1146 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1147 {
1148 if (debug_threads)
1149 {
1150 debug_printf ("Cannot attach to lwp %d: "
1151 "thread is gone (%d: %s)\n",
1152 lwpid, err, safe_strerror (err));
1153 }
1154 }
1155 else if (err != 0)
1156 {
1157 std::string reason
1158 = linux_ptrace_attach_fail_reason_string (ptid, err);
1159
1160 warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1161 }
1162
1163 return 1;
1164 }
1165 return 0;
1166 }
1167
1168 static void async_file_mark (void);
1169
1170 /* Attach to PID. If PID is the tgid, attach to it and all
1171 of its threads. */
1172
1173 int
1174 linux_process_target::attach (unsigned long pid)
1175 {
1176 struct process_info *proc;
1177 struct thread_info *initial_thread;
1178 ptid_t ptid = ptid_t (pid, pid, 0);
1179 int err;
1180
1181 proc = add_linux_process (pid, 1);
1182
1183 /* Attach to PID. We will check for other threads
1184 soon. */
1185 err = attach_lwp (ptid);
1186 if (err != 0)
1187 {
1188 remove_process (proc);
1189
1190 std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1191 error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1192 }
1193
1194 /* Don't ignore the initial SIGSTOP if we just attached to this
1195 process. It will be collected by wait shortly. */
1196 initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
1197 initial_thread->last_resume_kind = resume_stop;
1198
1199 /* We must attach to every LWP. If /proc is mounted, use that to
1200 find them now. On the one hand, the inferior may be using raw
1201 clone instead of using pthreads. On the other hand, even if it
1202 is using pthreads, GDB may not be connected yet (thread_db needs
1203 to do symbol lookups, through qSymbol). Also, thread_db walks
1204 structures in the inferior's address space to find the list of
1205 threads/LWPs, and those structures may well be corrupted. Note
1206 that once thread_db is loaded, we'll still use it to list threads
1207 and associate pthread info with each LWP. */
1208 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1209
1210 /* GDB will shortly read the xml target description for this
1211 process, to figure out the process' architecture. But the target
1212 description is only filled in when the first process/thread in
1213 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1214 that now, otherwise, if GDB is fast enough, it could read the
1215 target description _before_ that initial stop. */
1216 if (non_stop)
1217 {
1218 struct lwp_info *lwp;
1219 int wstat, lwpid;
1220 ptid_t pid_ptid = ptid_t (pid);
1221
1222 lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1223 gdb_assert (lwpid > 0);
1224
1225 lwp = find_lwp_pid (ptid_t (lwpid));
1226
1227 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1228 {
1229 lwp->status_pending_p = 1;
1230 lwp->status_pending = wstat;
1231 }
1232
1233 initial_thread->last_resume_kind = resume_continue;
1234
1235 async_file_mark ();
1236
1237 gdb_assert (proc->tdesc != NULL);
1238 }
1239
1240 return 0;
1241 }
1242
1243 static int
1244 last_thread_of_process_p (int pid)
1245 {
1246 bool seen_one = false;
1247
1248 thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1249 {
1250 if (!seen_one)
1251 {
1252 /* This is the first thread of this process we see. */
1253 seen_one = true;
1254 return false;
1255 }
1256 else
1257 {
1258 /* This is the second thread of this process we see. */
1259 return true;
1260 }
1261 });
1262
1263 return thread == NULL;
1264 }
1265
1266 /* Kill LWP. */
1267
1268 static void
1269 linux_kill_one_lwp (struct lwp_info *lwp)
1270 {
1271 struct thread_info *thr = get_lwp_thread (lwp);
1272 int pid = lwpid_of (thr);
1273
1274 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1275 there is no signal context, and ptrace(PTRACE_KILL) (or
1276 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1277 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1278 alternative is to kill with SIGKILL. We only need one SIGKILL
1279 per process, not one for each thread. But since we still
1280 support debugging programs using raw clone without CLONE_THREAD,
1281 we send one for each thread. For years, we used PTRACE_KILL
1282 only, so we're being a bit paranoid about some old kernels where
1283 PTRACE_KILL might work better (dubious if there are any such, but
1284 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1285 second, and so we're fine everywhere. */
1286
1287 errno = 0;
1288 kill_lwp (pid, SIGKILL);
1289 if (debug_threads)
1290 {
1291 int save_errno = errno;
1292
1293 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1294 target_pid_to_str (ptid_of (thr)),
1295 save_errno ? safe_strerror (save_errno) : "OK");
1296 }
1297
1298 errno = 0;
1299 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1300 if (debug_threads)
1301 {
1302 int save_errno = errno;
1303
1304 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1305 target_pid_to_str (ptid_of (thr)),
1306 save_errno ? safe_strerror (save_errno) : "OK");
1307 }
1308 }
1309
1310 /* Kill LWP and wait for it to die. */
1311
1312 static void
1313 kill_wait_lwp (struct lwp_info *lwp)
1314 {
1315 struct thread_info *thr = get_lwp_thread (lwp);
1316 int pid = ptid_of (thr).pid ();
1317 int lwpid = ptid_of (thr).lwp ();
1318 int wstat;
1319 int res;
1320
1321 if (debug_threads)
1322 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1323
1324 do
1325 {
1326 linux_kill_one_lwp (lwp);
1327
1328 /* Make sure it died. Notes:
1329
1330 - The loop is most likely unnecessary.
1331
1332 - We don't use wait_for_event as that could delete lwps
1333 while we're iterating over them. We're not interested in
1334 any pending status at this point, only in making sure all
1335 wait status on the kernel side are collected until the
1336 process is reaped.
1337
1338 - We don't use __WALL here as the __WALL emulation relies on
1339 SIGCHLD, and killing a stopped process doesn't generate
1340 one, nor an exit status.
1341 */
1342 res = my_waitpid (lwpid, &wstat, 0);
1343 if (res == -1 && errno == ECHILD)
1344 res = my_waitpid (lwpid, &wstat, __WCLONE);
1345 } while (res > 0 && WIFSTOPPED (wstat));
1346
1347 /* Even if it was stopped, the child may have already disappeared.
1348 E.g., if it was killed by SIGKILL. */
1349 if (res < 0 && errno != ECHILD)
1350 perror_with_name ("kill_wait_lwp");
1351 }
1352
1353 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1354 except the leader. */
1355
1356 static void
1357 kill_one_lwp_callback (thread_info *thread, int pid)
1358 {
1359 struct lwp_info *lwp = get_thread_lwp (thread);
1360
1361 /* We avoid killing the first thread here, because of a Linux kernel (at
1362 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1363 the children get a chance to be reaped, it will remain a zombie
1364 forever. */
1365
1366 if (lwpid_of (thread) == pid)
1367 {
1368 if (debug_threads)
1369 debug_printf ("lkop: is last of process %s\n",
1370 target_pid_to_str (thread->id));
1371 return;
1372 }
1373
1374 kill_wait_lwp (lwp);
1375 }
1376
1377 int
1378 linux_process_target::kill (process_info *process)
1379 {
1380 int pid = process->pid;
1381
1382 /* If we're killing a running inferior, make sure it is stopped
1383 first, as PTRACE_KILL will not work otherwise. */
1384 stop_all_lwps (0, NULL);
1385
1386 for_each_thread (pid, [&] (thread_info *thread)
1387 {
1388 kill_one_lwp_callback (thread, pid);
1389 });
1390
1391 /* See the comment in linux_kill_one_lwp. We did not kill the first
1392 thread in the list, so do so now. */
1393 lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1394
1395 if (lwp == NULL)
1396 {
1397 if (debug_threads)
1398 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1399 pid);
1400 }
1401 else
1402 kill_wait_lwp (lwp);
1403
1404 mourn (process);
1405
1406 /* Since we presently can only stop all lwps of all processes, we
1407 need to unstop lwps of other processes. */
1408 unstop_all_lwps (0, NULL);
1409 return 0;
1410 }
1411
1412 /* Get pending signal of THREAD, for detaching purposes. This is the
1413 signal the thread last stopped for, which we need to deliver to the
1414 thread when detaching; otherwise it'd be suppressed/lost. */
1415
1416 static int
1417 get_detach_signal (struct thread_info *thread)
1418 {
1419 client_state &cs = get_client_state ();
1420 enum gdb_signal signo = GDB_SIGNAL_0;
1421 int status;
1422 struct lwp_info *lp = get_thread_lwp (thread);
1423
1424 if (lp->status_pending_p)
1425 status = lp->status_pending;
1426 else
1427 {
1428 /* If the thread had been suspended by gdbserver, and it stopped
1429 cleanly, then it'll have stopped with SIGSTOP. But we don't
1430 want to deliver that SIGSTOP. */
1431 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1432 || thread->last_status.value.sig == GDB_SIGNAL_0)
1433 return 0;
1434
1435 /* Otherwise, we may need to deliver the signal we
1436 intercepted. */
1437 status = lp->last_status;
1438 }
1439
1440 if (!WIFSTOPPED (status))
1441 {
1442 if (debug_threads)
1443 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1444 target_pid_to_str (ptid_of (thread)));
1445 return 0;
1446 }
1447
1448 /* Extended wait statuses aren't real SIGTRAPs. */
1449 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1450 {
1451 if (debug_threads)
1452 debug_printf ("GPS: lwp %s had stopped with extended "
1453 "status: no pending signal\n",
1454 target_pid_to_str (ptid_of (thread)));
1455 return 0;
1456 }
1457
1458 signo = gdb_signal_from_host (WSTOPSIG (status));
1459
1460 if (cs.program_signals_p && !cs.program_signals[signo])
1461 {
1462 if (debug_threads)
1463 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1464 target_pid_to_str (ptid_of (thread)),
1465 gdb_signal_to_string (signo));
1466 return 0;
1467 }
1468 else if (!cs.program_signals_p
1469 /* If we have no way to know which signals GDB does not
1470 want to have passed to the program, assume
1471 SIGTRAP/SIGINT, which is GDB's default. */
1472 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1473 {
1474 if (debug_threads)
1475 debug_printf ("GPS: lwp %s had signal %s, "
1476 "but we don't know if we should pass it. "
1477 "Default to not.\n",
1478 target_pid_to_str (ptid_of (thread)),
1479 gdb_signal_to_string (signo));
1480 return 0;
1481 }
1482 else
1483 {
1484 if (debug_threads)
1485 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1486 target_pid_to_str (ptid_of (thread)),
1487 gdb_signal_to_string (signo));
1488
1489 return WSTOPSIG (status);
1490 }
1491 }
1492
1493 void
1494 linux_process_target::detach_one_lwp (lwp_info *lwp)
1495 {
1496 struct thread_info *thread = get_lwp_thread (lwp);
1497 int sig;
1498 int lwpid;
1499
1500 /* If there is a pending SIGSTOP, get rid of it. */
1501 if (lwp->stop_expected)
1502 {
1503 if (debug_threads)
1504 debug_printf ("Sending SIGCONT to %s\n",
1505 target_pid_to_str (ptid_of (thread)));
1506
1507 kill_lwp (lwpid_of (thread), SIGCONT);
1508 lwp->stop_expected = 0;
1509 }
1510
1511 /* Pass on any pending signal for this thread. */
1512 sig = get_detach_signal (thread);
1513
1514 /* Preparing to resume may try to write registers, and fail if the
1515 lwp is zombie. If that happens, ignore the error. We'll handle
1516 it below, when detach fails with ESRCH. */
1517 try
1518 {
1519 /* Flush any pending changes to the process's registers. */
1520 regcache_invalidate_thread (thread);
1521
1522 /* Finally, let it resume. */
1523 low_prepare_to_resume (lwp);
1524 }
1525 catch (const gdb_exception_error &ex)
1526 {
1527 if (!check_ptrace_stopped_lwp_gone (lwp))
1528 throw;
1529 }
1530
1531 lwpid = lwpid_of (thread);
1532 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1533 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1534 {
1535 int save_errno = errno;
1536
1537 /* We know the thread exists, so ESRCH must mean the lwp is
1538 zombie. This can happen if one of the already-detached
1539 threads exits the whole thread group. In that case we're
1540 still attached, and must reap the lwp. */
1541 if (save_errno == ESRCH)
1542 {
1543 int ret, status;
1544
1545 ret = my_waitpid (lwpid, &status, __WALL);
1546 if (ret == -1)
1547 {
1548 warning (_("Couldn't reap LWP %d while detaching: %s"),
1549 lwpid, safe_strerror (errno));
1550 }
1551 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1552 {
1553 warning (_("Reaping LWP %d while detaching "
1554 "returned unexpected status 0x%x"),
1555 lwpid, status);
1556 }
1557 }
1558 else
1559 {
1560 error (_("Can't detach %s: %s"),
1561 target_pid_to_str (ptid_of (thread)),
1562 safe_strerror (save_errno));
1563 }
1564 }
1565 else if (debug_threads)
1566 {
1567 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1568 target_pid_to_str (ptid_of (thread)),
1569 strsignal (sig));
1570 }
1571
1572 delete_lwp (lwp);
1573 }
1574
1575 int
1576 linux_process_target::detach (process_info *process)
1577 {
1578 struct lwp_info *main_lwp;
1579
1580 /* As there's a step over already in progress, let it finish first,
1581 otherwise nesting a stabilize_threads operation on top gets real
1582 messy. */
1583 complete_ongoing_step_over ();
1584
1585 /* Stop all threads before detaching. First, ptrace requires that
1586 the thread is stopped to successfully detach. Second, thread_db
1587 may need to uninstall thread event breakpoints from memory, which
1588 only works with a stopped process anyway. */
1589 stop_all_lwps (0, NULL);
1590
1591 #ifdef USE_THREAD_DB
1592 thread_db_detach (process);
1593 #endif
1594
1595 /* Stabilize threads (move out of jump pads). */
1596 target_stabilize_threads ();
1597
1598 /* Detach from the clone lwps first. If the thread group exits just
1599 while we're detaching, we must reap the clone lwps before we're
1600 able to reap the leader. */
1601 for_each_thread (process->pid, [this] (thread_info *thread)
1602 {
1603 /* We don't actually detach from the thread group leader just yet.
1604 If the thread group exits, we must reap the zombie clone lwps
1605 before we're able to reap the leader. */
1606 if (thread->id.pid () == thread->id.lwp ())
1607 return;
1608
1609 lwp_info *lwp = get_thread_lwp (thread);
1610 detach_one_lwp (lwp);
1611 });
1612
1613 main_lwp = find_lwp_pid (ptid_t (process->pid));
1614 detach_one_lwp (main_lwp);
1615
1616 mourn (process);
1617
1618 /* Since we presently can only stop all lwps of all processes, we
1619 need to unstop lwps of other processes. */
1620 unstop_all_lwps (0, NULL);
1621 return 0;
1622 }
1623
1624 /* Remove all LWPs that belong to process PROC from the lwp list. */
1625
1626 void
1627 linux_process_target::mourn (process_info *process)
1628 {
1629 struct process_info_private *priv;
1630
1631 #ifdef USE_THREAD_DB
1632 thread_db_mourn (process);
1633 #endif
1634
1635 for_each_thread (process->pid, [this] (thread_info *thread)
1636 {
1637 delete_lwp (get_thread_lwp (thread));
1638 });
1639
1640 /* Free all private data. */
1641 priv = process->priv;
1642 low_delete_process (priv->arch_private);
1643 free (priv);
1644 process->priv = NULL;
1645
1646 remove_process (process);
1647 }
1648
1649 void
1650 linux_process_target::join (int pid)
1651 {
1652 int status, ret;
1653
1654 do {
1655 ret = my_waitpid (pid, &status, 0);
1656 if (WIFEXITED (status) || WIFSIGNALED (status))
1657 break;
1658 } while (ret != -1 || errno != ECHILD);
1659 }
1660
1661 /* Return true if the given thread is still alive. */
1662
1663 bool
1664 linux_process_target::thread_alive (ptid_t ptid)
1665 {
1666 struct lwp_info *lwp = find_lwp_pid (ptid);
1667
1668 /* We assume we always know if a thread exits. If a whole process
1669 exited but we still haven't been able to report it to GDB, we'll
1670 hold on to the last lwp of the dead process. */
1671 if (lwp != NULL)
1672 return !lwp_is_marked_dead (lwp);
1673 else
1674 return 0;
1675 }
1676
1677 bool
1678 linux_process_target::thread_still_has_status_pending (thread_info *thread)
1679 {
1680 struct lwp_info *lp = get_thread_lwp (thread);
1681
1682 if (!lp->status_pending_p)
1683 return 0;
1684
1685 if (thread->last_resume_kind != resume_stop
1686 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1687 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1688 {
1689 struct thread_info *saved_thread;
1690 CORE_ADDR pc;
1691 int discard = 0;
1692
1693 gdb_assert (lp->last_status != 0);
1694
1695 pc = get_pc (lp);
1696
1697 saved_thread = current_thread;
1698 current_thread = thread;
1699
1700 if (pc != lp->stop_pc)
1701 {
1702 if (debug_threads)
1703 debug_printf ("PC of %ld changed\n",
1704 lwpid_of (thread));
1705 discard = 1;
1706 }
1707
1708 #if !USE_SIGTRAP_SIGINFO
1709 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1710 && !low_breakpoint_at (pc))
1711 {
1712 if (debug_threads)
1713 debug_printf ("previous SW breakpoint of %ld gone\n",
1714 lwpid_of (thread));
1715 discard = 1;
1716 }
1717 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1718 && !hardware_breakpoint_inserted_here (pc))
1719 {
1720 if (debug_threads)
1721 debug_printf ("previous HW breakpoint of %ld gone\n",
1722 lwpid_of (thread));
1723 discard = 1;
1724 }
1725 #endif
1726
1727 current_thread = saved_thread;
1728
1729 if (discard)
1730 {
1731 if (debug_threads)
1732 debug_printf ("discarding pending breakpoint status\n");
1733 lp->status_pending_p = 0;
1734 return 0;
1735 }
1736 }
1737
1738 return 1;
1739 }
1740
1741 /* Returns true if LWP is resumed from the client's perspective. */
1742
1743 static int
1744 lwp_resumed (struct lwp_info *lwp)
1745 {
1746 struct thread_info *thread = get_lwp_thread (lwp);
1747
1748 if (thread->last_resume_kind != resume_stop)
1749 return 1;
1750
1751 /* Did gdb send us a `vCont;t', but we haven't reported the
1752 corresponding stop to gdb yet? If so, the thread is still
1753 resumed/running from gdb's perspective. */
1754 if (thread->last_resume_kind == resume_stop
1755 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1756 return 1;
1757
1758 return 0;
1759 }
1760
1761 bool
1762 linux_process_target::status_pending_p_callback (thread_info *thread,
1763 ptid_t ptid)
1764 {
1765 struct lwp_info *lp = get_thread_lwp (thread);
1766
1767 /* Check if we're only interested in events from a specific process
1768 or a specific LWP. */
1769 if (!thread->id.matches (ptid))
1770 return 0;
1771
1772 if (!lwp_resumed (lp))
1773 return 0;
1774
1775 if (lp->status_pending_p
1776 && !thread_still_has_status_pending (thread))
1777 {
1778 resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1779 return 0;
1780 }
1781
1782 return lp->status_pending_p;
1783 }
1784
1785 struct lwp_info *
1786 find_lwp_pid (ptid_t ptid)
1787 {
1788 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1789 {
1790 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1791 return thr_arg->id.lwp () == lwp;
1792 });
1793
1794 if (thread == NULL)
1795 return NULL;
1796
1797 return get_thread_lwp (thread);
1798 }
1799
1800 /* Return the number of known LWPs in the tgid given by PID. */
1801
1802 static int
1803 num_lwps (int pid)
1804 {
1805 int count = 0;
1806
1807 for_each_thread (pid, [&] (thread_info *thread)
1808 {
1809 count++;
1810 });
1811
1812 return count;
1813 }
1814
1815 /* See nat/linux-nat.h. */
1816
1817 struct lwp_info *
1818 iterate_over_lwps (ptid_t filter,
1819 gdb::function_view<iterate_over_lwps_ftype> callback)
1820 {
1821 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1822 {
1823 lwp_info *lwp = get_thread_lwp (thr_arg);
1824
1825 return callback (lwp);
1826 });
1827
1828 if (thread == NULL)
1829 return NULL;
1830
1831 return get_thread_lwp (thread);
1832 }
1833
1834 void
1835 linux_process_target::check_zombie_leaders ()
1836 {
1837 for_each_process ([this] (process_info *proc) {
1838 pid_t leader_pid = pid_of (proc);
1839 struct lwp_info *leader_lp;
1840
1841 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1842
1843 if (debug_threads)
1844 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1845 "num_lwps=%d, zombie=%d\n",
1846 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1847 linux_proc_pid_is_zombie (leader_pid));
1848
1849 if (leader_lp != NULL && !leader_lp->stopped
1850 /* Check if there are other threads in the group, as we may
1851 have raced with the inferior simply exiting. */
1852 && !last_thread_of_process_p (leader_pid)
1853 && linux_proc_pid_is_zombie (leader_pid))
1854 {
1855 /* A leader zombie can mean one of two things:
1856
1857 - It exited, and there's an exit status pending
1858 available, or only the leader exited (not the whole
1859 program). In the latter case, we can't waitpid the
1860 leader's exit status until all other threads are gone.
1861
1862 - There are 3 or more threads in the group, and a thread
1863 other than the leader exec'd. On an exec, the Linux
1864 kernel destroys all other threads (except the execing
1865 one) in the thread group, and resets the execing thread's
1866 tid to the tgid. No exit notification is sent for the
1867 execing thread -- from the ptracer's perspective, it
1868 appears as though the execing thread just vanishes.
1869 Until we reap all other threads except the leader and the
1870 execing thread, the leader will be zombie, and the
1871 execing thread will be in `D (disc sleep)'. As soon as
1872 all other threads are reaped, the execing thread changes
1873 its tid to the tgid, and the previous (zombie) leader
1874 vanishes, giving place to the "new" leader. We could try
1875 distinguishing the exit and exec cases, by waiting once
1876 more, and seeing if something comes out, but it doesn't
1877 sound useful. The previous leader _does_ go away, and
1878 we'll re-add the new one once we see the exec event
1879 (which is just the same as what would happen if the
1880 previous leader did exit voluntarily before some other
1881 thread execs). */
1882
1883 if (debug_threads)
1884 debug_printf ("CZL: Thread group leader %d zombie "
1885 "(it exited, or another thread execd).\n",
1886 leader_pid);
1887
1888 delete_lwp (leader_lp);
1889 }
1890 });
1891 }
1892
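/* For illustration: linux_proc_pid_is_zombie boils down to reading
   the "State:" line of /proc/<pid>/status.  A self-contained sketch
   with a hypothetical helper name (not the nat/linux-procfs
   implementation):  */
#if 0
#include <stdio.h>
#include <string.h>

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[256];
  int zombie = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        /* E.g. "State:\tZ (zombie)".  */
        zombie = (strchr (line, 'Z') != NULL);
        break;
      }
  fclose (f);
  return zombie;
}
#endif
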
1893 /* Callback for `find_thread'. Returns the first LWP that is not
1894 stopped. */
1895
1896 static bool
1897 not_stopped_callback (thread_info *thread, ptid_t filter)
1898 {
1899 if (!thread->id.matches (filter))
1900 return false;
1901
1902 lwp_info *lwp = get_thread_lwp (thread);
1903
1904 return !lwp->stopped;
1905 }
1906
1907 /* Increment LWP's suspend count. */
1908
1909 static void
1910 lwp_suspended_inc (struct lwp_info *lwp)
1911 {
1912 lwp->suspended++;
1913
1914 if (debug_threads && lwp->suspended > 4)
1915 {
1916 struct thread_info *thread = get_lwp_thread (lwp);
1917
1918 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1919 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1920 }
1921 }
1922
1923 /* Decrement LWP's suspend count. */
1924
1925 static void
1926 lwp_suspended_decr (struct lwp_info *lwp)
1927 {
1928 lwp->suspended--;
1929
1930 if (lwp->suspended < 0)
1931 {
1932 struct thread_info *thread = get_lwp_thread (lwp);
1933
1934 internal_error (__FILE__, __LINE__,
1935 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1936 lwp->suspended);
1937 }
1938 }
1939
1940 /* This function should only be called if the LWP got a SIGTRAP.
1941
1942 Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1943 event was handled, 0 otherwise. */
1944
1945 static int
1946 handle_tracepoints (struct lwp_info *lwp)
1947 {
1948 struct thread_info *tinfo = get_lwp_thread (lwp);
1949 int tpoint_related_event = 0;
1950
1951 gdb_assert (lwp->suspended == 0);
1952
1953 /* If this tracepoint hit causes a tracing stop, we'll immediately
1954 uninsert tracepoints. To do this, we temporarily pause all
1955 threads, unpatch away, and then unpause threads. We need to make
1956 sure the unpausing doesn't resume LWP too. */
1957 lwp_suspended_inc (lwp);
1958
1959 /* And we need to be sure that any all-threads-stopping doesn't try
1960 to move threads out of the jump pads, as it could deadlock the
1961 inferior (LWP could be in the jump pad, maybe even holding the
1962 lock).  */
1963
1964 /* Do any necessary step collect actions. */
1965 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1966
1967 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1968
1969 /* See if we just hit a tracepoint and do its main collect
1970 actions. */
1971 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1972
1973 lwp_suspended_decr (lwp);
1974
1975 gdb_assert (lwp->suspended == 0);
1976 gdb_assert (!stabilizing_threads
1977 || (lwp->collecting_fast_tracepoint
1978 != fast_tpoint_collect_result::not_collecting));
1979
1980 if (tpoint_related_event)
1981 {
1982 if (debug_threads)
1983 debug_printf ("got a tracepoint event\n");
1984 return 1;
1985 }
1986
1987 return 0;
1988 }
1989
1990 fast_tpoint_collect_result
1991 linux_process_target::linux_fast_tracepoint_collecting
1992 (lwp_info *lwp, fast_tpoint_collect_status *status)
1993 {
1994 CORE_ADDR thread_area;
1995 struct thread_info *thread = get_lwp_thread (lwp);
1996
1997 /* Get the thread area address. This is used to recognize which
1998 thread is which when tracing with the in-process agent library.
1999 We don't read anything from the address, and treat it as opaque;
2000 it's the address itself that we assume is unique per-thread. */
2001 if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
2002 return fast_tpoint_collect_result::not_collecting;
2003
2004 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2005 }
2006
2007 int
2008 linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
2009 {
2010 return -1;
2011 }
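
/* The -1 returned by the default implementation above means "not
   supported", which makes linux_fast_tracepoint_collecting report
   not_collecting.  Architectures that support fast tracepoints
   (e.g. the x86 and aarch64 targets) override this to fetch the
   thread pointer.  */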
2012
2013 bool
2014 linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
2015 {
2016 struct thread_info *saved_thread;
2017
2018 saved_thread = current_thread;
2019 current_thread = get_lwp_thread (lwp);
2020
2021 if ((wstat == NULL
2022 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2023 && supports_fast_tracepoints ()
2024 && agent_loaded_p ())
2025 {
2026 struct fast_tpoint_collect_status status;
2027
2028 if (debug_threads)
2029 debug_printf ("Checking whether LWP %ld needs to move out of the "
2030 "jump pad.\n",
2031 lwpid_of (current_thread));
2032
2033 fast_tpoint_collect_result r
2034 = linux_fast_tracepoint_collecting (lwp, &status);
2035
2036 if (wstat == NULL
2037 || (WSTOPSIG (*wstat) != SIGILL
2038 && WSTOPSIG (*wstat) != SIGFPE
2039 && WSTOPSIG (*wstat) != SIGSEGV
2040 && WSTOPSIG (*wstat) != SIGBUS))
2041 {
2042 lwp->collecting_fast_tracepoint = r;
2043
2044 if (r != fast_tpoint_collect_result::not_collecting)
2045 {
2046 if (r == fast_tpoint_collect_result::before_insn
2047 && lwp->exit_jump_pad_bkpt == NULL)
2048 {
2049 /* Haven't executed the original instruction yet.
2050 Set breakpoint there, and wait till it's hit,
2051 then single-step until exiting the jump pad. */
2052 lwp->exit_jump_pad_bkpt
2053 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2054 }
2055
2056 if (debug_threads)
2057 debug_printf ("Checking whether LWP %ld needs to move out of "
2058 "the jump pad...it does\n",
2059 lwpid_of (current_thread));
2060 current_thread = saved_thread;
2061
2062 return true;
2063 }
2064 }
2065 else
2066 {
2067 /* If we get a synchronous signal while collecting, *and*
2068 while executing the (relocated) original instruction,
2069 reset the PC to point at the tpoint address, before
2070 reporting to GDB. Otherwise, it's an IPA lib bug: just
2071 report the signal to GDB, and pray for the best. */
2072
2073 lwp->collecting_fast_tracepoint
2074 = fast_tpoint_collect_result::not_collecting;
2075
2076 if (r != fast_tpoint_collect_result::not_collecting
2077 && (status.adjusted_insn_addr <= lwp->stop_pc
2078 && lwp->stop_pc < status.adjusted_insn_addr_end))
2079 {
2080 siginfo_t info;
2081 struct regcache *regcache;
2082
2083 /* The si_addr on a few signals references the address
2084 of the faulting instruction. Adjust that as
2085 well. */
2086 if ((WSTOPSIG (*wstat) == SIGILL
2087 || WSTOPSIG (*wstat) == SIGFPE
2088 || WSTOPSIG (*wstat) == SIGBUS
2089 || WSTOPSIG (*wstat) == SIGSEGV)
2090 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2091 (PTRACE_TYPE_ARG3) 0, &info) == 0
2092 /* Final check just to make sure we don't clobber
2093 the siginfo of non-kernel-sent signals. */
2094 && (uintptr_t) info.si_addr == lwp->stop_pc)
2095 {
2096 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2097 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2098 (PTRACE_TYPE_ARG3) 0, &info);
2099 }
2100
2101 regcache = get_thread_regcache (current_thread, 1);
2102 low_set_pc (regcache, status.tpoint_addr);
2103 lwp->stop_pc = status.tpoint_addr;
2104
2105 /* Cancel any fast tracepoint lock this thread was
2106 holding. */
2107 force_unlock_trace_buffer ();
2108 }
2109
2110 if (lwp->exit_jump_pad_bkpt != NULL)
2111 {
2112 if (debug_threads)
2113 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2114 "stopping all threads momentarily.\n");
2115
2116 stop_all_lwps (1, lwp);
2117
2118 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2119 lwp->exit_jump_pad_bkpt = NULL;
2120
2121 unstop_all_lwps (1, lwp);
2122
2123 gdb_assert (lwp->suspended >= 0);
2124 }
2125 }
2126 }
2127
2128 if (debug_threads)
2129 debug_printf ("Checking whether LWP %ld needs to move out of the "
2130 "jump pad...no\n",
2131 lwpid_of (current_thread));
2132
2133 current_thread = saved_thread;
2134 return false;
2135 }
2136
2137 /* Enqueue one signal in the "signals to report later when out of the
2138 jump pad" list. */
2139
2140 static void
2141 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2142 {
2143 struct pending_signals *p_sig;
2144 struct thread_info *thread = get_lwp_thread (lwp);
2145
2146 if (debug_threads)
2147 debug_printf ("Deferring signal %d for LWP %ld.\n",
2148 WSTOPSIG (*wstat), lwpid_of (thread));
2149
2150 if (debug_threads)
2151 {
2152 struct pending_signals *sig;
2153
2154 for (sig = lwp->pending_signals_to_report;
2155 sig != NULL;
2156 sig = sig->prev)
2157 debug_printf (" Already queued %d\n",
2158 sig->signal);
2159
2160 debug_printf (" (no more currently queued signals)\n");
2161 }
2162
2163 /* Don't enqueue non-RT signals if they are already in the deferred
2164 queue. (SIGSTOP being the easiest signal to see ending up here
2165 twice) */
2166 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2167 {
2168 struct pending_signals *sig;
2169
2170 for (sig = lwp->pending_signals_to_report;
2171 sig != NULL;
2172 sig = sig->prev)
2173 {
2174 if (sig->signal == WSTOPSIG (*wstat))
2175 {
2176 if (debug_threads)
2177 debug_printf ("Not requeuing already queued non-RT signal %d"
2178 " for LWP %ld\n",
2179 sig->signal,
2180 lwpid_of (thread));
2181 return;
2182 }
2183 }
2184 }
2185
2186 p_sig = XCNEW (struct pending_signals);
2187 p_sig->prev = lwp->pending_signals_to_report;
2188 p_sig->signal = WSTOPSIG (*wstat);
2189
2190 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2191 &p_sig->info);
2192
2193 lwp->pending_signals_to_report = p_sig;
2194 }
2195
2196 /* Dequeue one signal from the "signals to report later when out of
2197 the jump pad" list. */
2198
2199 static int
2200 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2201 {
2202 struct thread_info *thread = get_lwp_thread (lwp);
2203
2204 if (lwp->pending_signals_to_report != NULL)
2205 {
2206 struct pending_signals **p_sig;
2207
2208 p_sig = &lwp->pending_signals_to_report;
2209 while ((*p_sig)->prev != NULL)
2210 p_sig = &(*p_sig)->prev;
2211
2212 *wstat = W_STOPCODE ((*p_sig)->signal);
2213 if ((*p_sig)->info.si_signo != 0)
2214 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2215 &(*p_sig)->info);
2216 free (*p_sig);
2217 *p_sig = NULL;
2218
2219 if (debug_threads)
2220 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2221 WSTOPSIG (*wstat), lwpid_of (thread));
2222
2223 if (debug_threads)
2224 {
2225 struct pending_signals *sig;
2226
2227 for (sig = lwp->pending_signals_to_report;
2228 sig != NULL;
2229 sig = sig->prev)
2230 debug_printf (" Still queued %d\n",
2231 sig->signal);
2232
2233 debug_printf (" (no more queued signals)\n");
2234 }
2235
2236 return 1;
2237 }
2238
2239 return 0;
2240 }
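
/* Note that enqueue_one_deferred_signal pushes at the head of the
   `prev'-linked list, while dequeue_one_deferred_signal walks to the
   tail before unlinking, so deferred signals are re-reported in the
   order they were originally intercepted (FIFO).  */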
2241
2242 bool
2243 linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2244 {
2245 struct thread_info *saved_thread = current_thread;
2246 current_thread = get_lwp_thread (child);
2247
2248 if (low_stopped_by_watchpoint ())
2249 {
2250 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2251 child->stopped_data_address = low_stopped_data_address ();
2252 }
2253
2254 current_thread = saved_thread;
2255
2256 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2257 }
2258
2259 bool
2260 linux_process_target::low_stopped_by_watchpoint ()
2261 {
2262 return false;
2263 }
2264
2265 CORE_ADDR
2266 linux_process_target::low_stopped_data_address ()
2267 {
2268 return 0;
2269 }
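
/* The two defaults above are for targets without hardware watchpoint
   support; targets that have it are expected to override both, which
   keeps check_stopped_by_watchpoint working unchanged.  */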
2270
2271 /* Return the ptrace options that we want to try to enable. */
2272
2273 static int
2274 linux_low_ptrace_options (int attached)
2275 {
2276 client_state &cs = get_client_state ();
2277 int options = 0;
2278
2279 if (!attached)
2280 options |= PTRACE_O_EXITKILL;
2281
2282 if (cs.report_fork_events)
2283 options |= PTRACE_O_TRACEFORK;
2284
2285 if (cs.report_vfork_events)
2286 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2287
2288 if (cs.report_exec_events)
2289 options |= PTRACE_O_TRACEEXEC;
2290
2291 options |= PTRACE_O_TRACESYSGOOD;
2292
2293 return options;
2294 }
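
/* The mask built above ends up in the kernel via PTRACE_SETOPTIONS; in
   this file that happens through linux_enable_event_reporting (see
   filter_event below).  Roughly, as a sketch:

     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) options);

   after which the traced LWP reports the selected fork/vfork/exec and
   syscall events as ptrace stops.  */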
2295
2296 lwp_info *
2297 linux_process_target::filter_event (int lwpid, int wstat)
2298 {
2299 client_state &cs = get_client_state ();
2300 struct lwp_info *child;
2301 struct thread_info *thread;
2302 int have_stop_pc = 0;
2303
2304 child = find_lwp_pid (ptid_t (lwpid));
2305
2306 /* Check for stop events reported by a process we didn't already
2307 know about - anything not already in our LWP list.
2308
2309 If we're expecting to receive stopped processes after
2310 fork, vfork, and clone events, then we'll just add the
2311 new one to our list and go back to waiting for the event
2312 to be reported - the stopped process might be returned
2313 from waitpid before or after the event is.
2314
2315 But note the case of a non-leader thread exec'ing after the
2316 leader having exited, and gone from our lists (because
2317 check_zombie_leaders deleted it). The non-leader thread
2318 changes its tid to the tgid. */
2319
2320 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2321 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2322 {
2323 ptid_t child_ptid;
2324
2325 /* A multi-thread exec after we had seen the leader exiting. */
2326 if (debug_threads)
2327 {
2328 debug_printf ("LLW: Re-adding thread group leader LWP %d "
2329 "after exec.\n", lwpid);
2330 }
2331
2332 child_ptid = ptid_t (lwpid, lwpid, 0);
2333 child = add_lwp (child_ptid);
2334 child->stopped = 1;
2335 current_thread = child->thread;
2336 }
2337
2338 /* If we didn't find a process, one of two things presumably happened:
2339 - A process we started and then detached from has exited. Ignore it.
2340 - A process we are controlling has forked and the new child's stop
2341 was reported to us by the kernel. Save its PID. */
2342 if (child == NULL && WIFSTOPPED (wstat))
2343 {
2344 add_to_pid_list (&stopped_pids, lwpid, wstat);
2345 return NULL;
2346 }
2347 else if (child == NULL)
2348 return NULL;
2349
2350 thread = get_lwp_thread (child);
2351
2352 child->stopped = 1;
2353
2354 child->last_status = wstat;
2355
2356 /* Check if the thread has exited. */
2357 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2358 {
2359 if (debug_threads)
2360 debug_printf ("LLFE: %d exited.\n", lwpid);
2361
2362 if (finish_step_over (child))
2363 {
2364 /* Unsuspend all other LWPs, and set them back running again. */
2365 unsuspend_all_lwps (child);
2366 }
2367
2368 /* If there is at least one more LWP, then the exit signal was
2369 not the end of the debugged application and should be
2370 ignored, unless GDB wants to hear about thread exits. */
2371 if (cs.report_thread_events
2372 || last_thread_of_process_p (pid_of (thread)))
2373 {
2374 /* Events are serialized to the GDB core, and we can't
2375 report this one right now, so leave the status pending for
2376 the next time we're able to report it.  */
2377 mark_lwp_dead (child, wstat);
2378 return child;
2379 }
2380 else
2381 {
2382 delete_lwp (child);
2383 return NULL;
2384 }
2385 }
2386
2387 gdb_assert (WIFSTOPPED (wstat));
2388
2389 if (WIFSTOPPED (wstat))
2390 {
2391 struct process_info *proc;
2392
2393 /* Architecture-specific setup after inferior is running. */
2394 proc = find_process_pid (pid_of (thread));
2395 if (proc->tdesc == NULL)
2396 {
2397 if (proc->attached)
2398 {
2399 /* This needs to happen after we have attached to the
2400 inferior and it is stopped for the first time, but
2401 before we access any inferior registers. */
2402 arch_setup_thread (thread);
2403 }
2404 else
2405 {
2406 /* The process is started, but GDBserver will do
2407 architecture-specific setup after the program stops at
2408 the first instruction. */
2409 child->status_pending_p = 1;
2410 child->status_pending = wstat;
2411 return child;
2412 }
2413 }
2414 }
2415
2416 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2417 {
2418 struct process_info *proc = find_process_pid (pid_of (thread));
2419 int options = linux_low_ptrace_options (proc->attached);
2420
2421 linux_enable_event_reporting (lwpid, options);
2422 child->must_set_ptrace_flags = 0;
2423 }
2424
2425 /* Always update syscall_state, even if it will be filtered later. */
2426 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2427 {
2428 child->syscall_state
2429 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2430 ? TARGET_WAITKIND_SYSCALL_RETURN
2431 : TARGET_WAITKIND_SYSCALL_ENTRY);
2432 }
2433 else
2434 {
2435 /* Almost all other ptrace-stops are known to be outside of system
2436 calls, with further exceptions in handle_extended_wait. */
2437 child->syscall_state = TARGET_WAITKIND_IGNORE;
2438 }
2439
2440 /* Be careful to not overwrite stop_pc until save_stop_reason is
2441 called. */
2442 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2443 && linux_is_extended_waitstatus (wstat))
2444 {
2445 child->stop_pc = get_pc (child);
2446 if (handle_extended_wait (&child, wstat))
2447 {
2448 /* The event has been handled, so just return without
2449 reporting it. */
2450 return NULL;
2451 }
2452 }
2453
2454 if (linux_wstatus_maybe_breakpoint (wstat))
2455 {
2456 if (save_stop_reason (child))
2457 have_stop_pc = 1;
2458 }
2459
2460 if (!have_stop_pc)
2461 child->stop_pc = get_pc (child);
2462
2463 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2464 && child->stop_expected)
2465 {
2466 if (debug_threads)
2467 debug_printf ("Expected stop.\n");
2468 child->stop_expected = 0;
2469
2470 if (thread->last_resume_kind == resume_stop)
2471 {
2472 /* We want to report the stop to the core. Treat the
2473 SIGSTOP as a normal event. */
2474 if (debug_threads)
2475 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2476 target_pid_to_str (ptid_of (thread)));
2477 }
2478 else if (stopping_threads != NOT_STOPPING_THREADS)
2479 {
2480 /* Stopping threads. We don't want this SIGSTOP to end up
2481 pending. */
2482 if (debug_threads)
2483 debug_printf ("LLW: SIGSTOP caught for %s "
2484 "while stopping threads.\n",
2485 target_pid_to_str (ptid_of (thread)));
2486 return NULL;
2487 }
2488 else
2489 {
2490 /* This is a delayed SIGSTOP. Filter out the event. */
2491 if (debug_threads)
2492 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2493 child->stepping ? "step" : "continue",
2494 target_pid_to_str (ptid_of (thread)));
2495
2496 resume_one_lwp (child, child->stepping, 0, NULL);
2497 return NULL;
2498 }
2499 }
2500
2501 child->status_pending_p = 1;
2502 child->status_pending = wstat;
2503 return child;
2504 }
2505
2506 /* Return 1 if THREAD is doing hardware single step, 0 otherwise.  */
2507
2508 static int
2509 maybe_hw_step (struct thread_info *thread)
2510 {
2511 if (can_hardware_single_step ())
2512 return 1;
2513 else
2514 {
2515 /* GDBserver must insert a single-step breakpoint for software
2516 single step.  */
2517 gdb_assert (has_single_step_breakpoints (thread));
2518 return 0;
2519 }
2520 }
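
/* In other words: on targets with hardware single step, resuming with
   PTRACE_SINGLESTEP is sufficient; on software single step targets
   (e.g. ARM), gdbserver plants a breakpoint at the next PC instead,
   and the assert above checks that it is indeed in place.  */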
2521
2522 void
2523 linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2524 {
2525 struct lwp_info *lp = get_thread_lwp (thread);
2526
2527 if (lp->stopped
2528 && !lp->suspended
2529 && !lp->status_pending_p
2530 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2531 {
2532 int step = 0;
2533
2534 if (thread->last_resume_kind == resume_step)
2535 step = maybe_hw_step (thread);
2536
2537 if (debug_threads)
2538 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2539 target_pid_to_str (ptid_of (thread)),
2540 paddress (lp->stop_pc),
2541 step);
2542
2543 resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2544 }
2545 }
2546
2547 int
2548 linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2549 ptid_t filter_ptid,
2550 int *wstatp, int options)
2551 {
2552 struct thread_info *event_thread;
2553 struct lwp_info *event_child, *requested_child;
2554 sigset_t block_mask, prev_mask;
2555
2556 retry:
2557 /* N.B. event_thread points to the thread_info struct that contains
2558 event_child. Keep them in sync. */
2559 event_thread = NULL;
2560 event_child = NULL;
2561 requested_child = NULL;
2562
2563 /* Check for a lwp with a pending status. */
2564
2565 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2566 {
2567 event_thread = find_thread_in_random ([&] (thread_info *thread)
2568 {
2569 return status_pending_p_callback (thread, filter_ptid);
2570 });
2571
2572 if (event_thread != NULL)
2573 event_child = get_thread_lwp (event_thread);
2574 if (debug_threads && event_thread)
2575 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2576 }
2577 else if (filter_ptid != null_ptid)
2578 {
2579 requested_child = find_lwp_pid (filter_ptid);
2580
2581 if (stopping_threads == NOT_STOPPING_THREADS
2582 && requested_child->status_pending_p
2583 && (requested_child->collecting_fast_tracepoint
2584 != fast_tpoint_collect_result::not_collecting))
2585 {
2586 enqueue_one_deferred_signal (requested_child,
2587 &requested_child->status_pending);
2588 requested_child->status_pending_p = 0;
2589 requested_child->status_pending = 0;
2590 resume_one_lwp (requested_child, 0, 0, NULL);
2591 }
2592
2593 if (requested_child->suspended
2594 && requested_child->status_pending_p)
2595 {
2596 internal_error (__FILE__, __LINE__,
2597 "requesting an event out of a"
2598 " suspended child?");
2599 }
2600
2601 if (requested_child->status_pending_p)
2602 {
2603 event_child = requested_child;
2604 event_thread = get_lwp_thread (event_child);
2605 }
2606 }
2607
2608 if (event_child != NULL)
2609 {
2610 if (debug_threads)
2611 debug_printf ("Got an event from pending child %ld (%04x)\n",
2612 lwpid_of (event_thread), event_child->status_pending);
2613 *wstatp = event_child->status_pending;
2614 event_child->status_pending_p = 0;
2615 event_child->status_pending = 0;
2616 current_thread = event_thread;
2617 return lwpid_of (event_thread);
2618 }
2619
2620 /* But if we don't find a pending event, we'll have to wait.
2621
2622 We only enter this loop if no process has a pending wait status.
2623 Thus any action taken in response to a wait status inside this
2624 loop is responding as soon as we detect the status, not after any
2625 pending events. */
2626
2627 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2628 all signals while here. */
2629 sigfillset (&block_mask);
2630 gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2631
2632 /* Always pull all events out of the kernel. We'll randomly select
2633 an event LWP out of all that have events, to prevent
2634 starvation. */
2635 while (event_child == NULL)
2636 {
2637 pid_t ret = 0;
2638
2639 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2640 quirks:
2641
2642 - If the thread group leader exits while other threads in the
2643 thread group still exist, waitpid(TGID, ...) hangs. That
2644 waitpid won't return an exit status until the other threads
2645 in the group are reaped.
2646
2647 - When a non-leader thread execs, that thread just vanishes
2648 without reporting an exit (so we'd hang if we waited for it
2649 explicitly in that case). The exec event is reported to
2650 the TGID pid. */
2651 errno = 0;
2652 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2653
2654 if (debug_threads)
2655 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2656 ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2657
2658 if (ret > 0)
2659 {
2660 if (debug_threads)
2661 {
2662 debug_printf ("LLW: waitpid %ld received %s\n",
2663 (long) ret, status_to_str (*wstatp));
2664 }
2665
2666 /* Filter all events. IOW, leave all events pending. We'll
2667 randomly select an event LWP out of all that have events
2668 below. */
2669 filter_event (ret, *wstatp);
2670 /* Retry until nothing comes out of waitpid. A single
2671 SIGCHLD can indicate more than one child stopped. */
2672 continue;
2673 }
2674
2675 /* Now that we've pulled all events out of the kernel, resume
2676 LWPs that don't have an interesting event to report. */
2677 if (stopping_threads == NOT_STOPPING_THREADS)
2678 for_each_thread ([this] (thread_info *thread)
2679 {
2680 resume_stopped_resumed_lwps (thread);
2681 });
2682
2683 /* ... and find an LWP with a status to report to the core, if
2684 any. */
2685 event_thread = find_thread_in_random ([&] (thread_info *thread)
2686 {
2687 return status_pending_p_callback (thread, filter_ptid);
2688 });
2689
2690 if (event_thread != NULL)
2691 {
2692 event_child = get_thread_lwp (event_thread);
2693 *wstatp = event_child->status_pending;
2694 event_child->status_pending_p = 0;
2695 event_child->status_pending = 0;
2696 break;
2697 }
2698
2699 /* Check for zombie thread group leaders. Those can't be reaped
2700 until all other threads in the thread group are. */
2701 check_zombie_leaders ();
2702
2703 auto not_stopped = [&] (thread_info *thread)
2704 {
2705 return not_stopped_callback (thread, wait_ptid);
2706 };
2707
2708 /* If there are no resumed children left in the set of LWPs we
2709 want to wait for, bail. We can't just block in
2710 waitpid/sigsuspend, because lwps might have been left stopped
2711 in trace-stop state, and we'd be stuck forever waiting for
2712 their status to change (which would only happen if we resumed
2713 them). Even if WNOHANG is set, this return code is preferred
2714 over 0 (below), as it is more detailed. */
2715 if (find_thread (not_stopped) == NULL)
2716 {
2717 if (debug_threads)
2718 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2719 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2720 return -1;
2721 }
2722
2723 /* No interesting event to report to the caller. */
2724 if ((options & WNOHANG))
2725 {
2726 if (debug_threads)
2727 debug_printf ("WNOHANG set, no event found\n");
2728
2729 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2730 return 0;
2731 }
2732
2733 /* Block until we get an event reported with SIGCHLD. */
2734 if (debug_threads)
2735 debug_printf ("sigsuspend'ing\n");
2736
2737 sigsuspend (&prev_mask);
2738 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2739 goto retry;
2740 }
2741
2742 gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2743
2744 current_thread = event_thread;
2745
2746 return lwpid_of (event_thread);
2747 }
2748
2749 int
2750 linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2751 {
2752 return wait_for_event_filtered (ptid, ptid, wstatp, options);
2753 }
2754
2755 /* Select one LWP out of those that have events pending. */
2756
2757 static void
2758 select_event_lwp (struct lwp_info **orig_lp)
2759 {
2760 struct thread_info *event_thread = NULL;
2761
2762 /* In all-stop, give preference to the LWP that is being
2763 single-stepped. There will be at most one, and it's the LWP that
2764 the core is most interested in. If we didn't do this, then we'd
2765 have to handle pending step SIGTRAPs somehow in case the core
2766 later continues the previously-stepped thread, otherwise we'd
2767 report the pending SIGTRAP, and the core, not having stepped the
2768 thread, wouldn't understand what the trap was for, and therefore
2769 would report it to the user as a random signal. */
2770 if (!non_stop)
2771 {
2772 event_thread = find_thread ([] (thread_info *thread)
2773 {
2774 lwp_info *lp = get_thread_lwp (thread);
2775
2776 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2777 && thread->last_resume_kind == resume_step
2778 && lp->status_pending_p);
2779 });
2780
2781 if (event_thread != NULL)
2782 {
2783 if (debug_threads)
2784 debug_printf ("SEL: Select single-step %s\n",
2785 target_pid_to_str (ptid_of (event_thread)));
2786 }
2787 }
2788 if (event_thread == NULL)
2789 {
2790 /* No single-stepping LWP. Select one at random, out of those
2791 which have had events. */
2792
2793 event_thread = find_thread_in_random ([&] (thread_info *thread)
2794 {
2795 lwp_info *lp = get_thread_lwp (thread);
2796
2797 /* Only resumed LWPs that have an event pending. */
2798 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2799 && lp->status_pending_p);
2800 });
2801 }
2802
2803 if (event_thread != NULL)
2804 {
2805 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2806
2807 /* Switch the event LWP. */
2808 *orig_lp = event_lp;
2809 }
2810 }
2811
2812 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2813 non-NULL.  */
2814
2815 static void
2816 unsuspend_all_lwps (struct lwp_info *except)
2817 {
2818 for_each_thread ([&] (thread_info *thread)
2819 {
2820 lwp_info *lwp = get_thread_lwp (thread);
2821
2822 if (lwp != except)
2823 lwp_suspended_decr (lwp);
2824 });
2825 }
2826
2827 static bool lwp_running (thread_info *thread);
2828
2829 /* Stabilize threads (move out of jump pads).
2830
2831 If a thread is midway collecting a fast tracepoint, we need to
2832 finish the collection and move it out of the jump pad before
2833 reporting the signal.
2834
2835 This avoids recursion while collecting (when a signal arrives
2836 midway, and the signal handler itself collects), which would trash
2837 the trace buffer. In case the user set a breakpoint in a signal
2838 handler, this avoids the backtrace showing the jump pad, etc..
2839 Most importantly, there are certain things we can't do safely if
2840 threads are stopped in a jump pad (or in one of its callees).  For
2841 example:
2842
2843 - starting a new trace run.  A thread still collecting the
2844 previous run could trash the trace buffer when resumed.  The trace
2845 buffer control structures would have been reset but the thread had
2846 no way to tell.  The thread could even be midway through memcpy'ing
2847 into the buffer, which would mean that when resumed, it would clobber
2848 the trace buffer that had been set up for a new run.
2849
2850 - we can't rewrite/reuse the jump pads for new tracepoints
2851 safely.  Say you do tstart while a thread is stopped midway through
2852 collecting.  When the thread is later resumed, it finishes the
2853 collection, and returns to the jump pad, to execute the original
2854 instruction that was under the tracepoint jump at the time the
2855 older run had been started. If the jump pad had been rewritten
2856 since for something else in the new run, the thread would now
2857 execute the wrong / random instructions. */
2858
2859 void
2860 linux_process_target::stabilize_threads ()
2861 {
2862 thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2863 {
2864 return stuck_in_jump_pad (thread);
2865 });
2866
2867 if (thread_stuck != NULL)
2868 {
2869 if (debug_threads)
2870 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2871 lwpid_of (thread_stuck));
2872 return;
2873 }
2874
2875 thread_info *saved_thread = current_thread;
2876
2877 stabilizing_threads = 1;
2878
2879 /* Kick 'em all. */
2880 for_each_thread ([this] (thread_info *thread)
2881 {
2882 move_out_of_jump_pad (thread);
2883 });
2884
2885 /* Loop until all are stopped out of the jump pads. */
2886 while (find_thread (lwp_running) != NULL)
2887 {
2888 struct target_waitstatus ourstatus;
2889 struct lwp_info *lwp;
2890 int wstat;
2891
2892 /* Note that we go through the full wait event loop.  While
2893 moving threads out of jump pad, we need to be able to step
2894 over internal breakpoints and such. */
2895 wait_1 (minus_one_ptid, &ourstatus, 0);
2896
2897 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2898 {
2899 lwp = get_thread_lwp (current_thread);
2900
2901 /* Lock it. */
2902 lwp_suspended_inc (lwp);
2903
2904 if (ourstatus.value.sig != GDB_SIGNAL_0
2905 || current_thread->last_resume_kind == resume_stop)
2906 {
2907 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2908 enqueue_one_deferred_signal (lwp, &wstat);
2909 }
2910 }
2911 }
2912
2913 unsuspend_all_lwps (NULL);
2914
2915 stabilizing_threads = 0;
2916
2917 current_thread = saved_thread;
2918
2919 if (debug_threads)
2920 {
2921 thread_stuck = find_thread ([this] (thread_info *thread)
2922 {
2923 return stuck_in_jump_pad (thread);
2924 });
2925
2926 if (thread_stuck != NULL)
2927 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2928 lwpid_of (thread_stuck));
2929 }
2930 }
2931
2932 /* Convenience function that is called when the kernel reports an
2933 event that is not passed out to GDB. */
2934
2935 static ptid_t
2936 ignore_event (struct target_waitstatus *ourstatus)
2937 {
2938 /* If we got an event, there may still be others, as a single
2939 SIGCHLD can indicate more than one child stopped. This forces
2940 another target_wait call. */
2941 async_file_mark ();
2942
2943 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2944 return null_ptid;
2945 }
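
/* Returning null_ptid with TARGET_WAITKIND_IGNORE makes the caller try
   again: the loop in linux_process_target::wait below keeps calling
   wait_1 while it sees exactly this combination (unless TARGET_WNOHANG
   was requested), and async_file_mark makes the event loop poll us once
   more in async mode.  */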
2946
2947 ptid_t
2948 linux_process_target::filter_exit_event (lwp_info *event_child,
2949 target_waitstatus *ourstatus)
2950 {
2951 client_state &cs = get_client_state ();
2952 struct thread_info *thread = get_lwp_thread (event_child);
2953 ptid_t ptid = ptid_of (thread);
2954
2955 if (!last_thread_of_process_p (pid_of (thread)))
2956 {
2957 if (cs.report_thread_events)
2958 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2959 else
2960 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2961
2962 delete_lwp (event_child);
2963 }
2964 return ptid;
2965 }
2966
2967 /* Returns 1 if GDB is interested in any event_child syscalls. */
2968
2969 static int
2970 gdb_catching_syscalls_p (struct lwp_info *event_child)
2971 {
2972 struct thread_info *thread = get_lwp_thread (event_child);
2973 struct process_info *proc = get_thread_process (thread);
2974
2975 return !proc->syscalls_to_catch.empty ();
2976 }
2977
2978 /* Returns 1 if GDB is interested in the event_child syscall.
2979 Only to be called when the stop reason is SYSCALL_SIGTRAP.  */
2980
2981 static int
2982 gdb_catch_this_syscall_p (struct lwp_info *event_child)
2983 {
2984 int sysno;
2985 struct thread_info *thread = get_lwp_thread (event_child);
2986 struct process_info *proc = get_thread_process (thread);
2987
2988 if (proc->syscalls_to_catch.empty ())
2989 return 0;
2990
2991 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2992 return 1;
2993
2994 get_syscall_trapinfo (event_child, &sysno);
2995
2996 for (int iter : proc->syscalls_to_catch)
2997 if (iter == sysno)
2998 return 1;
2999
3000 return 0;
3001 }
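
/* PROC->syscalls_to_catch is populated from GDB's "catch syscall"
   requests (the QCatchSyscalls packet); a single ANY_SYSCALL entry
   means every syscall is of interest, hence the short-circuit
   above.  */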
3002
3003 ptid_t
3004 linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
3005 int target_options)
3006 {
3007 client_state &cs = get_client_state ();
3008 int w;
3009 struct lwp_info *event_child;
3010 int options;
3011 int pid;
3012 int step_over_finished;
3013 int bp_explains_trap;
3014 int maybe_internal_trap;
3015 int report_to_gdb;
3016 int trace_event;
3017 int in_step_range;
3018 int any_resumed;
3019
3020 if (debug_threads)
3021 {
3022 debug_enter ();
3023 debug_printf ("wait_1: [%s]\n", target_pid_to_str (ptid));
3024 }
3025
3026 /* Translate generic target options into linux options. */
3027 options = __WALL;
3028 if (target_options & TARGET_WNOHANG)
3029 options |= WNOHANG;
3030
3031 bp_explains_trap = 0;
3032 trace_event = 0;
3033 in_step_range = 0;
3034 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3035
3036 auto status_pending_p_any = [&] (thread_info *thread)
3037 {
3038 return status_pending_p_callback (thread, minus_one_ptid);
3039 };
3040
3041 auto not_stopped = [&] (thread_info *thread)
3042 {
3043 return not_stopped_callback (thread, minus_one_ptid);
3044 };
3045
3046 /* Find a resumed LWP, if any. */
3047 if (find_thread (status_pending_p_any) != NULL)
3048 any_resumed = 1;
3049 else if (find_thread (not_stopped) != NULL)
3050 any_resumed = 1;
3051 else
3052 any_resumed = 0;
3053
3054 if (step_over_bkpt == null_ptid)
3055 pid = wait_for_event (ptid, &w, options);
3056 else
3057 {
3058 if (debug_threads)
3059 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3060 target_pid_to_str (step_over_bkpt));
3061 pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3062 }
3063
3064 if (pid == 0 || (pid == -1 && !any_resumed))
3065 {
3066 gdb_assert (target_options & TARGET_WNOHANG);
3067
3068 if (debug_threads)
3069 {
3070 debug_printf ("wait_1 ret = null_ptid, "
3071 "TARGET_WAITKIND_IGNORE\n");
3072 debug_exit ();
3073 }
3074
3075 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3076 return null_ptid;
3077 }
3078 else if (pid == -1)
3079 {
3080 if (debug_threads)
3081 {
3082 debug_printf ("wait_1 ret = null_ptid, "
3083 "TARGET_WAITKIND_NO_RESUMED\n");
3084 debug_exit ();
3085 }
3086
3087 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3088 return null_ptid;
3089 }
3090
3091 event_child = get_thread_lwp (current_thread);
3092
3093 /* wait_for_event only returns an exit status for the last
3094 child of a process. Report it. */
3095 if (WIFEXITED (w) || WIFSIGNALED (w))
3096 {
3097 if (WIFEXITED (w))
3098 {
3099 ourstatus->kind = TARGET_WAITKIND_EXITED;
3100 ourstatus->value.integer = WEXITSTATUS (w);
3101
3102 if (debug_threads)
3103 {
3104 debug_printf ("wait_1 ret = %s, exited with "
3105 "retcode %d\n",
3106 target_pid_to_str (ptid_of (current_thread)),
3107 WEXITSTATUS (w));
3108 debug_exit ();
3109 }
3110 }
3111 else
3112 {
3113 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3114 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3115
3116 if (debug_threads)
3117 {
3118 debug_printf ("wait_1 ret = %s, terminated with "
3119 "signal %d\n",
3120 target_pid_to_str (ptid_of (current_thread)),
3121 WTERMSIG (w));
3122 debug_exit ();
3123 }
3124 }
3125
3126 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3127 return filter_exit_event (event_child, ourstatus);
3128
3129 return ptid_of (current_thread);
3130 }
3131
3132 /* If step-over executes a breakpoint instruction, in the case of a
3133 hardware single step it means a gdb/gdbserver breakpoint had been
3134 planted on top of a permanent breakpoint, in the case of a software
3135 single step it may just mean that gdbserver hit the reinsert breakpoint.
3136 The PC has been adjusted by save_stop_reason to point at
3137 the breakpoint address.
3138 So in the case of hardware single step, advance the PC manually
3139 past the breakpoint; in the case of software single step, advance only
3140 if it's not the single_step_breakpoint we are hitting.
3141 This avoids the program trapping a permanent breakpoint
3142 forever.  */
3143 if (step_over_bkpt != null_ptid
3144 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3145 && (event_child->stepping
3146 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3147 {
3148 int increment_pc = 0;
3149 int breakpoint_kind = 0;
3150 CORE_ADDR stop_pc = event_child->stop_pc;
3151
3152 breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3153 sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3154
3155 if (debug_threads)
3156 {
3157 debug_printf ("step-over for %s executed software breakpoint\n",
3158 target_pid_to_str (ptid_of (current_thread)));
3159 }
3160
3161 if (increment_pc != 0)
3162 {
3163 struct regcache *regcache
3164 = get_thread_regcache (current_thread, 1);
3165
3166 event_child->stop_pc += increment_pc;
3167 low_set_pc (regcache, event_child->stop_pc);
3168
3169 if (!low_breakpoint_at (event_child->stop_pc))
3170 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3171 }
3172 }
3173
3174 /* If this event was not handled before, and is not a SIGTRAP, we
3175 report it. SIGILL and SIGSEGV are also treated as traps in case
3176 a breakpoint is inserted at the current PC. If this target does
3177 not support internal breakpoints at all, we also report the
3178 SIGTRAP without further processing; it's of no concern to us. */
3179 maybe_internal_trap
3180 = (low_supports_breakpoints ()
3181 && (WSTOPSIG (w) == SIGTRAP
3182 || ((WSTOPSIG (w) == SIGILL
3183 || WSTOPSIG (w) == SIGSEGV)
3184 && low_breakpoint_at (event_child->stop_pc))));
3185
3186 if (maybe_internal_trap)
3187 {
3188 /* Handle anything that requires bookkeeping before deciding to
3189 report the event or continue waiting. */
3190
3191 /* First check if we can explain the SIGTRAP with an internal
3192 breakpoint, or if we should possibly report the event to GDB.
3193 Do this before anything that may remove or insert a
3194 breakpoint. */
3195 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3196
3197 /* We have a SIGTRAP, possibly a step-over dance has just
3198 finished. If so, tweak the state machine accordingly,
3199 reinsert breakpoints and delete any single-step
3200 breakpoints. */
3201 step_over_finished = finish_step_over (event_child);
3202
3203 /* Now invoke the callbacks of any internal breakpoints there. */
3204 check_breakpoints (event_child->stop_pc);
3205
3206 /* Handle tracepoint data collecting. This may overflow the
3207 trace buffer, and cause a tracing stop, removing
3208 breakpoints. */
3209 trace_event = handle_tracepoints (event_child);
3210
3211 if (bp_explains_trap)
3212 {
3213 if (debug_threads)
3214 debug_printf ("Hit a gdbserver breakpoint.\n");
3215 }
3216 }
3217 else
3218 {
3219 /* We have some other signal, possibly a step-over dance was in
3220 progress, and it should be cancelled too. */
3221 step_over_finished = finish_step_over (event_child);
3222 }
3223
3224 /* We have all the data we need. Either report the event to GDB, or
3225 resume threads and keep waiting for more. */
3226
3227 /* If we're collecting a fast tracepoint, finish the collection and
3228 move out of the jump pad before delivering a signal. See
3229 stabilize_threads, above.  */
3230
3231 if (WIFSTOPPED (w)
3232 && WSTOPSIG (w) != SIGTRAP
3233 && supports_fast_tracepoints ()
3234 && agent_loaded_p ())
3235 {
3236 if (debug_threads)
3237 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3238 "to defer or adjust it.\n",
3239 WSTOPSIG (w), lwpid_of (current_thread));
3240
3241 /* Allow debugging the jump pad itself. */
3242 if (current_thread->last_resume_kind != resume_step
3243 && maybe_move_out_of_jump_pad (event_child, &w))
3244 {
3245 enqueue_one_deferred_signal (event_child, &w);
3246
3247 if (debug_threads)
3248 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3249 WSTOPSIG (w), lwpid_of (current_thread));
3250
3251 resume_one_lwp (event_child, 0, 0, NULL);
3252
3253 if (debug_threads)
3254 debug_exit ();
3255 return ignore_event (ourstatus);
3256 }
3257 }
3258
3259 if (event_child->collecting_fast_tracepoint
3260 != fast_tpoint_collect_result::not_collecting)
3261 {
3262 if (debug_threads)
3263 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3264 "Check if we're already there.\n",
3265 lwpid_of (current_thread),
3266 (int) event_child->collecting_fast_tracepoint);
3267
3268 trace_event = 1;
3269
3270 event_child->collecting_fast_tracepoint
3271 = linux_fast_tracepoint_collecting (event_child, NULL);
3272
3273 if (event_child->collecting_fast_tracepoint
3274 != fast_tpoint_collect_result::before_insn)
3275 {
3276 /* No longer need this breakpoint. */
3277 if (event_child->exit_jump_pad_bkpt != NULL)
3278 {
3279 if (debug_threads)
3280 debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3281 "stopping all threads momentarily.\n");
3282
3283 /* Other running threads could hit this breakpoint.
3284 We don't handle moribund locations like GDB does,
3285 instead we always pause all threads when removing
3286 breakpoints, so that any step-over or
3287 decr_pc_after_break adjustment is always taken
3288 care of while the breakpoint is still
3289 inserted. */
3290 stop_all_lwps (1, event_child);
3291
3292 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3293 event_child->exit_jump_pad_bkpt = NULL;
3294
3295 unstop_all_lwps (1, event_child);
3296
3297 gdb_assert (event_child->suspended >= 0);
3298 }
3299 }
3300
3301 if (event_child->collecting_fast_tracepoint
3302 == fast_tpoint_collect_result::not_collecting)
3303 {
3304 if (debug_threads)
3305 debug_printf ("fast tracepoint finished "
3306 "collecting successfully.\n");
3307
3308 /* We may have a deferred signal to report. */
3309 if (dequeue_one_deferred_signal (event_child, &w))
3310 {
3311 if (debug_threads)
3312 debug_printf ("dequeued one signal.\n");
3313 }
3314 else
3315 {
3316 if (debug_threads)
3317 debug_printf ("no deferred signals.\n");
3318
3319 if (stabilizing_threads)
3320 {
3321 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3322 ourstatus->value.sig = GDB_SIGNAL_0;
3323
3324 if (debug_threads)
3325 {
3326 debug_printf ("wait_1 ret = %s, stopped "
3327 "while stabilizing threads\n",
3328 target_pid_to_str (ptid_of (current_thread)));
3329 debug_exit ();
3330 }
3331
3332 return ptid_of (current_thread);
3333 }
3334 }
3335 }
3336 }
3337
3338 /* Check whether GDB would be interested in this event. */
3339
3340 /* Check if GDB is interested in this syscall. */
3341 if (WIFSTOPPED (w)
3342 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3343 && !gdb_catch_this_syscall_p (event_child))
3344 {
3345 if (debug_threads)
3346 {
3347 debug_printf ("Ignored syscall for LWP %ld.\n",
3348 lwpid_of (current_thread));
3349 }
3350
3351 resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3352
3353 if (debug_threads)
3354 debug_exit ();
3355 return ignore_event (ourstatus);
3356 }
3357
3358 /* If GDB is not interested in this signal, don't stop other
3359 threads, and don't report it to GDB. Just resume the inferior
3360 right away. We do this for threading-related signals as well as
3361 any that GDB specifically requested we ignore. But never ignore
3362 SIGSTOP if we sent it ourselves, and do not ignore signals when
3363 stepping - they may require special handling to skip the signal
3364 handler. Also never ignore signals that could be caused by a
3365 breakpoint. */
3366 if (WIFSTOPPED (w)
3367 && current_thread->last_resume_kind != resume_step
3368 && (
3369 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3370 (current_process ()->priv->thread_db != NULL
3371 && (WSTOPSIG (w) == __SIGRTMIN
3372 || WSTOPSIG (w) == __SIGRTMIN + 1))
3373 ||
3374 #endif
3375 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3376 && !(WSTOPSIG (w) == SIGSTOP
3377 && current_thread->last_resume_kind == resume_stop)
3378 && !linux_wstatus_maybe_breakpoint (w))))
3379 {
3380 siginfo_t info, *info_p;
3381
3382 if (debug_threads)
3383 debug_printf ("Ignored signal %d for LWP %ld.\n",
3384 WSTOPSIG (w), lwpid_of (current_thread));
3385
3386 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3387 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3388 info_p = &info;
3389 else
3390 info_p = NULL;
3391
3392 if (step_over_finished)
3393 {
3394 /* We cancelled this thread's step-over above. We still
3395 need to unsuspend all other LWPs, and set them back
3396 running again while the signal handler runs. */
3397 unsuspend_all_lwps (event_child);
3398
3399 /* Enqueue the pending signal info so that proceed_all_lwps
3400 doesn't lose it. */
3401 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3402
3403 proceed_all_lwps ();
3404 }
3405 else
3406 {
3407 resume_one_lwp (event_child, event_child->stepping,
3408 WSTOPSIG (w), info_p);
3409 }
3410
3411 if (debug_threads)
3412 debug_exit ();
3413
3414 return ignore_event (ourstatus);
3415 }
3416
3417 /* Note that all addresses are always "out of the step range" when
3418 there's no range to begin with. */
3419 in_step_range = lwp_in_step_range (event_child);
3420
3421 /* If GDB wanted this thread to single step, and the thread is out
3422 of the step range, we always want to report the SIGTRAP, and let
3423 GDB handle it. Watchpoints should always be reported. So should
3424 signals we can't explain. A SIGTRAP we can't explain could be a
3425 GDB breakpoint --- we may or may not support Z0 breakpoints.  If we
3426 do, we'll be able to handle GDB breakpoints on top of internal
3427 breakpoints, by handling the internal breakpoint and still
3428 reporting the event to GDB. If we don't, we're out of luck, GDB
3429 won't see the breakpoint hit. If we see a single-step event but
3430 the thread should be continuing, don't pass the trap to gdb.
3431 That indicates that we had previously finished a single-step but
3432 left the single-step pending -- see
3433 complete_ongoing_step_over. */
3434 report_to_gdb = (!maybe_internal_trap
3435 || (current_thread->last_resume_kind == resume_step
3436 && !in_step_range)
3437 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3438 || (!in_step_range
3439 && !bp_explains_trap
3440 && !trace_event
3441 && !step_over_finished
3442 && !(current_thread->last_resume_kind == resume_continue
3443 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3444 || (gdb_breakpoint_here (event_child->stop_pc)
3445 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3446 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3447 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3448
3449 run_breakpoint_commands (event_child->stop_pc);
3450
3451 /* We found no reason GDB would want us to stop. We either hit one
3452 of our own breakpoints, or finished an internal step GDB
3453 shouldn't know about. */
3454 if (!report_to_gdb)
3455 {
3456 if (debug_threads)
3457 {
3458 if (bp_explains_trap)
3459 debug_printf ("Hit a gdbserver breakpoint.\n");
3460 if (step_over_finished)
3461 debug_printf ("Step-over finished.\n");
3462 if (trace_event)
3463 debug_printf ("Tracepoint event.\n");
3464 if (lwp_in_step_range (event_child))
3465 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3466 paddress (event_child->stop_pc),
3467 paddress (event_child->step_range_start),
3468 paddress (event_child->step_range_end));
3469 }
3470
3471 /* We're not reporting this breakpoint to GDB, so apply the
3472 decr_pc_after_break adjustment to the inferior's regcache
3473 ourselves. */
3474
3475 if (low_supports_breakpoints ())
3476 {
3477 struct regcache *regcache
3478 = get_thread_regcache (current_thread, 1);
3479 low_set_pc (regcache, event_child->stop_pc);
3480 }
3481
3482 if (step_over_finished)
3483 {
3484 /* If we have finished stepping over a breakpoint, we've
3485 stopped and suspended all LWPs momentarily except the
3486 stepping one. This is where we resume them all again.
3487 We're going to keep waiting, so use proceed, which
3488 handles stepping over the next breakpoint. */
3489 unsuspend_all_lwps (event_child);
3490 }
3491 else
3492 {
3493 /* Remove the single-step breakpoints if any. Note that
3494 there isn't single-step breakpoint if we finished stepping
3495 over. */
3496 if (supports_software_single_step ()
3497 && has_single_step_breakpoints (current_thread))
3498 {
3499 stop_all_lwps (0, event_child);
3500 delete_single_step_breakpoints (current_thread);
3501 unstop_all_lwps (0, event_child);
3502 }
3503 }
3504
3505 if (debug_threads)
3506 debug_printf ("proceeding all threads.\n");
3507 proceed_all_lwps ();
3508
3509 if (debug_threads)
3510 debug_exit ();
3511
3512 return ignore_event (ourstatus);
3513 }
3514
3515 if (debug_threads)
3516 {
3517 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3518 {
3519 std::string str
3520 = target_waitstatus_to_string (&event_child->waitstatus);
3521
3522 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3523 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3524 }
3525 if (current_thread->last_resume_kind == resume_step)
3526 {
3527 if (event_child->step_range_start == event_child->step_range_end)
3528 debug_printf ("GDB wanted to single-step, reporting event.\n");
3529 else if (!lwp_in_step_range (event_child))
3530 debug_printf ("Out of step range, reporting event.\n");
3531 }
3532 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3533 debug_printf ("Stopped by watchpoint.\n");
3534 else if (gdb_breakpoint_here (event_child->stop_pc))
3535 debug_printf ("Stopped by GDB breakpoint.\n");
3536 /* Already inside the debug_threads block above.  */
3537 debug_printf ("Hit a non-gdbserver trap event.\n");
3538 }
3539
3540 /* Alright, we're going to report a stop. */
3541
3542 /* Remove single-step breakpoints. */
3543 if (supports_software_single_step ())
3544 {
3545 /* Whether to remove single-step breakpoints.  If true, stop all
3546 lwps first, so that other threads won't hit a breakpoint left in
3547 stale memory.  */
3548 int remove_single_step_breakpoints_p = 0;
3549
3550 if (non_stop)
3551 {
3552 remove_single_step_breakpoints_p
3553 = has_single_step_breakpoints (current_thread);
3554 }
3555 else
3556 {
3557 /* In all-stop, a stop reply cancels all previous resume
3558 requests. Delete all single-step breakpoints. */
3559
3560 find_thread ([&] (thread_info *thread) {
3561 if (has_single_step_breakpoints (thread))
3562 {
3563 remove_single_step_breakpoints_p = 1;
3564 return true;
3565 }
3566
3567 return false;
3568 });
3569 }
3570
3571 if (remove_single_step_breakpoints_p)
3572 {
3573 /* If we remove single-step breakpoints from memory, stop all lwps,
3574 so that other threads won't hit the breakpoint in the stale
3575 memory. */
3576 stop_all_lwps (0, event_child);
3577
3578 if (non_stop)
3579 {
3580 gdb_assert (has_single_step_breakpoints (current_thread));
3581 delete_single_step_breakpoints (current_thread);
3582 }
3583 else
3584 {
3585 for_each_thread ([] (thread_info *thread){
3586 if (has_single_step_breakpoints (thread))
3587 delete_single_step_breakpoints (thread);
3588 });
3589 }
3590
3591 unstop_all_lwps (0, event_child);
3592 }
3593 }
3594
3595 if (!stabilizing_threads)
3596 {
3597 /* In all-stop, stop all threads. */
3598 if (!non_stop)
3599 stop_all_lwps (0, NULL);
3600
3601 if (step_over_finished)
3602 {
3603 if (!non_stop)
3604 {
3605 /* If we were doing a step-over, all other threads but
3606 the stepping one had been paused in start_step_over,
3607 with their suspend counts incremented. We don't want
3608 to do a full unstop/unpause, because we're in
3609 all-stop mode (so we want threads stopped), but we
3610 still need to unsuspend the other threads, to
3611 decrement their `suspended' count back. */
3612 unsuspend_all_lwps (event_child);
3613 }
3614 else
3615 {
3616 /* If we just finished a step-over, then all threads had
3617 been momentarily paused. In all-stop, that's fine,
3618 we want threads stopped by now anyway. In non-stop,
3619 we need to re-resume threads that GDB wanted to be
3620 running. */
3621 unstop_all_lwps (1, event_child);
3622 }
3623 }
3624
3625 /* If we're not waiting for a specific LWP, choose an event LWP
3626 from among those that have had events. Giving equal priority
3627 to all LWPs that have had events helps prevent
3628 starvation. */
3629 if (ptid == minus_one_ptid)
3630 {
3631 event_child->status_pending_p = 1;
3632 event_child->status_pending = w;
3633
3634 select_event_lwp (&event_child);
3635
3636 /* current_thread and event_child must stay in sync. */
3637 current_thread = get_lwp_thread (event_child);
3638
3639 event_child->status_pending_p = 0;
3640 w = event_child->status_pending;
3641 }
3642
3643
3644 /* Stabilize threads (move out of jump pads). */
3645 if (!non_stop)
3646 target_stabilize_threads ();
3647 }
3648 else
3649 {
3650 /* If we just finished a step-over, then all threads had been
3651 momentarily paused. In all-stop, that's fine, we want
3652 threads stopped by now anyway. In non-stop, we need to
3653 re-resume threads that GDB wanted to be running. */
3654 if (step_over_finished)
3655 unstop_all_lwps (1, event_child);
3656 }
3657
3658 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3659 {
3660 /* If the reported event is an exit, fork, vfork or exec, let
3661 GDB know. */
3662
3663 /* Break the unreported fork relationship chain. */
3664 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3665 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3666 {
3667 event_child->fork_relative->fork_relative = NULL;
3668 event_child->fork_relative = NULL;
3669 }
3670
3671 *ourstatus = event_child->waitstatus;
3672 /* Clear the event lwp's waitstatus since we handled it already. */
3673 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3674 }
3675 else
3676 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3677
3678 /* Now that we've selected our final event LWP, un-adjust its PC if
3679 it was a software breakpoint, and the client doesn't know we can
3680 adjust the breakpoint ourselves. */
3681 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3682 && !cs.swbreak_feature)
3683 {
3684 int decr_pc = low_decr_pc_after_break ();
3685
3686 if (decr_pc != 0)
3687 {
3688 struct regcache *regcache
3689 = get_thread_regcache (current_thread, 1);
3690 low_set_pc (regcache, event_child->stop_pc + decr_pc);
3691 }
3692 }
3693
3694 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3695 {
3696 get_syscall_trapinfo (event_child,
3697 &ourstatus->value.syscall_number);
3698 ourstatus->kind = event_child->syscall_state;
3699 }
3700 else if (current_thread->last_resume_kind == resume_stop
3701 && WSTOPSIG (w) == SIGSTOP)
3702 {
3703 /* A thread that has been requested to stop by GDB with vCont;t
3704 stopped cleanly, so report it as SIG0.  The use of
3705 SIGSTOP is an implementation detail.  */
3706 ourstatus->value.sig = GDB_SIGNAL_0;
3707 }
3708 else if (current_thread->last_resume_kind == resume_stop
3709 && WSTOPSIG (w) != SIGSTOP)
3710 {
3711 /* A thread that has been requested to stop by GDB with vCont;t,
3712 but it stopped for some other reason.  */
3713 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3714 }
3715 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3716 {
3717 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3718 }
3719
3720 gdb_assert (step_over_bkpt == null_ptid);
3721
3722 if (debug_threads)
3723 {
3724 debug_printf ("wait_1 ret = %s, %d, %d\n",
3725 target_pid_to_str (ptid_of (current_thread)),
3726 ourstatus->kind, ourstatus->value.sig);
3727 debug_exit ();
3728 }
3729
3730 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3731 return filter_exit_event (event_child, ourstatus);
3732
3733 return ptid_of (current_thread);
3734 }
3735
3736 /* Get rid of any pending event in the pipe. */
3737 static void
3738 async_file_flush (void)
3739 {
3740 int ret;
3741 char buf;
3742
3743 do
3744 ret = read (linux_event_pipe[0], &buf, 1);
3745 while (ret >= 0 || (ret == -1 && errno == EINTR));
3746 }
3747
3748 /* Put something in the pipe, so the event loop wakes up. */
3749 static void
3750 async_file_mark (void)
3751 {
3752 int ret;
3753
3754 async_file_flush ();
3755
3756 do
3757 ret = write (linux_event_pipe[1], "+", 1);
3758 while (ret == 0 || (ret == -1 && errno == EINTR));
3759
3760 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3761 be awakened anyway. */
3762 }
3763
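/* Implementation of the wait target op.  Wraps wait_1, looping until a
   real event is seen in synchronous mode, and re-marking the event pipe
   in async mode since a single SIGCHLD may cover several child stops.  */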
3764 ptid_t
3765 linux_process_target::wait (ptid_t ptid,
3766 target_waitstatus *ourstatus,
3767 int target_options)
3768 {
3769 ptid_t event_ptid;
3770
3771 /* Flush the async file first. */
3772 if (target_is_async_p ())
3773 async_file_flush ();
3774
3775 do
3776 {
3777 event_ptid = wait_1 (ptid, ourstatus, target_options);
3778 }
3779 while ((target_options & TARGET_WNOHANG) == 0
3780 && event_ptid == null_ptid
3781 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3782
3783 /* If at least one stop was reported, there may be more. A single
3784 SIGCHLD can signal more than one child stop. */
3785 if (target_is_async_p ()
3786 && (target_options & TARGET_WNOHANG) != 0
3787 && event_ptid != null_ptid)
3788 async_file_mark ();
3789
3790 return event_ptid;
3791 }
3792
3793 /* Send a signal to an LWP. */
3794
3795 static int
3796 kill_lwp (unsigned long lwpid, int signo)
3797 {
3798 int ret;
3799
3800 errno = 0;
3801 ret = syscall (__NR_tkill, lwpid, signo);
3802 if (errno == ENOSYS)
3803 {
3804 /* If tkill fails, then we are not using nptl threads, a
3805 configuration we no longer support. */
3806 perror_with_name (("tkill"));
3807 }
3808 return ret;
3809 }
3810
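/* Ask LWP to stop.  This just sends the SIGSTOP; the resulting stop is
   collected later through the normal event loop.  */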
3811 void
3812 linux_stop_lwp (struct lwp_info *lwp)
3813 {
3814 send_sigstop (lwp);
3815 }
3816
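/* Send a SIGSTOP to LWP, unless it already has one pending.  */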
3817 static void
3818 send_sigstop (struct lwp_info *lwp)
3819 {
3820 int pid;
3821
3822 pid = lwpid_of (get_lwp_thread (lwp));
3823
3824 /* If we already have a pending stop signal for this process, don't
3825 send another. */
3826 if (lwp->stop_expected)
3827 {
3828 if (debug_threads)
3829 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3830
3831 return;
3832 }
3833
3834 if (debug_threads)
3835 debug_printf ("Sending sigstop to lwp %d\n", pid);
3836
3837 lwp->stop_expected = 1;
3838 kill_lwp (pid, SIGSTOP);
3839 }
3840
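/* Variant of send_sigstop used when stopping all threads: skip EXCEPT,
   and skip LWPs that are already stopped.  */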
3841 static void
3842 send_sigstop (thread_info *thread, lwp_info *except)
3843 {
3844 struct lwp_info *lwp = get_thread_lwp (thread);
3845
3846 /* Ignore EXCEPT. */
3847 if (lwp == except)
3848 return;
3849
3850 if (lwp->stopped)
3851 return;
3852
3853 send_sigstop (lwp);
3854 }
3855
3856 /* Increment the suspend count of an LWP, and stop it, if not stopped
3857 yet. */
3858 static void
3859 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3860 {
3861 struct lwp_info *lwp = get_thread_lwp (thread);
3862
3863 /* Ignore EXCEPT. */
3864 if (lwp == except)
3865 return;
3866
3867 lwp_suspended_inc (lwp);
3868
3869 send_sigstop (thread, except);
3870 }
3871
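/* Record that LWP exited or was killed, with wait status WSTAT, so that
   the event can be reported later, and make sure no further stops are
   expected from it.  */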
3872 static void
3873 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3874 {
3875 /* Store the exit status for later. */
3876 lwp->status_pending_p = 1;
3877 lwp->status_pending = wstat;
3878
3879 /* Store in waitstatus as well, as there's nothing else to process
3880 for this event. */
3881 if (WIFEXITED (wstat))
3882 {
3883 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3884 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3885 }
3886 else if (WIFSIGNALED (wstat))
3887 {
3888 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3889 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3890 }
3891
3892 /* Prevent trying to stop it. */
3893 lwp->stopped = 1;
3894
3895 /* No further stops are expected from a dead lwp. */
3896 lwp->stop_expected = 0;
3897 }
3898
3899 /* Return true if LWP has exited already, and has a pending exit event
3900 to report to GDB. */
3901
3902 static int
3903 lwp_is_marked_dead (struct lwp_info *lwp)
3904 {
3905 return (lwp->status_pending_p
3906 && (WIFEXITED (lwp->status_pending)
3907 || WIFSIGNALED (lwp->status_pending)));
3908 }
3909
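/* Wait until every LWP being stopped has reported its SIGSTOP, leaving
   all other events pending.  */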
3910 void
3911 linux_process_target::wait_for_sigstop ()
3912 {
3913 struct thread_info *saved_thread;
3914 ptid_t saved_tid;
3915 int wstat;
3916 int ret;
3917
3918 saved_thread = current_thread;
3919 if (saved_thread != NULL)
3920 saved_tid = saved_thread->id;
3921 else
3922 saved_tid = null_ptid; /* avoid bogus unused warning */
3923
3924 if (debug_threads)
3925 debug_printf ("wait_for_sigstop: pulling events\n");
3926
3927 /* Passing NULL_PTID as filter indicates we want all events to be
3928 left pending. Eventually this returns when there are no
3929 unwaited-for children left. */
3930 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3931 gdb_assert (ret == -1);
3932
3933 if (saved_thread == NULL || mythread_alive (saved_tid))
3934 current_thread = saved_thread;
3935 else
3936 {
3937 if (debug_threads)
3938 debug_printf ("Previously current thread died.\n");
3939
3940 /* We can't change the current inferior behind GDB's back,
3941 otherwise, a subsequent command may apply to the wrong
3942 process. */
3943 current_thread = NULL;
3944 }
3945 }
3946
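/* Return true if THREAD is stopped someplace GDB needs to see (a GDB
   breakpoint, a watchpoint hit, or a step) while in the middle of a fast
   tracepoint collect, and so must be moved out of the jump pad before
   the stop can be reported.  */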
3947 bool
3948 linux_process_target::stuck_in_jump_pad (thread_info *thread)
3949 {
3950 struct lwp_info *lwp = get_thread_lwp (thread);
3951
3952 if (lwp->suspended != 0)
3953 {
3954 internal_error (__FILE__, __LINE__,
3955 "LWP %ld is suspended, suspended=%d\n",
3956 lwpid_of (thread), lwp->suspended);
3957 }
3958 gdb_assert (lwp->stopped);
3959
3960 /* Allow debugging the jump pad, gdb_collect, etc.  */
3961 return (supports_fast_tracepoints ()
3962 && agent_loaded_p ()
3963 && (gdb_breakpoint_here (lwp->stop_pc)
3964 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3965 || thread->last_resume_kind == resume_step)
3966 && (linux_fast_tracepoint_collecting (lwp, NULL)
3967 != fast_tpoint_collect_result::not_collecting));
3968 }
3969
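/* If THREAD is stopped inside a fast tracepoint jump pad and GDB doesn't
   need to see this stop, resume it so the collect can finish; otherwise
   suspend it where it is.  Any pending signal is deferred meanwhile.  */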
3970 void
3971 linux_process_target::move_out_of_jump_pad (thread_info *thread)
3972 {
3973 struct thread_info *saved_thread;
3974 struct lwp_info *lwp = get_thread_lwp (thread);
3975 int *wstat;
3976
3977 if (lwp->suspended != 0)
3978 {
3979 internal_error (__FILE__, __LINE__,
3980 "LWP %ld is suspended, suspended=%d\n",
3981 lwpid_of (thread), lwp->suspended);
3982 }
3983 gdb_assert (lwp->stopped);
3984
3985 /* For gdb_breakpoint_here. */
3986 saved_thread = current_thread;
3987 current_thread = thread;
3988
3989 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3990
3991 /* Allow debugging the jump pad, gdb_collect, etc. */
3992 if (!gdb_breakpoint_here (lwp->stop_pc)
3993 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3994 && thread->last_resume_kind != resume_step
3995 && maybe_move_out_of_jump_pad (lwp, wstat))
3996 {
3997 if (debug_threads)
3998 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3999 lwpid_of (thread));
4000
4001 if (wstat)
4002 {
4003 lwp->status_pending_p = 0;
4004 enqueue_one_deferred_signal (lwp, wstat);
4005
4006 if (debug_threads)
4007 debug_printf ("Signal %d for LWP %ld deferred "
4008 "(in jump pad)\n",
4009 WSTOPSIG (*wstat), lwpid_of (thread));
4010 }
4011
4012 resume_one_lwp (lwp, 0, 0, NULL);
4013 }
4014 else
4015 lwp_suspended_inc (lwp);
4016
4017 current_thread = saved_thread;
4018 }
4019
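/* Return true if THREAD's LWP is alive and not stopped.  */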
4020 static bool
4021 lwp_running (thread_info *thread)
4022 {
4023 struct lwp_info *lwp = get_thread_lwp (thread);
4024
4025 if (lwp_is_marked_dead (lwp))
4026 return false;
4027
4028 return !lwp->stopped;
4029 }
4030
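/* Stop all LWPs, additionally suspending them if SUSPEND is nonzero.
   EXCEPT, if non-NULL, is left alone.  Returns once every LWP has
   reported its SIGSTOP, with all other events left pending.  */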
4031 void
4032 linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
4033 {
4034 /* Should not be called recursively. */
4035 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4036
4037 if (debug_threads)
4038 {
4039 debug_enter ();
4040 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4041 suspend ? "stop-and-suspend" : "stop",
4042 except != NULL
4043 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4044 : "none");
4045 }
4046
4047 stopping_threads = (suspend
4048 ? STOPPING_AND_SUSPENDING_THREADS
4049 : STOPPING_THREADS);
4050
4051 if (suspend)
4052 for_each_thread ([&] (thread_info *thread)
4053 {
4054 suspend_and_send_sigstop (thread, except);
4055 });
4056 else
4057 for_each_thread ([&] (thread_info *thread)
4058 {
4059 send_sigstop (thread, except);
4060 });
4061
4062 wait_for_sigstop ();
4063 stopping_threads = NOT_STOPPING_THREADS;
4064
4065 if (debug_threads)
4066 {
4067 debug_printf ("stop_all_lwps done, setting stopping_threads "
4068 "back to !stopping\n");
4069 debug_exit ();
4070 }
4071 }
4072
4073 /* Enqueue one signal in the chain of signals which need to be
4074 delivered to this process on next resume. */
4075
4076 static void
4077 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4078 {
4079 struct pending_signals *p_sig = XNEW (struct pending_signals);
4080
4081 p_sig->prev = lwp->pending_signals;
4082 p_sig->signal = signal;
4083 if (info == NULL)
4084 memset (&p_sig->info, 0, sizeof (siginfo_t));
4085 else
4086 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4087 lwp->pending_signals = p_sig;
4088 }
4089
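/* Install single-step breakpoints at each address LWP's next instruction
   could resolve to, as computed by the low target's software single-step
   support.  */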
4090 void
4091 linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
4092 {
4093 struct thread_info *thread = get_lwp_thread (lwp);
4094 struct regcache *regcache = get_thread_regcache (thread, 1);
4095
4096 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4097
4098 current_thread = thread;
4099 std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
4100
4101 for (CORE_ADDR pc : next_pcs)
4102 set_single_step_breakpoint (pc, current_ptid);
4103 }
4104
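/* Arrange for LWP to be single-stepped.  Return 1 if hardware
   single-step will be used (PTRACE_SINGLESTEP), or 0 if software
   single-step breakpoints were installed instead and the LWP should
   simply be continued.  */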
4105 int
4106 linux_process_target::single_step (lwp_info* lwp)
4107 {
4108 int step = 0;
4109
4110 if (can_hardware_single_step ())
4111 {
4112 step = 1;
4113 }
4114 else if (supports_software_single_step ())
4115 {
4116 install_software_single_step_breakpoints (lwp);
4117 step = 0;
4118 }
4119 else
4120 {
4121 if (debug_threads)
4122 debug_printf ("stepping is not implemented on this target\n");
4123 }
4124
4125 return step;
4126 }
4127
4128 /* The signal can be delivered to the inferior if we are not trying to
4129 finish a fast tracepoint collect.  Since a signal can be delivered
4130 during a step-over, the program may enter the signal handler and
4131 trap again after returning from it.  We can live with the spurious
4132 double traps.  */
4133
4134 static int
4135 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4136 {
4137 return (lwp->collecting_fast_tracepoint
4138 == fast_tpoint_collect_result::not_collecting);
4139 }
4140
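/* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
   SIGNAL is nonzero, deliver that signal on resume, with INFO as its
   siginfo.  Errors from ptrace are thrown; most callers want the
   non-throwing wrapper resume_one_lwp further below.  */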
4141 void
4142 linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
4143 int signal, siginfo_t *info)
4144 {
4145 struct thread_info *thread = get_lwp_thread (lwp);
4146 struct thread_info *saved_thread;
4147 int ptrace_request;
4148 struct process_info *proc = get_thread_process (thread);
4149
4150 /* Note that the target description may not be initialised
4151 (proc->tdesc == NULL) at this point, because the program hasn't
4152 stopped at its first instruction yet, i.e., GDBserver is still
4153 skipping the extra traps from the wrapper program (see option
4154 --wrapper).  Code in this function that requires register access
4155 should therefore be guarded by a check on proc->tdesc.  */
4156
4157 if (lwp->stopped == 0)
4158 return;
4159
4160 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4161
4162 fast_tpoint_collect_result fast_tp_collecting
4163 = lwp->collecting_fast_tracepoint;
4164
4165 gdb_assert (!stabilizing_threads
4166 || (fast_tp_collecting
4167 != fast_tpoint_collect_result::not_collecting));
4168
4169 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4170 user used the "jump" command, or "set $pc = foo"). */
4171 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4172 {
4173 /* Collecting 'while-stepping' actions doesn't make sense
4174 anymore. */
4175 release_while_stepping_state_list (thread);
4176 }
4177
4178 /* If we have pending signals or status, and a new signal, enqueue the
4179 signal. Also enqueue the signal if it can't be delivered to the
4180 inferior right now. */
4181 if (signal != 0
4182 && (lwp->status_pending_p
4183 || lwp->pending_signals != NULL
4184 || !lwp_signal_can_be_delivered (lwp)))
4185 {
4186 enqueue_pending_signal (lwp, signal, info);
4187
4188 /* Postpone any pending signal. It was enqueued above. */
4189 signal = 0;
4190 }
4191
4192 if (lwp->status_pending_p)
4193 {
4194 if (debug_threads)
4195 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4196 " has pending status\n",
4197 lwpid_of (thread), step ? "step" : "continue",
4198 lwp->stop_expected ? "expected" : "not expected");
4199 return;
4200 }
4201
4202 saved_thread = current_thread;
4203 current_thread = thread;
4204
4205 /* This bit needs some thinking about. If we get a signal that
4206 we must report while a single-step reinsert is still pending,
4207 we often end up resuming the thread. It might be better to
4208 (ew) allow a stack of pending events; then we could be sure that
4209 the reinsert happened right away and not lose any signals.
4210
4211 Making this stack would also shrink the window in which breakpoints are
4212 uninserted (see comment in linux_wait_for_lwp) but not enough for
4213 complete correctness, so it won't solve that problem. It may be
4214 worthwhile just to solve this one, however. */
4215 if (lwp->bp_reinsert != 0)
4216 {
4217 if (debug_threads)
4218 debug_printf (" pending reinsert at 0x%s\n",
4219 paddress (lwp->bp_reinsert));
4220
4221 if (can_hardware_single_step ())
4222 {
4223 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4224 {
4225 if (step == 0)
4226 warning ("BAD - reinserting but not stepping.");
4227 if (lwp->suspended)
4228 warning ("BAD - reinserting and suspended(%d).",
4229 lwp->suspended);
4230 }
4231 }
4232
4233 step = maybe_hw_step (thread);
4234 }
4235
4236 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4237 {
4238 if (debug_threads)
4239 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4240 " (exit-jump-pad-bkpt)\n",
4241 lwpid_of (thread));
4242 }
4243 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4244 {
4245 if (debug_threads)
4246 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4247 " single-stepping\n",
4248 lwpid_of (thread));
4249
4250 if (can_hardware_single_step ())
4251 step = 1;
4252 else
4253 {
4254 internal_error (__FILE__, __LINE__,
4255 "moving out of jump pad single-stepping"
4256 " not implemented on this target");
4257 }
4258 }
4259
4260 /* If we have while-stepping actions in this thread set it stepping.
4261 If we have a signal to deliver, it may or may not be set to
4262 SIG_IGN; we don't know.  Assume so, and allow collecting
4263 while-stepping into a signal handler. A possible smart thing to
4264 do would be to set an internal breakpoint at the signal return
4265 address, continue, and carry on catching this while-stepping
4266 action only when that breakpoint is hit. A future
4267 enhancement. */
4268 if (thread->while_stepping != NULL)
4269 {
4270 if (debug_threads)
4271 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4272 lwpid_of (thread));
4273
4274 step = single_step (lwp);
4275 }
4276
4277 if (proc->tdesc != NULL && low_supports_breakpoints ())
4278 {
4279 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4280
4281 lwp->stop_pc = low_get_pc (regcache);
4282
4283 if (debug_threads)
4284 {
4285 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4286 (long) lwp->stop_pc);
4287 }
4288 }
4289
4290 /* If we have pending signals, consume one if it can be delivered to
4291 the inferior. */
4292 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4293 {
4294 struct pending_signals **p_sig;
4295
4296 p_sig = &lwp->pending_signals;
4297 while ((*p_sig)->prev != NULL)
4298 p_sig = &(*p_sig)->prev;
4299
4300 signal = (*p_sig)->signal;
4301 if ((*p_sig)->info.si_signo != 0)
4302 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4303 &(*p_sig)->info);
4304
4305 free (*p_sig);
4306 *p_sig = NULL;
4307 }
4308
4309 if (debug_threads)
4310 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4311 lwpid_of (thread), step ? "step" : "continue", signal,
4312 lwp->stop_expected ? "expected" : "not expected");
4313
4314 low_prepare_to_resume (lwp);
4315
4316 regcache_invalidate_thread (thread);
4317 errno = 0;
4318 lwp->stepping = step;
4319 if (step)
4320 ptrace_request = PTRACE_SINGLESTEP;
4321 else if (gdb_catching_syscalls_p (lwp))
4322 ptrace_request = PTRACE_SYSCALL;
4323 else
4324 ptrace_request = PTRACE_CONT;
4325 ptrace (ptrace_request,
4326 lwpid_of (thread),
4327 (PTRACE_TYPE_ARG3) 0,
4328 /* Coerce to a uintptr_t first to avoid potential gcc warning
4329 of coercing an 8 byte integer to a 4 byte pointer. */
4330 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4331
4332 current_thread = saved_thread;
4333 if (errno)
4334 perror_with_name ("resuming thread");
4335
4336 /* Successfully resumed. Clear state that no longer makes sense,
4337 and mark the LWP as running. Must not do this before resuming
4338 otherwise if that fails other code will be confused. E.g., we'd
4339 later try to stop the LWP and hang forever waiting for a stop
4340 status. Note that we must not throw after this is cleared,
4341 otherwise handle_zombie_lwp_error would get confused. */
4342 lwp->stopped = 0;
4343 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4344 }
4345
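/* The default low_prepare_to_resume hook is a nop; low targets can
   override it to flush per-thread state (e.g. debug registers) just
   before the LWP is resumed.  */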
4346 void
4347 linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4348 {
4349 /* Nop. */
4350 }
4351
4352 /* Called when we try to resume a stopped LWP and that errors out. If
4353 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4354 or about to become), discard the error, clear any pending status
4355 the LWP may have, and return true (we'll collect the exit status
4356 soon enough). Otherwise, return false. */
4357
4358 static int
4359 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4360 {
4361 struct thread_info *thread = get_lwp_thread (lp);
4362
4363 /* If we get an error after resuming the LWP successfully, we'd
4364 confuse !T state for the LWP being gone. */
4365 gdb_assert (lp->stopped);
4366
4367 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4368 because even if ptrace failed with ESRCH, the tracee may be "not
4369 yet fully dead", but already refusing ptrace requests. In that
4370 case the tracee has 'R (Running)' state for a little bit
4371 (observed in Linux 3.18). See also the note on ESRCH in the
4372 ptrace(2) man page. Instead, check whether the LWP has any state
4373 other than ptrace-stopped. */
4374
4375 /* Don't assume anything if /proc/PID/status can't be read. */
4376 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4377 {
4378 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4379 lp->status_pending_p = 0;
4380 return 1;
4381 }
4382 return 0;
4383 }
4384
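/* Like resume_one_lwp_throw, but no error is raised if the LWP turns
   out to be gone already (see check_ptrace_stopped_lwp_gone).  */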
4385 void
4386 linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4387 siginfo_t *info)
4388 {
4389 try
4390 {
4391 resume_one_lwp_throw (lwp, step, signal, info);
4392 }
4393 catch (const gdb_exception_error &ex)
4394 {
4395 if (!check_ptrace_stopped_lwp_gone (lwp))
4396 throw;
4397 }
4398 }
4399
4400 /* This function is called once per thread via for_each_thread.
4401 We look up which resume request applies to THREAD and mark it with a
4402 pointer to the appropriate resume request.
4403
4404 This algorithm is O(threads * resume elements), but resume elements
4405 is small (and will remain small at least until GDB supports thread
4406 suspension). */
4407
4408 static void
4409 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4410 {
4411 struct lwp_info *lwp = get_thread_lwp (thread);
4412
4413 for (int ndx = 0; ndx < n; ndx++)
4414 {
4415 ptid_t ptid = resume[ndx].thread;
4416 if (ptid == minus_one_ptid
4417 || ptid == thread->id
4418 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4419 of PID'. */
4420 || (ptid.pid () == pid_of (thread)
4421 && (ptid.is_pid ()
4422 || ptid.lwp () == -1)))
4423 {
4424 if (resume[ndx].kind == resume_stop
4425 && thread->last_resume_kind == resume_stop)
4426 {
4427 if (debug_threads)
4428 debug_printf ("already %s LWP %ld at GDB's request\n",
4429 (thread->last_status.kind
4430 == TARGET_WAITKIND_STOPPED)
4431 ? "stopped"
4432 : "stopping",
4433 lwpid_of (thread));
4434
4435 continue;
4436 }
4437
4438 /* Ignore (wildcard) resume requests for already-resumed
4439 threads. */
4440 if (resume[ndx].kind != resume_stop
4441 && thread->last_resume_kind != resume_stop)
4442 {
4443 if (debug_threads)
4444 debug_printf ("already %s LWP %ld at GDB's request\n",
4445 (thread->last_resume_kind
4446 == resume_step)
4447 ? "stepping"
4448 : "continuing",
4449 lwpid_of (thread));
4450 continue;
4451 }
4452
4453 /* Don't let wildcard resumes resume fork children that GDB
4454 does not yet know are new fork children. */
4455 if (lwp->fork_relative != NULL)
4456 {
4457 struct lwp_info *rel = lwp->fork_relative;
4458
4459 if (rel->status_pending_p
4460 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4461 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4462 {
4463 if (debug_threads)
4464 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4465 lwpid_of (thread));
4466 continue;
4467 }
4468 }
4469
4470 /* If the thread has a pending event that has already been
4471 reported to GDBserver core, but GDB has not pulled the
4472 event out of the vStopped queue yet, likewise, ignore the
4473 (wildcard) resume request. */
4474 if (in_queued_stop_replies (thread->id))
4475 {
4476 if (debug_threads)
4477 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4478 lwpid_of (thread));
4479 continue;
4480 }
4481
4482 lwp->resume = &resume[ndx];
4483 thread->last_resume_kind = lwp->resume->kind;
4484
4485 lwp->step_range_start = lwp->resume->step_range_start;
4486 lwp->step_range_end = lwp->resume->step_range_end;
4487
4488 /* If we had a deferred signal to report, dequeue one now.
4489 This can happen if LWP gets more than one signal while
4490 trying to get out of a jump pad. */
4491 if (lwp->stopped
4492 && !lwp->status_pending_p
4493 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4494 {
4495 lwp->status_pending_p = 1;
4496
4497 if (debug_threads)
4498 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4499 "leaving status pending.\n",
4500 WSTOPSIG (lwp->status_pending),
4501 lwpid_of (thread));
4502 }
4503
4504 return;
4505 }
4506 }
4507
4508 /* No resume action for this thread. */
4509 lwp->resume = NULL;
4510 }
4511
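/* Return true if THREAD is slated to be resumed but has a pending
   status, in which case we can report that status instead of resuming
   it.  */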
4512 bool
4513 linux_process_target::resume_status_pending (thread_info *thread)
4514 {
4515 struct lwp_info *lwp = get_thread_lwp (thread);
4516
4517 /* LWPs which will not be resumed are not interesting, because
4518 we might not wait for them next time through linux_wait. */
4519 if (lwp->resume == NULL)
4520 return false;
4521
4522 return thread_still_has_status_pending (thread);
4523 }
4524
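/* Return true if THREAD is stopped at a breakpoint that it needs to
   step over before it can be resumed.  */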
4525 bool
4526 linux_process_target::thread_needs_step_over (thread_info *thread)
4527 {
4528 struct lwp_info *lwp = get_thread_lwp (thread);
4529 struct thread_info *saved_thread;
4530 CORE_ADDR pc;
4531 struct process_info *proc = get_thread_process (thread);
4532
4533 /* GDBserver is skipping the extra traps from the wrapper program;
4534 no step-over is needed.  */
4535 if (proc->tdesc == NULL)
4536 return false;
4537
4538 /* LWPs which will not be resumed are not interesting, because we
4539 might not wait for them next time through linux_wait. */
4540
4541 if (!lwp->stopped)
4542 {
4543 if (debug_threads)
4544 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4545 lwpid_of (thread));
4546 return false;
4547 }
4548
4549 if (thread->last_resume_kind == resume_stop)
4550 {
4551 if (debug_threads)
4552 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4553 " stopped\n",
4554 lwpid_of (thread));
4555 return false;
4556 }
4557
4558 gdb_assert (lwp->suspended >= 0);
4559
4560 if (lwp->suspended)
4561 {
4562 if (debug_threads)
4563 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4564 lwpid_of (thread));
4565 return false;
4566 }
4567
4568 if (lwp->status_pending_p)
4569 {
4570 if (debug_threads)
4571 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4572 " status.\n",
4573 lwpid_of (thread));
4574 return false;
4575 }
4576
4577 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4578 or we have. */
4579 pc = get_pc (lwp);
4580
4581 /* If the PC has changed since we stopped, then don't do anything,
4582 and let the breakpoint/tracepoint be hit. This happens if, for
4583 instance, GDB handled the decr_pc_after_break subtraction itself,
4584 GDB is OOL stepping this thread, or the user has issued a "jump"
4585 command, or poked thread's registers herself. */
4586 if (pc != lwp->stop_pc)
4587 {
4588 if (debug_threads)
4589 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4590 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4591 lwpid_of (thread),
4592 paddress (lwp->stop_pc), paddress (pc));
4593 return false;
4594 }
4595
4596 /* On software single step target, resume the inferior with signal
4597 rather than stepping over. */
4598 if (supports_software_single_step ()
4599 && lwp->pending_signals != NULL
4600 && lwp_signal_can_be_delivered (lwp))
4601 {
4602 if (debug_threads)
4603 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4604 " signals.\n",
4605 lwpid_of (thread));
4606
4607 return false;
4608 }
4609
4610 saved_thread = current_thread;
4611 current_thread = thread;
4612
4613 /* We can only step over breakpoints we know about. */
4614 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4615 {
4616 /* Don't step over a breakpoint that GDB expects to hit,
4617 though.  If the condition is evaluated on the target's side
4618 and it evaluates to false, we do step over the breakpoint.  */
4619 if (gdb_breakpoint_here (pc)
4620 && gdb_condition_true_at_breakpoint (pc)
4621 && gdb_no_commands_at_breakpoint (pc))
4622 {
4623 if (debug_threads)
4624 debug_printf ("Need step over [LWP %ld]? yes, but found"
4625 " GDB breakpoint at 0x%s; skipping step over\n",
4626 lwpid_of (thread), paddress (pc));
4627
4628 current_thread = saved_thread;
4629 return false;
4630 }
4631 else
4632 {
4633 if (debug_threads)
4634 debug_printf ("Need step over [LWP %ld]? yes, "
4635 "found breakpoint at 0x%s\n",
4636 lwpid_of (thread), paddress (pc));
4637
4638 /* We've found an lwp that needs stepping over --- return
4639 true so that find_thread stops looking.  */
4640 current_thread = saved_thread;
4641
4642 return true;
4643 }
4644 }
4645
4646 current_thread = saved_thread;
4647
4648 if (debug_threads)
4649 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4650 " at 0x%s\n",
4651 lwpid_of (thread), paddress (pc));
4652
4653 return false;
4654 }
4655
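/* Begin a step-over of the breakpoint at LWP's PC: stop all other
   threads, uninsert the breakpoint, and single-step LWP past it.
   step_over_bkpt records which LWP we are then waiting on.  */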
4656 void
4657 linux_process_target::start_step_over (lwp_info *lwp)
4658 {
4659 struct thread_info *thread = get_lwp_thread (lwp);
4660 struct thread_info *saved_thread;
4661 CORE_ADDR pc;
4662 int step;
4663
4664 if (debug_threads)
4665 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4666 lwpid_of (thread));
4667
4668 stop_all_lwps (1, lwp);
4669
4670 if (lwp->suspended != 0)
4671 {
4672 internal_error (__FILE__, __LINE__,
4673 "LWP %ld suspended=%d\n", lwpid_of (thread),
4674 lwp->suspended);
4675 }
4676
4677 if (debug_threads)
4678 debug_printf ("Done stopping all threads for step-over.\n");
4679
4680 /* Note, we should always reach here with an already adjusted PC,
4681 either by GDB (if we're resuming due to GDB's request), or by our
4682 caller, if we just finished handling an internal breakpoint GDB
4683 shouldn't care about. */
4684 pc = get_pc (lwp);
4685
4686 saved_thread = current_thread;
4687 current_thread = thread;
4688
4689 lwp->bp_reinsert = pc;
4690 uninsert_breakpoints_at (pc);
4691 uninsert_fast_tracepoint_jumps_at (pc);
4692
4693 step = single_step (lwp);
4694
4695 current_thread = saved_thread;
4696
4697 resume_one_lwp (lwp, step, 0, NULL);
4698
4699 /* Require next event from this LWP. */
4700 step_over_bkpt = thread->id;
4701 }
4702
4703 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4704 start_step_over, if still there, and delete any single-step
4705 breakpoints we've set, on non hardware single-step targets. */
4706
4707 static int
4708 finish_step_over (struct lwp_info *lwp)
4709 {
4710 if (lwp->bp_reinsert != 0)
4711 {
4712 struct thread_info *saved_thread = current_thread;
4713
4714 if (debug_threads)
4715 debug_printf ("Finished step over.\n");
4716
4717 current_thread = get_lwp_thread (lwp);
4718
4719 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4720 may be no breakpoint to reinsert there by now. */
4721 reinsert_breakpoints_at (lwp->bp_reinsert);
4722 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4723
4724 lwp->bp_reinsert = 0;
4725
4726 /* Delete any single-step breakpoints. No longer needed. We
4727 don't have to worry about other threads hitting this trap,
4728 and later not being able to explain it, because we were
4729 stepping over a breakpoint, and we hold all threads but
4730 LWP stopped while doing that. */
4731 if (!can_hardware_single_step ())
4732 {
4733 gdb_assert (has_single_step_breakpoints (current_thread));
4734 delete_single_step_breakpoints (current_thread);
4735 }
4736
4737 step_over_bkpt = null_ptid;
4738 current_thread = saved_thread;
4739 return 1;
4740 }
4741 else
4742 return 0;
4743 }
4744
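/* If a step-over is in progress, pull events until it finishes, then
   clean up after it.  Used e.g. before detaching.  */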
4745 void
4746 linux_process_target::complete_ongoing_step_over ()
4747 {
4748 if (step_over_bkpt != null_ptid)
4749 {
4750 struct lwp_info *lwp;
4751 int wstat;
4752 int ret;
4753
4754 if (debug_threads)
4755 debug_printf ("detach: step over in progress, finish it first\n");
4756
4757 /* Passing NULL_PTID as filter indicates we want all events to
4758 be left pending. Eventually this returns when there are no
4759 unwaited-for children left. */
4760 ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4761 __WALL);
4762 gdb_assert (ret == -1);
4763
4764 lwp = find_lwp_pid (step_over_bkpt);
4765 if (lwp != NULL)
4766 finish_step_over (lwp);
4767 step_over_bkpt = null_ptid;
4768 unsuspend_all_lwps (lwp);
4769 }
4770 }
4771
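/* Act on the resume request recorded for THREAD by
   linux_set_resume_request, if any.  If LEAVE_ALL_STOPPED is true, only
   queue signals; don't actually resume the thread.  */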
4772 void
4773 linux_process_target::resume_one_thread (thread_info *thread,
4774 bool leave_all_stopped)
4775 {
4776 struct lwp_info *lwp = get_thread_lwp (thread);
4777 int leave_pending;
4778
4779 if (lwp->resume == NULL)
4780 return;
4781
4782 if (lwp->resume->kind == resume_stop)
4783 {
4784 if (debug_threads)
4785 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4786
4787 if (!lwp->stopped)
4788 {
4789 if (debug_threads)
4790 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4791
4792 /* Stop the thread, and wait for the event asynchronously,
4793 through the event loop. */
4794 send_sigstop (lwp);
4795 }
4796 else
4797 {
4798 if (debug_threads)
4799 debug_printf ("already stopped LWP %ld\n",
4800 lwpid_of (thread));
4801
4802 /* The LWP may have been stopped in an internal event that
4803 was not meant to be notified back to GDB (e.g., gdbserver
4804 breakpoint), so we should be reporting a stop event in
4805 this case too. */
4806
4807 /* If the thread already has a pending SIGSTOP, this is a
4808 no-op. Otherwise, something later will presumably resume
4809 the thread and this will cause it to cancel any pending
4810 operation, due to last_resume_kind == resume_stop. If
4811 the thread already has a pending status to report, we
4812 will still report it the next time we wait - see
4813 status_pending_p_callback. */
4814
4815 /* If we already have a pending signal to report, then
4816 there's no need to queue a SIGSTOP, as this means we're
4817 midway through moving the LWP out of the jumppad, and we
4818 will report the pending signal as soon as that is
4819 finished. */
4820 if (lwp->pending_signals_to_report == NULL)
4821 send_sigstop (lwp);
4822 }
4823
4824 /* For stop requests, we're done. */
4825 lwp->resume = NULL;
4826 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4827 return;
4828 }
4829
4830 /* If this thread which is about to be resumed has a pending status,
4831 then don't resume it - we can just report the pending status.
4832 Likewise if it is suspended, because e.g., another thread is
4833 stepping past a breakpoint. Make sure to queue any signals that
4834 would otherwise be sent. In all-stop mode, we do this decision
4835 based on if *any* thread has a pending status. If there's a
4836 thread that needs the step-over-breakpoint dance, then don't
4837 resume any other thread but that particular one. */
4838 leave_pending = (lwp->suspended
4839 || lwp->status_pending_p
4840 || leave_all_stopped);
4841
4842 /* If we have a new signal, enqueue the signal. */
4843 if (lwp->resume->sig != 0)
4844 {
4845 siginfo_t info, *info_p;
4846
4847 /* If this is the same signal we were previously stopped by,
4848 make sure to queue its siginfo. */
4849 if (WIFSTOPPED (lwp->last_status)
4850 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4851 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4852 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4853 info_p = &info;
4854 else
4855 info_p = NULL;
4856
4857 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4858 }
4859
4860 if (!leave_pending)
4861 {
4862 if (debug_threads)
4863 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4864
4865 proceed_one_lwp (thread, NULL);
4866 }
4867 else
4868 {
4869 if (debug_threads)
4870 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4871 }
4872
4873 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4874 lwp->resume = NULL;
4875 }
4876
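/* Implementation of the resume target op: record each resume request,
   decide whether anything should actually run (pending statuses and
   step-overs take priority), then act on each thread accordingly.  */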
4877 void
4878 linux_process_target::resume (thread_resume *resume_info, size_t n)
4879 {
4880 struct thread_info *need_step_over = NULL;
4881
4882 if (debug_threads)
4883 {
4884 debug_enter ();
4885 debug_printf ("linux_resume:\n");
4886 }
4887
4888 for_each_thread ([&] (thread_info *thread)
4889 {
4890 linux_set_resume_request (thread, resume_info, n);
4891 });
4892
4893 /* If there is a thread which would otherwise be resumed, which has
4894 a pending status, then don't resume any threads - we can just
4895 report the pending status. Make sure to queue any signals that
4896 would otherwise be sent. In non-stop mode, we'll apply this
4897 logic to each thread individually. We consume all pending events
4898 before considering to start a step-over (in all-stop). */
4899 bool any_pending = false;
4900 if (!non_stop)
4901 any_pending = find_thread ([this] (thread_info *thread)
4902 {
4903 return resume_status_pending (thread);
4904 }) != nullptr;
4905
4906 /* If there is a thread which would otherwise be resumed, which is
4907 stopped at a breakpoint that needs stepping over, then don't
4908 resume any threads - have it step over the breakpoint with all
4909 other threads stopped, then resume all threads again. Make sure
4910 to queue any signals that would otherwise be delivered or
4911 queued. */
4912 if (!any_pending && low_supports_breakpoints ())
4913 need_step_over = find_thread ([this] (thread_info *thread)
4914 {
4915 return thread_needs_step_over (thread);
4916 });
4917
4918 bool leave_all_stopped = (need_step_over != NULL || any_pending);
4919
4920 if (debug_threads)
4921 {
4922 if (need_step_over != NULL)
4923 debug_printf ("Not resuming all, need step over\n");
4924 else if (any_pending)
4925 debug_printf ("Not resuming, all-stop and found "
4926 "an LWP with pending status\n");
4927 else
4928 debug_printf ("Resuming, no pending status or step over needed\n");
4929 }
4930
4931 /* Even if we're leaving threads stopped, queue all signals we'd
4932 otherwise deliver. */
4933 for_each_thread ([&] (thread_info *thread)
4934 {
4935 resume_one_thread (thread, leave_all_stopped);
4936 });
4937
4938 if (need_step_over)
4939 start_step_over (get_thread_lwp (need_step_over));
4940
4941 if (debug_threads)
4942 {
4943 debug_printf ("linux_resume done\n");
4944 debug_exit ();
4945 }
4946
4947 /* We may have events that were pending that can/should be sent to
4948 the client now. Trigger a linux_wait call. */
4949 if (target_is_async_p ())
4950 async_file_mark ();
4951 }
4952
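/* Re-resume THREAD's LWP after it was momentarily paused (e.g. for a
   step-over), unless it is EXCEPT, already running, suspended, meant to
   stay stopped, or has a pending status to report.  */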
4953 void
4954 linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4955 {
4956 struct lwp_info *lwp = get_thread_lwp (thread);
4957 int step;
4958
4959 if (lwp == except)
4960 return;
4961
4962 if (debug_threads)
4963 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4964
4965 if (!lwp->stopped)
4966 {
4967 if (debug_threads)
4968 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4969 return;
4970 }
4971
4972 if (thread->last_resume_kind == resume_stop
4973 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4974 {
4975 if (debug_threads)
4976 debug_printf ("   client wants LWP %ld to remain stopped\n",
4977 lwpid_of (thread));
4978 return;
4979 }
4980
4981 if (lwp->status_pending_p)
4982 {
4983 if (debug_threads)
4984 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4985 lwpid_of (thread));
4986 return;
4987 }
4988
4989 gdb_assert (lwp->suspended >= 0);
4990
4991 if (lwp->suspended)
4992 {
4993 if (debug_threads)
4994 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4995 return;
4996 }
4997
4998 if (thread->last_resume_kind == resume_stop
4999 && lwp->pending_signals_to_report == NULL
5000 && (lwp->collecting_fast_tracepoint
5001 == fast_tpoint_collect_result::not_collecting))
5002 {
5003 /* We haven't reported this LWP as stopped yet (otherwise, the
5004 last_status.kind check above would catch it, and we wouldn't
5005 reach here).  This LWP may have been momentarily paused by a
5006 stop_all_lwps call while handling for example, another LWP's
5007 step-over. In that case, the pending expected SIGSTOP signal
5008 that was queued at vCont;t handling time will have already
5009 been consumed by wait_for_sigstop, and so we need to requeue
5010 another one here. Note that if the LWP already has a SIGSTOP
5011 pending, this is a no-op. */
5012
5013 if (debug_threads)
5014 debug_printf ("Client wants LWP %ld to stop. "
5015 "Making sure it has a SIGSTOP pending\n",
5016 lwpid_of (thread));
5017
5018 send_sigstop (lwp);
5019 }
5020
5021 if (thread->last_resume_kind == resume_step)
5022 {
5023 if (debug_threads)
5024 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5025 lwpid_of (thread));
5026
5027 /* If resume_step is requested by GDB, install single-step
5028 breakpoints when the thread is about to be actually resumed if
5029 the single-step breakpoints weren't removed. */
5030 if (supports_software_single_step ()
5031 && !has_single_step_breakpoints (thread))
5032 install_software_single_step_breakpoints (lwp);
5033
5034 step = maybe_hw_step (thread);
5035 }
5036 else if (lwp->bp_reinsert != 0)
5037 {
5038 if (debug_threads)
5039 debug_printf (" stepping LWP %ld, reinsert set\n",
5040 lwpid_of (thread));
5041
5042 step = maybe_hw_step (thread);
5043 }
5044 else
5045 step = 0;
5046
5047 resume_one_lwp (lwp, step, 0, NULL);
5048 }
5049
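/* Decrement the suspend count of THREAD's LWP, and proceed it if
   appropriate.  Counterpart of suspend_and_send_sigstop.  */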
5050 void
5051 linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
5052 lwp_info *except)
5053 {
5054 struct lwp_info *lwp = get_thread_lwp (thread);
5055
5056 if (lwp == except)
5057 return;
5058
5059 lwp_suspended_decr (lwp);
5060
5061 proceed_one_lwp (thread, except);
5062 }
5063
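/* Resume all LWPs, except that if some thread still needs a step-over,
   start that step-over instead and leave everything else stopped.  */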
5064 void
5065 linux_process_target::proceed_all_lwps ()
5066 {
5067 struct thread_info *need_step_over;
5068
5069 /* If there is a thread which would otherwise be resumed, which is
5070 stopped at a breakpoint that needs stepping over, then don't
5071 resume any threads - have it step over the breakpoint with all
5072 other threads stopped, then resume all threads again. */
5073
5074 if (low_supports_breakpoints ())
5075 {
5076 need_step_over = find_thread ([this] (thread_info *thread)
5077 {
5078 return thread_needs_step_over (thread);
5079 });
5080
5081 if (need_step_over != NULL)
5082 {
5083 if (debug_threads)
5084 debug_printf ("proceed_all_lwps: found "
5085 "thread %ld needing a step-over\n",
5086 lwpid_of (need_step_over));
5087
5088 start_step_over (get_thread_lwp (need_step_over));
5089 return;
5090 }
5091 }
5092
5093 if (debug_threads)
5094 debug_printf ("Proceeding, no step-over needed\n");
5095
5096 for_each_thread ([this] (thread_info *thread)
5097 {
5098 proceed_one_lwp (thread, NULL);
5099 });
5100 }
5101
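/* Undo a previous stop_all_lwps: proceed every LWP except EXCEPT,
   first dropping one suspend count if UNSUSPEND is set.  */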
5102 void
5103 linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
5104 {
5105 if (debug_threads)
5106 {
5107 debug_enter ();
5108 if (except)
5109 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5110 lwpid_of (get_lwp_thread (except)));
5111 else
5112 debug_printf ("unstopping all lwps\n");
5113 }
5114
5115 if (unsuspend)
5116 for_each_thread ([&] (thread_info *thread)
5117 {
5118 unsuspend_and_proceed_one_lwp (thread, except);
5119 });
5120 else
5121 for_each_thread ([&] (thread_info *thread)
5122 {
5123 proceed_one_lwp (thread, except);
5124 });
5125
5126 if (debug_threads)
5127 {
5128 debug_printf ("unstop_all_lwps done\n");
5129 debug_exit ();
5130 }
5131 }
5132
5133
5134 #ifdef HAVE_LINUX_REGSETS
5135
5136 #define use_linux_regsets 1
5137
5138 /* Returns true if REGSET has been disabled. */
5139
5140 static int
5141 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5142 {
5143 return (info->disabled_regsets != NULL
5144 && info->disabled_regsets[regset - info->regsets]);
5145 }
5146
5147 /* Disable REGSET. */
5148
5149 static void
5150 disable_regset (struct regsets_info *info, struct regset_info *regset)
5151 {
5152 int dr_offset;
5153
5154 dr_offset = regset - info->regsets;
5155 if (info->disabled_regsets == NULL)
5156 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5157 info->disabled_regsets[dr_offset] = 1;
5158 }
5159
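/* Fetch all registers covered by REGSETS_INFO into REGCACHE, using one
   ptrace call per regset.  Return 0 if the general registers were among
   those fetched, 1 otherwise, in which case the caller falls back to
   fetching registers individually.  */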
5160 static int
5161 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5162 struct regcache *regcache)
5163 {
5164 struct regset_info *regset;
5165 int saw_general_regs = 0;
5166 int pid;
5167 struct iovec iov;
5168
5169 pid = lwpid_of (current_thread);
5170 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5171 {
5172 void *buf, *data;
5173 int nt_type, res;
5174
5175 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5176 continue;
5177
5178 buf = xmalloc (regset->size);
5179
5180 nt_type = regset->nt_type;
5181 if (nt_type)
5182 {
5183 iov.iov_base = buf;
5184 iov.iov_len = regset->size;
5185 data = (void *) &iov;
5186 }
5187 else
5188 data = buf;
5189
5190 #ifndef __sparc__
5191 res = ptrace (regset->get_request, pid,
5192 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5193 #else
5194 res = ptrace (regset->get_request, pid, data, nt_type);
5195 #endif
5196 if (res < 0)
5197 {
5198 if (errno == EIO
5199 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5200 {
5201 /* If we get EIO on a regset, or an EINVAL and the regset is
5202 optional, do not try it again for this process mode. */
5203 disable_regset (regsets_info, regset);
5204 }
5205 else if (errno == ENODATA)
5206 {
5207 /* ENODATA may be returned if the regset is currently
5208 not "active". This can happen in normal operation,
5209 so suppress the warning in this case. */
5210 }
5211 else if (errno == ESRCH)
5212 {
5213 /* At this point, ESRCH should mean the process is
5214 already gone, in which case we simply ignore attempts
5215 to read its registers. */
5216 }
5217 else
5218 {
5219 char s[256];
5220 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5221 pid);
5222 perror (s);
5223 }
5224 }
5225 else
5226 {
5227 if (regset->type == GENERAL_REGS)
5228 saw_general_regs = 1;
5229 regset->store_function (regcache, buf);
5230 }
5231 free (buf);
5232 }
5233 if (saw_general_regs)
5234 return 0;
5235 else
5236 return 1;
5237 }
5238
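/* Write REGCACHE's contents back to the inferior, one ptrace call per
   regset.  Each regset is read first so that kernel state not tracked
   in the regcache is preserved.  Return 0 if the general registers were
   among those stored, 1 otherwise.  */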
5239 static int
5240 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5241 struct regcache *regcache)
5242 {
5243 struct regset_info *regset;
5244 int saw_general_regs = 0;
5245 int pid;
5246 struct iovec iov;
5247
5248 pid = lwpid_of (current_thread);
5249 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5250 {
5251 void *buf, *data;
5252 int nt_type, res;
5253
5254 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5255 || regset->fill_function == NULL)
5256 continue;
5257
5258 buf = xmalloc (regset->size);
5259
5260 /* First fill the buffer with the current register set contents,
5261 in case there are any items in the kernel's regset that are
5262 not in gdbserver's regcache. */
5263
5264 nt_type = regset->nt_type;
5265 if (nt_type)
5266 {
5267 iov.iov_base = buf;
5268 iov.iov_len = regset->size;
5269 data = (void *) &iov;
5270 }
5271 else
5272 data = buf;
5273
5274 #ifndef __sparc__
5275 res = ptrace (regset->get_request, pid,
5276 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5277 #else
5278 res = ptrace (regset->get_request, pid, data, nt_type);
5279 #endif
5280
5281 if (res == 0)
5282 {
5283 /* Then overlay our cached registers on that. */
5284 regset->fill_function (regcache, buf);
5285
5286 /* Only now do we write the register set. */
5287 #ifndef __sparc__
5288 res = ptrace (regset->set_request, pid,
5289 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5290 #else
5291 res = ptrace (regset->set_request, pid, data, nt_type);
5292 #endif
5293 }
5294
5295 if (res < 0)
5296 {
5297 if (errno == EIO
5298 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5299 {
5300 /* If we get EIO on a regset, or an EINVAL and the regset is
5301 optional, do not try it again for this process mode. */
5302 disable_regset (regsets_info, regset);
5303 }
5304 else if (errno == ESRCH)
5305 {
5306 /* At this point, ESRCH should mean the process is
5307 already gone, in which case we simply ignore attempts
5308 to change its registers. See also the related
5309 comment in resume_one_lwp. */
5310 free (buf);
5311 return 0;
5312 }
5313 else
5314 {
5315 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5316 }
5317 }
5318 else if (regset->type == GENERAL_REGS)
5319 saw_general_regs = 1;
5320 free (buf);
5321 }
5322 if (saw_general_regs)
5323 return 0;
5324 else
5325 return 1;
5326 }
5327
5328 #else /* !HAVE_LINUX_REGSETS */
5329
5330 #define use_linux_regsets 0
5331 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5332 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5333
5334 #endif
5335
5336 /* Return 1 if register REGNO is supported by one of the regset ptrace
5337 calls or 0 if it has to be transferred individually. */
5338
5339 static int
5340 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5341 {
5342 unsigned char mask = 1 << (regno % 8);
5343 size_t index = regno / 8;
5344
5345 return (use_linux_regsets
5346 && (regs_info->regset_bitmap == NULL
5347 || (regs_info->regset_bitmap[index] & mask) != 0));
5348 }
5349
5350 #ifdef HAVE_LINUX_USRREGS
5351
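/* Return the PTRACE_PEEKUSER/POKEUSER offset of register REGNUM in the
   inferior's USER area, as given by the usrregs map.  An offset of -1
   means the register cannot be transferred this way.  */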
5352 static int
5353 register_addr (const struct usrregs_info *usrregs, int regnum)
5354 {
5355 int addr;
5356
5357 if (regnum < 0 || regnum >= usrregs->num_regs)
5358 error ("Invalid register number %d.", regnum);
5359
5360 addr = usrregs->regmap[regnum];
5361
5362 return addr;
5363 }
5364
5365
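/* Fetch register REGNO from the inferior word-by-word with
   PTRACE_PEEKUSER and supply it to REGCACHE.  */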
5366 void
5367 linux_process_target::fetch_register (const usrregs_info *usrregs,
5368 regcache *regcache, int regno)
5369 {
5370 CORE_ADDR regaddr;
5371 int i, size;
5372 char *buf;
5373 int pid;
5374
5375 if (regno >= usrregs->num_regs)
5376 return;
5377 if (low_cannot_fetch_register (regno))
5378 return;
5379
5380 regaddr = register_addr (usrregs, regno);
5381 if (regaddr == -1)
5382 return;
5383
5384 size = ((register_size (regcache->tdesc, regno)
5385 + sizeof (PTRACE_XFER_TYPE) - 1)
5386 & -sizeof (PTRACE_XFER_TYPE));
5387 buf = (char *) alloca (size);
5388
5389 pid = lwpid_of (current_thread);
5390 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5391 {
5392 errno = 0;
5393 *(PTRACE_XFER_TYPE *) (buf + i) =
5394 ptrace (PTRACE_PEEKUSER, pid,
5395 /* Coerce to a uintptr_t first to avoid potential gcc warning
5396 of coercing an 8 byte integer to a 4 byte pointer. */
5397 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5398 regaddr += sizeof (PTRACE_XFER_TYPE);
5399 if (errno != 0)
5400 {
5401 /* Mark register REGNO unavailable. */
5402 supply_register (regcache, regno, NULL);
5403 return;
5404 }
5405 }
5406
5407 low_supply_ptrace_register (regcache, regno, buf);
5408 }
5409
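/* Write REGCACHE's value of register REGNO to the inferior
   word-by-word with PTRACE_POKEUSER.  */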
5410 void
5411 linux_process_target::store_register (const usrregs_info *usrregs,
5412 regcache *regcache, int regno)
5413 {
5414 CORE_ADDR regaddr;
5415 int i, size;
5416 char *buf;
5417 int pid;
5418
5419 if (regno >= usrregs->num_regs)
5420 return;
5421 if (low_cannot_store_register (regno))
5422 return;
5423
5424 regaddr = register_addr (usrregs, regno);
5425 if (regaddr == -1)
5426 return;
5427
5428 size = ((register_size (regcache->tdesc, regno)
5429 + sizeof (PTRACE_XFER_TYPE) - 1)
5430 & -sizeof (PTRACE_XFER_TYPE));
5431 buf = (char *) alloca (size);
5432 memset (buf, 0, size);
5433
5434 low_collect_ptrace_register (regcache, regno, buf);
5435
5436 pid = lwpid_of (current_thread);
5437 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5438 {
5439 errno = 0;
5440 ptrace (PTRACE_POKEUSER, pid,
5441 /* Coerce to a uintptr_t first to avoid potential gcc warning
5442 about coercing an 8 byte integer to a 4 byte pointer. */
5443 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5444 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5445 if (errno != 0)
5446 {
5447 /* At this point, ESRCH should mean the process is
5448 already gone, in which case we simply ignore attempts
5449 to change its registers. See also the related
5450 comment in resume_one_lwp. */
5451 if (errno == ESRCH)
5452 return;
5453 
5455 if (!low_cannot_store_register (regno))
5456 error ("writing register %d: %s", regno, safe_strerror (errno));
5457 }
5458 regaddr += sizeof (PTRACE_XFER_TYPE);
5459 }
5460 }
5461 #endif /* HAVE_LINUX_USRREGS */
5462
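/* Default implementations of the low_collect_ptrace_register and
   low_supply_ptrace_register hooks: transfer the register unmodified.
   Low targets can override these when the ptrace representation of a
   register differs from the regcache layout.  */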
5463 void
5464 linux_process_target::low_collect_ptrace_register (regcache *regcache,
5465 int regno, char *buf)
5466 {
5467 collect_register (regcache, regno, buf);
5468 }
5469
5470 void
5471 linux_process_target::low_supply_ptrace_register (regcache *regcache,
5472 int regno, const char *buf)
5473 {
5474 supply_register (regcache, regno, buf);
5475 }
5476
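/* Fetch registers individually via PTRACE_PEEKUSER (all usrregs if
   REGNO is -1; if ALL is zero, only those not covered by a regset).  */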
5477 void
5478 linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5479 regcache *regcache,
5480 int regno, int all)
5481 {
5482 #ifdef HAVE_LINUX_USRREGS
5483 struct usrregs_info *usr = regs_info->usrregs;
5484
5485 if (regno == -1)
5486 {
5487 for (regno = 0; regno < usr->num_regs; regno++)
5488 if (all || !linux_register_in_regsets (regs_info, regno))
5489 fetch_register (usr, regcache, regno);
5490 }
5491 else
5492 fetch_register (usr, regcache, regno);
5493 #endif
5494 }
5495
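/* Store registers individually via PTRACE_POKEUSER, analogously to
   usr_fetch_inferior_registers above.  */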
5496 void
5497 linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5498 regcache *regcache,
5499 int regno, int all)
5500 {
5501 #ifdef HAVE_LINUX_USRREGS
5502 struct usrregs_info *usr = regs_info->usrregs;
5503
5504 if (regno == -1)
5505 {
5506 for (regno = 0; regno < usr->num_regs; regno++)
5507 if (all || !linux_register_in_regsets (regs_info, regno))
5508 store_register (usr, regcache, regno);
5509 }
5510 else
5511 store_register (usr, regcache, regno);
5512 #endif
5513 }
5514
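/* Fetch registers (all of them if REGNO is -1): try the low target's
   hook first, then regsets, with PTRACE_PEEKUSER as the fallback for
   anything not covered.  */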
5515 void
5516 linux_process_target::fetch_registers (regcache *regcache, int regno)
5517 {
5518 int use_regsets;
5519 int all = 0;
5520 const regs_info *regs_info = get_regs_info ();
5521
5522 if (regno == -1)
5523 {
5524 if (regs_info->usrregs != NULL)
5525 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5526 low_fetch_register (regcache, regno);
5527
5528 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5529 if (regs_info->usrregs != NULL)
5530 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5531 }
5532 else
5533 {
5534 if (low_fetch_register (regcache, regno))
5535 return;
5536
5537 use_regsets = linux_register_in_regsets (regs_info, regno);
5538 if (use_regsets)
5539 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5540 regcache);
5541 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5542 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5543 }
5544 }
5545
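/* Store registers back to the inferior (all of them if REGNO is -1),
   mirroring the strategy of fetch_registers above.  */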
5546 void
5547 linux_process_target::store_registers (regcache *regcache, int regno)
5548 {
5549 int use_regsets;
5550 int all = 0;
5551 const regs_info *regs_info = get_regs_info ();
5552
5553 if (regno == -1)
5554 {
5555 all = regsets_store_inferior_registers (regs_info->regsets_info,
5556 regcache);
5557 if (regs_info->usrregs != NULL)
5558 usr_store_inferior_registers (regs_info, regcache, regno, all);
5559 }
5560 else
5561 {
5562 use_regsets = linux_register_in_regsets (regs_info, regno);
5563 if (use_regsets)
5564 all = regsets_store_inferior_registers (regs_info->regsets_info,
5565 regcache);
5566 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5567 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5568 }
5569 }
5570
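/* By default the low target has no special way to fetch a register;
   overriders return true if they handled REGNO themselves.  */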
5571 bool
5572 linux_process_target::low_fetch_register (regcache *regcache, int regno)
5573 {
5574 return false;
5575 }
5576
5577 /* A wrapper for the read_memory target op. */
5578
5579 static int
5580 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5581 {
5582 return the_target->read_memory (memaddr, myaddr, len);
5583 }
5584
5585 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5586 to debugger memory starting at MYADDR. */
5587
5588 int
5589 linux_process_target::read_memory (CORE_ADDR memaddr,
5590 unsigned char *myaddr, int len)
5591 {
5592 int pid = lwpid_of (current_thread);
5593 PTRACE_XFER_TYPE *buffer;
5594 CORE_ADDR addr;
5595 int count;
5596 char filename[64];
5597 int i;
5598 int ret;
5599 int fd;
5600
5601 /* Try using /proc. Don't bother for one word. */
5602 if (len >= 3 * sizeof (long))
5603 {
5604 int bytes;
5605
5606 /* We could keep this file open and cache it - possibly one per
5607 thread. That requires some juggling, but is even faster. */
5608 sprintf (filename, "/proc/%d/mem", pid);
5609 fd = open (filename, O_RDONLY | O_LARGEFILE);
5610 if (fd == -1)
5611 goto no_proc;
5612
5613 /* If pread64 is available, use it. It's faster if the kernel
5614 supports it (only one syscall), and it's 64-bit safe even on
5615 32-bit platforms (for instance, SPARC debugging a SPARC64
5616 application). */
5617 #ifdef HAVE_PREAD64
5618 bytes = pread64 (fd, myaddr, len, memaddr);
5619 #else
5620 bytes = -1;
5621 if (lseek (fd, memaddr, SEEK_SET) != -1)
5622 bytes = read (fd, myaddr, len);
5623 #endif
5624
5625 close (fd);
5626 if (bytes == len)
5627 return 0;
5628
5629 /* Some data was read, we'll try to get the rest with ptrace. */
5630 if (bytes > 0)
5631 {
5632 memaddr += bytes;
5633 myaddr += bytes;
5634 len -= bytes;
5635 }
5636 }
5637
5638 no_proc:
5639 /* Round starting address down to longword boundary. */
5640 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5641 /* Round ending address up; get number of longwords that makes. */
5642 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5643 / sizeof (PTRACE_XFER_TYPE));
5644 /* Allocate buffer of that many longwords. */
5645 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5646
5647 /* Read all the longwords.  */
5648 errno = 0;
5649 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5650 {
5651 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5652 about coercing an 8 byte integer to a 4 byte pointer. */
5653 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5654 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5655 (PTRACE_TYPE_ARG4) 0);
5656 if (errno)
5657 break;
5658 }
5659 ret = errno;
5660
5661 /* Copy appropriate bytes out of the buffer. */
5662 if (i > 0)
5663 {
5664 i *= sizeof (PTRACE_XFER_TYPE);
5665 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5666 memcpy (myaddr,
5667 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5668 i < len ? i : len);
5669 }
5670
5671 return ret;
5672 }
5673
5674 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5675 memory at MEMADDR. On failure (cannot write to the inferior)
5676 returns the value of errno. Always succeeds if LEN is zero. */
5677
5678 int
5679 linux_process_target::write_memory (CORE_ADDR memaddr,
5680 const unsigned char *myaddr, int len)
5681 {
5682 int i;
5683 /* Round starting address down to longword boundary. */
5684 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5685 /* Round ending address up; get number of longwords that makes. */
5686 int count
5687 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5688 / sizeof (PTRACE_XFER_TYPE);
5689
5690 /* Allocate buffer of that many longwords. */
5691 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5692
5693 int pid = lwpid_of (current_thread);
5694
5695 if (len == 0)
5696 {
5697 /* Zero length write always succeeds. */
5698 return 0;
5699 }
5700
5701 if (debug_threads)
5702 {
5703 /* Dump up to four bytes. */
5704 char str[4 * 2 + 1];
5705 char *p = str;
5706 int dump = len < 4 ? len : 4;
5707
5708 for (i = 0; i < dump; i++)
5709 {
5710 sprintf (p, "%02x", myaddr[i]);
5711 p += 2;
5712 }
5713 *p = '\0';
5714
5715 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5716 str, (long) memaddr, pid);
5717 }
5718
5719 /* Fill start and end extra bytes of buffer with existing memory data. */
5720
5721 errno = 0;
5722 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5723 about coercing an 8 byte integer to a 4 byte pointer. */
5724 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5725 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5726 (PTRACE_TYPE_ARG4) 0);
5727 if (errno)
5728 return errno;
5729
5730 if (count > 1)
5731 {
5732 errno = 0;
5733 buffer[count - 1]
5734 = ptrace (PTRACE_PEEKTEXT, pid,
5735 /* Coerce to a uintptr_t first to avoid potential gcc warning
5736 about coercing an 8 byte integer to a 4 byte pointer. */
5737 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5738 * sizeof (PTRACE_XFER_TYPE)),
5739 (PTRACE_TYPE_ARG4) 0);
5740 if (errno)
5741 return errno;
5742 }
5743
5744 /* Copy data to be written over corresponding part of buffer. */
5745
5746 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5747 myaddr, len);
5748
5749 /* Write the entire buffer. */
5750
5751 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5752 {
5753 errno = 0;
5754 ptrace (PTRACE_POKETEXT, pid,
5755 /* Coerce to a uintptr_t first to avoid potential gcc warning
5756 about coercing an 8 byte integer to a 4 byte pointer. */
5757 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5758 (PTRACE_TYPE_ARG4) buffer[i]);
5759 if (errno)
5760 return errno;
5761 }
5762
5763 return 0;
5764 }
5765
5766 void
5767 linux_process_target::look_up_symbols ()
5768 {
5769 #ifdef USE_THREAD_DB
5770 struct process_info *proc = current_process ();
5771
5772 if (proc->priv->thread_db != NULL)
5773 return;
5774
5775 thread_db_init ();
5776 #endif
5777 }
5778
5779 void
5780 linux_process_target::request_interrupt ()
5781 {
5782 /* Send a SIGINT to the process group. This acts just as if the user
5783 had typed a ^C on the controlling terminal. */
5784 ::kill (-signal_pid, SIGINT);
5785 }
5786
5787 bool
5788 linux_process_target::supports_read_auxv ()
5789 {
5790 return true;
5791 }
5792
5793 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5794 to debugger memory starting at MYADDR. */
5795
5796 int
5797 linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5798 unsigned int len)
5799 {
5800 char filename[PATH_MAX];
5801 int fd, n;
5802 int pid = lwpid_of (current_thread);
5803
5804 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5805
5806 fd = open (filename, O_RDONLY);
5807 if (fd < 0)
5808 return -1;
5809
5810 if (offset != (CORE_ADDR) 0
5811 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5812 n = -1;
5813 else
5814 n = read (fd, myaddr, len);
5815
5816 close (fd);
5817
5818 return n;
5819 }
5820
5821 int
5822 linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5823 int size, raw_breakpoint *bp)
5824 {
5825 if (type == raw_bkpt_type_sw)
5826 return insert_memory_breakpoint (bp);
5827 else
5828 return low_insert_point (type, addr, size, bp);
5829 }
5830
5831 int
5832 linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5833 int size, raw_breakpoint *bp)
5834 {
5835 /* Unsupported (see target.h). */
5836 return 1;
5837 }
5838
5839 int
5840 linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5841 int size, raw_breakpoint *bp)
5842 {
5843 if (type == raw_bkpt_type_sw)
5844 return remove_memory_breakpoint (bp);
5845 else
5846 return low_remove_point (type, addr, size, bp);
5847 }
5848
5849 int
5850 linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5851 int size, raw_breakpoint *bp)
5852 {
5853 /* Unsupported (see target.h). */
5854 return 1;
5855 }
5856
5857 /* Implement the stopped_by_sw_breakpoint target_ops
5858 method. */
5859
5860 bool
5861 linux_process_target::stopped_by_sw_breakpoint ()
5862 {
5863 struct lwp_info *lwp = get_thread_lwp (current_thread);
5864
5865 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5866 }
5867
5868 /* Implement the supports_stopped_by_sw_breakpoint target_ops
5869 method. */
5870
5871 bool
5872 linux_process_target::supports_stopped_by_sw_breakpoint ()
5873 {
5874 return USE_SIGTRAP_SIGINFO;
5875 }
5876
5877 /* Implement the stopped_by_hw_breakpoint target_ops
5878 method. */
5879
5880 bool
5881 linux_process_target::stopped_by_hw_breakpoint ()
5882 {
5883 struct lwp_info *lwp = get_thread_lwp (current_thread);
5884
5885 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5886 }
5887
5888 /* Implement the supports_stopped_by_hw_breakpoint target_ops
5889 method. */
5890
5891 bool
5892 linux_process_target::supports_stopped_by_hw_breakpoint ()
5893 {
5894 return USE_SIGTRAP_SIGINFO;
5895 }
5896
5897 /* Implement the supports_hardware_single_step target_ops method. */
5898
5899 bool
5900 linux_process_target::supports_hardware_single_step ()
5901 {
5902 return can_hardware_single_step ();
5903 }
5904
5905 bool
5906 linux_process_target::stopped_by_watchpoint ()
5907 {
5908 struct lwp_info *lwp = get_thread_lwp (current_thread);
5909
5910 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5911 }
5912
5913 CORE_ADDR
5914 linux_process_target::stopped_data_address ()
5915 {
5916 struct lwp_info *lwp = get_thread_lwp (current_thread);
5917
5918 return lwp->stopped_data_address;
5919 }
5920
5921 /* This is only used for targets that define PT_TEXT_ADDR,
5922 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5923 target presumably acquires this information in some other way, such
5924 as via loadmaps. */
5925
5926 bool
5927 linux_process_target::supports_read_offsets ()
5928 {
5929 #ifdef SUPPORTS_READ_OFFSETS
5930 return true;
5931 #else
5932 return false;
5933 #endif
5934 }
5935
5936 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5937 to tell gdb about. */
5938
5939 int
5940 linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5941 {
5942 #ifdef SUPPORTS_READ_OFFSETS
5943 unsigned long text, text_end, data;
5944 int pid = lwpid_of (current_thread);
5945
5946 errno = 0;
5947
5948 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5949 (PTRACE_TYPE_ARG4) 0);
5950 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5951 (PTRACE_TYPE_ARG4) 0);
5952 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5953 (PTRACE_TYPE_ARG4) 0);
5954
5955 if (errno == 0)
5956 {
5957 /* Both text and data offsets produced at compile-time (and so
5958 used by gdb) are relative to the beginning of the program,
5959 with the data segment immediately following the text segment.
5960 However, the actual runtime layout in memory may put the data
5961 somewhere else, so when we send gdb a data base-address, we
5962 use the real data base address and subtract the compile-time
5963 data base-address from it (which is just the length of the
5964 text segment). BSS immediately follows data in both
5965 cases. */
5966 *text_p = text;
5967 *data_p = data - (text_end - text);
5968
5969 return 1;
5970 }
5971 return 0;
5972 #else
5973 gdb_assert_not_reached ("target op read_offsets not supported");
5974 #endif
5975 }
5976
5977 bool
5978 linux_process_target::supports_get_tls_address ()
5979 {
5980 #ifdef USE_THREAD_DB
5981 return true;
5982 #else
5983 return false;
5984 #endif
5985 }
5986
5987 int
5988 linux_process_target::get_tls_address (thread_info *thread,
5989 CORE_ADDR offset,
5990 CORE_ADDR load_module,
5991 CORE_ADDR *address)
5992 {
5993 #ifdef USE_THREAD_DB
5994 return thread_db_get_tls_address (thread, offset, load_module, address);
5995 #else
5996 return -1;
5997 #endif
5998 }
5999
6000 bool
6001 linux_process_target::supports_qxfer_osdata ()
6002 {
6003 return true;
6004 }
6005
6006 int
6007 linux_process_target::qxfer_osdata (const char *annex,
6008 unsigned char *readbuf,
6009 unsigned const char *writebuf,
6010 CORE_ADDR offset, int len)
6011 {
6012 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6013 }
6014
6015 void
6016 linux_process_target::siginfo_fixup (siginfo_t *siginfo,
6017 gdb_byte *inf_siginfo, int direction)
6018 {
6019 bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
6020
6021 /* If there was no callback, or the callback didn't do anything,
6022 then just do a straight memcpy. */
6023 if (!done)
6024 {
6025 if (direction == 1)
6026 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6027 else
6028 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6029 }
6030 }
6031
6032 bool
6033 linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
6034 int direction)
6035 {
6036 return false;
6037 }
6038
6039 bool
6040 linux_process_target::supports_qxfer_siginfo ()
6041 {
6042 return true;
6043 }
6044
6045 int
6046 linux_process_target::qxfer_siginfo (const char *annex,
6047 unsigned char *readbuf,
6048 unsigned const char *writebuf,
6049 CORE_ADDR offset, int len)
6050 {
6051 int pid;
6052 siginfo_t siginfo;
6053 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6054
6055 if (current_thread == NULL)
6056 return -1;
6057
6058 pid = lwpid_of (current_thread);
6059
6060 if (debug_threads)
6061 debug_printf ("%s siginfo for lwp %d.\n",
6062 readbuf != NULL ? "Reading" : "Writing",
6063 pid);
6064
6065 if (offset >= sizeof (siginfo))
6066 return -1;
6067
6068 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6069 return -1;
6070
6071 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6072 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6073 inferior with a 64-bit GDBSERVER should look the same as debugging it
6074 with a 32-bit GDBSERVER, we need to convert it. */
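/* For instance, a SIGSEGV's si_addr occupies 8 bytes at an
8-byte-aligned offset in the 64-bit siginfo_t layout, but 4 bytes at
a 4-byte-aligned offset in the 32-bit one. */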
6075 siginfo_fixup (&siginfo, inf_siginfo, 0);
6076
6077 if (offset + len > sizeof (siginfo))
6078 len = sizeof (siginfo) - offset;
6079
6080 if (readbuf != NULL)
6081 memcpy (readbuf, inf_siginfo + offset, len);
6082 else
6083 {
6084 memcpy (inf_siginfo + offset, writebuf, len);
6085
6086 /* Convert back to ptrace layout before flushing it out. */
6087 siginfo_fixup (&siginfo, inf_siginfo, 1);
6088
6089 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6090 return -1;
6091 }
6092
6093 return len;
6094 }
6095
6096 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6097 it lets us notice when children change state; and it acts as the
6098 handler for the sigsuspend in my_waitpid. */
6099
6100 static void
6101 sigchld_handler (int signo)
6102 {
6103 int old_errno = errno;
6104
6105 if (debug_threads)
6106 {
6107 do
6108 {
6109 /* Use the async signal safe debug function. */
6110 if (debug_write ("sigchld_handler\n",
6111 sizeof ("sigchld_handler\n") - 1) < 0)
6112 break; /* just ignore */
6113 } while (0);
6114 }
6115
6116 if (target_is_async_p ())
6117 async_file_mark (); /* trigger a linux_wait */
6118
6119 errno = old_errno;
6120 }
6121
6122 bool
6123 linux_process_target::supports_non_stop ()
6124 {
6125 return true;
6126 }
6127
6128 bool
6129 linux_process_target::async (bool enable)
6130 {
6131 bool previous = target_is_async_p ();
6132
6133 if (debug_threads)
6134 debug_printf ("linux_async (%d), previous=%d\n",
6135 enable, previous);
6136
6137 if (previous != enable)
6138 {
6139 sigset_t mask;
6140 sigemptyset (&mask);
6141 sigaddset (&mask, SIGCHLD);
6142
6143 gdb_sigmask (SIG_BLOCK, &mask, NULL);
6144
6145 if (enable)
6146 {
6147 if (pipe (linux_event_pipe) == -1)
6148 {
6149 linux_event_pipe[0] = -1;
6150 linux_event_pipe[1] = -1;
6151 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6152
6153 warning ("creating event pipe failed.");
6154 return previous;
6155 }
6156
6157 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6158 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6159
6160 /* Register the event loop handler. */
6161 add_file_handler (linux_event_pipe[0],
6162 handle_target_event, NULL);
6163
6164 /* Always trigger a linux_wait. */
6165 async_file_mark ();
6166 }
6167 else
6168 {
6169 delete_file_handler (linux_event_pipe[0]);
6170
6171 close (linux_event_pipe[0]);
6172 close (linux_event_pipe[1]);
6173 linux_event_pipe[0] = -1;
6174 linux_event_pipe[1] = -1;
6175 }
6176
6177 gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
6178 }
6179
6180 return previous;
6181 }
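
/* A hypothetical sketch of the self-pipe pattern used above: a byte
written into a non-blocking pipe from the SIGCHLD handler wakes up an
event loop that watches the pipe's read end. */
#if 0
#include <unistd.h>

static int sketch_event_pipe[2];

static void
sketch_wake_event_loop (void)
{
/* write is async-signal-safe; if the pipe is full, a wakeup is
already pending, so the result can be ignored. */
(void) write (sketch_event_pipe[1], "+", 1);
}

static void
sketch_drain_event_pipe (void)
{
char buf[64];

/* Non-blocking read; stops with EAGAIN once drained. */
while (read (sketch_event_pipe[0], buf, sizeof buf) > 0)
;
}
#endif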
6182
6183 int
6184 linux_process_target::start_non_stop (bool nonstop)
6185 {
6186 /* Register or unregister from event-loop accordingly. */
6187 target_async (nonstop);
6188
6189 if (target_is_async_p () != (nonstop != false))
6190 return -1;
6191
6192 return 0;
6193 }
6194
6195 bool
6196 linux_process_target::supports_multi_process ()
6197 {
6198 return true;
6199 }
6200
6201 /* Check if fork events are supported. */
6202
6203 bool
6204 linux_process_target::supports_fork_events ()
6205 {
6206 return linux_supports_tracefork ();
6207 }
6208
6209 /* Check if vfork events are supported. */
6210
6211 bool
6212 linux_process_target::supports_vfork_events ()
6213 {
6214 return linux_supports_tracefork ();
6215 }
6216
6217 /* Check if exec events are supported. */
6218
6219 bool
6220 linux_process_target::supports_exec_events ()
6221 {
6222 return linux_supports_traceexec ();
6223 }
6224
6225 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6226 ptrace flags for all inferiors. This is in case the new GDB connection
6227 doesn't support the same set of events that the previous one did. */
6228
6229 void
6230 linux_process_target::handle_new_gdb_connection ()
6231 {
6232 /* Request that all the lwps reset their ptrace options. */
6233 for_each_thread ([] (thread_info *thread)
6234 {
6235 struct lwp_info *lwp = get_thread_lwp (thread);
6236
6237 if (!lwp->stopped)
6238 {
6239 /* Stop the lwp so we can modify its ptrace options. */
6240 lwp->must_set_ptrace_flags = 1;
6241 linux_stop_lwp (lwp);
6242 }
6243 else
6244 {
6245 /* Already stopped; go ahead and set the ptrace options. */
6246 struct process_info *proc = find_process_pid (pid_of (thread));
6247 int options = linux_low_ptrace_options (proc->attached);
6248
6249 linux_enable_event_reporting (lwpid_of (thread), options);
6250 lwp->must_set_ptrace_flags = 0;
6251 }
6252 });
6253 }
6254
6255 int
6256 linux_process_target::handle_monitor_command (char *mon)
6257 {
6258 #ifdef USE_THREAD_DB
6259 return thread_db_handle_monitor_command (mon);
6260 #else
6261 return 0;
6262 #endif
6263 }
6264
6265 int
6266 linux_process_target::core_of_thread (ptid_t ptid)
6267 {
6268 return linux_common_core_of_thread (ptid);
6269 }
6270
6271 bool
6272 linux_process_target::supports_disable_randomization ()
6273 {
6274 #ifdef HAVE_PERSONALITY
6275 return true;
6276 #else
6277 return false;
6278 #endif
6279 }
6280
6281 bool
6282 linux_process_target::supports_agent ()
6283 {
6284 return true;
6285 }
6286
6287 bool
6288 linux_process_target::supports_range_stepping ()
6289 {
6290 if (supports_software_single_step ())
6291 return true;
6292
6293 return low_supports_range_stepping ();
6294 }
6295
6296 bool
6297 linux_process_target::low_supports_range_stepping ()
6298 {
6299 return false;
6300 }
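
/* A hypothetical sketch of how an architecture-specific low target
could advertise range stepping by overriding the default above; the
class name here is made up for illustration. */
#if 0
class sketch_arch_target : public linux_process_target
{
protected:

bool low_supports_range_stepping () override
{ return true; }
};
#endif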
6301
6302 bool
6303 linux_process_target::supports_pid_to_exec_file ()
6304 {
6305 return true;
6306 }
6307
6308 char *
6309 linux_process_target::pid_to_exec_file (int pid)
6310 {
6311 return linux_proc_pid_to_exec_file (pid);
6312 }
6313
6314 bool
6315 linux_process_target::supports_multifs ()
6316 {
6317 return true;
6318 }
6319
6320 int
6321 linux_process_target::multifs_open (int pid, const char *filename,
6322 int flags, mode_t mode)
6323 {
6324 return linux_mntns_open_cloexec (pid, filename, flags, mode);
6325 }
6326
6327 int
6328 linux_process_target::multifs_unlink (int pid, const char *filename)
6329 {
6330 return linux_mntns_unlink (pid, filename);
6331 }
6332
6333 ssize_t
6334 linux_process_target::multifs_readlink (int pid, const char *filename,
6335 char *buf, size_t bufsiz)
6336 {
6337 return linux_mntns_readlink (pid, filename, buf, bufsiz);
6338 }
6339
6340 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6341 struct target_loadseg
6342 {
6343 /* Core address to which the segment is mapped. */
6344 Elf32_Addr addr;
6345 /* VMA recorded in the program header. */
6346 Elf32_Addr p_vaddr;
6347 /* Size of this segment in memory. */
6348 Elf32_Word p_memsz;
6349 };
6350
6351 # if defined PT_GETDSBT
6352 struct target_loadmap
6353 {
6354 /* Protocol version number, must be zero. */
6355 Elf32_Word version;
6356 /* Pointer to the DSBT table, its size, and the DSBT index. */
6357 unsigned *dsbt_table;
6358 unsigned dsbt_size, dsbt_index;
6359 /* Number of segments in this map. */
6360 Elf32_Word nsegs;
6361 /* The actual memory map. */
6362 struct target_loadseg segs[/*nsegs*/];
6363 };
6364 # define LINUX_LOADMAP PT_GETDSBT
6365 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6366 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6367 # else
6368 struct target_loadmap
6369 {
6370 /* Protocol version number, must be zero. */
6371 Elf32_Half version;
6372 /* Number of segments in this map. */
6373 Elf32_Half nsegs;
6374 /* The actual memory map. */
6375 struct target_loadseg segs[/*nsegs*/];
6376 };
6377 # define LINUX_LOADMAP PTRACE_GETFDPIC
6378 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6379 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6380 # endif
6381
6382 bool
6383 linux_process_target::supports_read_loadmap ()
6384 {
6385 return true;
6386 }
6387
6388 int
6389 linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6390 unsigned char *myaddr, unsigned int len)
6391 {
6392 int pid = lwpid_of (current_thread);
6393 int addr = -1;
6394 struct target_loadmap *data = NULL;
6395 unsigned int actual_length, copy_length;
6396
6397 if (strcmp (annex, "exec") == 0)
6398 addr = (int) LINUX_LOADMAP_EXEC;
6399 else if (strcmp (annex, "interp") == 0)
6400 addr = (int) LINUX_LOADMAP_INTERP;
6401 else
6402 return -1;
6403
6404 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6405 return -1;
6406
6407 if (data == NULL)
6408 return -1;
6409
6410 actual_length = sizeof (struct target_loadmap)
6411 + sizeof (struct target_loadseg) * data->nsegs;
6412
6413 if (offset < 0 || offset > actual_length)
6414 return -1;
6415
6416 copy_length = actual_length - offset < len ? actual_length - offset : len;
6417 memcpy (myaddr, (char *) data + offset, copy_length);
6418 return copy_length;
6419 }
6420 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6421
6422 bool
6423 linux_process_target::supports_catch_syscall ()
6424 {
6425 return (the_low_target.get_syscall_trapinfo != NULL
6426 && linux_supports_tracesysgood ());
6427 }
6428
6429 int
6430 linux_process_target::get_ipa_tdesc_idx ()
6431 {
6432 if (the_low_target.get_ipa_tdesc_idx == NULL)
6433 return 0;
6434
6435 return (*the_low_target.get_ipa_tdesc_idx) ();
6436 }
6437
6438 CORE_ADDR
6439 linux_process_target::read_pc (regcache *regcache)
6440 {
6441 if (!low_supports_breakpoints ())
6442 return 0;
6443
6444 return low_get_pc (regcache);
6445 }
6446
6447 void
6448 linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6449 {
6450 gdb_assert (low_supports_breakpoints ());
6451
6452 low_set_pc (regcache, pc);
6453 }
6454
6455 bool
6456 linux_process_target::supports_thread_stopped ()
6457 {
6458 return true;
6459 }
6460
6461 bool
6462 linux_process_target::thread_stopped (thread_info *thread)
6463 {
6464 return get_thread_lwp (thread)->stopped;
6465 }
6466
6467 /* This exposes stop-all-threads functionality to other modules. */
6468
6469 void
6470 linux_process_target::pause_all (bool freeze)
6471 {
6472 stop_all_lwps (freeze, NULL);
6473 }
6474
6475 /* This exposes unstop-all-threads functionality to other gdbserver
6476 modules. */
6477
6478 void
6479 linux_process_target::unpause_all (bool unfreeze)
6480 {
6481 unstop_all_lwps (unfreeze, NULL);
6482 }
6483
6484 int
6485 linux_process_target::prepare_to_access_memory ()
6486 {
6487 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6488 running LWP. */
6489 if (non_stop)
6490 target_pause_all (true);
6491 return 0;
6492 }
6493
6494 void
6495 linux_process_target::done_accessing_memory ()
6496 {
6497 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6498 running LWP. */
6499 if (non_stop)
6500 target_unpause_all (true);
6501 }
6502
6503 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6504
6505 static int
6506 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6507 CORE_ADDR *phdr_memaddr, int *num_phdr)
6508 {
6509 char filename[PATH_MAX];
6510 int fd;
6511 const int auxv_size = is_elf64
6512 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6513 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6514
6515 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6516
6517 fd = open (filename, O_RDONLY);
6518 if (fd < 0)
6519 return 1;
6520
6521 *phdr_memaddr = 0;
6522 *num_phdr = 0;
6523 while (read (fd, buf, auxv_size) == auxv_size
6524 && (*phdr_memaddr == 0 || *num_phdr == 0))
6525 {
6526 if (is_elf64)
6527 {
6528 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6529
6530 switch (aux->a_type)
6531 {
6532 case AT_PHDR:
6533 *phdr_memaddr = aux->a_un.a_val;
6534 break;
6535 case AT_PHNUM:
6536 *num_phdr = aux->a_un.a_val;
6537 break;
6538 }
6539 }
6540 else
6541 {
6542 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6543
6544 switch (aux->a_type)
6545 {
6546 case AT_PHDR:
6547 *phdr_memaddr = aux->a_un.a_val;
6548 break;
6549 case AT_PHNUM:
6550 *num_phdr = aux->a_un.a_val;
6551 break;
6552 }
6553 }
6554 }
6555
6556 close (fd);
6557
6558 if (*phdr_memaddr == 0 || *num_phdr == 0)
6559 {
6560 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6561 "phdr_memaddr = %ld, phdr_num = %d",
6562 (long) *phdr_memaddr, *num_phdr);
6563 return 2;
6564 }
6565
6566 return 0;
6567 }
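
/* Each auxv entry is a (type, value) pair sized to the inferior's word
size; for a 64-bit inferior, AT_PHDR (type 3) and AT_PHNUM (type 5)
each arrive as two 8-byte words, which is why the loop above reads
AUXV_SIZE bytes at a time. */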
6568
6569 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6570
6571 static CORE_ADDR
6572 get_dynamic (const int pid, const int is_elf64)
6573 {
6574 CORE_ADDR phdr_memaddr, relocation;
6575 int num_phdr, i;
6576 unsigned char *phdr_buf;
6577 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6578
6579 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6580 return 0;
6581
6582 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6583 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6584
6585 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6586 return 0;
6587
6588 /* Compute relocation: it is expected to be 0 for "regular" executables,
6589 non-zero for PIE ones. */
6590 relocation = -1;
6591 for (i = 0; relocation == -1 && i < num_phdr; i++)
6592 if (is_elf64)
6593 {
6594 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6595
6596 if (p->p_type == PT_PHDR)
6597 relocation = phdr_memaddr - p->p_vaddr;
6598 }
6599 else
6600 {
6601 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6602
6603 if (p->p_type == PT_PHDR)
6604 relocation = phdr_memaddr - p->p_vaddr;
6605 }
6606
6607 if (relocation == -1)
6608 {
6609 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6610 real-world executables, including PIE executables, always have
6611 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6612 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6613 provides DT_DEBUG anyway (fpc binaries are statically linked).
6614
6615 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6616
6617 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6618
6619 return 0;
6620 }
6621
6622 for (i = 0; i < num_phdr; i++)
6623 {
6624 if (is_elf64)
6625 {
6626 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6627
6628 if (p->p_type == PT_DYNAMIC)
6629 return p->p_vaddr + relocation;
6630 }
6631 else
6632 {
6633 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6634
6635 if (p->p_type == PT_DYNAMIC)
6636 return p->p_vaddr + relocation;
6637 }
6638 }
6639
6640 return 0;
6641 }
6642
6643 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6644 can be 0 if the inferior does not yet have the library list initialized.
6645 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6646 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6647
6648 static CORE_ADDR
6649 get_r_debug (const int pid, const int is_elf64)
6650 {
6651 CORE_ADDR dynamic_memaddr;
6652 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6653 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6654 CORE_ADDR map = -1;
6655
6656 dynamic_memaddr = get_dynamic (pid, is_elf64);
6657 if (dynamic_memaddr == 0)
6658 return map;
6659
6660 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6661 {
6662 if (is_elf64)
6663 {
6664 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6665 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6666 union
6667 {
6668 Elf64_Xword map;
6669 unsigned char buf[sizeof (Elf64_Xword)];
6670 }
6671 rld_map;
6672 #endif
6673 #ifdef DT_MIPS_RLD_MAP
6674 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6675 {
6676 if (linux_read_memory (dyn->d_un.d_val,
6677 rld_map.buf, sizeof (rld_map.buf)) == 0)
6678 return rld_map.map;
6679 else
6680 break;
6681 }
6682 #endif /* DT_MIPS_RLD_MAP */
6683 #ifdef DT_MIPS_RLD_MAP_REL
6684 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6685 {
6686 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6687 rld_map.buf, sizeof (rld_map.buf)) == 0)
6688 return rld_map.map;
6689 else
6690 break;
6691 }
6692 #endif /* DT_MIPS_RLD_MAP_REL */
6693
6694 if (dyn->d_tag == DT_DEBUG && map == -1)
6695 map = dyn->d_un.d_val;
6696
6697 if (dyn->d_tag == DT_NULL)
6698 break;
6699 }
6700 else
6701 {
6702 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6703 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6704 union
6705 {
6706 Elf32_Word map;
6707 unsigned char buf[sizeof (Elf32_Word)];
6708 }
6709 rld_map;
6710 #endif
6711 #ifdef DT_MIPS_RLD_MAP
6712 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6713 {
6714 if (linux_read_memory (dyn->d_un.d_val,
6715 rld_map.buf, sizeof (rld_map.buf)) == 0)
6716 return rld_map.map;
6717 else
6718 break;
6719 }
6720 #endif /* DT_MIPS_RLD_MAP */
6721 #ifdef DT_MIPS_RLD_MAP_REL
6722 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6723 {
6724 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6725 rld_map.buf, sizeof (rld_map.buf)) == 0)
6726 return rld_map.map;
6727 else
6728 break;
6729 }
6730 #endif /* DT_MIPS_RLD_MAP_REL */
6731
6732 if (dyn->d_tag == DT_DEBUG && map == -1)
6733 map = dyn->d_un.d_val;
6734
6735 if (dyn->d_tag == DT_NULL)
6736 break;
6737 }
6738
6739 dynamic_memaddr += dyn_size;
6740 }
6741
6742 return map;
6743 }
6744
6745 /* Read one pointer from MEMADDR in the inferior. */
6746
6747 static int
6748 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6749 {
6750 int ret;
6751
6752 /* Go through a union so this works on either big- or little-endian
6753 hosts when the inferior's pointer size is smaller than the size
6754 of CORE_ADDR. The inferior's endianness is assumed to be the
6755 same as the superior's. */
6756 union
6757 {
6758 CORE_ADDR core_addr;
6759 unsigned int ui;
6760 unsigned char uc;
6761 } addr;
6762
6763 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6764 if (ret == 0)
6765 {
6766 if (ptr_size == sizeof (CORE_ADDR))
6767 *ptr = addr.core_addr;
6768 else if (ptr_size == sizeof (unsigned int))
6769 *ptr = addr.ui;
6770 else
6771 gdb_assert_not_reached ("unhandled pointer size");
6772 }
6773 return ret;
6774 }
6775
6776 bool
6777 linux_process_target::supports_qxfer_libraries_svr4 ()
6778 {
6779 return true;
6780 }
6781
6782 struct link_map_offsets
6783 {
6784 /* Offset and size of r_debug.r_version. */
6785 int r_version_offset;
6786
6787 /* Offset and size of r_debug.r_map. */
6788 int r_map_offset;
6789
6790 /* Offset to l_addr field in struct link_map. */
6791 int l_addr_offset;
6792
6793 /* Offset to l_name field in struct link_map. */
6794 int l_name_offset;
6795
6796 /* Offset to l_ld field in struct link_map. */
6797 int l_ld_offset;
6798
6799 /* Offset to l_next field in struct link_map. */
6800 int l_next_offset;
6801
6802 /* Offset to l_prev field in struct link_map. */
6803 int l_prev_offset;
6804 };
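
/* For reference, these offsets mirror the SVR4/glibc declarations in
<link.h>, sketched here with trailing members omitted:

struct r_debug { int r_version; struct link_map *r_map; ... };
struct link_map { ElfW(Addr) l_addr; char *l_name; ElfW(Dyn) *l_ld;
struct link_map *l_next, *l_prev; ... };

On a 64-bit inferior, pointer alignment places r_map at offset 8,
matching lmo_64bit_offsets below. */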
6805
6806 /* Construct qXfer:libraries-svr4:read reply. */
6807
6808 int
6809 linux_process_target::qxfer_libraries_svr4 (const char *annex,
6810 unsigned char *readbuf,
6811 unsigned const char *writebuf,
6812 CORE_ADDR offset, int len)
6813 {
6814 struct process_info_private *const priv = current_process ()->priv;
6815 char filename[PATH_MAX];
6816 int pid, is_elf64;
6817
6818 static const struct link_map_offsets lmo_32bit_offsets =
6819 {
6820 0, /* r_version offset. */
6821 4, /* r_debug.r_map offset. */
6822 0, /* l_addr offset in link_map. */
6823 4, /* l_name offset in link_map. */
6824 8, /* l_ld offset in link_map. */
6825 12, /* l_next offset in link_map. */
6826 16 /* l_prev offset in link_map. */
6827 };
6828
6829 static const struct link_map_offsets lmo_64bit_offsets =
6830 {
6831 0, /* r_version offset. */
6832 8, /* r_debug.r_map offset. */
6833 0, /* l_addr offset in link_map. */
6834 8, /* l_name offset in link_map. */
6835 16, /* l_ld offset in link_map. */
6836 24, /* l_next offset in link_map. */
6837 32 /* l_prev offset in link_map. */
6838 };
6839 const struct link_map_offsets *lmo;
6840 unsigned int machine;
6841 int ptr_size;
6842 CORE_ADDR lm_addr = 0, lm_prev = 0;
6843 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6844 int header_done = 0;
6845
6846 if (writebuf != NULL)
6847 return -2;
6848 if (readbuf == NULL)
6849 return -1;
6850
6851 pid = lwpid_of (current_thread);
6852 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6853 is_elf64 = elf_64_file_p (filename, &machine);
6854 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6855 ptr_size = is_elf64 ? 8 : 4;
6856
6857 while (annex[0] != '\0')
6858 {
6859 const char *sep;
6860 CORE_ADDR *addrp;
6861 int name_len;
6862
6863 sep = strchr (annex, '=');
6864 if (sep == NULL)
6865 break;
6866
6867 name_len = sep - annex;
6868 if (name_len == 5 && startswith (annex, "start"))
6869 addrp = &lm_addr;
6870 else if (name_len == 4 && startswith (annex, "prev"))
6871 addrp = &lm_prev;
6872 else
6873 {
6874 annex = strchr (sep, ';');
6875 if (annex == NULL)
6876 break;
6877 annex++;
6878 continue;
6879 }
6880
6881 annex = decode_address_to_semicolon (addrp, sep + 1);
6882 }
6883
6884 if (lm_addr == 0)
6885 {
6886 int r_version = 0;
6887
6888 if (priv->r_debug == 0)
6889 priv->r_debug = get_r_debug (pid, is_elf64);
6890
6891 /* We failed to find DT_DEBUG. This situation will not change
6892 for this inferior, so do not retry it. Report it to GDB as
6893 E01; see GDB's solib-svr4.c for the reasons. */
6894 if (priv->r_debug == (CORE_ADDR) -1)
6895 return -1;
6896
6897 if (priv->r_debug != 0)
6898 {
6899 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6900 (unsigned char *) &r_version,
6901 sizeof (r_version)) != 0
6902 || r_version != 1)
6903 {
6904 warning ("unexpected r_debug version %d", r_version);
6905 }
6906 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6907 &lm_addr, ptr_size) != 0)
6908 {
6909 warning ("unable to read r_map from 0x%lx",
6910 (long) priv->r_debug + lmo->r_map_offset);
6911 }
6912 }
6913 }
6914
6915 std::string document = "<library-list-svr4 version=\"1.0\"";
6916
6917 while (lm_addr
6918 && read_one_ptr (lm_addr + lmo->l_name_offset,
6919 &l_name, ptr_size) == 0
6920 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6921 &l_addr, ptr_size) == 0
6922 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6923 &l_ld, ptr_size) == 0
6924 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6925 &l_prev, ptr_size) == 0
6926 && read_one_ptr (lm_addr + lmo->l_next_offset,
6927 &l_next, ptr_size) == 0)
6928 {
6929 unsigned char libname[PATH_MAX];
6930
6931 if (lm_prev != l_prev)
6932 {
6933 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6934 (long) lm_prev, (long) l_prev);
6935 break;
6936 }
6937
6938 /* Ignore the first entry even if it has a valid name, as the first
6939 entry corresponds to the main executable. The first entry should not
6940 be skipped if the dynamic loader was loaded late by a static executable
6941 (see the solib-svr4.c parameter ignore_first), but in that case the
6942 main executable has no PT_DYNAMIC present, and this function has
6943 already returned above due to a failed get_r_debug. */
6944 if (lm_prev == 0)
6945 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6946 else
6947 {
6948 /* Not checking for error because reading may stop before
6949 we've got PATH_MAX worth of characters. */
6950 libname[0] = '\0';
6951 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6952 libname[sizeof (libname) - 1] = '\0';
6953 if (libname[0] != '\0')
6954 {
6955 if (!header_done)
6956 {
6957 /* Terminate `<library-list-svr4'. */
6958 document += '>';
6959 header_done = 1;
6960 }
6961
6962 string_appendf (document, "<library name=\"");
6963 xml_escape_text_append (&document, (char *) libname);
6964 string_appendf (document, "\" lm=\"0x%lx\" "
6965 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6966 (unsigned long) lm_addr, (unsigned long) l_addr,
6967 (unsigned long) l_ld);
6968 }
6969 }
6970
6971 lm_prev = lm_addr;
6972 lm_addr = l_next;
6973 }
6974
6975 if (!header_done)
6976 {
6977 /* Empty list; terminate `<library-list-svr4'. */
6978 document += "/>";
6979 }
6980 else
6981 document += "</library-list-svr4>";
6982
6983 int document_len = document.length ();
6984 if (offset < document_len)
6985 document_len -= offset;
6986 else
6987 document_len = 0;
6988 if (len > document_len)
6989 len = document_len;
6990
6991 memcpy (readbuf, document.data () + offset, len);
6992
6993 return len;
6994 }
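
/* A reply built by the function above looks like this sketch (addresses
made up; whitespace added here for readability, as the actual reply
contains no newlines between elements):

<library-list-svr4 version="1.0" main-lm="0x7f0000000000">
<library name="/lib/libc.so.6" lm="0x7f0000001000"
l_addr="0x7f0000002000" l_ld="0x7f0000003000"/>
</library-list-svr4>
*/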
6995
6996 #ifdef HAVE_LINUX_BTRACE
6997
6998 btrace_target_info *
6999 linux_process_target::enable_btrace (ptid_t ptid,
7000 const btrace_config *conf)
7001 {
7002 return linux_enable_btrace (ptid, conf);
7003 }
7004
7005 /* See to_disable_btrace target method. */
7006
7007 int
7008 linux_process_target::disable_btrace (btrace_target_info *tinfo)
7009 {
7010 enum btrace_error err;
7011
7012 err = linux_disable_btrace (tinfo);
7013 return (err == BTRACE_ERR_NONE ? 0 : -1);
7014 }
7015
7016 /* Encode an Intel Processor Trace configuration. */
7017
7018 static void
7019 linux_low_encode_pt_config (struct buffer *buffer,
7020 const struct btrace_data_pt_config *config)
7021 {
7022 buffer_grow_str (buffer, "<pt-config>\n");
7023
7024 switch (config->cpu.vendor)
7025 {
7026 case CV_INTEL:
7027 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7028 "model=\"%u\" stepping=\"%u\"/>\n",
7029 config->cpu.family, config->cpu.model,
7030 config->cpu.stepping);
7031 break;
7032
7033 default:
7034 break;
7035 }
7036
7037 buffer_grow_str (buffer, "</pt-config>\n");
7038 }
7039
7040 /* Encode a raw buffer. */
7041
7042 static void
7043 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7044 unsigned int size)
7045 {
7046 if (size == 0)
7047 return;
7048
7049 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7050 buffer_grow_str (buffer, "<raw>\n");
7051
7052 while (size-- > 0)
7053 {
7054 char elem[2];
7055
7056 elem[0] = tohex ((*data >> 4) & 0xf);
7057 elem[1] = tohex (*data++ & 0xf);
7058
7059 buffer_grow (buffer, elem, 2);
7060 }
7061
7062 buffer_grow_str (buffer, "</raw>\n");
7063 }
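
/* For example, the two bytes { 0x1f, 0xa0 } are emitted as the four
characters "1fa0" inside the <raw> element. */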
7064
7065 /* See to_read_btrace target method. */
7066
7067 int
7068 linux_process_target::read_btrace (btrace_target_info *tinfo,
7069 buffer *buffer,
7070 enum btrace_read_type type)
7071 {
7072 struct btrace_data btrace;
7073 enum btrace_error err;
7074
7075 err = linux_read_btrace (&btrace, tinfo, type);
7076 if (err != BTRACE_ERR_NONE)
7077 {
7078 if (err == BTRACE_ERR_OVERFLOW)
7079 buffer_grow_str0 (buffer, "E.Overflow.");
7080 else
7081 buffer_grow_str0 (buffer, "E.Generic Error.");
7082
7083 return -1;
7084 }
7085
7086 switch (btrace.format)
7087 {
7088 case BTRACE_FORMAT_NONE:
7089 buffer_grow_str0 (buffer, "E.No Trace.");
7090 return -1;
7091
7092 case BTRACE_FORMAT_BTS:
7093 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7094 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7095
7096 for (const btrace_block &block : *btrace.variant.bts.blocks)
7097 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7098 paddress (block.begin), paddress (block.end));
7099
7100 buffer_grow_str0 (buffer, "</btrace>\n");
7101 break;
7102
7103 case BTRACE_FORMAT_PT:
7104 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7105 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7106 buffer_grow_str (buffer, "<pt>\n");
7107
7108 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7109
7110 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7111 btrace.variant.pt.size);
7112
7113 buffer_grow_str (buffer, "</pt>\n");
7114 buffer_grow_str0 (buffer, "</btrace>\n");
7115 break;
7116
7117 default:
7118 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7119 return -1;
7120 }
7121
7122 return 0;
7123 }
7124
7125 /* See to_btrace_conf target method. */
7126
7127 int
7128 linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
7129 buffer *buffer)
7130 {
7131 const struct btrace_config *conf;
7132
7133 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7134 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7135
7136 conf = linux_btrace_conf (tinfo);
7137 if (conf != NULL)
7138 {
7139 switch (conf->format)
7140 {
7141 case BTRACE_FORMAT_NONE:
7142 break;
7143
7144 case BTRACE_FORMAT_BTS:
7145 buffer_xml_printf (buffer, "<bts");
7146 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7147 buffer_xml_printf (buffer, " />\n");
7148 break;
7149
7150 case BTRACE_FORMAT_PT:
7151 buffer_xml_printf (buffer, "<pt");
7152 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7153 buffer_xml_printf (buffer, "/>\n");
7154 break;
7155 }
7156 }
7157
7158 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7159 return 0;
7160 }
7161 #endif /* HAVE_LINUX_BTRACE */
7162
7163 /* See nat/linux-nat.h. */
7164
7165 ptid_t
7166 current_lwp_ptid (void)
7167 {
7168 return ptid_of (current_thread);
7169 }
7170
7171 const char *
7172 linux_process_target::thread_name (ptid_t thread)
7173 {
7174 return linux_proc_tid_get_name (thread);
7175 }
7176
7177 #if USE_THREAD_DB
7178 bool
7179 linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
7180 int *handle_len)
7181 {
7182 return thread_db_thread_handle (ptid, handle, handle_len);
7183 }
7184 #endif
7185
7186 /* Default implementation of linux_target_ops method "set_pc" for a
7187 32-bit pc register that is literally named "pc". */
7188
7189 void
7190 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7191 {
7192 uint32_t newpc = pc;
7193
7194 supply_register_by_name (regcache, "pc", &newpc);
7195 }
7196
7197 /* Default implementation of linux_target_ops method "get_pc" for a
7198 32-bit pc register that is literally named "pc". */
7199
7200 CORE_ADDR
7201 linux_get_pc_32bit (struct regcache *regcache)
7202 {
7203 uint32_t pc;
7204
7205 collect_register_by_name (regcache, "pc", &pc);
7206 if (debug_threads)
7207 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7208 return pc;
7209 }
7210
7211 /* Default implementation of linux_target_ops method "set_pc" for a
7212 64-bit pc register that is literally named "pc". */
7213
7214 void
7215 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7216 {
7217 uint64_t newpc = pc;
7218
7219 supply_register_by_name (regcache, "pc", &newpc);
7220 }
7221
7222 /* Default implementation of linux_target_ops method "get_pc" for a
7223 64-bit pc register that is literally named "pc". */
7224
7225 CORE_ADDR
7226 linux_get_pc_64bit (struct regcache *regcache)
7227 {
7228 uint64_t pc;
7229
7230 collect_register_by_name (regcache, "pc", &pc);
7231 if (debug_threads)
7232 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7233 return pc;
7234 }
7235
7236 /* See linux-low.h. */
7237
7238 int
7239 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7240 {
7241 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7242 int offset = 0;
7243
7244 gdb_assert (wordsize == 4 || wordsize == 8);
7245
7246 while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
7247 {
7248 if (wordsize == 4)
7249 {
7250 uint32_t *data_p = (uint32_t *) data;
7251 if (data_p[0] == match)
7252 {
7253 *valp = data_p[1];
7254 return 1;
7255 }
7256 }
7257 else
7258 {
7259 uint64_t *data_p = (uint64_t *) data;
7260 if (data_p[0] == match)
7261 {
7262 *valp = data_p[1];
7263 return 1;
7264 }
7265 }
7266
7267 offset += 2 * wordsize;
7268 }
7269
7270 return 0;
7271 }
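
/* Hypothetical usage sketch: fetch the inferior's entry point from the
auxiliary vector of a 64-bit inferior. */
#if 0
CORE_ADDR entry = 0;

if (linux_get_auxv (8, AT_ENTRY, &entry))
debug_printf ("entry point: 0x%s\n", paddress (entry));
#endif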
7272
7273 /* See linux-low.h. */
7274
7275 CORE_ADDR
7276 linux_get_hwcap (int wordsize)
7277 {
7278 CORE_ADDR hwcap = 0;
7279 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7280 return hwcap;
7281 }
7282
7283 /* See linux-low.h. */
7284
7285 CORE_ADDR
7286 linux_get_hwcap2 (int wordsize)
7287 {
7288 CORE_ADDR hwcap2 = 0;
7289 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7290 return hwcap2;
7291 }
7292
7293 #ifdef HAVE_LINUX_REGSETS
7294 void
7295 initialize_regsets_info (struct regsets_info *info)
7296 {
7297 for (info->num_regsets = 0;
7298 info->regsets[info->num_regsets].size >= 0;
7299 info->num_regsets++)
7300 ;
7301 }
7302 #endif
7303
7304 void
7305 initialize_low (void)
7306 {
7307 struct sigaction sigchld_action;
7308
7309 memset (&sigchld_action, 0, sizeof (sigchld_action));
7310 set_target_ops (the_linux_target);
7311
7312 linux_ptrace_init_warnings ();
7313 linux_proc_init_warnings ();
7314
7315 sigchld_action.sa_handler = sigchld_handler;
7316 sigemptyset (&sigchld_action.sa_mask);
7317 sigchld_action.sa_flags = SA_RESTART;
7318 sigaction (SIGCHLD, &sigchld_action, NULL);
7319
7320 initialize_low_arch ();
7321
7322 linux_check_ptrace_features ();
7323 }