1 /* Native-dependent code for FreeBSD.
2
3 Copyright (C) 2002-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "gdbsupport/block-signals.h"
22 #include "gdbsupport/byte-vector.h"
23 #include "gdbsupport/event-loop.h"
24 #include "gdbcore.h"
25 #include "inferior.h"
26 #include "regcache.h"
27 #include "regset.h"
28 #include "gdbarch.h"
29 #include "gdbcmd.h"
30 #include "gdbthread.h"
31 #include "gdbsupport/buildargv.h"
32 #include "gdbsupport/gdb_wait.h"
33 #include "inf-loop.h"
34 #include "inf-ptrace.h"
35 #include <sys/types.h>
36 #ifdef HAVE_SYS_PROCCTL_H
37 #include <sys/procctl.h>
38 #endif
39 #include <sys/procfs.h>
40 #include <sys/ptrace.h>
41 #include <sys/signal.h>
42 #include <sys/sysctl.h>
43 #include <sys/user.h>
44 #include <libutil.h>
45
46 #include "elf-bfd.h"
47 #include "fbsd-nat.h"
48 #include "fbsd-tdep.h"
49
50 #include <list>
#include <forward_list>	/* for fbsd_pending_vfork_done below */
51
52 #ifndef PT_GETREGSET
53 #define PT_GETREGSET 42 /* Get a target register set */
54 #define PT_SETREGSET 43 /* Set a target register set */
55 #endif
56
57 /* Return the name of a file that can be opened to get the symbols for
58 the child process identified by PID. */
59
60 const char *
61 fbsd_nat_target::pid_to_exec_file (int pid)
62 {
63 static char buf[PATH_MAX];
64 size_t buflen;
65 int mib[4];
66
67 mib[0] = CTL_KERN;
68 mib[1] = KERN_PROC;
69 mib[2] = KERN_PROC_PATHNAME;
70 mib[3] = pid;
71 buflen = sizeof buf;
72 if (sysctl (mib, 4, buf, &buflen, NULL, 0) == 0)
73 /* The kern.proc.pathname.<pid> sysctl returns a length of zero
74 for processes without an associated executable such as kernel
75 processes. */
76 return buflen == 0 ? NULL : buf;
77
78 return NULL;
79 }
80
81 /* Iterate over all the memory regions in the current inferior,
82 calling FUNC for each memory region. DATA is passed as the last
83 argument to FUNC. */
84
85 int
86 fbsd_nat_target::find_memory_regions (find_memory_region_ftype func,
87 void *data)
88 {
89 pid_t pid = inferior_ptid.pid ();
90 struct kinfo_vmentry *kve;
91 uint64_t size;
92 int i, nitems;
93
94 gdb::unique_xmalloc_ptr<struct kinfo_vmentry>
95 vmentl (kinfo_getvmmap (pid, &nitems));
96 if (vmentl == NULL)
97 perror_with_name (_("Couldn't fetch VM map entries"));
98
99 for (i = 0, kve = vmentl.get (); i < nitems; i++, kve++)
100 {
101 /* Skip unreadable segments and those where MAP_NOCORE has been set. */
102 if (!(kve->kve_protection & KVME_PROT_READ)
103 || kve->kve_flags & KVME_FLAG_NOCOREDUMP)
104 continue;
105
106 /* Skip segments with an invalid type. */
107 if (kve->kve_type != KVME_TYPE_DEFAULT
108 && kve->kve_type != KVME_TYPE_VNODE
109 && kve->kve_type != KVME_TYPE_SWAP
110 && kve->kve_type != KVME_TYPE_PHYS)
111 continue;
112
113 size = kve->kve_end - kve->kve_start;
114 if (info_verbose)
115 {
116 gdb_printf ("Save segment, %ld bytes at %s (%c%c%c)\n",
117 (long) size,
118 paddress (target_gdbarch (), kve->kve_start),
119 kve->kve_protection & KVME_PROT_READ ? 'r' : '-',
120 kve->kve_protection & KVME_PROT_WRITE ? 'w' : '-',
121 kve->kve_protection & KVME_PROT_EXEC ? 'x' : '-');
122 }
123
124 /* Invoke the callback function to create the corefile segment.
125 Pass MODIFIED as true, we do not know the real modification state. */
126 func (kve->kve_start, size, kve->kve_protection & KVME_PROT_READ,
127 kve->kve_protection & KVME_PROT_WRITE,
128 kve->kve_protection & KVME_PROT_EXEC, 1, data);
129 }
130 return 0;
131 }
132
133 /* Fetch the command line for a running process. */
134
135 static gdb::unique_xmalloc_ptr<char>
136 fbsd_fetch_cmdline (pid_t pid)
137 {
138 size_t len;
139 int mib[4];
140
141 len = 0;
142 mib[0] = CTL_KERN;
143 mib[1] = KERN_PROC;
144 mib[2] = KERN_PROC_ARGS;
145 mib[3] = pid;
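/* First ask the kernel how large the argument block is, then fetch it
   into a buffer of that size. */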
146 if (sysctl (mib, 4, NULL, &len, NULL, 0) == -1)
147 return nullptr;
148
149 if (len == 0)
150 return nullptr;
151
152 gdb::unique_xmalloc_ptr<char> cmdline ((char *) xmalloc (len));
153 if (sysctl (mib, 4, cmdline.get (), &len, NULL, 0) == -1)
154 return nullptr;
155
156 /* Join the arguments with spaces to form a single string. */
157 char *cp = cmdline.get ();
158 for (size_t i = 0; i < len - 1; i++)
159 if (cp[i] == '\0')
160 cp[i] = ' ';
161 cp[len - 1] = '\0';
162
163 return cmdline;
164 }
165
166 /* Fetch the external variant of the kernel's internal process
167 structure for the process PID into KP. */
168
169 static bool
170 fbsd_fetch_kinfo_proc (pid_t pid, struct kinfo_proc *kp)
171 {
172 size_t len;
173 int mib[4];
174
175 len = sizeof *kp;
176 mib[0] = CTL_KERN;
177 mib[1] = KERN_PROC;
178 mib[2] = KERN_PROC_PID;
179 mib[3] = pid;
180 return (sysctl (mib, 4, kp, &len, NULL, 0) == 0);
181 }
182
183 /* Implement the "info_proc" target_ops method. */
184
185 bool
186 fbsd_nat_target::info_proc (const char *args, enum info_proc_what what)
187 {
188 gdb::unique_xmalloc_ptr<struct kinfo_file> fdtbl;
189 int nfd = 0;
190 struct kinfo_proc kp;
191 pid_t pid;
192 bool do_cmdline = false;
193 bool do_cwd = false;
194 bool do_exe = false;
195 bool do_files = false;
196 bool do_mappings = false;
197 bool do_status = false;
198
199 switch (what)
200 {
201 case IP_MINIMAL:
202 do_cmdline = true;
203 do_cwd = true;
204 do_exe = true;
205 break;
206 case IP_MAPPINGS:
207 do_mappings = true;
208 break;
209 case IP_STATUS:
210 case IP_STAT:
211 do_status = true;
212 break;
213 case IP_CMDLINE:
214 do_cmdline = true;
215 break;
216 case IP_EXE:
217 do_exe = true;
218 break;
219 case IP_CWD:
220 do_cwd = true;
221 break;
222 case IP_FILES:
223 do_files = true;
224 break;
225 case IP_ALL:
226 do_cmdline = true;
227 do_cwd = true;
228 do_exe = true;
229 do_files = true;
230 do_mappings = true;
231 do_status = true;
232 break;
233 default:
234 error (_("Not supported on this target."));
235 }
236
237 gdb_argv built_argv (args);
238 if (built_argv.count () == 0)
239 {
240 pid = inferior_ptid.pid ();
241 if (pid == 0)
242 error (_("No current process: you must name one."));
243 }
244 else if (built_argv.count () == 1 && isdigit (built_argv[0][0]))
245 pid = strtol (built_argv[0], NULL, 10);
246 else
247 error (_("Invalid arguments."));
248
249 gdb_printf (_("process %d\n"), pid);
250 if (do_cwd || do_exe || do_files)
251 fdtbl.reset (kinfo_getfile (pid, &nfd));
252
253 if (do_cmdline)
254 {
255 gdb::unique_xmalloc_ptr<char> cmdline = fbsd_fetch_cmdline (pid);
256 if (cmdline != nullptr)
257 gdb_printf ("cmdline = '%s'\n", cmdline.get ());
258 else
259 warning (_("unable to fetch command line"));
260 }
261 if (do_cwd)
262 {
263 const char *cwd = NULL;
264 struct kinfo_file *kf = fdtbl.get ();
265 for (int i = 0; i < nfd; i++, kf++)
266 {
267 if (kf->kf_type == KF_TYPE_VNODE && kf->kf_fd == KF_FD_TYPE_CWD)
268 {
269 cwd = kf->kf_path;
270 break;
271 }
272 }
273 if (cwd != NULL)
274 gdb_printf ("cwd = '%s'\n", cwd);
275 else
276 warning (_("unable to fetch current working directory"));
277 }
278 if (do_exe)
279 {
280 const char *exe = NULL;
281 struct kinfo_file *kf = fdtbl.get ();
282 for (int i = 0; i < nfd; i++, kf++)
283 {
284 if (kf->kf_type == KF_TYPE_VNODE && kf->kf_fd == KF_FD_TYPE_TEXT)
285 {
286 exe = kf->kf_path;
287 break;
288 }
289 }
290 if (exe == NULL)
291 exe = pid_to_exec_file (pid);
292 if (exe != NULL)
293 gdb_printf ("exe = '%s'\n", exe);
294 else
295 warning (_("unable to fetch executable path name"));
296 }
297 if (do_files)
298 {
299 struct kinfo_file *kf = fdtbl.get ();
300
301 if (nfd > 0)
302 {
303 fbsd_info_proc_files_header ();
304 for (int i = 0; i < nfd; i++, kf++)
305 fbsd_info_proc_files_entry (kf->kf_type, kf->kf_fd, kf->kf_flags,
306 kf->kf_offset, kf->kf_vnode_type,
307 kf->kf_sock_domain, kf->kf_sock_type,
308 kf->kf_sock_protocol, &kf->kf_sa_local,
309 &kf->kf_sa_peer, kf->kf_path);
310 }
311 else
312 warning (_("unable to fetch list of open files"));
313 }
314 if (do_mappings)
315 {
316 int nvment;
317 gdb::unique_xmalloc_ptr<struct kinfo_vmentry>
318 vmentl (kinfo_getvmmap (pid, &nvment));
319
320 if (vmentl != nullptr)
321 {
322 int addr_bit = TARGET_CHAR_BIT * sizeof (void *);
323 fbsd_info_proc_mappings_header (addr_bit);
324
325 struct kinfo_vmentry *kve = vmentl.get ();
326 for (int i = 0; i < nvment; i++, kve++)
327 fbsd_info_proc_mappings_entry (addr_bit, kve->kve_start,
328 kve->kve_end, kve->kve_offset,
329 kve->kve_flags, kve->kve_protection,
330 kve->kve_path);
331 }
332 else
333 warning (_("unable to fetch virtual memory map"));
334 }
335 if (do_status)
336 {
337 if (!fbsd_fetch_kinfo_proc (pid, &kp))
338 warning (_("Failed to fetch process information"));
339 else
340 {
341 const char *state;
342 int pgtok;
343
344 gdb_printf ("Name: %s\n", kp.ki_comm);
345 switch (kp.ki_stat)
346 {
347 case SIDL:
348 state = "I (idle)";
349 break;
350 case SRUN:
351 state = "R (running)";
352 break;
353 case SSTOP:
354 state = "T (stopped)";
355 break;
356 case SZOMB:
357 state = "Z (zombie)";
358 break;
359 case SSLEEP:
360 state = "S (sleeping)";
361 break;
362 case SWAIT:
363 state = "W (interrupt wait)";
364 break;
365 case SLOCK:
366 state = "L (blocked on lock)";
367 break;
368 default:
369 state = "? (unknown)";
370 break;
371 }
372 gdb_printf ("State: %s\n", state);
373 gdb_printf ("Parent process: %d\n", kp.ki_ppid);
374 gdb_printf ("Process group: %d\n", kp.ki_pgid);
375 gdb_printf ("Session id: %d\n", kp.ki_sid);
376 gdb_printf ("TTY: %s\n", pulongest (kp.ki_tdev));
377 gdb_printf ("TTY owner process group: %d\n", kp.ki_tpgid);
378 gdb_printf ("User IDs (real, effective, saved): %d %d %d\n",
379 kp.ki_ruid, kp.ki_uid, kp.ki_svuid);
380 gdb_printf ("Group IDs (real, effective, saved): %d %d %d\n",
381 kp.ki_rgid, kp.ki_groups[0], kp.ki_svgid);
382 gdb_printf ("Groups: ");
383 for (int i = 0; i < kp.ki_ngroups; i++)
384 gdb_printf ("%d ", kp.ki_groups[i]);
385 gdb_printf ("\n");
386 gdb_printf ("Minor faults (no memory page): %ld\n",
387 kp.ki_rusage.ru_minflt);
388 gdb_printf ("Minor faults, children: %ld\n",
389 kp.ki_rusage_ch.ru_minflt);
390 gdb_printf ("Major faults (memory page faults): %ld\n",
391 kp.ki_rusage.ru_majflt);
392 gdb_printf ("Major faults, children: %ld\n",
393 kp.ki_rusage_ch.ru_majflt);
394 gdb_printf ("utime: %s.%06ld\n",
395 plongest (kp.ki_rusage.ru_utime.tv_sec),
396 kp.ki_rusage.ru_utime.tv_usec);
397 gdb_printf ("stime: %s.%06ld\n",
398 plongest (kp.ki_rusage.ru_stime.tv_sec),
399 kp.ki_rusage.ru_stime.tv_usec);
400 gdb_printf ("utime, children: %s.%06ld\n",
401 plongest (kp.ki_rusage_ch.ru_utime.tv_sec),
402 kp.ki_rusage_ch.ru_utime.tv_usec);
403 gdb_printf ("stime, children: %s.%06ld\n",
404 plongest (kp.ki_rusage_ch.ru_stime.tv_sec),
405 kp.ki_rusage_ch.ru_stime.tv_usec);
406 gdb_printf ("'nice' value: %d\n", kp.ki_nice);
407 gdb_printf ("Start time: %s.%06ld\n",
408 plongest (kp.ki_start.tv_sec),
409 kp.ki_start.tv_usec);
410 pgtok = getpagesize () / 1024;
411 gdb_printf ("Virtual memory size: %s kB\n",
412 pulongest (kp.ki_size / 1024));
413 gdb_printf ("Data size: %s kB\n",
414 pulongest (kp.ki_dsize * pgtok));
415 gdb_printf ("Stack size: %s kB\n",
416 pulongest (kp.ki_ssize * pgtok));
417 gdb_printf ("Text size: %s kB\n",
418 pulongest (kp.ki_tsize * pgtok));
419 gdb_printf ("Resident set size: %s kB\n",
420 pulongest (kp.ki_rssize * pgtok));
421 gdb_printf ("Maximum RSS: %s kB\n",
422 pulongest (kp.ki_rusage.ru_maxrss));
423 gdb_printf ("Pending Signals: ");
424 for (int i = 0; i < _SIG_WORDS; i++)
425 gdb_printf ("%08x ", kp.ki_siglist.__bits[i]);
426 gdb_printf ("\n");
427 gdb_printf ("Ignored Signals: ");
428 for (int i = 0; i < _SIG_WORDS; i++)
429 gdb_printf ("%08x ", kp.ki_sigignore.__bits[i]);
430 gdb_printf ("\n");
431 gdb_printf ("Caught Signals: ");
432 for (int i = 0; i < _SIG_WORDS; i++)
433 gdb_printf ("%08x ", kp.ki_sigcatch.__bits[i]);
434 gdb_printf ("\n");
435 }
436 }
437
438 return true;
439 }
440
443 #ifdef __LP64__
444 union sigval32 {
445 int sival_int;
446 uint32_t sival_ptr;
447 };
448
449 /* This structure matches the naming and layout of `siginfo_t' in
450 <sys/signal.h>. In particular, the `si_foo' macros defined in that
451 header can be used with both types to copy fields in the `_reason'
452 union. */
453
454 struct siginfo32
455 {
456 int si_signo;
457 int si_errno;
458 int si_code;
459 __pid_t si_pid;
460 __uid_t si_uid;
461 int si_status;
462 uint32_t si_addr;
463 union sigval32 si_value;
464 union
465 {
466 struct
467 {
468 int _trapno;
469 } _fault;
470 struct
471 {
472 int _timerid;
473 int _overrun;
474 } _timer;
475 struct
476 {
477 int _mqd;
478 } _mesgq;
479 struct
480 {
481 int32_t _band;
482 } _poll;
483 struct
484 {
485 int32_t __spare1__;
486 int __spare2__[7];
487 } __spare__;
488 } _reason;
489 };
490 #endif
491
/* Return the size of siginfo for the current inferior. */

492 static size_t
493 fbsd_siginfo_size ()
494 {
495 #ifdef __LP64__
496 struct gdbarch *gdbarch = get_frame_arch (get_current_frame ());
497
498 /* Is the inferior 32-bit? If so, use the 32-bit siginfo size. */
499 if (gdbarch_long_bit (gdbarch) == 32)
500 return sizeof (struct siginfo32);
501 #endif
502 return sizeof (siginfo_t);
503 }
504
505 /* Convert a native 64-bit siginfo object to a 32-bit object. Note
506 that FreeBSD doesn't support writing to $_siginfo, so this only
507 needs to convert one way. */
508
509 static void
510 fbsd_convert_siginfo (siginfo_t *si)
511 {
512 #ifdef __LP64__
513 struct gdbarch *gdbarch = get_frame_arch (get_current_frame ());
514
515 /* Is the inferior 32-bit? If not, nothing to do. */
516 if (gdbarch_long_bit (gdbarch) != 32)
517 return;
518
519 struct siginfo32 si32;
520
521 si32.si_signo = si->si_signo;
522 si32.si_errno = si->si_errno;
523 si32.si_code = si->si_code;
524 si32.si_pid = si->si_pid;
525 si32.si_uid = si->si_uid;
526 si32.si_status = si->si_status;
527 si32.si_addr = (uintptr_t) si->si_addr;
528
529 /* If sival_ptr is being used instead of sival_int on a big-endian
530 platform, then sival_int will be zero since it holds the upper
531 32-bits of the pointer value. */
532 #if _BYTE_ORDER == _BIG_ENDIAN
533 if (si->si_value.sival_int == 0)
534 si32.si_value.sival_ptr = (uintptr_t) si->si_value.sival_ptr;
535 else
536 si32.si_value.sival_int = si->si_value.sival_int;
537 #else
538 si32.si_value.sival_int = si->si_value.sival_int;
539 #endif
540
541 /* Always copy the spare fields and then possibly overwrite them for
542 signal-specific or code-specific fields. */
543 si32._reason.__spare__.__spare1__ = si->_reason.__spare__.__spare1__;
544 for (int i = 0; i < 7; i++)
545 si32._reason.__spare__.__spare2__[i] = si->_reason.__spare__.__spare2__[i];
546 switch (si->si_signo) {
547 case SIGILL:
548 case SIGFPE:
549 case SIGSEGV:
550 case SIGBUS:
551 si32.si_trapno = si->si_trapno;
552 break;
553 }
554 switch (si->si_code) {
555 case SI_TIMER:
556 si32.si_timerid = si->si_timerid;
557 si32.si_overrun = si->si_overrun;
558 break;
559 case SI_MESGQ:
560 si32.si_mqd = si->si_mqd;
561 break;
562 }
563
564 memcpy(si, &si32, sizeof (si32));
565 #endif
566 }
567
568 /* Implement the "xfer_partial" target_ops method. */
569
570 enum target_xfer_status
571 fbsd_nat_target::xfer_partial (enum target_object object,
572 const char *annex, gdb_byte *readbuf,
573 const gdb_byte *writebuf,
574 ULONGEST offset, ULONGEST len,
575 ULONGEST *xfered_len)
576 {
577 pid_t pid = inferior_ptid.pid ();
578
579 switch (object)
580 {
581 case TARGET_OBJECT_SIGNAL_INFO:
582 {
583 struct ptrace_lwpinfo pl;
584 size_t siginfo_size;
585
586 /* FreeBSD doesn't support writing to $_siginfo. */
587 if (writebuf != NULL)
588 return TARGET_XFER_E_IO;
589
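/* Use the LWP ID when available so that PT_LWPINFO reports the
   siginfo for that specific thread. */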
590 if (inferior_ptid.lwp_p ())
591 pid = inferior_ptid.lwp ();
592
593 siginfo_size = fbsd_siginfo_size ();
594 if (offset > siginfo_size)
595 return TARGET_XFER_E_IO;
596
597 if (ptrace (PT_LWPINFO, pid, (PTRACE_TYPE_ARG3) &pl, sizeof (pl)) == -1)
598 return TARGET_XFER_E_IO;
599
600 if (!(pl.pl_flags & PL_FLAG_SI))
601 return TARGET_XFER_E_IO;
602
603 fbsd_convert_siginfo (&pl.pl_siginfo);
604 if (offset + len > siginfo_size)
605 len = siginfo_size - offset;
606
607 memcpy (readbuf, ((gdb_byte *) &pl.pl_siginfo) + offset, len);
608 *xfered_len = len;
609 return TARGET_XFER_OK;
610 }
611 #ifdef KERN_PROC_AUXV
612 case TARGET_OBJECT_AUXV:
613 {
614 gdb::byte_vector buf_storage;
615 gdb_byte *buf;
616 size_t buflen;
617 int mib[4];
618
619 if (writebuf != NULL)
620 return TARGET_XFER_E_IO;
621 mib[0] = CTL_KERN;
622 mib[1] = KERN_PROC;
623 mib[2] = KERN_PROC_AUXV;
624 mib[3] = pid;
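/* The auxv sysctl cannot seek to an offset, so for a non-zero OFFSET
   fetch the entire vector into a temporary buffer and copy out the
   requested slice below. */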
625 if (offset == 0)
626 {
627 buf = readbuf;
628 buflen = len;
629 }
630 else
631 {
632 buflen = offset + len;
633 buf_storage.resize (buflen);
634 buf = buf_storage.data ();
635 }
636 if (sysctl (mib, 4, buf, &buflen, NULL, 0) == 0)
637 {
638 if (offset != 0)
639 {
640 if (buflen > offset)
641 {
642 buflen -= offset;
643 memcpy (readbuf, buf + offset, buflen);
644 }
645 else
646 buflen = 0;
647 }
648 *xfered_len = buflen;
649 return (buflen == 0) ? TARGET_XFER_EOF : TARGET_XFER_OK;
650 }
651 return TARGET_XFER_E_IO;
652 }
653 #endif
654 #if defined(KERN_PROC_VMMAP) && defined(KERN_PROC_PS_STRINGS)
655 case TARGET_OBJECT_FREEBSD_VMMAP:
656 case TARGET_OBJECT_FREEBSD_PS_STRINGS:
657 {
658 gdb::byte_vector buf_storage;
659 gdb_byte *buf;
660 size_t buflen;
661 int mib[4];
662
663 int proc_target;
664 uint32_t struct_size;
665 switch (object)
666 {
667 case TARGET_OBJECT_FREEBSD_VMMAP:
668 proc_target = KERN_PROC_VMMAP;
669 struct_size = sizeof (struct kinfo_vmentry);
670 break;
671 case TARGET_OBJECT_FREEBSD_PS_STRINGS:
672 proc_target = KERN_PROC_PS_STRINGS;
673 struct_size = sizeof (void *);
674 break;
675 }
676
677 if (writebuf != NULL)
678 return TARGET_XFER_E_IO;
679
680 mib[0] = CTL_KERN;
681 mib[1] = KERN_PROC;
682 mib[2] = proc_target;
683 mib[3] = pid;
684
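/* The data handed back to the core is a 32-bit structure size
   followed by the raw sysctl output, matching the layout of
   FreeBSD's NT_PROCSTAT_* core dump notes. */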
685 if (sysctl (mib, 4, NULL, &buflen, NULL, 0) != 0)
686 return TARGET_XFER_E_IO;
687 buflen += sizeof (struct_size);
688
689 if (offset >= buflen)
690 {
691 *xfered_len = 0;
692 return TARGET_XFER_EOF;
693 }
694
695 buf_storage.resize (buflen);
696 buf = buf_storage.data ();
697
698 memcpy (buf, &struct_size, sizeof (struct_size));
699 buflen -= sizeof (struct_size);
700 if (sysctl (mib, 4, buf + sizeof (struct_size), &buflen, NULL, 0) != 0)
701 return TARGET_XFER_E_IO;
702 buflen += sizeof (struct_size);
703
704 if (buflen - offset < len)
705 len = buflen - offset;
706 memcpy (readbuf, buf + offset, len);
707 *xfered_len = len;
708 return TARGET_XFER_OK;
709 }
710 #endif
711 default:
712 return inf_ptrace_target::xfer_partial (object, annex,
713 readbuf, writebuf, offset,
714 len, xfered_len);
715 }
716 }
717
718 static bool debug_fbsd_lwp;
719 static bool debug_fbsd_nat;
720
721 static void
722 show_fbsd_lwp_debug (struct ui_file *file, int from_tty,
723 struct cmd_list_element *c, const char *value)
724 {
725 gdb_printf (file, _("Debugging of FreeBSD lwp module is %s.\n"), value);
726 }
727
728 static void
729 show_fbsd_nat_debug (struct ui_file *file, int from_tty,
730 struct cmd_list_element *c, const char *value)
731 {
732 gdb_printf (file, _("Debugging of FreeBSD native target is %s.\n"),
733 value);
734 }
735
736 #define fbsd_lwp_debug_printf(fmt, ...) \
737 debug_prefixed_printf_cond (debug_fbsd_lwp, "fbsd-lwp", fmt, ##__VA_ARGS__)
738
739 #define fbsd_nat_debug_printf(fmt, ...) \
740 debug_prefixed_printf_cond (debug_fbsd_nat, "fbsd-nat", fmt, ##__VA_ARGS__)
741
742
743 /*
744 FreeBSD's first thread support was via a "reentrant" version of libc
745 (libc_r) that first shipped in 2.2.7. This library multiplexed all
746 of the threads in a process onto a single kernel thread. This
747 library was supported via the bsd-uthread target.
748
749 FreeBSD 5.1 introduced two new threading libraries that made use of
750 multiple kernel threads. The first (libkse) scheduled M user
751 threads onto N (<= M) kernel threads (LWPs). The second (libthr)
752 bound each user thread to a dedicated kernel thread. libkse shipped
753 as the default threading library (libpthread).
754
755 FreeBSD 5.3 added a libthread_db to abstract the interface across
756 the various thread libraries (libc_r, libkse, and libthr).
757
758 FreeBSD 7.0 switched the default threading library from libkse
759 to libthr and removed libc_r.
760
761 FreeBSD 8.0 removed libkse and the in-kernel support for it. The
762 only threading library supported by 8.0 and later is libthr which
763 ties each user thread directly to an LWP. To simplify the
764 implementation, this target only supports LWP-backed threads using
765 ptrace directly rather than libthread_db.
766
767 FreeBSD 11.0 introduced LWP event reporting via PT_LWP_EVENTS.
768 */
769
770 /* Return true if PTID is still active in the inferior. */
771
772 bool
773 fbsd_nat_target::thread_alive (ptid_t ptid)
774 {
775 if (ptid.lwp_p ())
776 {
777 struct ptrace_lwpinfo pl;
778
779 if (ptrace (PT_LWPINFO, ptid.lwp (), (caddr_t) &pl, sizeof pl)
780 == -1)
781 return false;
782 #ifdef PL_FLAG_EXITED
783 if (pl.pl_flags & PL_FLAG_EXITED)
784 return false;
785 #endif
786 }
787
788 return true;
789 }
790
791 /* Convert PTID to a string. */
792
793 std::string
794 fbsd_nat_target::pid_to_str (ptid_t ptid)
795 {
796 lwpid_t lwp;
797
798 lwp = ptid.lwp ();
799 if (lwp != 0)
800 {
801 int pid = ptid.pid ();
802
803 return string_printf ("LWP %d of process %d", lwp, pid);
804 }
805
806 return normal_pid_to_str (ptid);
807 }
808
809 #ifdef HAVE_STRUCT_PTRACE_LWPINFO_PL_TDNAME
810 /* Return the name assigned to a thread by an application. Returns
811 the string in a static buffer. */
812
813 const char *
814 fbsd_nat_target::thread_name (struct thread_info *thr)
815 {
816 struct ptrace_lwpinfo pl;
817 struct kinfo_proc kp;
818 int pid = thr->ptid.pid ();
819 long lwp = thr->ptid.lwp ();
820 static char buf[sizeof pl.pl_tdname + 1];
821
822 /* Note that ptrace_lwpinfo returns the process command in pl_tdname
823 if a name has not been set explicitly. Return a NULL name in
824 that case. */
825 if (!fbsd_fetch_kinfo_proc (pid, &kp))
826 return nullptr;
827 if (ptrace (PT_LWPINFO, lwp, (caddr_t) &pl, sizeof pl) == -1)
828 return nullptr;
829 if (strcmp (kp.ki_comm, pl.pl_tdname) == 0)
830 return NULL;
831 xsnprintf (buf, sizeof buf, "%s", pl.pl_tdname);
832 return buf;
833 }
834 #endif
835
836 /* Enable additional event reporting on new processes.
837
838 To catch fork events, PTRACE_FORK is set on every traced process
839 to enable stops on returns from fork or vfork. Note that both the
840 parent and child will always stop, even if system call stops are
841 not enabled.
842
843 To catch LWP events, PTRACE_LWP is set on every traced process.
844 This enables stops on the birth of new LWPs (excluding the "main" LWP)
845 and the death of LWPs (excluding the last LWP in a process). Note
846 that unlike fork events, the LWP that creates a new LWP does not
847 report an event. */
848
849 static void
850 fbsd_enable_proc_events (pid_t pid)
851 {
852 #ifdef PT_GET_EVENT_MASK
853 int events;
854
855 if (ptrace (PT_GET_EVENT_MASK, pid, (PTRACE_TYPE_ARG3)&events,
856 sizeof (events)) == -1)
857 perror_with_name (("ptrace (PT_GET_EVENT_MASK)"));
858 events |= PTRACE_FORK | PTRACE_LWP;
859 #ifdef PTRACE_VFORK
860 events |= PTRACE_VFORK;
861 #endif
862 if (ptrace (PT_SET_EVENT_MASK, pid, (PTRACE_TYPE_ARG3)&events,
863 sizeof (events)) == -1)
864 perror_with_name (("ptrace (PT_SET_EVENT_MASK)"));
865 #else
866 #ifdef TDP_RFPPWAIT
867 if (ptrace (PT_FOLLOW_FORK, pid, (PTRACE_TYPE_ARG3)0, 1) == -1)
868 perror_with_name (("ptrace (PT_FOLLOW_FORK)"));
869 #endif
870 #ifdef PT_LWP_EVENTS
871 if (ptrace (PT_LWP_EVENTS, pid, (PTRACE_TYPE_ARG3)0, 1) == -1)
872 perror_with_name (("ptrace (PT_LWP_EVENTS)"));
873 #endif
874 #endif
875 }
876
877 /* Add threads for any new LWPs in a process.
878
879 When LWP events are used, this function is only used to detect existing
880 threads when attaching to a process. On older systems, this function is
881 called to discover new threads each time the thread list is updated. */
882
883 static void
884 fbsd_add_threads (fbsd_nat_target *target, pid_t pid)
885 {
886 int i, nlwps;
887
888 gdb_assert (!in_thread_list (target, ptid_t (pid)));
889 nlwps = ptrace (PT_GETNUMLWPS, pid, NULL, 0);
890 if (nlwps == -1)
891 perror_with_name (("ptrace (PT_GETNUMLWPS)"));
892
893 gdb::unique_xmalloc_ptr<lwpid_t[]> lwps (XCNEWVEC (lwpid_t, nlwps));
894
895 nlwps = ptrace (PT_GETLWPLIST, pid, (caddr_t) lwps.get (), nlwps);
896 if (nlwps == -1)
897 perror_with_name (("ptrace (PT_GETLWPLIST)"));
898
899 for (i = 0; i < nlwps; i++)
900 {
901 ptid_t ptid = ptid_t (pid, lwps[i]);
902
903 if (!in_thread_list (target, ptid))
904 {
905 #ifdef PT_LWP_EVENTS
906 struct ptrace_lwpinfo pl;
907
908 /* Don't add exited threads. Note that this is only called
909 when attaching to a multi-threaded process. */
910 if (ptrace (PT_LWPINFO, lwps[i], (caddr_t) &pl, sizeof pl) == -1)
911 perror_with_name (("ptrace (PT_LWPINFO)"));
912 if (pl.pl_flags & PL_FLAG_EXITED)
913 continue;
914 #endif
915 fbsd_lwp_debug_printf ("adding thread for LWP %u", lwps[i]);
916 add_thread (target, ptid);
917 }
918 }
919 }
920
921 /* Implement the "update_thread_list" target_ops method. */
922
923 void
924 fbsd_nat_target::update_thread_list ()
925 {
926 #ifdef PT_LWP_EVENTS
927 /* With support for thread events, threads are added/deleted from the
928 list as events are reported, so just try deleting exited threads. */
929 delete_exited_threads ();
930 #else
931 prune_threads ();
932
933 fbsd_add_threads (this, inferior_ptid.pid ());
934 #endif
935 }
936
937 /* Async mode support. */
938
939 /* Implement the "can_async_p" target method. */
940
941 bool
942 fbsd_nat_target::can_async_p ()
943 {
944 /* This flag should be checked in the common target.c code. */
945 gdb_assert (target_async_permitted);
946
947 /* Otherwise, this target is always able to support async mode. */
948 return true;
949 }
950
951 /* SIGCHLD handler notifies the event-loop in async mode. */
952
953 static void
954 sigchld_handler (int signo)
955 {
956 int old_errno = errno;
957
958 fbsd_nat_target::async_file_mark_if_open ();
959
960 errno = old_errno;
961 }
962
963 /* Callback registered with the target events file descriptor. */
964
965 static void
966 handle_target_event (int error, gdb_client_data client_data)
967 {
968 inferior_event_handler (INF_REG_EVENT);
969 }
970
971 /* Implement the "async" target method. */
972
973 void
974 fbsd_nat_target::async (int enable)
975 {
976 if ((enable != 0) == is_async_p ())
977 return;
978
979 /* Block SIGCHILD while we create/destroy the pipe, as the handler
980 writes to it. */
981 gdb::block_signals blocker;
982
983 if (enable)
984 {
985 if (!async_file_open ())
986 internal_error (__FILE__, __LINE__, "failed to create event pipe.");
987
988 add_file_handler (async_wait_fd (), handle_target_event, NULL, "fbsd-nat");
989
990 /* Trigger a poll in case there are pending events to
991 handle. */
992 async_file_mark ();
993 }
994 else
995 {
996 delete_file_handler (async_wait_fd ());
997 async_file_close ();
998 }
999 }
1000
1001 #ifdef TDP_RFPPWAIT
1002 /*
1003 To catch fork events, PT_FOLLOW_FORK is set on every traced process
1004 to enable stops on returns from fork or vfork. Note that both the
1005 parent and child will always stop, even if system call stops are not
1006 enabled.
1007
1008 After a fork, both the child and parent process will stop and report
1009 an event. However, there is no guarantee of order. If the parent
1010 reports its stop first, then fbsd_wait explicitly waits for the new
1011 child before returning. If the child reports its stop first, then
1012 the event is saved on a list and ignored until the parent's stop is
1013 reported. fbsd_wait could have been changed to fetch the parent PID
1014 of the new child and used that to wait for the parent explicitly.
1015 However, if two threads in the parent fork at the same time, then
1016 the wait on the parent might return the "wrong" fork event.
1017
1018 The initial version of PT_FOLLOW_FORK did not set PL_FLAG_CHILD for
1019 the new child process. This flag could be inferred by treating any
1020 events for an unknown pid as a new child.
1021
1022 In addition, the initial version of PT_FOLLOW_FORK did not report a
1023 stop event for the parent process of a vfork until after the child
1024 process executed a new program or exited. The kernel was changed to
1025 defer the wait for exit or exec of the child until after posting the
1026 stop event shortly after the change to introduce PL_FLAG_CHILD.
1027 This could be worked around by reporting a vfork event when the
1028 child event posted and ignoring the subsequent event from the
1029 parent.
1030
1031 This implementation requires both of these fixes for simplicity's
1032 sake. FreeBSD versions newer than 9.1 contain both fixes.
1033 */
1034
1035 static std::list<ptid_t> fbsd_pending_children;
1036
1037 /* Record a new child process event that is reported before the
1038 corresponding fork event in the parent. */
1039
1040 static void
1041 fbsd_remember_child (ptid_t pid)
1042 {
1043 fbsd_pending_children.push_front (pid);
1044 }
1045
1046 /* Check for a previously-recorded new child process event for PID.
1047 If one is found, remove it from the list and return the PTID. */
1048
1049 static ptid_t
1050 fbsd_is_child_pending (pid_t pid)
1051 {
1052 for (auto it = fbsd_pending_children.begin ();
1053 it != fbsd_pending_children.end (); it++)
1054 if (it->pid () == pid)
1055 {
1056 ptid_t ptid = *it;
1057 fbsd_pending_children.erase (it);
1058 return ptid;
1059 }
1060 return null_ptid;
1061 }
1062
1063 #ifndef PTRACE_VFORK
1064 static std::forward_list<ptid_t> fbsd_pending_vfork_done;
1065
1066 /* Record a pending vfork done event. */
1067
1068 static void
1069 fbsd_add_vfork_done (ptid_t pid)
1070 {
1071 fbsd_pending_vfork_done.push_front (pid);
1072
1073 /* If we're in async mode, we need to tell the event loop there's
1074 something here to process. */
1075 if (target_is_async_p ())
1076 async_file_mark ();
1077 }
1078
1079 /* Check for a pending vfork done event for a specific PID. */
1080
1081 static int
1082 fbsd_is_vfork_done_pending (pid_t pid)
1083 {
1084 for (auto it = fbsd_pending_vfork_done.begin ();
1085 it != fbsd_pending_vfork_done.end (); it++)
1086 if (it->pid () == pid)
1087 return 1;
1088 return 0;
1089 }
1090
1091 /* Check for a pending vfork done event. If one is found, remove it
1092 from the list and return the PTID. */
1093
1094 static ptid_t
1095 fbsd_next_vfork_done (void)
1096 {
1097 if (!fbsd_pending_vfork_done.empty ())
1098 {
1099 ptid_t ptid = fbsd_pending_vfork_done.front ();
1100 fbsd_pending_vfork_done.pop_front ();
1101 return ptid;
1102 }
1103 return null_ptid;
1104 }
1105 #endif
1106 #endif
1107
1108 /* Implement the "resume" target_ops method. */
1109
1110 void
1111 fbsd_nat_target::resume (ptid_t ptid, int step, enum gdb_signal signo)
1112 {
1113 #if defined(TDP_RFPPWAIT) && !defined(PTRACE_VFORK)
1114 pid_t pid;
1115
1116 /* Don't PT_CONTINUE a process which has a pending vfork done event. */
1117 if (minus_one_ptid == ptid)
1118 pid = inferior_ptid.pid ();
1119 else
1120 pid = ptid.pid ();
1121 if (fbsd_is_vfork_done_pending (pid))
1122 return;
1123 #endif
1124
1125 fbsd_nat_debug_printf ("[%s], step %d, signo %d (%s)",
1126 target_pid_to_str (ptid).c_str (), step, signo,
1127 gdb_signal_to_name (signo));
1128 if (ptid.lwp_p ())
1129 {
1130 /* If ptid is a specific LWP, suspend all other LWPs in the process. */
1131 inferior *inf = find_inferior_ptid (this, ptid);
1132
1133 for (thread_info *tp : inf->non_exited_threads ())
1134 {
1135 int request;
1136
1137 if (tp->ptid.lwp () == ptid.lwp ())
1138 request = PT_RESUME;
1139 else
1140 request = PT_SUSPEND;
1141
1142 if (ptrace (request, tp->ptid.lwp (), NULL, 0) == -1)
1143 perror_with_name (request == PT_RESUME ?
1144 ("ptrace (PT_RESUME)") :
1145 ("ptrace (PT_SUSPEND)"));
1146 if (request == PT_RESUME)
1147 low_prepare_to_resume (tp);
1148 }
1149 }
1150 else
1151 {
1152 /* If ptid is a wildcard, resume all matching threads (they won't run
1153 until the process is continued however). */
1154 for (thread_info *tp : all_non_exited_threads (this, ptid))
1155 {
1156 if (ptrace (PT_RESUME, tp->ptid.lwp (), NULL, 0) == -1)
1157 perror_with_name (("ptrace (PT_RESUME)"));
1158 low_prepare_to_resume (tp);
1159 }
1160 ptid = inferior_ptid;
1161 }
1162
1163 #if __FreeBSD_version < 1200052
1164 /* When multiple threads within a process wish to report STOPPED
1165 events from wait(), the kernel picks one thread event as the
1166 thread event to report. The chosen thread event is retrieved via
1167 PT_LWPINFO by passing the process ID as the request pid. If
1168 multiple events are pending, then the subsequent wait() after
1169 resuming a process will report another STOPPED event after
1170 resuming the process to handle the next thread event and so on.
1171
1172 A single thread event is cleared as a side effect of resuming the
1173 process with PT_CONTINUE, PT_STEP, etc. In older kernels,
1174 however, the request pid was used to select which thread's event
1175 was cleared rather than always clearing the event that was just
1176 reported. To avoid clearing the event of the wrong LWP, always
1177 pass the process ID instead of an LWP ID to PT_CONTINUE or
1178 PT_SYSCALL.
1179
1180 In the case of stepping, the process ID cannot be used with
1181 PT_STEP since it would step the thread that reported an event
1182 which may not be the thread indicated by PTID. For stepping, use
1183 PT_SETSTEP to enable stepping on the desired thread before
1184 resuming the process via PT_CONTINUE instead of using
1185 PT_STEP. */
1186 if (step)
1187 {
1188 if (ptrace (PT_SETSTEP, get_ptrace_pid (ptid), NULL, 0) == -1)
1189 perror_with_name (("ptrace (PT_SETSTEP)"));
1190 step = 0;
1191 }
1192 ptid = ptid_t (ptid.pid ());
1193 #endif
1194 inf_ptrace_target::resume (ptid, step, signo);
1195 }
1196
1197 #ifdef USE_SIGTRAP_SIGINFO
1198 /* Handle breakpoint and trace traps reported via SIGTRAP. If the
1199 trap was a breakpoint or trace trap that should be reported to the
1200 core, return true. */
1201
1202 static bool
1203 fbsd_handle_debug_trap (fbsd_nat_target *target, ptid_t ptid,
1204 const struct ptrace_lwpinfo &pl)
1205 {
1206
1207 /* Ignore traps without valid siginfo or for signals other than
1208 SIGTRAP.
1209
1210 FreeBSD kernels prior to r341800 can return stale siginfo for at
1211 least some events, but those events can be identified by
1212 additional flags set in pl_flags. True breakpoint and
1213 single-step traps should not have other flags set in
1214 pl_flags. */
1215 if (pl.pl_flags != PL_FLAG_SI || pl.pl_siginfo.si_signo != SIGTRAP)
1216 return false;
1217
1218 /* Trace traps are either a single step or a hardware watchpoint or
1219 breakpoint. */
1220 if (pl.pl_siginfo.si_code == TRAP_TRACE)
1221 {
1222 fbsd_nat_debug_printf ("trace trap for LWP %ld", ptid.lwp ());
1223 return true;
1224 }
1225
1226 if (pl.pl_siginfo.si_code == TRAP_BRKPT)
1227 {
1228 /* Fixup PC for the software breakpoint. */
1229 struct regcache *regcache = get_thread_regcache (target, ptid);
1230 struct gdbarch *gdbarch = regcache->arch ();
1231 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
1232
1233 fbsd_nat_debug_printf ("sw breakpoint trap for LWP %ld", ptid.lwp ());
1234 if (decr_pc != 0)
1235 {
1236 CORE_ADDR pc;
1237
1238 pc = regcache_read_pc (regcache);
1239 regcache_write_pc (regcache, pc - decr_pc);
1240 }
1241 return true;
1242 }
1243
1244 return false;
1245 }
1246 #endif
1247
1248 /* Wait for the child specified by PTID to do something. Return the
1249 process ID of the child, or MINUS_ONE_PTID in case of error; store
1250 the status in *OURSTATUS. */
1251
1252 ptid_t
1253 fbsd_nat_target::wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus,
1254 target_wait_flags target_options)
1255 {
1256 ptid_t wptid;
1257
1258 while (1)
1259 {
1260 #ifndef PTRACE_VFORK
1261 wptid = fbsd_next_vfork_done ();
1262 if (wptid != null_ptid)
1263 {
1264 ourstatus->set_vfork_done ();
1265 return wptid;
1266 }
1267 #endif
1268 wptid = inf_ptrace_target::wait (ptid, ourstatus, target_options);
1269 if (ourstatus->kind () == TARGET_WAITKIND_STOPPED)
1270 {
1271 struct ptrace_lwpinfo pl;
1272 pid_t pid;
1273 int status;
1274
1275 pid = wptid.pid ();
1276 if (ptrace (PT_LWPINFO, pid, (caddr_t) &pl, sizeof pl) == -1)
1277 perror_with_name (("ptrace (PT_LWPINFO)"));
1278
1279 wptid = ptid_t (pid, pl.pl_lwpid);
1280
1281 if (debug_fbsd_nat)
1282 {
1283 fbsd_nat_debug_printf ("stop for LWP %u event %d flags %#x",
1284 pl.pl_lwpid, pl.pl_event, pl.pl_flags);
1285 if (pl.pl_flags & PL_FLAG_SI)
1286 fbsd_nat_debug_printf ("si_signo %u si_code %u",
1287 pl.pl_siginfo.si_signo,
1288 pl.pl_siginfo.si_code);
1289 }
1290
1291 #ifdef PT_LWP_EVENTS
1292 if (pl.pl_flags & PL_FLAG_EXITED)
1293 {
1294 /* If GDB attaches to a multi-threaded process, exiting
1295 threads might be skipped during post_attach that
1296 have not yet reported their PL_FLAG_EXITED event.
1297 Ignore EXITED events for an unknown LWP. */
1298 thread_info *thr = find_thread_ptid (this, wptid);
1299 if (thr != nullptr)
1300 {
1301 fbsd_lwp_debug_printf ("deleting thread for LWP %u",
1302 pl.pl_lwpid);
1303 if (print_thread_events)
1304 gdb_printf (_("[%s exited]\n"),
1305 target_pid_to_str (wptid).c_str ());
1306 low_delete_thread (thr);
1307 delete_thread (thr);
1308 }
1309 if (ptrace (PT_CONTINUE, pid, (caddr_t) 1, 0) == -1)
1310 perror_with_name (("ptrace (PT_CONTINUE)"));
1311 continue;
1312 }
1313 #endif
1314
1315 /* Switch to an LWP PTID on the first stop in a new process.
1316 This is done after handling PL_FLAG_EXITED to avoid
1317 switching to an exited LWP. It is done before checking
1318 PL_FLAG_BORN in case the first stop reported after
1319 attaching to an existing process is a PL_FLAG_BORN
1320 event. */
1321 if (in_thread_list (this, ptid_t (pid)))
1322 {
1323 fbsd_lwp_debug_printf ("using LWP %u for first thread",
1324 pl.pl_lwpid);
1325 thread_change_ptid (this, ptid_t (pid), wptid);
1326 }
1327
1328 #ifdef PT_LWP_EVENTS
1329 if (pl.pl_flags & PL_FLAG_BORN)
1330 {
1331 /* If GDB attaches to a multi-threaded process, newborn
1332 threads might be added by fbsd_add_threads that have
1333 not yet reported their PL_FLAG_BORN event. Ignore
1334 BORN events for an already-known LWP. */
1335 if (!in_thread_list (this, wptid))
1336 {
1337 fbsd_lwp_debug_printf ("adding thread for LWP %u",
1338 pl.pl_lwpid);
1339 add_thread (this, wptid);
1340 }
1341 ourstatus->set_spurious ();
1342 return wptid;
1343 }
1344 #endif
1345
1346 #ifdef TDP_RFPPWAIT
1347 if (pl.pl_flags & PL_FLAG_FORKED)
1348 {
1349 #ifndef PTRACE_VFORK
1350 struct kinfo_proc kp;
1351 #endif
1352 bool is_vfork = false;
1353 ptid_t child_ptid;
1354 pid_t child;
1355
1356 child = pl.pl_child_pid;
1357 #ifdef PTRACE_VFORK
1358 if (pl.pl_flags & PL_FLAG_VFORKED)
1359 is_vfork = true;
1360 #endif
1361
1362 /* Make sure the other end of the fork is stopped too. */
1363 child_ptid = fbsd_is_child_pending (child);
1364 if (child_ptid == null_ptid)
1365 {
1366 pid = waitpid (child, &status, 0);
1367 if (pid == -1)
1368 perror_with_name (("waitpid"));
1369
1370 gdb_assert (pid == child);
1371
1372 if (ptrace (PT_LWPINFO, child, (caddr_t)&pl, sizeof pl) == -1)
1373 perror_with_name (("ptrace (PT_LWPINFO)"));
1374
1375 gdb_assert (pl.pl_flags & PL_FLAG_CHILD);
1376 child_ptid = ptid_t (child, pl.pl_lwpid);
1377 }
1378
1379 /* Enable additional events on the child process. */
1380 fbsd_enable_proc_events (child_ptid.pid ());
1381
1382 #ifndef PTRACE_VFORK
1383 /* For vfork, the child process will have the P_PPWAIT
1384 flag set. */
1385 if (fbsd_fetch_kinfo_proc (child, &kp))
1386 {
1387 if (kp.ki_flag & P_PPWAIT)
1388 is_vfork = true;
1389 }
1390 else
1391 warning (_("Failed to fetch process information"));
1392 #endif
1393
1394 low_new_fork (wptid, child);
1395
1396 if (is_vfork)
1397 ourstatus->set_vforked (child_ptid);
1398 else
1399 ourstatus->set_forked (child_ptid);
1400
1401 return wptid;
1402 }
1403
1404 if (pl.pl_flags & PL_FLAG_CHILD)
1405 {
1406 /* Remember that this child forked, but do not report it
1407 until the parent reports its corresponding fork
1408 event. */
1409 fbsd_remember_child (wptid);
1410 continue;
1411 }
1412
1413 #ifdef PTRACE_VFORK
1414 if (pl.pl_flags & PL_FLAG_VFORK_DONE)
1415 {
1416 ourstatus->set_vfork_done ();
1417 return wptid;
1418 }
1419 #endif
1420 #endif
1421
1422 if (pl.pl_flags & PL_FLAG_EXEC)
1423 {
1424 ourstatus->set_execd
1425 (make_unique_xstrdup (pid_to_exec_file (pid)));
1426 return wptid;
1427 }
1428
1429 #ifdef USE_SIGTRAP_SIGINFO
1430 if (fbsd_handle_debug_trap (this, wptid, pl))
1431 return wptid;
1432 #endif
1433
1434 /* Note that PL_FLAG_SCE is set for any event reported while
1435 a thread is executing a system call in the kernel. In
1436 particular, signals that interrupt a sleep in a system
1437 call will report this flag as part of their event. Stops
1438 explicitly for system call entry and exit always use
1439 SIGTRAP, so only treat SIGTRAP events as system call
1440 entry/exit events. */
1441 if (pl.pl_flags & (PL_FLAG_SCE | PL_FLAG_SCX)
1442 && ourstatus->sig () == GDB_SIGNAL_TRAP)
1443 {
1444 #ifdef HAVE_STRUCT_PTRACE_LWPINFO_PL_SYSCALL_CODE
1445 if (catch_syscall_enabled ())
1446 {
1447 if (catching_syscall_number (pl.pl_syscall_code))
1448 {
1449 if (pl.pl_flags & PL_FLAG_SCE)
1450 ourstatus->set_syscall_entry (pl.pl_syscall_code);
1451 else
1452 ourstatus->set_syscall_return (pl.pl_syscall_code);
1453
1454 return wptid;
1455 }
1456 }
1457 #endif
1458 /* If the core isn't interested in this event, just
1459 continue the process explicitly and wait for another
1460 event. Note that PT_SYSCALL is "sticky" on FreeBSD
1461 and once system call stops are enabled on a process
1462 it stops for all system call entries and exits. */
1463 if (ptrace (PT_CONTINUE, pid, (caddr_t) 1, 0) == -1)
1464 perror_with_name (("ptrace (PT_CONTINUE)"));
1465 continue;
1466 }
1467 }
1468 return wptid;
1469 }
1470 }
1471
1472 ptid_t
1473 fbsd_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus,
1474 target_wait_flags target_options)
1475 {
1476 ptid_t wptid;
1477
1478 fbsd_nat_debug_printf ("[%s], [%s]", target_pid_to_str (ptid).c_str (),
1479 target_options_to_string (target_options).c_str ());
1480
1481 /* Ensure any subsequent events trigger a new event in the loop. */
1482 if (is_async_p ())
1483 async_file_flush ();
1484
1485 wptid = wait_1 (ptid, ourstatus, target_options);
1486
1487 /* If we are in async mode and found an event, there may still be
1488 another event pending. Trigger the event pipe so that the
1489 event loop keeps polling until no event is returned. */
1490 if (is_async_p ()
1491 && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
1492 && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
1493 || ptid != minus_one_ptid))
1494 async_file_mark ();
1495
1496 fbsd_nat_debug_printf ("returning [%s], [%s]",
1497 target_pid_to_str (wptid).c_str (),
1498 ourstatus->to_string ().c_str ());
1499 return wptid;
1500 }
1501
1502 #ifdef USE_SIGTRAP_SIGINFO
1503 /* Implement the "stopped_by_sw_breakpoint" target_ops method. */
1504
1505 bool
1506 fbsd_nat_target::stopped_by_sw_breakpoint ()
1507 {
1508 struct ptrace_lwpinfo pl;
1509
1510 if (ptrace (PT_LWPINFO, get_ptrace_pid (inferior_ptid), (caddr_t) &pl,
1511 sizeof pl) == -1)
1512 return false;
1513
1514 return (pl.pl_flags == PL_FLAG_SI
1515 && pl.pl_siginfo.si_signo == SIGTRAP
1516 && pl.pl_siginfo.si_code == TRAP_BRKPT);
1517 }
1518
1519 /* Implement the "supports_stopped_by_sw_breakpoint" target_ops
1520 method. */
1521
1522 bool
1523 fbsd_nat_target::supports_stopped_by_sw_breakpoint ()
1524 {
1525 return true;
1526 }
1527 #endif
1528
1529 #ifdef PROC_ASLR_CTL
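/* RAII helper for create_inferior: if requested, force-disable address
   space randomization for GDB's own process (the setting is inherited
   by the new child) and restore the previous setting on destruction. */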
1530 class maybe_disable_address_space_randomization
1531 {
1532 public:
1533 explicit maybe_disable_address_space_randomization (bool disable_randomization)
1534 {
1535 if (disable_randomization)
1536 {
1537 if (procctl (P_PID, getpid (), PROC_ASLR_STATUS, &m_aslr_ctl) == -1)
1538 {
1539 warning (_("Failed to fetch current address space randomization "
1540 "status: %s"), safe_strerror (errno));
1541 return;
1542 }
1543
1544 m_aslr_ctl &= ~PROC_ASLR_ACTIVE;
1545 if (m_aslr_ctl == PROC_ASLR_FORCE_DISABLE)
1546 return;
1547
1548 int ctl = PROC_ASLR_FORCE_DISABLE;
1549 if (procctl (P_PID, getpid (), PROC_ASLR_CTL, &ctl) == -1)
1550 {
1551 warning (_("Error disabling address space randomization: %s"),
1552 safe_strerror (errno));
1553 return;
1554 }
1555
1556 m_aslr_ctl_set = true;
1557 }
1558 }
1559
1560 ~maybe_disable_address_space_randomization ()
1561 {
1562 if (m_aslr_ctl_set)
1563 {
1564 if (procctl (P_PID, getpid (), PROC_ASLR_CTL, &m_aslr_ctl) == -1)
1565 warning (_("Error restoring address space randomization: %s"),
1566 safe_strerror (errno));
1567 }
1568 }
1569
1570 DISABLE_COPY_AND_ASSIGN (maybe_disable_address_space_randomization);
1571
1572 private:
1573 bool m_aslr_ctl_set = false;
1574 int m_aslr_ctl = 0;
1575 };
1576 #endif
1577
1578 void
1579 fbsd_nat_target::create_inferior (const char *exec_file,
1580 const std::string &allargs,
1581 char **env, int from_tty)
1582 {
1583 #ifdef PROC_ASLR_CTL
1584 maybe_disable_address_space_randomization restore_aslr_ctl
1585 (disable_randomization);
1586 #endif
1587
1588 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
1589 }
1590
1591 #ifdef TDP_RFPPWAIT
1592 /* Target hook for follow_fork. On entry and at return inferior_ptid is
1593 the ptid of the followed inferior. */
1594
1595 void
1596 fbsd_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
1597 target_waitkind fork_kind, bool follow_child,
1598 bool detach_fork)
1599 {
1600 inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
1601 follow_child, detach_fork);
1602
1603 if (!follow_child && detach_fork)
1604 {
1605 pid_t child_pid = child_ptid.pid ();
1606
1607 /* Breakpoints have already been detached from the child by
1608 infrun.c. */
1609
1610 if (ptrace (PT_DETACH, child_pid, (PTRACE_TYPE_ARG3)1, 0) == -1)
1611 perror_with_name (("ptrace (PT_DETACH)"));
1612
1613 #ifndef PTRACE_VFORK
1614 if (fork_kind == TARGET_WAITKIND_VFORKED)
1615 {
1616 /* We can't insert breakpoints until the child process has
1617 finished with the shared memory region. The parent
1618 process doesn't wait for the child process to exit or
1619 exec until after it has been resumed from the ptrace stop
1620 to report the fork. Once it has been resumed it doesn't
1621 stop again before returning to userland, so there is no
1622 reliable way to wait on the parent.
1623
1624 We can't stay attached to the child to wait for an exec
1625 or exit because it may invoke ptrace(PT_TRACE_ME)
1626 (e.g. if the parent process is a debugger forking a new
1627 child process).
1628
1629 In the end, the best we can do is to make sure it runs
1630 for a little while. Hopefully it will be out of range of
1631 any breakpoints we reinsert. Usually this is only the
1632 single-step breakpoint at vfork's return point. */
1633
1634 usleep (10000);
1635
1636 /* Schedule a fake VFORK_DONE event to report on the next
1637 wait. */
1638 fbsd_add_vfork_done (inferior_ptid);
1639 }
1640 #endif
1641 }
1642 }
1643
1644 int
1645 fbsd_nat_target::insert_fork_catchpoint (int pid)
1646 {
1647 return 0;
1648 }
1649
1650 int
1651 fbsd_nat_target::remove_fork_catchpoint (int pid)
1652 {
1653 return 0;
1654 }
1655
1656 int
1657 fbsd_nat_target::insert_vfork_catchpoint (int pid)
1658 {
1659 return 0;
1660 }
1661
1662 int
1663 fbsd_nat_target::remove_vfork_catchpoint (int pid)
1664 {
1665 return 0;
1666 }
1667 #endif
1668
1669 /* Implement the virtual inf_ptrace_target::post_startup_inferior method. */
1670
1671 void
1672 fbsd_nat_target::post_startup_inferior (ptid_t pid)
1673 {
1674 fbsd_enable_proc_events (pid.pid ());
1675 }
1676
1677 /* Implement the "post_attach" target_ops method. */
1678
1679 void
1680 fbsd_nat_target::post_attach (int pid)
1681 {
1682 fbsd_enable_proc_events (pid);
1683 fbsd_add_threads (this, pid);
1684 }
1685
1686 /* Traced processes always stop after exec. */
1687
1688 int
1689 fbsd_nat_target::insert_exec_catchpoint (int pid)
1690 {
1691 return 0;
1692 }
1693
1694 int
1695 fbsd_nat_target::remove_exec_catchpoint (int pid)
1696 {
1697 return 0;
1698 }
1699
1700 #ifdef HAVE_STRUCT_PTRACE_LWPINFO_PL_SYSCALL_CODE
1701 int
1702 fbsd_nat_target::set_syscall_catchpoint (int pid, bool needed,
1703 int any_count,
1704 gdb::array_view<const int> syscall_counts)
1705 {
1706
1707 /* Ignore the arguments. inf-ptrace.c will use PT_SYSCALL which
1708 will catch all system call entries and exits. The system calls
1709 are filtered by GDB rather than the kernel. */
1710 return 0;
1711 }
1712 #endif
1713
1714 bool
1715 fbsd_nat_target::supports_multi_process ()
1716 {
1717 return true;
1718 }
1719
1720 bool
1721 fbsd_nat_target::supports_disable_randomization ()
1722 {
1723 #ifdef PROC_ASLR_CTL
1724 return true;
1725 #else
1726 return false;
1727 #endif
1728 }
1729
1730 /* See fbsd-nat.h. */
1731
1732 bool
1733 fbsd_nat_target::fetch_register_set (struct regcache *regcache, int regnum,
1734 int fetch_op, const struct regset *regset,
1735 void *regs, size_t size)
1736 {
1737 const struct regcache_map_entry *map
1738 = (const struct regcache_map_entry *) regset->regmap;
1739 pid_t pid = get_ptrace_pid (regcache->ptid ());
1740
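/* Only issue the ptrace request if this register set supplies REGNUM
   (or if all registers are wanted); otherwise let the caller fall
   back to another register set. */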
1741 if (regnum == -1 || regcache_map_supplies (map, regnum, regcache->arch(),
1742 size))
1743 {
1744 if (ptrace (fetch_op, pid, (PTRACE_TYPE_ARG3) regs, 0) == -1)
1745 perror_with_name (_("Couldn't get registers"));
1746
1747 regcache->supply_regset (regset, regnum, regs, size);
1748 return true;
1749 }
1750 return false;
1751 }
1752
1753 /* See fbsd-nat.h. */
1754
1755 bool
1756 fbsd_nat_target::store_register_set (struct regcache *regcache, int regnum,
1757 int fetch_op, int store_op,
1758 const struct regset *regset, void *regs,
1759 size_t size)
1760 {
1761 const struct regcache_map_entry *map
1762 = (const struct regcache_map_entry *) regset->regmap;
1763 pid_t pid = get_ptrace_pid (regcache->ptid ());
1764
1765 if (regnum == -1 || regcache_map_supplies (map, regnum, regcache->arch(),
1766 size))
1767 {
1768 if (ptrace (fetch_op, pid, (PTRACE_TYPE_ARG3) regs, 0) == -1)
1769 perror_with_name (_("Couldn't get registers"));
1770
1771 regcache->collect_regset (regset, regnum, regs, size);
1772
1773 if (ptrace (store_op, pid, (PTRACE_TYPE_ARG3) regs, 0) == -1)
1774 perror_with_name (_("Couldn't write registers"));
1775 return true;
1776 }
1777 return false;
1778 }
1779
1780 /* See fbsd-nat.h. */
1781
1782 bool
1783 fbsd_nat_target::have_regset (ptid_t ptid, int note)
1784 {
1785 pid_t pid = get_ptrace_pid (ptid);
1786 struct iovec iov;
1787
1788 iov.iov_base = nullptr;
1789 iov.iov_len = 0;
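/* Probe with a null buffer: the request fails if NOTE is not
   supported and otherwise reports the register set size in
   iov_len. */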
1790 if (ptrace (PT_GETREGSET, pid, (PTRACE_TYPE_ARG3) &iov, note) == -1)
1791 return false;
1792 return iov.iov_len != 0;
1793 }
1794
1795 /* See fbsd-nat.h. */
1796
1797 bool
1798 fbsd_nat_target::fetch_regset (struct regcache *regcache, int regnum, int note,
1799 const struct regset *regset, void *regs,
1800 size_t size)
1801 {
1802 const struct regcache_map_entry *map
1803 = (const struct regcache_map_entry *) regset->regmap;
1804 pid_t pid = get_ptrace_pid (regcache->ptid ());
1805
1806 if (regnum == -1 || regcache_map_supplies (map, regnum, regcache->arch(),
1807 size))
1808 {
1809 struct iovec iov;
1810
1811 iov.iov_base = regs;
1812 iov.iov_len = size;
1813 if (ptrace (PT_GETREGSET, pid, (PTRACE_TYPE_ARG3) &iov, note) == -1)
1814 perror_with_name (_("Couldn't get registers"));
1815
1816 regcache->supply_regset (regset, regnum, regs, size);
1817 return true;
1818 }
1819 return false;
1820 }
1821
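/* See fbsd-nat.h. */
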
1822 bool
1823 fbsd_nat_target::store_regset (struct regcache *regcache, int regnum, int note,
1824 const struct regset *regset, void *regs,
1825 size_t size)
1826 {
1827 const struct regcache_map_entry *map
1828 = (const struct regcache_map_entry *) regset->regmap;
1829 pid_t pid = get_ptrace_pid (regcache->ptid ());
1830
1831 if (regnum == -1 || regcache_map_supplies (map, regnum, regcache->arch(),
1832 size))
1833 {
1834 struct iovec iov;
1835
1836 iov.iov_base = regs;
1837 iov.iov_len = size;
1838 if (ptrace (PT_GETREGSET, pid, (PTRACE_TYPE_ARG3) &iov, note) == -1)
1839 perror_with_name (_("Couldn't get registers"));
1840
1841 regcache->collect_regset (regset, regnum, regs, size);
1842
1843 if (ptrace (PT_SETREGSET, pid, (PTRACE_TYPE_ARG3) &iov, note) == -1)
1844 perror_with_name (_("Couldn't write registers"));
1845 return true;
1846 }
1847 return false;
1848 }
1849
1850 /* See fbsd-nat.h. */
1851
1852 bool
1853 fbsd_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
1854 {
1855 struct ptrace_lwpinfo pl;
1856 pid_t pid = get_ptrace_pid (ptid);
1857
1858 if (ptrace (PT_LWPINFO, pid, (caddr_t) &pl, sizeof pl) == -1)
1859 return false;
1860 if (!(pl.pl_flags & PL_FLAG_SI))
1861 return false;
1862 *siginfo = pl.pl_siginfo;
1863 return true;
1864 }
1865
1866 void _initialize_fbsd_nat ();
1867 void
1868 _initialize_fbsd_nat ()
1869 {
1870 add_setshow_boolean_cmd ("fbsd-lwp", class_maintenance,
1871 &debug_fbsd_lwp, _("\
1872 Set debugging of FreeBSD lwp module."), _("\
1873 Show debugging of FreeBSD lwp module."), _("\
1874 Enables printf debugging output."),
1875 NULL,
1876 &show_fbsd_lwp_debug,
1877 &setdebuglist, &showdebuglist);
1878 add_setshow_boolean_cmd ("fbsd-nat", class_maintenance,
1879 &debug_fbsd_nat, _("\
1880 Set debugging of FreeBSD native target."), _("\
1881 Show debugging of FreeBSD native target."), _("\
1882 Enables printf debugging output."),
1883 NULL,
1884 &show_fbsd_nat_debug,
1885 &setdebuglist, &showdebuglist);
1886
1887 /* Install a SIGCHLD handler. */
1888 signal (SIGCHLD, sigchld_handler);
1889 }