/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2022 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef GDBSERVER_LINUX_LOW_H
#define GDBSERVER_LINUX_LOW_H

#include "nat/linux-nat.h"
#include "nat/gdb_thread_db.h"
#include <signal.h>

#include "gdbthread.h"
#include "gdb_proc_service.h"

/* Included for ptrace type definitions.  */
#include "nat/linux-ptrace.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "tracepoint.h"

#include <list>

#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
  OPTIONAL_REGS, /* Do not error if the regset cannot be accessed.  */
};

/* The arch's regsets array initializer must be terminated with a NULL
   regset.  */
#define NULL_REGSET \
  { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
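
/* Illustrative sketch only (hypothetical names, not part of this
   interface): an architecture backend typically defines its regsets
   array with entries of the form below and terminates it with
   NULL_REGSET, along the lines of

     static void my_fill_gregset (struct regcache *regcache, void *buf);
     static void my_store_gregset (struct regcache *regcache,
                                   const void *buf);

     static struct regset_info my_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, my_fill_gregset, my_store_gregset },
       NULL_REGSET
     };

   The my_* names and the use of elf_gregset_t are assumptions for the
   sketch; real backends (e.g. linux-x86-low.cc) choose their own
   requests, sizes and fill/store callbacks.  */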

/* Aggregation of all the supported regsets of a given
   architecture/mode.  */

struct regsets_info
{
  /* The regsets array.  */
  struct regset_info *regsets;

  /* The number of regsets in the REGSETS array.  */
  int num_regsets;

  /* If we get EIO on a regset, do not try it again.  Note the set of
     supported regsets may depend on processor mode on biarch
     machines.  This is a (lazily allocated) array holding one boolean
     byte (0/1) per regset, with each element corresponding to the
     regset in the REGSETS array above at the same offset.  */
  char *disabled_regsets;
};

#endif

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};

/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
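
/* Illustrative sketch only, with hypothetical names: an architecture
   backend usually keeps static instances of these structures and
   returns them from its get_regs_info override, roughly

     static int my_regmap[] = { 0, 4, 8, 12 };
     static struct usrregs_info my_usrregs = { 4, my_regmap };
     static struct regs_info my_regs_info =
       { NULL, &my_usrregs, &my_regsets_info };

   where my_regmap maps each GDB register number to its PTRACE_PEEKUSER
   offset in `struct user'.  The my_* names (and my_regsets_info) are
   assumptions for the sketch; real backends such as linux-x86-low.cc
   define their own.  */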

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;

  /* &_r_debug.  0 if not yet determined.  -1 if no PT_DYNAMIC in Phdrs.  */
  CORE_ADDR r_debug;
};

struct lwp_info;

/* Target ops definitions for a Linux target.  */

class linux_process_target : public process_stratum_target
{
public:

  int create_inferior (const char *program,
		       const std::vector<char *> &program_args) override;

  void post_create_inferior () override;

  int attach (unsigned long pid) override;

  int kill (process_info *proc) override;

  int detach (process_info *proc) override;

  void mourn (process_info *proc) override;

  void join (int pid) override;

  bool thread_alive (ptid_t pid) override;

  void resume (thread_resume *resume_info, size_t n) override;

  ptid_t wait (ptid_t ptid, target_waitstatus *status,
	       target_wait_flags options) override;

  void fetch_registers (regcache *regcache, int regno) override;

  void store_registers (regcache *regcache, int regno) override;

  int prepare_to_access_memory () override;

  void done_accessing_memory () override;

  int read_memory (CORE_ADDR memaddr, unsigned char *myaddr,
		   int len) override;

  int write_memory (CORE_ADDR memaddr, const unsigned char *myaddr,
		    int len) override;

  void look_up_symbols () override;

  void request_interrupt () override;

  bool supports_read_auxv () override;

  int read_auxv (CORE_ADDR offset, unsigned char *myaddr,
		 unsigned int len) override;

  int insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  int remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		    int size, raw_breakpoint *bp) override;

  bool stopped_by_sw_breakpoint () override;

  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;

  bool supports_stopped_by_hw_breakpoint () override;

  bool supports_hardware_single_step () override;

  bool stopped_by_watchpoint () override;

  CORE_ADDR stopped_data_address () override;

  bool supports_read_offsets () override;

  int read_offsets (CORE_ADDR *text, CORE_ADDR *data) override;

  bool supports_get_tls_address () override;

  int get_tls_address (thread_info *thread, CORE_ADDR offset,
		       CORE_ADDR load_module, CORE_ADDR *address) override;

  bool supports_qxfer_osdata () override;

  int qxfer_osdata (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf,
		    CORE_ADDR offset, int len) override;

  bool supports_qxfer_siginfo () override;

  int qxfer_siginfo (const char *annex, unsigned char *readbuf,
		     unsigned const char *writebuf,
		     CORE_ADDR offset, int len) override;

  bool supports_non_stop () override;

  bool async (bool enable) override;

  int start_non_stop (bool enable) override;

  bool supports_multi_process () override;

  bool supports_fork_events () override;

  bool supports_vfork_events () override;

  bool supports_exec_events () override;

  void handle_new_gdb_connection () override;

  int handle_monitor_command (char *mon) override;

  int core_of_thread (ptid_t ptid) override;

#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
  bool supports_read_loadmap () override;

  int read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len) override;
#endif

  CORE_ADDR read_pc (regcache *regcache) override;

  void write_pc (regcache *regcache, CORE_ADDR pc) override;

  bool supports_thread_stopped () override;

  bool thread_stopped (thread_info *thread) override;

  void pause_all (bool freeze) override;

  void unpause_all (bool unfreeze) override;

  void stabilize_threads () override;

  bool supports_disable_randomization () override;

  bool supports_qxfer_libraries_svr4 () override;

  int qxfer_libraries_svr4 (const char *annex,
			    unsigned char *readbuf,
			    unsigned const char *writebuf,
			    CORE_ADDR offset, int len) override;

  bool supports_agent () override;

#ifdef HAVE_LINUX_BTRACE
  btrace_target_info *enable_btrace (thread_info *tp,
				     const btrace_config *conf) override;

  int disable_btrace (btrace_target_info *tinfo) override;

  int read_btrace (btrace_target_info *tinfo, buffer *buf,
		   enum btrace_read_type type) override;

  int read_btrace_conf (const btrace_target_info *tinfo,
			buffer *buf) override;
#endif

  bool supports_range_stepping () override;

  bool supports_pid_to_exec_file () override;

  const char *pid_to_exec_file (int pid) override;

  bool supports_multifs () override;

  int multifs_open (int pid, const char *filename, int flags,
		    mode_t mode) override;

  int multifs_unlink (int pid, const char *filename) override;

  ssize_t multifs_readlink (int pid, const char *filename, char *buf,
			    size_t bufsiz) override;

  const char *thread_name (ptid_t thread) override;

#if USE_THREAD_DB
  bool thread_handle (ptid_t ptid, gdb_byte **handle,
		      int *handle_len) override;
#endif

  thread_info *thread_pending_parent (thread_info *thread) override;
  thread_info *thread_pending_child (thread_info *thread) override;

  bool supports_catch_syscall () override;

  /* Return the information to access registers.  This has public
     visibility because proc-service uses it.  */
  virtual const regs_info *get_regs_info () = 0;

private:

  /* Handle a GNU/Linux extended wait response.  If we see a clone,
     fork, or vfork event, we need to add the new LWP to our list
     (and return 0 so as not to report the trap to higher layers).
     If we see an exec event, we will modify ORIG_EVENT_LWP to point
     to a new LWP representing the new program.  */
  int handle_extended_wait (lwp_info **orig_event_lwp, int wstat);

  /* Do low-level handling of the event, and check if this is an event we want
     to report.  If so, store it as a pending status in the lwp_info structure
     corresponding to LWPID.  */
  void filter_event (int lwpid, int wstat);

  /* Wait for an event from child(ren) WAIT_PTID, and return any that
     match FILTER_PTID (leaving others pending).  The PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
			       int *wstatp, int options);

  /* Wait for an event from child(ren) PTID.  PTIDs can be:
     minus_one_ptid, to specify any child; a pid PTID, specifying all
     lwps of a thread group; or a PTID representing a single lwp.  Store
     the stop status through the status pointer WSTAT.  OPTIONS is
     passed to the waitpid call.  Return 0 if no event was found and
     OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
     were found.  Return the PID of the stopped child otherwise.  */
  int wait_for_event (ptid_t ptid, int *wstatp, int options);

  /* Wait for all children to stop for the SIGSTOPs we just queued.  */
  void wait_for_sigstop ();

  /* Wait for process, returns status.  */
  ptid_t wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
		 target_wait_flags target_options);

  /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
     If SUSPEND, then also increase the suspend count of every LWP,
     except EXCEPT.  */
  void stop_all_lwps (int suspend, lwp_info *except);

  /* Stopped LWPs that the client wanted to be running, that don't have
     pending statuses, are set to run again, except for EXCEPT, if not
     NULL.  This undoes a stop_all_lwps call.  */
  void unstop_all_lwps (int unsuspend, lwp_info *except);

  /* Start a step-over operation on LWP.  When LWP stopped at a
     breakpoint, to make progress, we need to move the breakpoint out
     of the way.  If we let other threads run while we do that, they may
     pass by the breakpoint location and miss hitting it.  To avoid
     that, a step-over momentarily stops all threads while LWP is
     single-stepped by either hardware or software while the breakpoint
     is temporarily uninserted from the inferior.  When the single-step
     finishes, we reinsert the breakpoint, and let all threads that are
     supposed to be running, run again.  */
  void start_step_over (lwp_info *lwp);

  /* If there's a step over in progress, wait until all threads stop
     (that is, until the stepping thread finishes its step), and
     unsuspend all lwps.  The stepping thread ends with its status
     pending, which is processed later when we get back to processing
     events.  */
  void complete_ongoing_step_over ();

  /* Finish a step-over.  Reinsert the breakpoint we had uninserted in
     start_step_over, if still there, and delete any single-step
     breakpoints we've set, on non-hardware single-step targets.
     Return true if the step-over finished.  */
  bool finish_step_over (lwp_info *lwp);

  /* When we finish a step-over, set threads running again.  If there's
     another thread that may need a step-over, now's the time to start
     it.  Eventually, we'll move all threads past their breakpoints.  */
  void proceed_all_lwps ();

  /* The reason we resume in the caller is that we want to be able
     to pass lwp->status_pending as WSTAT, and we need to clear
     status_pending_p before resuming, otherwise resume_one_lwp
     refuses to resume.  */
  bool maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat);

  /* Move THREAD out of the jump pad.  */
  void move_out_of_jump_pad (thread_info *thread);

  /* Call low_arch_setup on THREAD.  */
  void arch_setup_thread (thread_info *thread);

#ifdef HAVE_LINUX_USRREGS
  /* Fetch one register.  */
  void fetch_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);

  /* Store one register.  */
  void store_register (const usrregs_info *usrregs, regcache *regcache,
		       int regno);
#endif

  /* Fetch all registers, or just one, from the child process.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been retrieved by regsets_fetch_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_fetch_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Store our register values back into the inferior.
     If REGNO is -1, do this for all registers, skipping any that are
     assumed to have been saved by regsets_store_inferior_registers,
     unless ALL is non-zero.
     Otherwise, REGNO specifies which register (so we can save time).  */
  void usr_store_inferior_registers (const regs_info *regs_info,
				     regcache *regcache, int regno, int all);

  /* Return the PC as read from the regcache of LWP, without any
     adjustment.  */
  CORE_ADDR get_pc (lwp_info *lwp);

  /* Called when the LWP stopped for a signal/trap.  If it stopped for a
     trap, check what caused it (breakpoint, watchpoint, trace, etc.),
     and save the result in the LWP's stop_reason field.  If it stopped
     for a breakpoint, decrement the PC if necessary on the lwp's
     architecture.  Returns true if we now have the LWP's stop PC.  */
  bool save_stop_reason (lwp_info *lwp);

  /* Resume execution of LWP.  If STEP is nonzero, single-step it.  If
     SIGNAL is nonzero, give it that signal.  */
  void resume_one_lwp_throw (lwp_info *lwp, int step, int signal,
			     siginfo_t *info);

  /* Like resume_one_lwp_throw, but no error is thrown if the LWP
     disappears while we try to resume it.  */
  void resume_one_lwp (lwp_info *lwp, int step, int signal, siginfo_t *info);

  /* This function is called once per thread.  We check the thread's
     last resume request, which will tell us whether to resume, step, or
     leave the thread stopped.  Any signal the client requested to be
     delivered has already been enqueued at this point.

     If any thread that GDB wants running is stopped at an internal
     breakpoint that needs stepping over, we start a step-over operation
     on that particular thread, and leave all others stopped.  */
  void proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* This function is called once per thread.  We check the thread's
     resume request, which will tell us whether to resume, step, or
     leave the thread stopped; and what signal, if any, it should be
     sent.

     For threads which we aren't explicitly told otherwise, we preserve
     the stepping flag; this is used for stepping over gdbserver-placed
     breakpoints.

     If pending_flags was set in any thread, we queue any needed
     signals, since we won't actually resume.  We already have a pending
     event to report, so we don't need to preserve any step requests;
     they should be re-issued if necessary.  */
  void resume_one_thread (thread_info *thread, bool leave_all_stopped);

  /* Return true if this lwp has an interesting status pending.  */
  bool status_pending_p_callback (thread_info *thread, ptid_t ptid);

  /* Resume LWPs that are currently stopped without any pending status
     to report, but are resumed from the core's perspective.  */
  void resume_stopped_resumed_lwps (thread_info *thread);

  /* Unsuspend THREAD, except EXCEPT, and proceed.  */
  void unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except);

  /* Return true if this lwp still has an interesting status pending.
     If not (e.g., it had stopped for a breakpoint that is gone), return
     false.  */
  bool thread_still_has_status_pending (thread_info *thread);

  /* Return true if this lwp is to-be-resumed and has an interesting
     status pending.  */
  bool resume_status_pending (thread_info *thread);

  /* Return true if this lwp that GDB wants running is stopped at an
     internal breakpoint that we need to step over.  It assumes that
     any required STOP_PC adjustment has already been propagated to
     the inferior's regcache.  */
  bool thread_needs_step_over (thread_info *thread);

  /* Single step via hardware or software single step.
     Return 1 if hardware single stepping, 0 if software single stepping
     or unable to single step.  */
  int single_step (lwp_info* lwp);

  /* Return true if THREAD is doing hardware single step.  */
  bool maybe_hw_step (thread_info *thread);

  /* Install breakpoints for software single stepping.  */
  void install_software_single_step_breakpoints (lwp_info *lwp);

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped is to check the register that is used to select
     which address to watch.  Problem is, between setting the watchpoint
     and reading back which data address trapped, the user may change
     the set of watchpoints, and, as a consequence, GDB changes the
     debug registers in the inferior.  To avoid reading back a stale
     stopped-data-address when that happens, we cache in LP the fact
     that a watchpoint trapped, and the corresponding data address, as
     soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
     registers meanwhile, we have the cached data we can rely on.  */
  bool check_stopped_by_watchpoint (lwp_info *child);

  /* Convert a native/host siginfo object, into/from the siginfo in the
     layout of the inferior's architecture.  */
  void siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo,
		      int direction);

  /* Add a process to the common process list, and set its private
     data.  */
  process_info *add_linux_process (int pid, int attached);

  /* Add a new thread.  */
  lwp_info *add_lwp (ptid_t ptid);

  /* Delete a thread.  */
  void delete_lwp (lwp_info *lwp);

public: /* Make this public because it's used from outside.  */
  /* Attach to an inferior process.  Returns 0 on success, ERRNO on
     error.  */
  int attach_lwp (ptid_t ptid);

private: /* Back to private.  */
  /* Detach from LWP.  */
  void detach_one_lwp (lwp_info *lwp);

  /* Detect zombie thread group leaders, and "exit" them.  We can't
     reap their exits until all other threads in the group have
     exited.  */
  void check_zombie_leaders ();

  /* Convenience function that is called when the kernel reports an exit
     event.  This decides whether to report the event to GDB as a
     process exit event, a thread exit event, or to suppress the
     event.  */
  ptid_t filter_exit_event (lwp_info *event_child,
			    target_waitstatus *ourstatus);

  /* Returns true if THREAD is stopped in a jump pad, and we can't
     move it out, because we need to report the stop event to GDB.  For
     example, if the user puts a breakpoint in the jump pad, it's
     because she wants to debug it.  */
  bool stuck_in_jump_pad (thread_info *thread);

  /* Convenience wrapper.  Returns information about LWP's fast tracepoint
     collection status.  */
  fast_tpoint_collect_result linux_fast_tracepoint_collecting
    (lwp_info *lwp, fast_tpoint_collect_status *status);

  /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
     Fill *SYSNO with the syscall nr trapped.  */
  void get_syscall_trapinfo (lwp_info *lwp, int *sysno);

  /* Returns true if GDB is interested in the event_child syscall.
     Only to be called when the stop reason is SYSCALL_SIGTRAP.  */
  bool gdb_catch_this_syscall (lwp_info *event_child);

protected:
  /* The architecture-specific "low" methods are listed below.  */

  /* Architecture-specific setup for the current thread.  */
  virtual void low_arch_setup () = 0;

  /* Return false if we can fetch/store the register, true if we cannot
     fetch/store the register.  */
  virtual bool low_cannot_fetch_register (int regno) = 0;

  virtual bool low_cannot_store_register (int regno) = 0;

  /* Hook to fetch a register in some non-standard way.  Used for
     example by backends that have read-only registers with hardcoded
     values (e.g., IA64's gr0/fr0/fr1).  Returns true if register
     REGNO was supplied, false if not, and we should fall back to the
     standard ptrace methods.  */
  virtual bool low_fetch_register (regcache *regcache, int regno);

  /* Return true if breakpoints are supported.  Such targets must
     implement the GET_PC and SET_PC methods.  */
  virtual bool low_supports_breakpoints ();

  virtual CORE_ADDR low_get_pc (regcache *regcache);

  virtual void low_set_pc (regcache *regcache, CORE_ADDR newpc);

  /* Find the next possible PCs after the current instruction executes.
     Targets that override this method should also override
     'supports_software_single_step' to return true.  */
  virtual std::vector<CORE_ADDR> low_get_next_pcs (regcache *regcache);

  /* Return true if there is a breakpoint at PC.  */
  virtual bool low_breakpoint_at (CORE_ADDR pc) = 0;

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  virtual int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				int size, raw_breakpoint *bp);

  virtual bool low_stopped_by_watchpoint ();

  virtual CORE_ADDR low_stopped_data_address ();

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  virtual void low_collect_ptrace_register (regcache *regcache, int regno,
					    char *buf);

  virtual void low_supply_ptrace_register (regcache *regcache, int regno,
					   const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  virtual bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				  int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  virtual arch_process_info *low_new_process ();

  /* Hook to call when a process is being deleted.  If extra per-process
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_process (arch_process_info *info);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  virtual void low_new_thread (lwp_info *);

  /* Hook to call when a thread is being deleted.  If extra per-thread
     architecture-specific data is needed, delete it here.  */
  virtual void low_delete_thread (arch_lwp_info *);

  /* Hook to call, if any, when a new fork is attached.  */
  virtual void low_new_fork (process_info *parent, process_info *child);

  /* Hook to call prior to resuming a thread.  */
  virtual void low_prepare_to_resume (lwp_info *lwp);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  virtual int low_get_thread_area (int lwpid, CORE_ADDR *addrp);

  /* Returns true if the low target supports range stepping.  */
  virtual bool low_supports_range_stepping ();

  /* Return true if the target supports catch syscall.  Such targets
     override the low_get_syscall_trapinfo method below.  */
  virtual bool low_supports_catch_syscall ();

  /* Fill *SYSNO with the syscall nr trapped.  Only to be called when
     the inferior is stopped due to SYSCALL_SIGTRAP.  */
  virtual void low_get_syscall_trapinfo (regcache *regcache, int *sysno);

  /* How many bytes the PC should be decremented after a break.  */
  virtual int low_decr_pc_after_break ();
};
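
/* Illustrative sketch only (hypothetical, not part of this header): a
   minimal architecture backend derives from linux_process_target and
   implements at least the pure virtual methods declared above, roughly

     class my_arch_target : public linux_process_target
     {
     public:
       const regs_info *get_regs_info () override;

     protected:
       void low_arch_setup () override;
       bool low_cannot_fetch_register (int regno) override;
       bool low_cannot_store_register (int regno) override;
       bool low_breakpoint_at (CORE_ADDR pc) override;
     };

   Real backends (e.g. linux-x86-low.cc, linux-aarch64-low.cc) also
   override whichever optional low_* hooks they need and provide the
   target instance that the_linux_target below points at.  */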

extern linux_process_target *the_linux_target;

#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
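
/* For illustration only (hypothetical usage, assuming THR is a valid
   thread_info pointer obtained from the common code):

     struct lwp_info *lwp = get_thread_lwp (thr);
     struct thread_info *same_thr = get_lwp_thread (lwp);

   The two macros convert between the common thread_info object and the
   Linux-specific lwp_info stored in its target_data field.  */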

/* Information about a signal that is to be delivered to a thread.  */

struct pending_signal
{
  pending_signal (int signal)
    : signal {signal}
  {};

  int signal;
  siginfo_t info;
};

/* This struct is recorded in the target_data field of struct thread_info.

   On linux ``all_threads'' is keyed by the LWP ID, which we use as the
   GDB protocol representation of the thread ID.  Threads also have
   a "process ID" (poorly named) which is (presently) the same as the
   LWP ID.

   There is also ``all_processes'', which is keyed by the "overall
   process ID", which GNU/Linux calls tgid, "thread group ID".  */

struct lwp_info
{
  /* If this LWP is a fork child that wasn't reported to GDB yet, return
     its parent, else nullptr.  */
  lwp_info *pending_parent () const
  {
    if (this->fork_relative == nullptr)
      return nullptr;

    gdb_assert (this->fork_relative->fork_relative == this);

    /* In a fork parent/child relationship, the parent has a status pending and
       the child does not, and a thread can only be in one such relationship
       at most.  So we can recognize who is the parent based on which one has
       a pending status.  */
    gdb_assert (!!this->status_pending_p
		!= !!this->fork_relative->status_pending_p);

    if (!this->fork_relative->status_pending_p)
      return nullptr;

    const target_waitstatus &ws
      = this->fork_relative->waitstatus;
    gdb_assert (ws.kind () == TARGET_WAITKIND_FORKED
		|| ws.kind () == TARGET_WAITKIND_VFORKED);

    return this->fork_relative;
  }

  /* If this LWP is the parent of a fork child we haven't reported to GDB yet,
     return that child, else nullptr.  */
  lwp_info *pending_child () const
  {
    if (this->fork_relative == nullptr)
      return nullptr;

    gdb_assert (this->fork_relative->fork_relative == this);

    /* In a fork parent/child relationship, the parent has a status pending and
       the child does not, and a thread can only be in one such relationship
       at most.  So we can recognize who is the parent based on which one has
       a pending status.  */
    gdb_assert (!!this->status_pending_p
		!= !!this->fork_relative->status_pending_p);

    if (!this->status_pending_p)
      return nullptr;

    const target_waitstatus &ws = this->waitstatus;
    gdb_assert (ws.kind () == TARGET_WAITKIND_FORKED
		|| ws.kind () == TARGET_WAITKIND_VFORKED);

    return this->fork_relative;
  }

  /* Backlink to the parent object.  */
  struct thread_info *thread = nullptr;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected = 0;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended = 0;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped = 0;

  /* Signal whether we are in a SYSCALL_ENTRY or
     in a SYSCALL_RETURN event.
     Values:
     - TARGET_WAITKIND_SYSCALL_ENTRY
     - TARGET_WAITKIND_SYSCALL_RETURN */
  enum target_waitkind syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status = 0;

  /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
     this LWP's last event, to pass to GDB without any further
     processing.  This is used to store extended ptrace event
     information or exit status until it can be reported to GDB.  */
  struct target_waitstatus waitstatus;

  /* A pointer to the fork child/parent relative.  Valid only while
     the parent fork event is not reported to higher layers.  Used to
     avoid wildcard vCont actions resuming a fork child before GDB is
     notified about the parent's fork event.  */
  struct lwp_info *fork_relative = nullptr;

  /* When stopped is set, this is where the lwp last stopped, with
     decr_pc_after_break already accounted for.  If the LWP is
     running, this is the address at which the lwp was resumed.  */
  CORE_ADDR stop_pc = 0;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p = 0;
  int status_pending = 0;

  /* The reason the LWP last stopped, if we need to track it
     (breakpoint, watchpoint, etc.).  */
  enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains that data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address = 0;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert = 0;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping = 0;

  /* Range to single step within.  This is a copy of the step range
     passed along the last resume request.  See 'struct
     thread_resume'.  */
  CORE_ADDR step_range_start = 0; /* Inclusive */
  CORE_ADDR step_range_end = 0; /* Exclusive */

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags = 0;

  /* A chain of signals that need to be delivered to this process.  */
  std::list<pending_signal> pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume = nullptr;

  /* Information about this lwp's fast tracepoint collection status (is it
     currently stopped in the jump pad, and if so, before or at/after the
     relocated instruction).  Normally, we won't care about this, but we will
     if a signal arrives to this lwp while it is collecting.  */
  fast_tpoint_collect_result collecting_fast_tracepoint
    = fast_tpoint_collect_result::not_collecting;

  /* A chain of signals that need to be reported to GDB.  These were
     deferred because the thread was doing a fast tracepoint collect
     when they arrived.  */
  std::list<pending_signal> pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt = nullptr;

#ifdef USE_THREAD_DB
  int thread_known = 0;
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th {};

  /* The pthread_t handle.  */
  thread_t thread_handle {};
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private = nullptr;
};

int linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine);

/* Attach to PTID.  Returns 0 on success, non-zero otherwise (an
   errno).  */
int linux_attach_lwp (ptid_t ptid);

struct lwp_info *find_lwp_pid (ptid_t ptid);
/* For linux_stop_lwp see nat/linux-nat.h.  */

#ifdef HAVE_LINUX_REGSETS
void initialize_regsets_info (struct regsets_info *regsets_info);
#endif

void initialize_low_arch (void);

void linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_32bit (struct regcache *regcache);

void linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc);
CORE_ADDR linux_get_pc_64bit (struct regcache *regcache);

/* From thread-db.c  */
int thread_db_init (void);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
			       CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);

/* Called from linux-low.c when a clone event is detected.  Upon entry,
   both the clone and the parent should be stopped.  This function does
   whatever is required to have the clone under thread_db's control.  */

void thread_db_notice_clone (struct thread_info *parent_thr, ptid_t child_ptid);

bool thread_db_thread_handle (ptid_t ptid, gdb_byte **handle, int *handle_len);

extern int have_ptrace_getregset;

/* Search for the value with type MATCH in the auxv vector with
   entries of length WORDSIZE bytes.  If found, store the value in
   *VALP and return 1.  If not found or if there is an error, return
   0.  */

int linux_get_auxv (int wordsize, CORE_ADDR match,
		    CORE_ADDR *valp);

/* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap (int wordsize);

/* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
   WORDSIZE.  If no entry was found, return zero.  */

CORE_ADDR linux_get_hwcap2 (int wordsize);
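
/* For illustration only (hypothetical usage; the wordsize of 8 is an
   assumption, in practice it comes from the inferior's architecture):

     CORE_ADDR phdr = 0;
     if (linux_get_auxv (8, AT_PHDR, &phdr))
       { ... use phdr ... }

     CORE_ADDR hwcap = linux_get_hwcap (8);

   AT_PHDR and AT_HWCAP are the standard ELF auxv tags from <elf.h>.  */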

#endif /* GDBSERVER_LINUX_LOW_H */