/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19 #ifndef GDBSERVER_LINUX_LOW_H
20 #define GDBSERVER_LINUX_LOW_H
22 #include "nat/linux-nat.h"
23 #include "nat/gdb_thread_db.h"
26 #include "gdbthread.h"
27 #include "gdb_proc_service.h"
29 /* Included for ptrace type definitions. */
30 #include "nat/linux-ptrace.h"
31 #include "target/waitstatus.h" /* For enum target_stop_reason. */
32 #include "tracepoint.h"
34 #define PTRACE_XFER_TYPE long
36 #ifdef HAVE_LINUX_REGSETS
37 typedef void (*regset_fill_func
) (struct regcache
*, void *);
38 typedef void (*regset_store_func
) (struct regcache
*, const void *);
43 OPTIONAL_REGS
, /* Do not error if the regset cannot be accessed. */
46 /* The arch's regsets array initializer must be terminated with a NULL
49 { 0, 0, 0, -1, (enum regset_type) -1, NULL, NULL }
53 int get_request
, set_request
;
54 /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
55 argument and the 4th argument should be "const struct iovec *". */
58 enum regset_type type
;
59 regset_fill_func fill_function
;
60 regset_store_func store_function
;
63 /* Aggregation of all the supported regsets of a given
68 /* The regsets array. */
69 struct regset_info
*regsets
;
71 /* The number of regsets in the REGSETS array. */
74 /* If we get EIO on a regset, do not try it again. Note the set of
75 supported regsets may depend on processor mode on biarch
76 machines. This is a (lazily allocated) array holding one boolean
77 byte (0/1) per regset, with each element corresponding to the
78 regset in the REGSETS array above at the same offset. */
79 char *disabled_regsets
;
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */

struct usrregs_info
{
  /* The number of registers accessible.  */
  int num_regs;

  /* The registers map.  */
  int *regmap;
};
/* All info needed to access an architecture/mode's registers.  */

struct regs_info
{
  /* Regset support bitmap: 1 for registers that are transferred as a part
     of a regset, 0 for ones that need to be handled individually.  This
     can be NULL if all registers are transferred with regsets or regsets
     are not supported.  */
  unsigned char *regset_bitmap;

  /* Info used when accessing registers with PTRACE_PEEKUSER /
     PTRACE_POKEUSER.  This can be NULL if all registers are
     transferred with regsets.  */
  struct usrregs_info *usrregs;

#ifdef HAVE_LINUX_REGSETS
  /* Info used when accessing registers with regsets.  */
  struct regsets_info *regsets_info;
#endif
};
117 struct process_info_private
119 /* Arch-specific additions. */
120 struct arch_process_info
*arch_private
;
122 /* libthread_db-specific additions. Not NULL if this process has loaded
123 thread_db, and it is active. */
124 struct thread_db
*thread_db
;
126 /* &_r_debug. 0 if not yet determined. -1 if no PT_DYNAMIC in Phdrs. */
132 struct linux_target_ops
134 /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
135 for registers smaller than an xfer unit). */
136 void (*collect_ptrace_register
) (struct regcache
*regcache
,
137 int regno
, char *buf
);
138 void (*supply_ptrace_register
) (struct regcache
*regcache
,
139 int regno
, const char *buf
);
141 /* Hook to convert from target format to ptrace format and back.
142 Returns true if any conversion was done; false otherwise.
143 If DIRECTION is 1, then copy from INF to NATIVE.
144 If DIRECTION is 0, copy from NATIVE to INF. */
145 int (*siginfo_fixup
) (siginfo_t
*native
, gdb_byte
*inf
, int direction
);
147 /* Hook to call when a new process is created or attached to.
148 If extra per-process architecture-specific data is needed,
150 struct arch_process_info
* (*new_process
) (void);
152 /* Hook to call when a process is being deleted. If extra per-process
153 architecture-specific data is needed, delete it here. */
154 void (*delete_process
) (struct arch_process_info
*info
);
156 /* Hook to call when a new thread is detected.
157 If extra per-thread architecture-specific data is needed,
159 void (*new_thread
) (struct lwp_info
*);
161 /* Hook to call when a thread is being deleted. If extra per-thread
162 architecture-specific data is needed, delete it here. */
163 void (*delete_thread
) (struct arch_lwp_info
*);
165 /* Hook to call, if any, when a new fork is attached. */
166 void (*new_fork
) (struct process_info
*parent
, struct process_info
*child
);
168 /* Hook to call prior to resuming a thread. */
169 void (*prepare_to_resume
) (struct lwp_info
*);
171 /* Hook to support target specific qSupported. */
172 void (*process_qsupported
) (char **, int count
);
174 /* Returns true if the low target supports tracepoints. */
175 int (*supports_tracepoints
) (void);
177 /* Fill ADDRP with the thread area address of LWPID. Returns 0 on
178 success, -1 on failure. */
179 int (*get_thread_area
) (int lwpid
, CORE_ADDR
*addrp
);
181 /* Install a fast tracepoint jump pad. See target.h for
183 int (*install_fast_tracepoint_jump_pad
) (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
187 CORE_ADDR
*jump_entry
,
188 CORE_ADDR
*trampoline
,
189 ULONGEST
*trampoline_size
,
190 unsigned char *jjump_pad_insn
,
191 ULONGEST
*jjump_pad_insn_size
,
192 CORE_ADDR
*adjusted_insn_addr
,
193 CORE_ADDR
*adjusted_insn_addr_end
,
196 /* Return the bytecode operations vector for the current inferior.
197 Returns NULL if bytecode compilation is not supported. */
198 struct emit_ops
*(*emit_ops
) (void);
200 /* Return the minimum length of an instruction that can be safely overwritten
201 for use as a fast tracepoint. */
202 int (*get_min_fast_tracepoint_insn_len
) (void);
204 /* Returns true if the low target supports range stepping. */
205 int (*supports_range_stepping
) (void);
208 int (*supports_hardware_single_step
) (void);
210 /* Fill *SYSNO with the syscall nr trapped. Only to be called when
211 inferior is stopped due to SYSCALL_SIGTRAP. */
212 void (*get_syscall_trapinfo
) (struct regcache
*regcache
, int *sysno
);
215 int (*get_ipa_tdesc_idx
) (void);
218 extern struct linux_target_ops the_low_target
;
220 /* Target ops definitions for a Linux target. */
222 class linux_process_target
: public process_stratum_target
226 int create_inferior (const char *program
,
227 const std::vector
<char *> &program_args
) override
;
229 void post_create_inferior () override
;
231 int attach (unsigned long pid
) override
;
233 int kill (process_info
*proc
) override
;
235 int detach (process_info
*proc
) override
;
237 void mourn (process_info
*proc
) override
;
239 void join (int pid
) override
;
241 bool thread_alive (ptid_t pid
) override
;
243 void resume (thread_resume
*resume_info
, size_t n
) override
;
245 ptid_t
wait (ptid_t ptid
, target_waitstatus
*status
,
246 int options
) override
;
248 void fetch_registers (regcache
*regcache
, int regno
) override
;
250 void store_registers (regcache
*regcache
, int regno
) override
;
252 int prepare_to_access_memory () override
;
254 void done_accessing_memory () override
;
256 int read_memory (CORE_ADDR memaddr
, unsigned char *myaddr
,
259 int write_memory (CORE_ADDR memaddr
, const unsigned char *myaddr
,
262 void look_up_symbols () override
;
264 void request_interrupt () override
;
266 bool supports_read_auxv () override
;
268 int read_auxv (CORE_ADDR offset
, unsigned char *myaddr
,
269 unsigned int len
) override
;
271 int insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
272 int size
, raw_breakpoint
*bp
) override
;
274 int remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
275 int size
, raw_breakpoint
*bp
) override
;
277 bool stopped_by_sw_breakpoint () override
;
279 bool supports_stopped_by_sw_breakpoint () override
;
281 bool stopped_by_hw_breakpoint () override
;
283 bool supports_stopped_by_hw_breakpoint () override
;
285 bool supports_hardware_single_step () override
;
287 bool stopped_by_watchpoint () override
;
289 CORE_ADDR
stopped_data_address () override
;
291 bool supports_read_offsets () override
;
293 int read_offsets (CORE_ADDR
*text
, CORE_ADDR
*data
) override
;
295 bool supports_get_tls_address () override
;
297 int get_tls_address (thread_info
*thread
, CORE_ADDR offset
,
298 CORE_ADDR load_module
, CORE_ADDR
*address
) override
;
300 bool supports_qxfer_osdata () override
;
302 int qxfer_osdata (const char *annex
, unsigned char *readbuf
,
303 unsigned const char *writebuf
,
304 CORE_ADDR offset
, int len
) override
;
306 bool supports_qxfer_siginfo () override
;
308 int qxfer_siginfo (const char *annex
, unsigned char *readbuf
,
309 unsigned const char *writebuf
,
310 CORE_ADDR offset
, int len
) override
;
312 bool supports_non_stop () override
;
314 bool async (bool enable
) override
;
316 int start_non_stop (bool enable
) override
;
318 bool supports_multi_process () override
;
320 bool supports_fork_events () override
;
322 bool supports_vfork_events () override
;
324 bool supports_exec_events () override
;
326 void handle_new_gdb_connection () override
;
328 int handle_monitor_command (char *mon
) override
;
330 int core_of_thread (ptid_t ptid
) override
;
332 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
333 bool supports_read_loadmap () override
;
335 int read_loadmap (const char *annex
, CORE_ADDR offset
,
336 unsigned char *myaddr
, unsigned int len
) override
;
339 void process_qsupported (char **features
, int count
) override
;
341 bool supports_tracepoints () override
;
343 CORE_ADDR
read_pc (regcache
*regcache
) override
;
345 void write_pc (regcache
*regcache
, CORE_ADDR pc
) override
;
347 bool supports_thread_stopped () override
;
349 bool thread_stopped (thread_info
*thread
) override
;
351 void pause_all (bool freeze
) override
;
353 void unpause_all (bool unfreeze
) override
;
355 void stabilize_threads () override
;
357 bool supports_fast_tracepoints () override
;
359 int install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
364 CORE_ADDR
*jump_entry
,
365 CORE_ADDR
*trampoline
,
366 ULONGEST
*trampoline_size
,
367 unsigned char *jjump_pad_insn
,
368 ULONGEST
*jjump_pad_insn_size
,
369 CORE_ADDR
*adjusted_insn_addr
,
370 CORE_ADDR
*adjusted_insn_addr_end
,
373 int get_min_fast_tracepoint_insn_len () override
;
375 struct emit_ops
*emit_ops () override
;
377 bool supports_disable_randomization () override
;
379 bool supports_qxfer_libraries_svr4 () override
;
381 int qxfer_libraries_svr4 (const char *annex
,
382 unsigned char *readbuf
,
383 unsigned const char *writebuf
,
384 CORE_ADDR offset
, int len
) override
;
386 bool supports_agent () override
;
388 #ifdef HAVE_LINUX_BTRACE
389 btrace_target_info
*enable_btrace (ptid_t ptid
,
390 const btrace_config
*conf
) override
;
392 int disable_btrace (btrace_target_info
*tinfo
) override
;
394 int read_btrace (btrace_target_info
*tinfo
, buffer
*buf
,
395 enum btrace_read_type type
) override
;
397 int read_btrace_conf (const btrace_target_info
*tinfo
,
398 buffer
*buf
) override
;
401 bool supports_range_stepping () override
;
403 bool supports_pid_to_exec_file () override
;
405 char *pid_to_exec_file (int pid
) override
;
407 bool supports_multifs () override
;
409 int multifs_open (int pid
, const char *filename
, int flags
,
410 mode_t mode
) override
;
412 int multifs_unlink (int pid
, const char *filename
) override
;
414 ssize_t
multifs_readlink (int pid
, const char *filename
, char *buf
,
415 size_t bufsiz
) override
;
417 const char *thread_name (ptid_t thread
) override
;
420 bool thread_handle (ptid_t ptid
, gdb_byte
**handle
,
421 int *handle_len
) override
;
424 bool supports_catch_syscall () override
;
426 int get_ipa_tdesc_idx () override
;
428 /* Return the information to access registers. This has public
429 visibility because proc-service uses it. */
430 virtual const regs_info
*get_regs_info () = 0;
434 /* Handle a GNU/Linux extended wait response. If we see a clone,
435 fork, or vfork event, we need to add the new LWP to our list
436 (and return 0 so as not to report the trap to higher layers).
437 If we see an exec event, we will modify ORIG_EVENT_LWP to point
438 to a new LWP representing the new program. */
439 int handle_extended_wait (lwp_info
**orig_event_lwp
, int wstat
);
441 /* Do low-level handling of the event, and check if we should go on
442 and pass it to caller code. Return the affected lwp if we are, or
444 lwp_info
*filter_event (int lwpid
, int wstat
);
446 /* Wait for an event from child(ren) WAIT_PTID, and return any that
447 match FILTER_PTID (leaving others pending). The PTIDs can be:
448 minus_one_ptid, to specify any child; a pid PTID, specifying all
449 lwps of a thread group; or a PTID representing a single lwp. Store
450 the stop status through the status pointer WSTAT. OPTIONS is
451 passed to the waitpid call. Return 0 if no event was found and
452 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
453 was found. Return the PID of the stopped child otherwise. */
454 int wait_for_event_filtered (ptid_t wait_ptid
, ptid_t filter_ptid
,
455 int *wstatp
, int options
);
457 /* Wait for an event from child(ren) PTID. PTIDs can be:
458 minus_one_ptid, to specify any child; a pid PTID, specifying all
459 lwps of a thread group; or a PTID representing a single lwp. Store
460 the stop status through the status pointer WSTAT. OPTIONS is
461 passed to the waitpid call. Return 0 if no event was found and
462 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
463 was found. Return the PID of the stopped child otherwise. */
464 int wait_for_event (ptid_t ptid
, int *wstatp
, int options
);
466 /* Wait for all children to stop for the SIGSTOPs we just queued. */
467 void wait_for_sigstop ();
469 /* Wait for process, returns status. */
470 ptid_t
wait_1 (ptid_t ptid
, target_waitstatus
*ourstatus
,
473 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
474 If SUSPEND, then also increase the suspend count of every LWP,
476 void stop_all_lwps (int suspend
, lwp_info
*except
);
478 /* Stopped LWPs that the client wanted to be running, that don't have
479 pending statuses, are set to run again, except for EXCEPT, if not
480 NULL. This undoes a stop_all_lwps call. */
481 void unstop_all_lwps (int unsuspend
, lwp_info
*except
);
483 /* Start a step-over operation on LWP. When LWP stopped at a
484 breakpoint, to make progress, we need to remove the breakpoint out
485 of the way. If we let other threads run while we do that, they may
486 pass by the breakpoint location and miss hitting it. To avoid
487 that, a step-over momentarily stops all threads while LWP is
488 single-stepped by either hardware or software while the breakpoint
489 is temporarily uninserted from the inferior. When the single-step
490 finishes, we reinsert the breakpoint, and let all threads that are
491 supposed to be running, run again. */
492 void start_step_over (lwp_info
*lwp
);
494 /* If there's a step over in progress, wait until all threads stop
495 (that is, until the stepping thread finishes its step), and
496 unsuspend all lwps. The stepping thread ends with its status
497 pending, which is processed later when we get back to processing
499 void complete_ongoing_step_over ();
501 /* When we finish a step-over, set threads running again. If there's
502 another thread that may need a step-over, now's the time to start
503 it. Eventually, we'll move all threads past their breakpoints. */
504 void proceed_all_lwps ();
506 /* The reason we resume in the caller, is because we want to be able
507 to pass lwp->status_pending as WSTAT, and we need to clear
508 status_pending_p before resuming, otherwise, resume_one_lwp
509 refuses to resume. */
510 bool maybe_move_out_of_jump_pad (lwp_info
*lwp
, int *wstat
);
512 /* Move THREAD out of the jump pad. */
513 void move_out_of_jump_pad (thread_info
*thread
);
515 /* Call low_arch_setup on THREAD. */
516 void arch_setup_thread (thread_info
*thread
);
518 #ifdef HAVE_LINUX_USRREGS
519 /* Fetch one register. */
520 void fetch_register (const usrregs_info
*usrregs
, regcache
*regcache
,
523 /* Store one register. */
524 void store_register (const usrregs_info
*usrregs
, regcache
*regcache
,
528 /* Fetch all registers, or just one, from the child process.
529 If REGNO is -1, do this for all registers, skipping any that are
530 assumed to have been retrieved by regsets_fetch_inferior_registers,
531 unless ALL is non-zero.
532 Otherwise, REGNO specifies which register (so we can save time). */
533 void usr_fetch_inferior_registers (const regs_info
*regs_info
,
534 regcache
*regcache
, int regno
, int all
);
536 /* Store our register values back into the inferior.
537 If REGNO is -1, do this for all registers, skipping any that are
538 assumed to have been saved by regsets_store_inferior_registers,
539 unless ALL is non-zero.
540 Otherwise, REGNO specifies which register (so we can save time). */
541 void usr_store_inferior_registers (const regs_info
*regs_info
,
542 regcache
*regcache
, int regno
, int all
);
544 /* Return the PC as read from the regcache of LWP, without any
546 CORE_ADDR
get_pc (lwp_info
*lwp
);
548 /* Called when the LWP stopped for a signal/trap. If it stopped for a
549 trap check what caused it (breakpoint, watchpoint, trace, etc.),
550 and save the result in the LWP's stop_reason field. If it stopped
551 for a breakpoint, decrement the PC if necessary on the lwp's
552 architecture. Returns true if we now have the LWP's stop PC. */
553 bool save_stop_reason (lwp_info
*lwp
);
555 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
556 SIGNAL is nonzero, give it that signal. */
557 void resume_one_lwp_throw (lwp_info
*lwp
, int step
, int signal
,
560 /* Like resume_one_lwp_throw, but no error is thrown if the LWP
561 disappears while we try to resume it. */
562 void resume_one_lwp (lwp_info
*lwp
, int step
, int signal
, siginfo_t
*info
);
564 /* This function is called once per thread. We check the thread's
565 last resume request, which will tell us whether to resume, step, or
566 leave the thread stopped. Any signal the client requested to be
567 delivered has already been enqueued at this point.
569 If any thread that GDB wants running is stopped at an internal
570 breakpoint that needs stepping over, we start a step-over operation
571 on that particular thread, and leave all others stopped. */
572 void proceed_one_lwp (thread_info
*thread
, lwp_info
*except
);
574 /* This function is called once per thread. We check the thread's
575 resume request, which will tell us whether to resume, step, or
576 leave the thread stopped; and what signal, if any, it should be
579 For threads which we aren't explicitly told otherwise, we preserve
580 the stepping flag; this is used for stepping over gdbserver-placed
583 If pending_flags was set in any thread, we queue any needed
584 signals, since we won't actually resume. We already have a pending
585 event to report, so we don't need to preserve any step requests;
586 they should be re-issued if necessary. */
587 void resume_one_thread (thread_info
*thread
, bool leave_all_stopped
);
589 /* Return true if this lwp has an interesting status pending. */
590 bool status_pending_p_callback (thread_info
*thread
, ptid_t ptid
);
592 /* Resume LWPs that are currently stopped without any pending status
593 to report, but are resumed from the core's perspective. */
594 void resume_stopped_resumed_lwps (thread_info
*thread
);
596 /* Unsuspend THREAD, except EXCEPT, and proceed. */
597 void unsuspend_and_proceed_one_lwp (thread_info
*thread
, lwp_info
*except
);
599 /* Return true if this lwp still has an interesting status pending.
600 If not (e.g., it had stopped for a breakpoint that is gone), return
602 bool thread_still_has_status_pending (thread_info
*thread
);
604 /* Return true if this lwp is to-be-resumed and has an interesting
606 bool resume_status_pending (thread_info
*thread
);
608 /* Return true if this lwp that GDB wants running is stopped at an
609 internal breakpoint that we need to step over. It assumes that
610 any required STOP_PC adjustment has already been propagated to
611 the inferior's regcache. */
612 bool thread_needs_step_over (thread_info
*thread
);
614 /* Single step via hardware or software single step.
615 Return 1 if hardware single stepping, 0 if software single stepping
616 or can't single step. */
617 int single_step (lwp_info
* lwp
);
619 /* Install breakpoints for software single stepping. */
620 void install_software_single_step_breakpoints (lwp_info
*lwp
);
622 /* Fetch the possibly triggered data watchpoint info and store it in
625 On some archs, like x86, that use debug registers to set
626 watchpoints, it's possible that the way to know which watched
627 address trapped, is to check the register that is used to select
628 which address to watch. Problem is, between setting the watchpoint
629 and reading back which data address trapped, the user may change
630 the set of watchpoints, and, as a consequence, GDB changes the
631 debug registers in the inferior. To avoid reading back a stale
632 stopped-data-address when that happens, we cache in LP the fact
633 that a watchpoint trapped, and the corresponding data address, as
634 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
635 registers meanwhile, we have the cached data we can rely on. */
636 bool check_stopped_by_watchpoint (lwp_info
*child
);
639 /* The architecture-specific "low" methods are listed below. */
641 /* Architecture-specific setup for the current thread. */
642 virtual void low_arch_setup () = 0;
644 /* Return false if we can fetch/store the register, true if we cannot
645 fetch/store the register. */
646 virtual bool low_cannot_fetch_register (int regno
) = 0;
648 virtual bool low_cannot_store_register (int regno
) = 0;
650 /* Hook to fetch a register in some non-standard way. Used for
651 example by backends that have read-only registers with hardcoded
652 values (e.g., IA64's gr0/fr0/fr1). Returns true if register
653 REGNO was supplied, false if not, and we should fallback to the
654 standard ptrace methods. */
655 virtual bool low_fetch_register (regcache
*regcache
, int regno
);
657 /* Return true if breakpoints are supported. Such targets must
658 implement the GET_PC and SET_PC methods. */
659 virtual bool low_supports_breakpoints ();
661 virtual CORE_ADDR
low_get_pc (regcache
*regcache
);
663 virtual void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
);
665 /* Find the next possible PCs after the current instruction executes.
666 Targets that override this method should also override
667 'supports_software_single_step' to return true. */
668 virtual std::vector
<CORE_ADDR
> low_get_next_pcs (regcache
*regcache
);
670 /* Return true if there is a breakpoint at PC. */
671 virtual bool low_breakpoint_at (CORE_ADDR pc
) = 0;
673 /* Breakpoint and watchpoint related functions. See target.h for
675 virtual int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
676 int size
, raw_breakpoint
*bp
);
678 virtual int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
679 int size
, raw_breakpoint
*bp
);
681 virtual bool low_stopped_by_watchpoint ();
683 virtual CORE_ADDR
low_stopped_data_address ();
685 /* How many bytes the PC should be decremented after a break. */
686 virtual int low_decr_pc_after_break ();
689 extern linux_process_target
*the_linux_target
;
/* Accessors between a thread_info and its associated lwp_info.  */
#define get_thread_lwp(thr) ((struct lwp_info *) (thread_target_data (thr)))
#define get_lwp_thread(lwp) ((lwp)->thread)
694 /* This struct is recorded in the target_data field of struct thread_info.
696 On linux ``all_threads'' is keyed by the LWP ID, which we use as the
697 GDB protocol representation of the thread ID. Threads also have
698 a "process ID" (poorly named) which is (presently) the same as the
701 There is also ``all_processes'' is keyed by the "overall process ID",
702 which GNU/Linux calls tgid, "thread group ID". */
706 /* Backlink to the parent object. */
707 struct thread_info
*thread
;
709 /* If this flag is set, the next SIGSTOP will be ignored (the
710 process will be immediately resumed). This means that either we
711 sent the SIGSTOP to it ourselves and got some other pending event
712 (so the SIGSTOP is still pending), or that we stopped the
713 inferior implicitly via PTRACE_ATTACH and have not waited for it
717 /* When this is true, we shall not try to resume this thread, even
718 if last_resume_kind isn't resume_stop. */
721 /* If this flag is set, the lwp is known to be stopped right now (stop
722 event already received in a wait()). */
725 /* Signal whether we are in a SYSCALL_ENTRY or
726 in a SYSCALL_RETURN event.
728 - TARGET_WAITKIND_SYSCALL_ENTRY
729 - TARGET_WAITKIND_SYSCALL_RETURN */
730 enum target_waitkind syscall_state
;
732 /* When stopped is set, the last wait status recorded for this lwp. */
735 /* If WAITSTATUS->KIND != TARGET_WAITKIND_IGNORE, the waitstatus for
736 this LWP's last event, to pass to GDB without any further
737 processing. This is used to store extended ptrace event
738 information or exit status until it can be reported to GDB. */
739 struct target_waitstatus waitstatus
;
741 /* A pointer to the fork child/parent relative. Valid only while
742 the parent fork event is not reported to higher layers. Used to
743 avoid wildcard vCont actions resuming a fork child before GDB is
744 notified about the parent's fork event. */
745 struct lwp_info
*fork_relative
;
747 /* When stopped is set, this is where the lwp last stopped, with
748 decr_pc_after_break already accounted for. If the LWP is
749 running, this is the address at which the lwp was resumed. */
752 /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
754 int status_pending_p
;
757 /* The reason the LWP last stopped, if we need to track it
758 (breakpoint, watchpoint, etc.) */
759 enum target_stop_reason stop_reason
;
761 /* On architectures where it is possible to know the data address of
762 a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
763 contains such data address. Only valid if STOPPED_BY_WATCHPOINT
765 CORE_ADDR stopped_data_address
;
767 /* If this is non-zero, it is a breakpoint to be reinserted at our next
768 stop (SIGTRAP stops only). */
769 CORE_ADDR bp_reinsert
;
771 /* If this flag is set, the last continue operation at the ptrace
772 level on this process was a single-step. */
775 /* Range to single step within. This is a copy of the step range
776 passed along the last resume request. See 'struct
778 CORE_ADDR step_range_start
; /* Inclusive */
779 CORE_ADDR step_range_end
; /* Exclusive */
781 /* If this flag is set, we need to set the event request flags the
782 next time we see this LWP stop. */
783 int must_set_ptrace_flags
;
785 /* If this is non-zero, it points to a chain of signals which need to
786 be delivered to this process. */
787 struct pending_signals
*pending_signals
;
789 /* A link used when resuming. It is initialized from the resume request,
790 and then processed and cleared in linux_resume_one_lwp. */
791 struct thread_resume
*resume
;
793 /* Information bout this lwp's fast tracepoint collection status (is it
794 currently stopped in the jump pad, and if so, before or at/after the
795 relocated instruction). Normally, we won't care about this, but we will
796 if a signal arrives to this lwp while it is collecting. */
797 fast_tpoint_collect_result collecting_fast_tracepoint
;
799 /* If this is non-zero, it points to a chain of signals which need
800 to be reported to GDB. These were deferred because the thread
801 was doing a fast tracepoint collect when they arrived. */
802 struct pending_signals
*pending_signals_to_report
;
804 /* When collecting_fast_tracepoint is first found to be 1, we insert
805 a exit-jump-pad-quickly breakpoint. This is it. */
806 struct breakpoint
*exit_jump_pad_bkpt
;
810 /* The thread handle, used for e.g. TLS access. Only valid if
811 THREAD_KNOWN is set. */
814 /* The pthread_t handle. */
815 thread_t thread_handle
;
818 /* Arch-specific additions. */
819 struct arch_lwp_info
*arch_private
;
822 int linux_pid_exe_is_elf_64_file (int pid
, unsigned int *machine
);
824 /* Attach to PTID. Returns 0 on success, non-zero otherwise (an
826 int linux_attach_lwp (ptid_t ptid
);
828 struct lwp_info
*find_lwp_pid (ptid_t ptid
);
829 /* For linux_stop_lwp see nat/linux-nat.h. */
831 #ifdef HAVE_LINUX_REGSETS
832 void initialize_regsets_info (struct regsets_info
*regsets_info
);
835 void initialize_low_arch (void);
837 void linux_set_pc_32bit (struct regcache
*regcache
, CORE_ADDR pc
);
838 CORE_ADDR
linux_get_pc_32bit (struct regcache
*regcache
);
840 void linux_set_pc_64bit (struct regcache
*regcache
, CORE_ADDR pc
);
841 CORE_ADDR
linux_get_pc_64bit (struct regcache
*regcache
);
843 /* From thread-db.c */
844 int thread_db_init (void);
845 void thread_db_detach (struct process_info
*);
846 void thread_db_mourn (struct process_info
*);
847 int thread_db_handle_monitor_command (char *);
848 int thread_db_get_tls_address (struct thread_info
*thread
, CORE_ADDR offset
,
849 CORE_ADDR load_module
, CORE_ADDR
*address
);
850 int thread_db_look_up_one_symbol (const char *name
, CORE_ADDR
*addrp
);
852 /* Called from linux-low.c when a clone event is detected. Upon entry,
853 both the clone and the parent should be stopped. This function does
854 whatever is required have the clone under thread_db's control. */
856 void thread_db_notice_clone (struct thread_info
*parent_thr
, ptid_t child_ptid
);
858 bool thread_db_thread_handle (ptid_t ptid
, gdb_byte
**handle
, int *handle_len
);
860 extern int have_ptrace_getregset
;
862 /* Search for the value with type MATCH in the auxv vector with
863 entries of length WORDSIZE bytes. If found, store the value in
864 *VALP and return 1. If not found or if there is an error, return
867 int linux_get_auxv (int wordsize
, CORE_ADDR match
,
870 /* Fetch the AT_HWCAP entry from the auxv vector, where entries are length
871 WORDSIZE. If no entry was found, return zero. */
873 CORE_ADDR
linux_get_hwcap (int wordsize
);
875 /* Fetch the AT_HWCAP2 entry from the auxv vector, where entries are length
876 WORDSIZE. If no entry was found, return zero. */
878 CORE_ADDR
linux_get_hwcap2 (int wordsize
);
880 #endif /* GDBSERVER_LINUX_LOW_H */