1 /* Multi-process/thread control defs for GDB, the GNU debugger.
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
3 Contributed by Lynx Real-Time Systems, Inc. Los Gatos, CA.
4
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #ifndef GDBTHREAD_H
22 #define GDBTHREAD_H
23
24 struct symtab;
25
26 #include "breakpoint.h"
27 #include "frame.h"
28 #include "ui-out.h"
29 #include "btrace.h"
30 #include "target/waitstatus.h"
31 #include "cli/cli-utils.h"
32 #include "gdbsupport/refcounted-object.h"
33 #include "gdbsupport/common-gdbthread.h"
34 #include "gdbsupport/forward-scope-exit.h"
35 #include "displaced-stepping.h"
36 #include "gdbsupport/intrusive_list.h"
37
38 struct inferior;
39 struct process_stratum_target;
40
41 /* Frontend view of the thread state. Possible extensions: stepping,
42 finishing, until(ling),...
43
44    NOTE: Since the thread state is not a boolean, most of the time you
45    do not want to check it with negation.  If you really want to check
46    whether the thread is stopped,
47
48 use (good):
49
50 if (tp->state == THREAD_STOPPED)
51
52 instead of (bad):
53
54 if (tp->state != THREAD_RUNNING)
55
56    The latter is also true for exited threads, which is most likely
57    not what you want.  */
58 enum thread_state
59 {
60   /* In the frontend's perspective, the thread is stopped.  */
61 THREAD_STOPPED,
62
63   /* In the frontend's perspective, the thread is running.  */
64 THREAD_RUNNING,
65
66 /* The thread is listed, but known to have exited. We keep it
67 listed (but not visible) until it's safe to delete it. */
68 THREAD_EXITED,
69 };
70
71 /* STEP_OVER_ALL means step over all subroutine calls.
72 STEP_OVER_UNDEBUGGABLE means step over calls to undebuggable functions.
73 STEP_OVER_NONE means don't step over any subroutine calls. */
74
75 enum step_over_calls_kind
76 {
77 STEP_OVER_NONE,
78 STEP_OVER_ALL,
79 STEP_OVER_UNDEBUGGABLE
80 };
81
82 /* Inferior thread specific part of `struct infcall_control_state'.
83
84 Inferior process counterpart is `struct inferior_control_state'. */
85
86 struct thread_control_state
87 {
88 /* User/external stepping state. */
89
90 /* Step-resume or longjmp-resume breakpoint. */
91 struct breakpoint *step_resume_breakpoint = nullptr;
92
93 /* Exception-resume breakpoint. */
94 struct breakpoint *exception_resume_breakpoint = nullptr;
95
96 /* Breakpoints used for software single stepping. Plural, because
97 it may have multiple locations. E.g., if stepping over a
98 conditional branch instruction we can't decode the condition for,
99 we'll need to put a breakpoint at the branch destination, and
100 another at the instruction after the branch. */
101 struct breakpoint *single_step_breakpoints = nullptr;
102
103 /* Range to single step within.
104
105 If this is nonzero, respond to a single-step signal by continuing
106 to step if the pc is in this range.
107
108 If step_range_start and step_range_end are both 1, it means to
109 step for a single instruction (FIXME: it might clean up
110 wait_for_inferior in a minor way if this were changed to the
111 address of the instruction and that address plus one. But maybe
112 not). */
113 CORE_ADDR step_range_start = 0; /* Inclusive */
114 CORE_ADDR step_range_end = 0; /* Exclusive */
115
116   /* Function the thread was in as of the last time it started stepping.  */
117 struct symbol *step_start_function = nullptr;
118
119 /* If GDB issues a target step request, and this is nonzero, the
120 target should single-step this thread once, and then continue
121 single-stepping it without GDB core involvement as long as the
122 thread stops in the step range above. If this is zero, the
123 target should ignore the step range, and only issue one single
124 step. */
125 int may_range_step = 0;
126
127 /* Stack frame address as of when stepping command was issued.
128 This is how we know when we step into a subroutine call, and how
129 to set the frame for the breakpoint used to step out. */
130 struct frame_id step_frame_id {};
131
132 /* Similarly, the frame ID of the underlying stack frame (skipping
133 any inlined frames). */
134 struct frame_id step_stack_frame_id {};
135
136   /* True if the thread is presently stepping over a breakpoint or
137 a watchpoint, either with an inline step over or a displaced (out
138 of line) step, and we're now expecting it to report a trap for
139 the finished single step. */
140 int trap_expected = 0;
141
142 /* Nonzero if the thread is being proceeded for a "finish" command
143 or a similar situation when return value should be printed. */
144 int proceed_to_finish = 0;
145
146 /* Nonzero if the thread is being proceeded for an inferior function
147 call. */
148 int in_infcall = 0;
149
150 enum step_over_calls_kind step_over_calls = STEP_OVER_NONE;
151
152 /* Nonzero if stopped due to a step command. */
153 int stop_step = 0;
154
155 /* Chain containing status of breakpoint(s) the thread stopped
156 at. */
157 bpstat *stop_bpstat = nullptr;
158
159 /* Whether the command that started the thread was a stepping
160 command. This is used to decide whether "set scheduler-locking
161 step" behaves like "on" or "off". */
162 int stepping_command = 0;
163 };
164
165 /* Inferior thread specific part of `struct infcall_suspend_state'. */
166
167 struct thread_suspend_state
168 {
169 /* Last signal that the inferior received (why it stopped). When
170 the thread is resumed, this signal is delivered. Note: the
171 target should not check whether the signal is in pass state,
172 because the signal may have been explicitly passed with the
173 "signal" command, which overrides "handle nopass". If the signal
174 should be suppressed, the core will take care of clearing this
175 before the target is resumed. */
176 enum gdb_signal stop_signal = GDB_SIGNAL_0;
177
178 /* The reason the thread last stopped, if we need to track it
179 (breakpoint, watchpoint, etc.) */
180 enum target_stop_reason stop_reason = TARGET_STOPPED_BY_NO_REASON;
181
182 /* The waitstatus for this thread's last event. */
183 struct target_waitstatus waitstatus;
184   /* If true, WAITSTATUS hasn't been handled yet.  */
185 int waitstatus_pending_p = 0;
186
187 /* Record the pc of the thread the last time it stopped. (This is
188 not the current thread's PC as that may have changed since the
189 last stop, e.g., "return" command, or "p $pc = 0xf000").
190
191 - If the thread's PC has not changed since the thread last
192 stopped, then proceed skips a breakpoint at the current PC,
193 otherwise we let the thread run into the breakpoint.
194
195 - If the thread has an unprocessed event pending, as indicated by
196 waitstatus_pending_p, this is used in coordination with
197 stop_reason: if the thread's PC has changed since the thread
198 last stopped, a pending breakpoint waitstatus is discarded.
199
200 - If the thread is running, then this field has its value removed by
201 calling stop_pc.reset() (see thread_info::set_executing()).
202 Attempting to read a gdb::optional with no value is undefined
203 behaviour and will trigger an assertion error when _GLIBCXX_DEBUG is
204      defined, which should make errors easier to track down.  */
205 gdb::optional<CORE_ADDR> stop_pc;
206 };
207
208 /* Base class for target-specific thread data. */
209 struct private_thread_info
210 {
211 virtual ~private_thread_info () = 0;
212 };
213
214 /* Threads are intrusively refcounted objects. Being the
215 user-selected thread is normally considered an implicit strong
216 reference and is thus not accounted in the refcount, unlike
217 inferior objects. This is necessary, because there's no "current
218 thread" pointer. Instead the current thread is inferred from the
219 inferior_ptid global. However, when GDB needs to remember the
220 selected thread to later restore it, GDB bumps the thread object's
221 refcount, to prevent something deleting the thread object before
222 reverting back (e.g., due to a "kill" command). If the thread
223 meanwhile exits before being re-selected, then the thread object is
224 left listed in the thread list, but marked with state
225 THREAD_EXITED. (See scoped_restore_current_thread and
226 delete_thread). All other thread references are considered weak
227 references. Placing a thread in the thread list is an implicit
228 strong reference, and is thus not accounted for in the thread's
229 refcount.
230
231 The intrusive_list_node base links threads in a per-inferior list. */
232
233 class thread_info : public refcounted_object,
234 public intrusive_list_node<thread_info>
235 {
236 public:
237 explicit thread_info (inferior *inf, ptid_t ptid);
238
239 bool deletable () const;
240
241 /* Mark this thread as running and notify observers. */
242 void set_running (bool running);
243
244 ptid_t ptid; /* "Actual process id";
245 In fact, this may be overloaded with
246 kernel thread id, etc. */
247
248 /* Each thread has two GDB IDs.
249
250 a) The thread ID (Id). This consists of the pair of:
251
252 - the number of the thread's inferior and,
253
254 - the thread's thread number in its inferior, aka, the
255 per-inferior thread number. This number is unique in the
256 inferior but not unique between inferiors.
257
258      b) The global ID (GId).  This is a single integer unique
259 between all inferiors.
260
261 E.g.:
262
263 (gdb) info threads -gid
264 Id GId Target Id Frame
265 * 1.1 1 Thread A 0x16a09237 in foo () at foo.c:10
266 1.2 3 Thread B 0x15ebc6ed in bar () at foo.c:20
267 1.3 5 Thread C 0x15ebc6ed in bar () at foo.c:20
268 2.1 2 Thread A 0x16a09237 in foo () at foo.c:10
269 2.2 4 Thread B 0x15ebc6ed in bar () at foo.c:20
270 2.3 6 Thread C 0x15ebc6ed in bar () at foo.c:20
271
272 Above, both inferiors 1 and 2 have threads numbered 1-3, but each
273 thread has its own unique global ID. */
274
275 /* The thread's global GDB thread number. This is exposed to MI,
276 Python/Scheme, visible with "info threads -gid", and is also what
277 the $_gthread convenience variable is bound to. */
278 int global_num;
279
280 /* The per-inferior thread number. This is unique in the inferior
281 the thread belongs to, but not unique between inferiors. This is
282 what the $_thread convenience variable is bound to. */
283 int per_inf_num;
284
285 /* The inferior this thread belongs to. */
286 struct inferior *inf;
287
288 /* The user-given name of the thread.
289
290 Returns nullptr if the thread does not have a user-given name. */
291 const char *name () const
292 {
293 return m_name.get ();
294 }
295
296 /* Set the user-given name of the thread.
297
298 Pass nullptr to clear the name. */
299 void set_name (gdb::unique_xmalloc_ptr<char> name)
300 {
301 m_name = std::move (name);
302 }
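  /* For example, the CLI's "thread name" command ends up doing
     something along these lines (illustrative sketch, not code from
     this header; TP stands for some thread_info pointer):

       tp->set_name (make_unique_xstrdup ("worker-1"));
       tp->set_name (nullptr);   // clear the user-given name again
  */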
303
304 bool executing () const
305 { return m_executing; }
306
307 /* Set the thread's 'm_executing' field from EXECUTING, and if EXECUTING
308 is true also clears the thread's stop_pc. */
309 void set_executing (bool executing);
310
311 bool resumed () const
312 { return m_resumed; }
313
314 /* Set the thread's 'm_resumed' field from RESUMED. The thread may also
315 be added to (when RESUMED is true), or removed from (when RESUMED is
316 false), the list of threads with a pending wait status. */
317 void set_resumed (bool resumed);
318
319 /* Frontend view of the thread state. Note that the THREAD_RUNNING/
320 THREAD_STOPPED states are different from EXECUTING. When the
321 thread is stopped internally while handling an internal event,
322 like a software single-step breakpoint, EXECUTING will be false,
323 but STATE will still be THREAD_RUNNING. */
324 enum thread_state state = THREAD_STOPPED;
325
326 /* State of GDB control of inferior thread execution.
327 See `struct thread_control_state'. */
328 thread_control_state control;
329
330 /* Save M_SUSPEND to SUSPEND. */
331
332 void save_suspend_to (thread_suspend_state &suspend) const
333 {
334 suspend = m_suspend;
335 }
336
337 /* Restore M_SUSPEND from SUSPEND. */
338
339 void restore_suspend_from (const thread_suspend_state &suspend)
340 {
341 m_suspend = suspend;
342 }
343
344 /* Return this thread's stop PC. This should only be called when it is
345 known that stop_pc has a value. If this function is being used in a
346 situation where a thread may not have had a stop_pc assigned, then
347 stop_pc_p() can be used to check if the stop_pc is defined. */
348
349 CORE_ADDR stop_pc () const
350 {
351 gdb_assert (m_suspend.stop_pc.has_value ());
352 return *m_suspend.stop_pc;
353 }
354
355 /* Set this thread's stop PC. */
356
357 void set_stop_pc (CORE_ADDR stop_pc)
358 {
359 m_suspend.stop_pc = stop_pc;
360 }
361
362 /* Remove the stop_pc stored on this thread. */
363
364 void clear_stop_pc ()
365 {
366 m_suspend.stop_pc.reset ();
367 }
368
369 /* Return true if this thread has a cached stop pc value, otherwise
370 return false. */
371
372 bool stop_pc_p () const
373 {
374 return m_suspend.stop_pc.has_value ();
375 }
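  /* Code running at a point where the thread may not yet have a cached
     stop PC would typically guard the access, along these lines
     (illustrative sketch; TP stands for some thread_info pointer):

       if (tp->stop_pc_p ())
         {
           CORE_ADDR pc = tp->stop_pc ();
           ...
         }
  */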
376
377 /* Return true if this thread has a pending wait status. */
378
379 bool has_pending_waitstatus () const
380 {
381 return m_suspend.waitstatus_pending_p;
382 }
383
384 /* Get this thread's pending wait status.
385
386 May only be called if has_pending_waitstatus returns true. */
387
388 const target_waitstatus &pending_waitstatus () const
389 {
390 gdb_assert (this->has_pending_waitstatus ());
391
392 return m_suspend.waitstatus;
393 }
394
395 /* Set this thread's pending wait status.
396
397 May only be called if has_pending_waitstatus returns false. */
398
399 void set_pending_waitstatus (const target_waitstatus &ws);
400
401 /* Clear this thread's pending wait status.
402
403 May only be called if has_pending_waitstatus returns true. */
404
405 void clear_pending_waitstatus ();
406
407 /* Return this thread's stop signal. */
408
409 gdb_signal stop_signal () const
410 {
411 return m_suspend.stop_signal;
412 }
413
414 /* Set this thread's stop signal. */
415
416 void set_stop_signal (gdb_signal sig)
417 {
418 m_suspend.stop_signal = sig;
419 }
420
421 /* Return this thread's stop reason. */
422
423 target_stop_reason stop_reason () const
424 {
425 return m_suspend.stop_reason;
426 }
427
428 /* Set this thread's stop reason. */
429
430 void set_stop_reason (target_stop_reason reason)
431 {
432 m_suspend.stop_reason = reason;
433 }
434
435 int current_line = 0;
436 struct symtab *current_symtab = NULL;
437
438 /* Internal stepping state. */
439
440 /* Record the pc of the thread the last time it was resumed. (It
441 can't be done on stop as the PC may change since the last stop,
442 e.g., "return" command, or "p $pc = 0xf000"). This is maintained
443 by proceed and keep_going, and among other things, it's used in
444 adjust_pc_after_break to distinguish a hardware single-step
445 SIGTRAP from a breakpoint SIGTRAP. */
446 CORE_ADDR prev_pc = 0;
447
448 /* Did we set the thread stepping a breakpoint instruction? This is
449 used in conjunction with PREV_PC to decide whether to adjust the
450 PC. */
451 int stepped_breakpoint = 0;
452
453 /* Should we step over breakpoint next time keep_going is called? */
454 int stepping_over_breakpoint = 0;
455
456 /* Should we step over a watchpoint next time keep_going is called?
457 This is needed on targets with non-continuable, non-steppable
458 watchpoints. */
459 int stepping_over_watchpoint = 0;
460
461   /* Set to TRUE if we should finish single-stepping over a breakpoint
462      after hitting the current step-resume breakpoint.  The context here
463      is that GDB is doing a `next' or `step' when a signal arrives.
464      When stepping over a breakpoint and a signal arrives, GDB attempts
465      to skip the signal handler, so it inserts a step_resume_breakpoint
466      at the signal return address and resumes the inferior.
467      step_after_step_resume_breakpoint is set to TRUE at this point to
468      remind GDB that there is still a breakpoint to step over when it
469      gets back a SIGTRAP from the step_resume_breakpoint.  */
470 int step_after_step_resume_breakpoint = 0;
471
472 /* Pointer to the state machine manager object that handles what is
473 left to do for the thread's execution command after the target
474 stops. Several execution commands use it. */
475 struct thread_fsm *thread_fsm = NULL;
476
477 /* This is used to remember when a fork or vfork event was caught by
478 a catchpoint, and thus the event is to be followed at the next
479 resume of the thread, and not immediately. */
480 struct target_waitstatus pending_follow;
481
482 /* True if this thread has been explicitly requested to stop. */
483 int stop_requested = 0;
484
485 /* The initiating frame of a nexting operation, used for deciding
486      which exceptions to intercept.  If it is null_frame_id, no
487      bp_longjmp or bp_exception breakpoints are used; longjmp is caught
488      only for bp_longjmp_call_dummy.  */
489 struct frame_id initiating_frame = null_frame_id;
490
491 /* Private data used by the target vector implementation. */
492 std::unique_ptr<private_thread_info> priv;
493
494 /* Branch trace information for this thread. */
495 struct btrace_thread_info btrace {};
496
497 /* Flag which indicates that the stack temporaries should be stored while
498 evaluating expressions. */
499 bool stack_temporaries_enabled = false;
500
501 /* Values that are stored as temporaries on stack while evaluating
502 expressions. */
503 std::vector<struct value *> stack_temporaries;
504
505 /* Step-over chain. A thread is in the step-over queue if this node is
506 linked. */
507 intrusive_list_node<thread_info> step_over_list_node;
508
509 /* Node for list of threads that are resumed and have a pending wait status.
510
511 The list head for this is in process_stratum_target, hence all threads in
512 this list belong to that process target. */
513 intrusive_list_node<thread_info> resumed_with_pending_wait_status_node;
514
515 /* Displaced-step state for this thread. */
516 displaced_step_thread_state displaced_step_state;
517
518 private:
519 /* True if this thread is resumed from infrun's perspective.
520 Note that a thread can be marked both as not-executing and
521 resumed at the same time. This happens if we try to resume a
522 thread that has a wait status pending. We shouldn't let the
523 thread really run until that wait status has been processed, but
524 we should not process that wait status if we didn't try to let
525 the thread run. */
526 bool m_resumed = false;
527
528 /* True means the thread is executing. Note: this is different
529 from saying that there is an active target and we are stopped at
530 a breakpoint, for instance. This is a real indicator whether the
531 thread is off and running. */
532 bool m_executing = false;
533
534 /* State of inferior thread to restore after GDB is done with an inferior
535 call. See `struct thread_suspend_state'. */
536 thread_suspend_state m_suspend;
537
538 /* The user-given name of the thread.
539
540 Nullptr if the thread does not have a user-given name. */
541 gdb::unique_xmalloc_ptr<char> m_name;
542 };
543
544 using thread_info_resumed_with_pending_wait_status_node
545 = intrusive_member_node<thread_info,
546 &thread_info::resumed_with_pending_wait_status_node>;
547 using thread_info_resumed_with_pending_wait_status_list
548 = intrusive_list<thread_info,
549 thread_info_resumed_with_pending_wait_status_node>;
550
551 /* A gdb::ref_ptr pointer to a thread_info. */
552
553 using thread_info_ref
554 = gdb::ref_ptr<struct thread_info, refcounted_object_ref_policy>;
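/* For example, code that must keep a thread object alive across an
   operation that may delete exited threads can take a strong reference
   first (illustrative sketch; TP stands for some thread_info pointer):

     thread_info_ref ref = thread_info_ref::new_reference (tp);
*/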
555
556 /* A gdb::ref_ptr pointer to an inferior. This would ideally be in
557 inferior.h, but it can't due to header dependencies (inferior.h
558 includes gdbthread.h). */
559
560 using inferior_ref
561 = gdb::ref_ptr<struct inferior, refcounted_object_ref_policy>;
562
563 /* Create an empty thread list, or empty the existing one. */
564 extern void init_thread_list (void);
565
566 /* Add a thread to the thread list, print a message
567 that a new thread is found, and return the pointer to
568    the new thread.  The caller may use this pointer to
569 initialize the private thread data. */
570 extern struct thread_info *add_thread (process_stratum_target *targ,
571 ptid_t ptid);
572
573 /* Same as add_thread, but does not print a message about new
574 thread. */
575 extern struct thread_info *add_thread_silent (process_stratum_target *targ,
576 ptid_t ptid);
577
578 /* Same as add_thread, and sets the private info. */
579 extern struct thread_info *add_thread_with_info (process_stratum_target *targ,
580 ptid_t ptid,
581 private_thread_info *);
582
583 /* Delete thread THREAD and notify of thread exit. If the thread is
584 currently not deletable, don't actually delete it but still tag it
585 as exited and do the notification. */
586 extern void delete_thread (struct thread_info *thread);
587
588 /* Like delete_thread, but be quiet about it. Used when the process
589 this thread belonged to has already exited, for example. */
590 extern void delete_thread_silent (struct thread_info *thread);
591
592 /* Mark the thread exited, but don't delete it or remove it from the
593 inferior thread list. */
594 extern void set_thread_exited (thread_info *tp, bool silent);
595
596 /* Delete a step_resume_breakpoint from the thread database. */
597 extern void delete_step_resume_breakpoint (struct thread_info *);
598
599 /* Delete an exception_resume_breakpoint from the thread database. */
600 extern void delete_exception_resume_breakpoint (struct thread_info *);
601
602 /* Delete the single-step breakpoints of thread TP, if any. */
603 extern void delete_single_step_breakpoints (struct thread_info *tp);
604
605 /* Check if the thread has software single stepping breakpoints
606 set. */
607 extern int thread_has_single_step_breakpoints_set (struct thread_info *tp);
608
609 /* Check whether the thread has software single stepping breakpoints
610 set at PC. */
611 extern int thread_has_single_step_breakpoint_here (struct thread_info *tp,
612 const address_space *aspace,
613 CORE_ADDR addr);
614
615 /* Returns whether to show inferior-qualified thread IDs, or plain
616 thread numbers. Inferior-qualified IDs are shown whenever we have
617 multiple inferiors, or the only inferior left has number > 1. */
618 extern int show_inferior_qualified_tids (void);
619
620 /* Return a string version of THR's thread ID. If there are multiple
621 inferiors, then this prints the inferior-qualifier form, otherwise
622 it only prints the thread number. The result is stored in a
623 circular static buffer, NUMCELLS deep. */
624 const char *print_thread_id (struct thread_info *thr);
625
626 /* Boolean test for an already-known ptid. */
627 extern bool in_thread_list (process_stratum_target *targ, ptid_t ptid);
628
629 /* Boolean test for an already-known global thread id (GDB's homegrown
630 global id, not the system's). */
631 extern int valid_global_thread_id (int global_id);
632
633 /* Find (non-exited) thread PTID of inferior INF. */
634 extern thread_info *find_thread_ptid (inferior *inf, ptid_t ptid);
635
636 /* Search function to lookup a (non-exited) thread by 'ptid'. */
637 extern struct thread_info *find_thread_ptid (process_stratum_target *targ,
638 ptid_t ptid);
639
640 /* Find thread by GDB global thread ID. */
641 struct thread_info *find_thread_global_id (int global_id);
642
643 /* Find thread by thread library specific handle in inferior INF. */
644 struct thread_info *find_thread_by_handle
645 (gdb::array_view<const gdb_byte> handle, struct inferior *inf);
646
647 /* Finds the first thread of the specified inferior. */
648 extern struct thread_info *first_thread_of_inferior (inferior *inf);
649
650 /* Returns any thread of inferior INF, giving preference to the
651 current thread. */
652 extern struct thread_info *any_thread_of_inferior (inferior *inf);
653
654 /* Returns any non-exited thread of inferior INF, giving preference to
655 the current thread, and to not executing threads. */
656 extern struct thread_info *any_live_thread_of_inferior (inferior *inf);
657
658 /* Change the ptid of thread OLD_PTID to NEW_PTID. */
659 void thread_change_ptid (process_stratum_target *targ,
660 ptid_t old_ptid, ptid_t new_ptid);
661
662 /* Iterator function to call a user-provided callback function
663 once for each known thread. */
664 typedef int (*thread_callback_func) (struct thread_info *, void *);
665 extern struct thread_info *iterate_over_threads (thread_callback_func, void *);
666
667 /* Pull in the internals of the inferiors/threads ranges and
668 iterators. Must be done after struct thread_info is defined. */
669 #include "thread-iter.h"
670
671 /* Return a range that can be used to walk over threads, with
672 range-for.
673
674 Used like this, it walks over all threads of all inferiors of all
675 targets:
676
677 for (thread_info *thr : all_threads ())
678 { .... }
679
680 FILTER_PTID can be used to filter out threads that don't match.
681 FILTER_PTID can be:
682
683 - minus_one_ptid, meaning walk all threads of all inferiors of
684 PROC_TARGET. If PROC_TARGET is NULL, then of all targets.
685
686 - A process ptid, in which case walk all threads of the specified
687 process. PROC_TARGET must be non-NULL in this case.
688
689 - A thread ptid, in which case walk that thread only. PROC_TARGET
690 must be non-NULL in this case.
691 */
692
693 inline all_matching_threads_range
694 all_threads (process_stratum_target *proc_target = nullptr,
695 ptid_t filter_ptid = minus_one_ptid)
696 {
697 return all_matching_threads_range (proc_target, filter_ptid);
698 }
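/* For example, to walk only the threads of the process that PTID
   belongs to on PROC_TARGET, a caller might write (illustrative
   sketch):

     for (thread_info *thr : all_threads (proc_target,
                                          ptid_t (ptid.pid ())))
       { .... }
*/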
699
700 /* Return a range that can be used to walk over all non-exited threads
701 of all inferiors, with range-for. Arguments are like all_threads
702 above. */
703
704 inline all_non_exited_threads_range
705 all_non_exited_threads (process_stratum_target *proc_target = nullptr,
706 ptid_t filter_ptid = minus_one_ptid)
707 {
708 return all_non_exited_threads_range (proc_target, filter_ptid);
709 }
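/* E.g., visiting every live thread of PROC_TARGET (illustrative
   sketch):

     for (thread_info *tp : all_non_exited_threads (proc_target))
       { .... }
*/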
710
711 /* Return a range that can be used to walk over all threads of all
712 inferiors, with range-for, safely. I.e., it is safe to delete the
713 currently-iterated thread. When combined with range-for, this
714    allows convenient patterns like this:
715
716 for (thread_info *t : all_threads_safe ())
717 if (some_condition ())
718        delete t;
719 */
720
721 inline all_threads_safe_range
722 all_threads_safe ()
723 {
724 return all_threads_safe_range (all_threads_iterator::begin_t {});
725 }
726
727 extern int thread_count (process_stratum_target *proc_target);
728
729 /* Return true if we have any thread in any inferior. */
730 extern bool any_thread_p ();
731
732 /* Switch context to thread THR. Also sets the STOP_PC global. */
733 extern void switch_to_thread (struct thread_info *thr);
734
735 /* Switch context to no thread selected. */
736 extern void switch_to_no_thread ();
737
738 /* Switch from one thread to another. Does not read registers. */
739 extern void switch_to_thread_no_regs (struct thread_info *thread);
740
741 /* Marks or clears thread(s) PTID of TARG as resumed. If PTID is
742 MINUS_ONE_PTID, applies to all threads of TARG. If
743 ptid_is_pid(PTID) is true, applies to all threads of the process
744 pointed at by {TARG,PTID}. */
745 extern void set_resumed (process_stratum_target *targ,
746 ptid_t ptid, bool resumed);
747
748 /* Marks thread PTID of TARG as running, or as stopped. If PTID is
749 minus_one_ptid, marks all threads of TARG. */
750 extern void set_running (process_stratum_target *targ,
751 ptid_t ptid, bool running);
752
753 /* Marks or clears thread(s) PTID of TARG as having been requested to
754 stop. If PTID is MINUS_ONE_PTID, applies to all threads of TARG.
755 If ptid_is_pid(PTID) is true, applies to all threads of the process
756 pointed at by {TARG, PTID}. If STOP, then the
757 THREAD_STOP_REQUESTED observer is called with PTID as argument. */
758 extern void set_stop_requested (process_stratum_target *targ,
759 ptid_t ptid, bool stop);
760
761 /* Marks thread PTID of TARG as executing, or not. If PTID is
762 minus_one_ptid, marks all threads of TARG.
763
764 Note that this is different from the running state. See the
765 description of state and executing fields of struct
766 thread_info. */
767 extern void set_executing (process_stratum_target *targ,
768 ptid_t ptid, bool executing);
769
770 /* True if any (known or unknown) thread of TARG is or may be
771 executing. */
772 extern bool threads_are_executing (process_stratum_target *targ);
773
774 /* Merge the executing property of thread PTID of TARG over to its
775 thread state property (frontend running/stopped view).
776
777 "not executing" -> "stopped"
778 "executing" -> "running"
779 "exited" -> "exited"
780
781 If PTID is minus_one_ptid, go over all threads of TARG.
782
783 Notifications are only emitted if the thread state did change. */
784 extern void finish_thread_state (process_stratum_target *targ, ptid_t ptid);
785
786 /* Calls finish_thread_state on scope exit, unless release() is called
787 to disengage. */
788 using scoped_finish_thread_state
789 = FORWARD_SCOPE_EXIT (finish_thread_state);
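/* For example, a caller can arrange for finish_thread_state to run on
   scope exit, and cancel that on the path where it is not wanted
   (illustrative sketch; TARG and PTID stand for the relevant target
   and ptid):

     scoped_finish_thread_state finish_state (targ, ptid);
     ...
     finish_state.release ();   // leave the thread state untouched
*/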
790
791 /* Commands with a prefix of `thread'. */
792 extern struct cmd_list_element *thread_cmd_list;
793
794 extern void thread_command (const char *tidstr, int from_tty);
795
796 /* Print notices on thread events (attach, detach, etc.), set with
797 `set print thread-events'. */
798 extern bool print_thread_events;
799
800 /* Prints the list of threads and their details on UIOUT. If
801 REQUESTED_THREADS, a list of GDB ids/ranges, is not NULL, only
802 print threads whose ID is included in the list. If PID is not -1,
803 only print threads from the process PID. Otherwise, threads from
804 all attached PIDs are printed. If both REQUESTED_THREADS is not
805 NULL and PID is not -1, then the thread is printed if it belongs to
806 the specified process. Otherwise, an error is raised. */
807 extern void print_thread_info (struct ui_out *uiout,
808 const char *requested_threads,
809 int pid);
810
811 /* Save/restore current inferior/thread/frame. */
812
813 class scoped_restore_current_thread
814 {
815 public:
816 scoped_restore_current_thread ();
817 ~scoped_restore_current_thread ();
818
819 DISABLE_COPY_AND_ASSIGN (scoped_restore_current_thread);
820
821 /* Cancel restoring on scope exit. */
822 void dont_restore () { m_dont_restore = true; }
823
824 private:
825 void restore ();
826
827 bool m_dont_restore = false;
828 thread_info_ref m_thread;
829 inferior_ref m_inf;
830
831 frame_id m_selected_frame_id;
832 int m_selected_frame_level;
833 bool m_was_stopped;
834 /* Save/restore the language as well, because selecting a frame
835 changes the current language to the frame's language if "set
836 language auto". */
837 enum language m_lang;
838 };
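/* A typical use is to temporarily switch threads and have the previous
   thread, frame and language restored automatically (illustrative
   sketch; THR stands for some other thread to switch to):

     {
       scoped_restore_current_thread restore_thread;

       switch_to_thread (thr);
       ... work with THR selected ...
     }
*/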
839
840 /* Returns a pointer to the thread_info corresponding to
841 INFERIOR_PTID. INFERIOR_PTID *must* be in the thread list. */
842 extern struct thread_info* inferior_thread (void);
843
844 extern void update_thread_list (void);
845
846 /* Delete any thread the target says is no longer alive. */
847
848 extern void prune_threads (void);
849
850 /* Delete threads marked THREAD_EXITED. Unlike prune_threads, this
851 does not consult the target about whether the thread is alive right
852 now. */
853 extern void delete_exited_threads (void);
854
855 /* Return true if PC is in the stepping range of THREAD. */
856
857 int pc_in_thread_step_range (CORE_ADDR pc, struct thread_info *thread);
858
859 /* Enable storing stack temporaries for thread THR and disable and
860 clear the stack temporaries on destruction. Holds a strong
861 reference to THR. */
862
863 class enable_thread_stack_temporaries
864 {
865 public:
866
867 explicit enable_thread_stack_temporaries (struct thread_info *thr)
868 : m_thr (thread_info_ref::new_reference (thr))
869 {
870 m_thr->stack_temporaries_enabled = true;
871 m_thr->stack_temporaries.clear ();
872 }
873
874 ~enable_thread_stack_temporaries ()
875 {
876 m_thr->stack_temporaries_enabled = false;
877 m_thr->stack_temporaries.clear ();
878 }
879
880 DISABLE_COPY_AND_ASSIGN (enable_thread_stack_temporaries);
881
882 private:
883
884 thread_info_ref m_thr;
885 };
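/* E.g., around evaluating an inferior call whose result should live on
   THR's stack (illustrative sketch):

     {
       enable_thread_stack_temporaries enable_temps (thr);

       ... evaluate; values pushed with push_thread_stack_temporary
           are cleared again on scope exit ...
     }
*/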
886
887 extern bool thread_stack_temporaries_enabled_p (struct thread_info *tp);
888
889 extern void push_thread_stack_temporary (struct thread_info *tp, struct value *v);
890
891 extern value *get_last_thread_stack_temporary (struct thread_info *tp);
892
893 extern bool value_in_thread_stack_temporaries (struct value *,
894 struct thread_info *thr);
895
896 /* Thread step-over list type. */
897 using thread_step_over_list_node
898 = intrusive_member_node<thread_info, &thread_info::step_over_list_node>;
899 using thread_step_over_list
900 = intrusive_list<thread_info, thread_step_over_list_node>;
901 using thread_step_over_list_iterator
902 = reference_to_pointer_iterator<thread_step_over_list::iterator>;
903 using thread_step_over_list_safe_iterator
904 = basic_safe_iterator<thread_step_over_list_iterator>;
905 using thread_step_over_list_safe_range
906 = iterator_range<thread_step_over_list_safe_iterator>;
907
908 static inline thread_step_over_list_safe_range
909 make_thread_step_over_list_safe_range (thread_step_over_list &list)
910 {
911 return thread_step_over_list_safe_range
912 (thread_step_over_list_safe_iterator (list.begin (),
913 list.end ()),
914 thread_step_over_list_safe_iterator (list.end (),
915 list.end ()));
916 }
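/* The safe range makes it legitimate to unlink the currently-iterated
   thread from the chain, e.g. (illustrative sketch; CHAIN stands for
   some thread_step_over_list):

     for (thread_info *tp : make_thread_step_over_list_safe_range (chain))
       {
         ... possibly remove TP from CHAIN ...
       }
*/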
917
918 /* Add TP to the end of the global pending step-over chain. */
919
920 extern void global_thread_step_over_chain_enqueue (thread_info *tp);
921
922 /* Append the thread step over list LIST to the global thread step over
923 chain. */
924
925 extern void global_thread_step_over_chain_enqueue_chain
926 (thread_step_over_list &&list);
927
928 /* Remove TP from the global pending step-over chain. */
929
930 extern void global_thread_step_over_chain_remove (thread_info *tp);
931
932 /* Return true if TP is in any step-over chain. */
933
934 extern int thread_is_in_step_over_chain (struct thread_info *tp);
935
936 /* Return the length of the step over chain L.
937 
938    L must be a step over chain (possibly the global one).
939    L may be an empty list, in which case the returned length
940    is 0.  */
941
942 extern int thread_step_over_chain_length (const thread_step_over_list &l);
943
944 /* Cancel any ongoing execution command. */
945
946 extern void thread_cancel_execution_command (struct thread_info *thr);
947
948 /* Check whether it makes sense to access a register of the current
949 thread at this point. If not, throw an error (e.g., the thread is
950 executing). */
951 extern void validate_registers_access (void);
952
953 /* Check whether it makes sense to access a register of THREAD at this point.
954 Returns true if registers may be accessed; false otherwise. */
955 extern bool can_access_registers_thread (struct thread_info *thread);
956
957 /* Returns whether to show which thread hit the breakpoint, received a
958 signal, etc. and ended up causing a user-visible stop. This is
959 true iff we ever detected multiple threads. */
960 extern int show_thread_that_caused_stop (void);
961
962 /* Print the message for a thread or/and frame selected. */
963 extern void print_selected_thread_frame (struct ui_out *uiout,
964 user_selected_what selection);
965
966 /* Helper for the CLI's "thread" command and for MI's -thread-select.
967 Selects thread THR. TIDSTR is the original string the thread ID
968 was parsed from. This is used in the error message if THR is not
969 alive anymore. */
970 extern void thread_select (const char *tidstr, class thread_info *thr);
971
972 /* Return THREAD's name.
973
974 If THREAD has a user-given name, return it. Otherwise, query the thread's
975 target to get the name. May return nullptr. */
976 extern const char *thread_name (thread_info *thread);
977
978 #endif /* GDBTHREAD_H */