enum target_hw_bp_type type,
struct expression *cond)
{
- struct lwp_info *lp;
int idx;
long dbr_addr, dbr_mask;
int max_watchpoints = 4;
debug_registers[2 * idx] = dbr_addr;
debug_registers[2 * idx + 1] = dbr_mask;
- ALL_LWPS (lp)
+
+ for (const lwp_info *lp : all_lwps ())
{
store_debug_register_pair (lp->ptid, idx, &dbr_addr, &dbr_mask);
enable_watchpoints_in_psr (lp->ptid);
dbr_mask = debug_registers[2 * idx + 1];
if ((dbr_mask & (0x3UL << 62)) && addr == (CORE_ADDR) dbr_addr)
{
- struct lwp_info *lp;
-
debug_registers[2 * idx] = 0;
debug_registers[2 * idx + 1] = 0;
dbr_addr = 0;
dbr_mask = 0;
- ALL_LWPS (lp)
+ for (const lwp_info *lp : all_lwps ())
store_debug_register_pair (lp->ptid, idx, &dbr_addr, &dbr_mask);
return 0;
num_lwps (int pid)
{
int count = 0;
- struct lwp_info *lp;
- for (lp = lwp_list; lp; lp = lp->next)
+ for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
if (lp->ptid.pid () == pid)
count++;
creation order. This order is assumed in some cases. E.g.,
reaping status after killing all lwps of a process: the leader LWP
must be reaped last. */
-struct lwp_info *lwp_list;
+
+static intrusive_list<lwp_info> lwp_list;
+
+/* See linux-nat.h. */
+
+lwp_info_range
+all_lwps ()
+{
+  /* Iterates newest-first, i.e. reverse creation order (elements are
+     inserted with push_front in lwp_list_add).  Presumably the range's
+     end iterator is default-constructed by iterator_range — confirm
+     against gdbsupport's iterator_range.  */
+  return lwp_info_range (lwp_list.begin ());
+}
+
+/* See linux-nat.h. */
+
+lwp_info_safe_range
+all_lwps_safe ()
+{
+  /* Safe variant: per the header's contract, iteration tolerates
+     deletion of the current LWP (basic_safe_range advances before
+     yielding).  Use this when the loop body may delete LWPs.  */
+  return lwp_info_safe_range (lwp_list.begin ());
+}
/* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
static void
lwp_list_add (struct lwp_info *lp)
{
-  lp->next = lwp_list;
-  if (lwp_list != NULL)
-    lwp_list->prev = lp;
-  lwp_list = lp;
+  /* push_front keeps the list newest-first, preserving the
+     reverse-creation order documented above (the leader LWP, created
+     first, stays last so it can be reaped last).  */
+  lwp_list.push_front (*lp);
}
/* Remove LP from sorted-by-reverse-creation-order doubly-linked
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from sorted-by-reverse-creation-order list. */
-  if (lp->next != NULL)
-    lp->next->prev = lp->prev;
-  if (lp->prev != NULL)
-    lp->prev->next = lp->next;
-  if (lp == lwp_list)
-    lwp_list = lp->next;
+  /* iterator_to requires that LP currently be linked on lwp_list;
+     erase then unlinks it in constant time (the list node is embedded
+     in lwp_info via intrusive_list_node).  */
+  lwp_list.erase (lwp_list.iterator_to (*lp));
}
\f
iterate_over_lwps (ptid_t filter,
gdb::function_view<iterate_over_lwps_ftype> callback)
{
- struct lwp_info *lp, *lpnext;
-
- for (lp = lwp_list; lp; lp = lpnext)
+ for (lwp_info *lp : all_lwps_safe ())
{
- lpnext = lp->next;
-
if (lp->ptid.matches (filter))
{
if (callback (lp) != 0)
void
linux_nat_target::update_thread_list ()
{
- struct lwp_info *lwp;
-
/* We add/delete threads from the list as clone/exit events are
processed, so just try deleting exited threads still in the
thread list. */
/* Update the processor core that each lwp/thread was last seen
running on. */
- ALL_LWPS (lwp)
+ for (lwp_info *lwp : all_lwps ())
{
/* Avoid accessing /proc if the thread hasn't run since we last
time we fetched the thread's core. Accessing /proc becomes
/* Iterate over LWPs of the current inferior, trying to access
memory through one of them. */
- for (lwp_info *lp = lwp_list; lp != nullptr; lp = lp->next)
+ for (lwp_info *lp : all_lwps ())
{
if (lp->ptid.pid () != cur_pid)
continue;
struct arch_lwp_info;
-/* Structure describing an LWP. This is public only for the purposes
- of ALL_LWPS; target-specific code should generally not access it
- directly. */
+/* Structure describing an LWP. */
-struct lwp_info
+struct lwp_info : intrusive_list_node<lwp_info>
{
lwp_info (ptid_t ptid)
: ptid (ptid)
/* Arch-specific additions. */
struct arch_lwp_info *arch_private = nullptr;
-
- /* Previous and next pointers in doubly-linked list of known LWPs,
- sorted by reverse creation order. */
- struct lwp_info *prev = nullptr;
- struct lwp_info *next = nullptr;
};
-/* The global list of LWPs, for ALL_LWPS. Unlike the threads list,
- there is always at least one LWP on the list while the GNU/Linux
- native target is active. */
-extern struct lwp_info *lwp_list;
+/* lwp_info iterator and range types. */
+
+using lwp_info_iterator
+ = reference_to_pointer_iterator<intrusive_list<lwp_info>::iterator>;
+using lwp_info_range = iterator_range<lwp_info_iterator>;
+using lwp_info_safe_range = basic_safe_range<lwp_info_range>;
+
+/* Get an iterable range over all lwps. */
+
+lwp_info_range all_lwps ();
+
+/* Same as the above, but safe against deletion while iterating. */
+
+lwp_info_safe_range all_lwps_safe ();
/* Does the current host support PTRACE_GETREGSET? */
extern enum tribool have_ptrace_getregset;
-/* Iterate over each active thread (light-weight process). */
-#define ALL_LWPS(LP) \
- for ((LP) = lwp_list; \
- (LP) != NULL; \
- (LP) = (LP)->next)
-
/* Called from the LWP layer to inform the thread_db layer that PARENT
spawned CHILD. Both LWPs are currently stopped. This function
does whatever is required to have the child LWP under the
if (info->td_ta_thr_iter_p == NULL)
{
- struct lwp_info *lp;
int pid = inferior_ptid.pid ();
thread_info *curr_thread = inferior_thread ();
linux_stop_and_wait_all_lwps ();
- ALL_LWPS (lp)
+ for (const lwp_info *lp : all_lwps ())
if (lp->ptid.pid () == pid)
thread_from_lwp (curr_thread, lp->ptid);
static int
write_watchpoint_regs (void)
{
- struct lwp_info *lp;
- int tid;
-
- ALL_LWPS (lp)
+ for (const lwp_info *lp : all_lwps ())
{
- tid = lp->ptid.lwp ();
+ int tid = lp->ptid.lwp ();
if (ptrace (PTRACE_SET_WATCH_REGS, tid, &watch_mirror, NULL) == -1)
perror_with_name (_("Couldn't write debug register"));
}