static void save_stop_reason (struct lwp_info *lp);
+static void maybe_close_proc_mem_file (pid_t pid);
+
\f
/* LWP accessors. */
num_lwps (int pid)
{
int count = 0;
- struct lwp_info *lp;
- for (lp = lwp_list; lp; lp = lp->next)
+ for (const lwp_info *lp ATTRIBUTE_UNUSED : all_lwps ())
if (lp->ptid.pid () == pid)
count++;
typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;
-/* Target hook for follow_fork. On entry inferior_ptid must be the
- ptid of the followed inferior. At return, inferior_ptid will be
- unchanged. */
+/* Target hook for follow_fork. */
-bool
-linux_nat_target::follow_fork (bool follow_child, bool detach_fork)
+void
+linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
+ target_waitkind fork_kind, bool follow_child,
+ bool detach_fork)
{
+ inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
+ follow_child, detach_fork);
+
if (!follow_child)
{
- struct lwp_info *child_lp = NULL;
- int has_vforked;
- ptid_t parent_ptid, child_ptid;
- int parent_pid, child_pid;
-
- has_vforked = (inferior_thread ()->pending_follow.kind
- == TARGET_WAITKIND_VFORKED);
- parent_ptid = inferior_ptid;
- child_ptid = inferior_thread ()->pending_follow.value.related_pid;
- parent_pid = parent_ptid.lwp ();
- child_pid = child_ptid.lwp ();
+ bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
+ ptid_t parent_ptid = inferior_ptid;
+ int parent_pid = parent_ptid.lwp ();
+ int child_pid = child_ptid.lwp ();
/* We're already attached to the parent, by default. */
- child_lp = add_lwp (child_ptid);
+ lwp_info *child_lp = add_lwp (child_ptid);
child_lp->stopped = 1;
child_lp->last_resume_kind = resume_stop;
ptrace (PTRACE_DETACH, child_pid, 0, signo);
}
}
- else
- {
- /* Switching inferior_ptid is not enough, because then
- inferior_thread () would crash by not finding the thread
- in the current inferior. */
- scoped_restore_current_thread restore_current_thread;
- thread_info *child = find_thread_ptid (this, child_ptid);
- switch_to_thread (child);
-
- /* Let the thread_db layer learn about this new process. */
- check_for_thread_db ();
- }
if (has_vforked)
{
will notice a pending event, and bypasses actually
resuming the inferior. */
parent_lp->status = 0;
- parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
+ parent_lp->waitstatus.set_vfork_done ();
parent_lp->stopped = 1;
/* If we're in async mode, need to tell the event loop
{
struct lwp_info *child_lp;
- child_lp = add_lwp (inferior_ptid);
+ child_lp = add_lwp (child_ptid);
child_lp->stopped = 1;
child_lp->last_resume_kind = resume_stop;
-
- /* Let the thread_db layer learn about this new process. */
- check_for_thread_db ();
}
-
- return false;
}
\f
creation order. This order is assumed in some cases. E.g.,
reaping status after killing all lwps of a process: the leader LWP
must be reaped last. */
-struct lwp_info *lwp_list;
+
+static intrusive_list<lwp_info> lwp_list;
+
+/* See linux-nat.h. */
+
+lwp_info_range
+all_lwps ()
+{
+ return lwp_info_range (lwp_list.begin ());
+}
+
+/* See linux-nat.h. */
+
+lwp_info_safe_range
+all_lwps_safe ()
+{
+ return lwp_info_safe_range (lwp_list.begin ());
+}
/* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
static void
lwp_list_add (struct lwp_info *lp)
{
- lp->next = lwp_list;
- if (lwp_list != NULL)
- lwp_list->prev = lp;
- lwp_list = lp;
+ lwp_list.push_front (*lp);
}
/* Remove LP from sorted-by-reverse-creation-order doubly-linked
lwp_list_remove (struct lwp_info *lp)
{
/* Remove from sorted-by-creation-order list. */
- if (lp->next != NULL)
- lp->next->prev = lp->prev;
- if (lp->prev != NULL)
- lp->prev->next = lp->next;
- if (lp == lwp_list)
- lwp_list = lp->next;
+ lwp_list.erase (lwp_list.iterator_to (*lp));
}
\f
/* Destroy and free LP. */
-static void
-lwp_free (struct lwp_info *lp)
+lwp_info::~lwp_info ()
{
/* Let the arch specific bits release arch_lwp_info. */
- linux_target->low_delete_thread (lp->arch_private);
-
- xfree (lp);
+ linux_target->low_delete_thread (this->arch_private);
}
/* Traversal function for purge_lwp_list. */
{
htab_clear_slot (lwp_lwpid_htab, slot);
lwp_list_remove (lp);
- lwp_free (lp);
+ delete lp;
}
return 1;
static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
- struct lwp_info *lp;
-
gdb_assert (ptid.lwp_p ());
- lp = XNEW (struct lwp_info);
-
- memset (lp, 0, sizeof (struct lwp_info));
+ lwp_info *lp = new lwp_info (ptid);
- lp->last_resume_kind = resume_continue;
- lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
-
- lp->ptid = ptid;
- lp->core = -1;
/* Add to sorted-by-reverse-creation-order list. */
lwp_list_add (lp);
static void
delete_lwp (ptid_t ptid)
{
- struct lwp_info *lp;
- void **slot;
- struct lwp_info dummy;
+ lwp_info dummy (ptid);
- dummy.ptid = ptid;
- slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
+ void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
if (slot == NULL)
return;
- lp = *(struct lwp_info **) slot;
+ lwp_info *lp = *(struct lwp_info **) slot;
gdb_assert (lp != NULL);
htab_clear_slot (lwp_lwpid_htab, slot);
lwp_list_remove (lp);
/* Release. */
- lwp_free (lp);
+ delete lp;
}
/* Return a pointer to the structure describing the LWP corresponding
static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
- struct lwp_info *lp;
int lwp;
- struct lwp_info dummy;
if (ptid.lwp_p ())
lwp = ptid.lwp ();
else
lwp = ptid.pid ();
- dummy.ptid = ptid_t (0, lwp, 0);
- lp = (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
- return lp;
+ lwp_info dummy (ptid_t (0, lwp));
+ return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
}
/* See nat/linux-nat.h. */
iterate_over_lwps (ptid_t filter,
gdb::function_view<iterate_over_lwps_ftype> callback)
{
- struct lwp_info *lp, *lpnext;
-
- for (lp = lwp_list; lp; lp = lpnext)
+ for (lwp_info *lp : all_lwps_safe ())
{
- lpnext = lp->next;
-
if (lp->ptid.matches (filter))
{
if (callback (lp) != 0)
{
/* The pid we tried to attach has apparently just exited. */
linux_nat_debug_printf ("Failed to stop %d: %s", pid,
- status_to_str (status));
+ status_to_str (status).c_str ());
return status;
}
{
*signalled = 1;
linux_nat_debug_printf ("Received %s after attaching",
- status_to_str (status));
+ status_to_str (status).c_str ());
}
return status;
/* The ptrace base target adds the main thread with (pid,0,0)
format. Decorate it with lwp info. */
ptid = ptid_t (inferior_ptid.pid (),
- inferior_ptid.pid (),
- 0);
+ inferior_ptid.pid ());
thread_change_ptid (linux_target, inferior_ptid, ptid);
/* Add the initial process as the first LWP to the list. */
/* Save the wait status to report later. */
lp->resumed = 1;
linux_nat_debug_printf ("waitpid %ld, saving status %s",
- (long) lp->ptid.pid (), status_to_str (status));
+ (long) lp->ptid.pid (),
+ status_to_str (status).c_str ());
lp->status = status;
signal pass state). Normally SIGTRAP isn't set to pass state, so
this is really a corner case. */
- if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
else if (lp->status)
signo = gdb_signal_from_host (WSTOPSIG (lp->status));
{
struct thread_info *tp = find_thread_ptid (linux_target, lp->ptid);
- if (target_is_non_stop_p () && !tp->executing)
+ if (target_is_non_stop_p () && !tp->executing ())
{
- if (tp->suspend.waitstatus_pending_p)
- signo = tp->suspend.waitstatus.value.sig;
+ if (tp->has_pending_waitstatus ())
+ signo = tp->pending_waitstatus ().sig ();
else
- signo = tp->suspend.stop_signal;
+ signo = tp->stop_signal ();
}
else if (!target_is_non_stop_p ())
{
if (last_target == linux_target
&& lp->ptid.lwp () == last_ptid.lwp ())
- signo = tp->suspend.stop_signal;
+ signo = tp->stop_signal ();
}
}
detach_success (inf);
}
+
+ maybe_close_proc_mem_file (pid);
}
/* Resume execution of the inferior process. If STEP is nonzero,
{
lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
lp->status = 0;
- lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
+ lp->waitstatus.set_ignore ();
return 1;
}
return 0;
thread = find_thread_ptid (linux_target, lp->ptid);
if (thread != NULL)
{
- signo = thread->suspend.stop_signal;
- thread->suspend.stop_signal = GDB_SIGNAL_0;
+ signo = thread->stop_signal ();
+ thread->set_stop_signal (GDB_SIGNAL_0);
}
}
if (catching_syscall_number (syscall_number))
{
/* Alright, an event to report. */
- ourstatus->kind = lp->syscall_state;
- ourstatus->value.syscall_number = syscall_number;
+ if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
+ ourstatus->set_syscall_entry (syscall_number);
+ else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
+ ourstatus->set_syscall_return (syscall_number);
+ else
+ gdb_assert_not_reached ("unexpected syscall state");
linux_nat_debug_printf
("stopping for %s of syscall %d for LWP %ld",
_("wait returned unexpected status 0x%x"), status);
}
- ourstatus->value.related_pid = ptid_t (new_pid, new_pid, 0);
+ ptid_t child_ptid (new_pid, new_pid);
if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
{
/* This won't actually modify the breakpoint list, but will
physically remove the breakpoints from the child. */
- detach_breakpoints (ptid_t (new_pid, new_pid, 0));
+ detach_breakpoints (ptid_t (new_pid, new_pid));
/* Retain child fork in ptrace (stopped) state. */
if (!find_fork_pid (new_pid))
/* Report as spurious, so that infrun doesn't want to follow
this fork. We're actually doing an infcall in
linux-fork.c. */
- ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
+ ourstatus->set_spurious ();
/* Report the stop to the core. */
return 0;
}
if (event == PTRACE_EVENT_FORK)
- ourstatus->kind = TARGET_WAITKIND_FORKED;
+ ourstatus->set_forked (child_ptid);
else if (event == PTRACE_EVENT_VFORK)
- ourstatus->kind = TARGET_WAITKIND_VFORKED;
+ ourstatus->set_vforked (child_ptid);
else if (event == PTRACE_EVENT_CLONE)
{
struct lwp_info *new_lp;
- ourstatus->kind = TARGET_WAITKIND_IGNORE;
+ ourstatus->set_ignore ();
linux_nat_debug_printf
("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);
- new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid, 0));
+ new_lp = add_lwp (ptid_t (lp->ptid.pid (), new_pid));
new_lp->stopped = 1;
new_lp->resumed = 1;
/* Save the wait status to report later. */
linux_nat_debug_printf
("waitpid of new LWP %ld, saving status %s",
- (long) new_lp->ptid.lwp (), status_to_str (status));
+ (long) new_lp->ptid.lwp (), status_to_str (status).c_str ());
new_lp->status = status;
}
else if (report_thread_events)
{
- new_lp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
+ new_lp->waitstatus.set_thread_created ();
new_lp->status = status;
}
{
linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());
- ourstatus->kind = TARGET_WAITKIND_EXECD;
- ourstatus->value.execd_pathname
- = xstrdup (linux_proc_pid_to_exec_file (pid));
+ /* Close the /proc/<pid>/mem file if it was open for this
+ inferior. */
+ maybe_close_proc_mem_file (lp->ptid.pid ());
+
+ ourstatus->set_execd
+ (make_unique_xstrdup (linux_proc_pid_to_exec_file (pid)));
/* The thread that execed must have been resumed, but, when a
thread execs, it changes its tid to the tgid, and the old
("Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping",
lp->ptid.lwp ());
- ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
+ ourstatus->set_vfork_done ();
return 0;
}
linux_nat_debug_printf ("waitpid %s received %s",
target_pid_to_str (lp->ptid).c_str (),
- status_to_str (status));
+ status_to_str (status).c_str ());
/* Check if the thread has exited. */
if (WIFEXITED (status) || WIFSIGNALED (status))
/* The thread was stopped with a signal other than SIGSTOP. */
linux_nat_debug_printf ("Pending event %s in %s",
- status_to_str ((int) status),
+ status_to_str ((int) status).c_str (),
target_pid_to_str (lp->ptid).c_str ());
/* Save the sigtrap event. */
/* We check for lp->waitstatus in addition to lp->status, because we
can have pending process exits recorded in lp->status and
W_EXITCODE(0,0) happens to be 0. */
- return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
+ return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
}
/* Select the Nth LWP that has had an event. */
}
/* Check if we should go on and pass this event to common code.
- Return the affected lwp if we should, or NULL otherwise. */
-static struct lwp_info *
+ If so, save the status to the lwp_info structure associated to LWPID. */
+
+static void
linux_nat_filter_event (int lwpid, int status)
{
struct lwp_info *lp;
/* A multi-thread exec after we had seen the leader exiting. */
linux_nat_debug_printf ("Re-adding thread group leader LWP %d.", lwpid);
- lp = add_lwp (ptid_t (lwpid, lwpid, 0));
+ lp = add_lwp (ptid_t (lwpid, lwpid));
lp->stopped = 1;
lp->resumed = 1;
add_thread (linux_target, lp->ptid);
if (WIFSTOPPED (status) && !lp)
{
linux_nat_debug_printf ("saving LWP %ld status %s in stopped_pids list",
- (long) lwpid, status_to_str (status));
+ (long) lwpid, status_to_str (status).c_str ());
add_to_pid_list (&stopped_pids, lwpid, status);
- return NULL;
+ return;
}
/* Make sure we don't report an event for the exit of an LWP not in
if we detach from a program we originally forked and then it
exits. */
if (!WIFSTOPPED (status) && !lp)
- return NULL;
+ return;
/* This LWP is stopped now. (And if dead, this prevents it from
ever being continued.) */
on. */
status = W_STOPCODE (SIGTRAP);
if (linux_handle_syscall_trap (lp, 0))
- return NULL;
+ return;
}
else
{
linux_nat_debug_printf ("Handling extended status 0x%06x", status);
if (linux_handle_extended_wait (lp, status))
- return NULL;
+ return;
}
/* Check if the thread has exited. */
was not the end of the debugged application and should be
ignored. */
exit_lwp (lp);
- return NULL;
+ return;
}
/* Note that even if the leader was ptrace-stopped, it can still
/* Store the pending event in the waitstatus, because
W_EXITCODE(0,0) == 0. */
store_waitstatus (&lp->waitstatus, status);
- return lp;
+ return;
}
/* Make sure we don't report a SIGSTOP that we sent ourselves in
linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
gdb_assert (lp->resumed);
- return NULL;
+ return;
}
}
gdb_assert (lp->resumed);
/* Discard the event. */
- return NULL;
+ return;
}
/* Don't report signals that GDB isn't interested in, such as
target_pid_to_str (lp->ptid).c_str (),
(signo != GDB_SIGNAL_0
? strsignal (gdb_signal_to_host (signo)) : "0"));
- return NULL;
+ return;
}
}
gdb_assert (lp);
lp->status = status;
save_stop_reason (lp);
- return lp;
}
/* Detect zombie thread group leaders, and "exit" them. We can't reap
if (num_lwps (ptid.pid ()) > 1)
{
if (report_thread_events)
- ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
+ ourstatus->set_thread_exited (0);
else
- ourstatus->kind = TARGET_WAITKIND_IGNORE;
+ ourstatus->set_ignore ();
exit_lwp (event_child);
}
if (lp != NULL)
{
linux_nat_debug_printf ("Using pending wait status %s for %s.",
- status_to_str (lp->status),
+ status_to_str (lp->status).c_str (),
target_pid_to_str (lp->ptid).c_str ());
}
if (lwpid > 0)
{
linux_nat_debug_printf ("waitpid %ld received %s",
- (long) lwpid, status_to_str (status));
+ (long) lwpid,
+ status_to_str (status).c_str ());
linux_nat_filter_event (lwpid, status);
/* Retry until nothing comes out of waitpid. A single
{
linux_nat_debug_printf ("exit (no resumed LWP)");
- ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
+ ourstatus->set_no_resumed ();
restore_child_signals_mask (&prev_mask);
return minus_one_ptid;
{
linux_nat_debug_printf ("exit (ignore)");
- ourstatus->kind = TARGET_WAITKIND_IGNORE;
+ ourstatus->set_ignore ();
restore_child_signals_mask (&prev_mask);
return minus_one_ptid;
}
target_pid_to_str (lp->ptid).c_str ());
}
- if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
+ if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
{
*ourstatus = lp->waitstatus;
- lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
+ lp->waitstatus.set_ignore ();
}
else
store_waitstatus (ourstatus, status);
restore_child_signals_mask (&prev_mask);
if (last_resume_kind == resume_stop
- && ourstatus->kind == TARGET_WAITKIND_STOPPED
+ && ourstatus->kind () == TARGET_WAITKIND_STOPPED
&& WSTOPSIG (status) == SIGSTOP)
{
/* A thread that has been requested to stop by GDB with
target_stop, and it stopped cleanly, so report as SIG0. The
use of SIGSTOP is an implementation detail. */
- ourstatus->value.sig = GDB_SIGNAL_0;
+ ourstatus->set_stopped (GDB_SIGNAL_0);
}
- if (ourstatus->kind == TARGET_WAITKIND_EXITED
- || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
+ if (ourstatus->kind () == TARGET_WAITKIND_EXITED
+ || ourstatus->kind () == TARGET_WAITKIND_SIGNALLED)
lp->core = -1;
else
lp->core = linux_common_core_of_thread (lp->ptid);
- if (ourstatus->kind == TARGET_WAITKIND_EXITED)
+ if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
return filter_exit_event (lp, ourstatus);
return lp->ptid;
may be more. If we requested a specific lwp or process, also
assume there may be more. */
if (target_is_async_p ()
- && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
- && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
+ && ((ourstatus->kind () != TARGET_WAITKIND_IGNORE
+ && ourstatus->kind () != TARGET_WAITKIND_NO_RESUMED)
|| ptid != minus_one_ptid))
async_file_mark ();
{
struct target_waitstatus *ws = &thread->pending_follow;
- if (ws->kind == TARGET_WAITKIND_FORKED
- || ws->kind == TARGET_WAITKIND_VFORKED)
+ if (ws->kind () == TARGET_WAITKIND_FORKED
+ || ws->kind () == TARGET_WAITKIND_VFORKED)
{
- ptid_t child_ptid = ws->value.related_pid;
+ ptid_t child_ptid = ws->child_ptid ();
int child_pid = child_ptid.pid ();
int child_lwp = child_ptid.lwp ();
purge_lwp_list (pid);
+ /* Close the /proc/<pid>/mem file if it was open for this
+ inferior. */
+ maybe_close_proc_mem_file (pid);
+
if (! forks_exist_p ())
/* Normal case, no other forks available. */
inf_ptrace_target::mourn_inferior ();
ULONGEST *xfered_len);
static enum target_xfer_status
-linux_proc_xfer_partial (enum target_object object,
- const char *annex, gdb_byte *readbuf,
- const gdb_byte *writebuf,
- ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
+linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
+ ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
enum target_xfer_status
linux_nat_target::xfer_partial (enum target_object object,
const gdb_byte *writebuf,
ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
- enum target_xfer_status xfer;
-
if (object == TARGET_OBJECT_SIGNAL_INFO)
return linux_xfer_siginfo (object, annex, readbuf, writebuf,
offset, len, xfered_len);
return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
offset, len, xfered_len);
- /* GDB calculates all addresses in the largest possible address
- width.
- The address width must be masked before its final use - either by
- linux_proc_xfer_partial or inf_ptrace_target::xfer_partial.
-
- Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
-
if (object == TARGET_OBJECT_MEMORY)
{
+ /* GDB calculates all addresses in the largest possible address
+ width. The address width must be masked before its final use
+ by linux_proc_xfer_memory_partial.
+
+ Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
int addr_bit = gdbarch_addr_bit (target_gdbarch ());
if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
offset &= ((ULONGEST) 1 << addr_bit) - 1;
- }
- xfer = linux_proc_xfer_partial (object, annex, readbuf, writebuf,
- offset, len, xfered_len);
- if (xfer != TARGET_XFER_EOF)
- return xfer;
+ return linux_proc_xfer_memory_partial (readbuf, writebuf,
+ offset, len, xfered_len);
+ }
return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
offset, len, xfered_len);
void
linux_nat_target::update_thread_list ()
{
- struct lwp_info *lwp;
-
/* We add/delete threads from the list as clone/exit events are
processed, so just try deleting exited threads still in the
thread list. */
/* Update the processor core that each lwp/thread was last seen
running on. */
- ALL_LWPS (lwp)
+ for (lwp_info *lwp : all_lwps ())
{
/* Avoid accessing /proc if the thread hasn't run since we last
time we fetched the thread's core. Accessing /proc becomes
return linux_proc_pid_to_exec_file (pid);
}
-/* Implement the to_xfer_partial target method using /proc/<pid>/mem.
- Because we can use a single read/write call, this can be much more
- efficient than banging away at PTRACE_PEEKTEXT. */
+/* Keep the /proc/<pid>/mem file open between memory accesses, as a
+ cache to avoid constantly closing/opening the file in the common
+ case of multiple memory reads/writes from/to the same inferior.
+ Note we don't keep a file open per inferior to avoid keeping too
+ many file descriptors open, which can run into resource limits. */
+static struct
+{
+ /* The LWP this open file is for. Note that after opening the file,
+ even if the thread subsequently exits, the open file is still
+ usable for accessing memory. It's only when the whole process
+ exits or execs that the file becomes invalid (at which point
+ reads/writes return EOF). */
+ ptid_t ptid;
-static enum target_xfer_status
-linux_proc_xfer_partial (enum target_object object,
- const char *annex, gdb_byte *readbuf,
- const gdb_byte *writebuf,
- ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
+ /* The file descriptor. -1 if file is not open. */
+ int fd = -1;
+
+ /* Close FD and clear it to -1. */
+ void close ()
+ {
+ linux_nat_debug_printf ("closing fd %d for /proc/%d/task/%ld/mem",
+ fd, ptid.pid (), ptid.lwp ());
+ ::close (fd);
+ fd = -1;
+ }
+} last_proc_mem_file;
+
+/* Close the /proc/<pid>/mem file if its LWP matches PTID. */
+
+static void
+maybe_close_proc_mem_file (pid_t pid)
{
- LONGEST ret;
- int fd;
- char filename[64];
+ if (last_proc_mem_file.ptid.pid () == pid)
+ last_proc_mem_file.close ();
+}
- if (object != TARGET_OBJECT_MEMORY)
- return TARGET_XFER_EOF;
+/* Helper for linux_proc_xfer_memory_partial. Accesses /proc via
+ PTID. Returns -1 on error, with errno set. Returns number of
+ read/written bytes otherwise. Returns 0 on EOF, which indicates
+ the address space is gone (because the process exited or
+ execed). */
- /* Don't bother for one word. */
- if (len < 3 * sizeof (long))
- return TARGET_XFER_EOF;
+static ssize_t
+linux_proc_xfer_memory_partial_pid (ptid_t ptid,
+ gdb_byte *readbuf, const gdb_byte *writebuf,
+ ULONGEST offset, LONGEST len)
+{
+ ssize_t ret;
- /* We could keep this file open and cache it - possibly one per
- thread. That requires some juggling, but is even faster. */
- xsnprintf (filename, sizeof filename, "/proc/%ld/mem",
- inferior_ptid.lwp ());
- fd = gdb_open_cloexec (filename, ((readbuf ? O_RDONLY : O_WRONLY)
- | O_LARGEFILE), 0);
- if (fd == -1)
- return TARGET_XFER_EOF;
+ /* As long as we're hitting the same inferior, the previously open
+ file is good, even if the thread it was open for exits. */
+ if (last_proc_mem_file.fd != -1
+ && last_proc_mem_file.ptid.pid () != ptid.pid ())
+ last_proc_mem_file.close ();
+
+ if (last_proc_mem_file.fd == -1)
+ {
+ /* Actually use /proc/<pid>/task/<lwp>/mem instead of
+ /proc/<lwp>/mem to avoid PID-reuse races, as we may be trying
+ to read memory via a thread which we've already reaped.
+ /proc/<lwp>/mem could open a file for the wrong process. If
+ the LWPID is reused for the same process it's OK, we can read
+ memory through it just fine. If the LWPID is reused for a
+ different process, then the open will fail because the path
+ won't exist. */
+ char filename[64];
+ xsnprintf (filename, sizeof filename,
+ "/proc/%d/task/%ld/mem", ptid.pid (), ptid.lwp ());
+
+ last_proc_mem_file.fd
+ = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
+
+ if (last_proc_mem_file.fd == -1)
+ {
+ linux_nat_debug_printf ("opening %s failed: %s (%d)\n",
+ filename, safe_strerror (errno), errno);
+ return -1;
+ }
+ last_proc_mem_file.ptid = ptid;
+
+ linux_nat_debug_printf ("opened fd %d for %s",
+ last_proc_mem_file.fd, filename);
+ }
+
+ int fd = last_proc_mem_file.fd;
/* Use pread64/pwrite64 if available, since they save a syscall and can
handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
: write (fd, writebuf, len));
#endif
- close (fd);
+ if (ret == -1)
+ {
+ linux_nat_debug_printf ("accessing fd %d for pid %ld failed: %s (%d)\n",
+ fd, ptid.lwp (),
+ safe_strerror (errno), errno);
+ }
+ else if (ret == 0)
+ {
+ linux_nat_debug_printf ("accessing fd %d for pid %ld got EOF\n",
+ fd, ptid.lwp ());
+ }
- if (ret == -1 || ret == 0)
- return TARGET_XFER_EOF;
+ return ret;
+}
+
+/* Implement the to_xfer_partial target method using /proc/<pid>/mem.
+ Because we can use a single read/write call, this can be much more
+ efficient than banging away at PTRACE_PEEKTEXT. Also, unlike
+ PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running
+ threads. */
+
+static enum target_xfer_status
+linux_proc_xfer_memory_partial (gdb_byte *readbuf, const gdb_byte *writebuf,
+ ULONGEST offset, LONGEST len,
+ ULONGEST *xfered_len)
+{
+ /* Unlike PTRACE_PEEKTEXT/PTRACE_POKETEXT, reading/writing from/to
+ /proc/<pid>/mem works with running threads, and even exited
+ threads if the file was already open. If we need to open or
+ reopen the /proc file though, we may get an EACCES error
+ ("Permission denied"), meaning the thread is gone but its exit
+ status isn't reaped yet, or ENOENT if the thread is gone and
+ already reaped. In that case, just try opening the file for
+ another thread in the process. If all threads fail, then it must
+ mean the whole process exited, in which case there's nothing else
+ to do and we just fail the memory access.
+
+ Note we don't simply always access via the leader thread because
+ the leader may exit without exiting the whole process. See
+ gdb.threads/leader-exit.exp, for example. */
+
+ /* It's frequently the case that the selected thread is stopped, and
+ is thus not likely to exit (unless something kills the process
+ outside our control, with e.g., SIGKILL). Give that one a try
+ first.
+
+ Also, inferior_ptid may actually point at an LWP not in lwp_list.
+ This happens when we're detaching from a fork child that we don't
+ want to debug ("set detach-on-fork on"), and the breakpoints
+ module uninstalls breakpoints from the fork child. Which process
+ to access is given by inferior_ptid. */
+ int res = linux_proc_xfer_memory_partial_pid (inferior_ptid,
+ readbuf, writebuf,
+ offset, len);
+ if (res == 0)
+ {
+ /* EOF means the address space is gone, the whole
+ process exited or execed. */
+ return TARGET_XFER_EOF;
+ }
+ else if (res != -1)
+ {
+ *xfered_len = res;
+ return TARGET_XFER_OK;
+ }
else
{
- *xfered_len = ret;
+ /* If we simply raced with the thread exiting (EACCES), or the
+ current thread is THREAD_EXITED (ENOENT), try some other
+ thread. It's easier to handle an ENOENT failure than check
+ for THREAD_EXIT upfront because this function is called
+ before a thread for inferior_ptid is added to the thread
+ list. */
+ if (errno != EACCES && errno != ENOENT)
+ return TARGET_XFER_EOF;
+ }
+
+ int cur_pid = current_inferior ()->pid;
+
+ if (inferior_ptid.pid () != cur_pid)
+ {
+ /* We're accessing a fork child, and the access above failed.
+ Don't bother iterating the LWP list, since there's no other
+ LWP for this process. */
+ return TARGET_XFER_EOF;
+ }
+
+ /* Iterate over LWPs of the current inferior, trying to access
+ memory through one of them. */
+ for (lwp_info *lp : all_lwps ())
+ {
+ if (lp->ptid.pid () != cur_pid)
+ continue;
+
+ res = linux_proc_xfer_memory_partial_pid (lp->ptid,
+ readbuf, writebuf,
+ offset, len);
+
+ if (res == 0)
+ {
+ /* EOF means the address space is gone, the whole process
+ exited or execed. */
+ return TARGET_XFER_EOF;
+ }
+ else if (res == -1)
+ {
+ if (errno == EACCES)
+ {
+ /* This LWP is gone, try another one. */
+ continue;
+ }
+
+ return TARGET_XFER_EOF;
+ }
+
+ *xfered_len = res;
return TARGET_XFER_OK;
}
-}
+ /* No luck. */
+ return TARGET_XFER_EOF;
+}
/* Parse LINE as a signal set and add its set bits to SIGS. */
int pid = inferior_ptid.pid ();
std::vector<static_tracepoint_marker> markers;
const char *p = s;
- ptid_t ptid = ptid_t (pid, 0, 0);
+ ptid_t ptid = ptid_t (pid, 0);
static_tracepoint_marker marker;
/* Pause all */
bool
linux_nat_target::supports_disable_randomization ()
{
-#ifdef HAVE_PERSONALITY
return true;
-#else
- return false;
-#endif
}
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,