/* GNU/Linux native-dependent code common to multiple platforms.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
This file is part of GDB.
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
+#include "solib.h"
#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif /* !HAVE_PERSONALITY */
}
-static int linux_parent_pid;
-
struct simple_pid_list
{
int pid;
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
+
new_pid->pid = pid;
new_pid->status = status;
new_pid->next = *listp;
if ((*p)->pid == pid)
{
struct simple_pid_list *next = (*p)->next;
+
*status = (*p)->status;
xfree (*p);
*p = next;
static void
linux_tracefork_child (void)
{
- int ret;
-
ptrace (PTRACE_TRACEME, 0, 0, 0);
kill (getpid (), SIGSTOP);
fork ();
if (!detach_fork)
linux_enable_event_reporting (pid_to_ptid (child_pid));
+ if (has_vforked
+ && !non_stop /* Non-stop always resumes both branches. */
+ && (!target_is_async_p () || sync_execution)
+ && !(follow_child || detach_fork || sched_multi))
+ {
+ /* The parent stays blocked inside the vfork syscall until the
+ child execs or exits. If we don't let the child run, then
+ the parent stays blocked. If we're telling the parent to run
+ in the foreground, the user will not be able to ctrl-c to get
+ back the terminal, effectively hanging the debug session. */
+ fprintf_filtered (gdb_stderr, _("\
+Can not resume the parent process over vfork in the foreground while \n\
+holding the child stopped. Try \"set detach-on-fork\" or \
+\"set schedule-multiple\".\n"));
+ return 1;
+ }
+
if (! follow_child)
{
- /* We're already attached to the parent, by default. */
+ struct lwp_info *child_lp = NULL;
- /* Before detaching from the child, remove all breakpoints from
- it. If we forked, then this has already been taken care of
- by infrun.c. If we vforked however, any breakpoint inserted
- in the parent is visible in the child, even those added while
- stopped in a vfork catchpoint. This won't actually modify
- the breakpoint list, but will physically remove the
- breakpoints from the child. This will remove the breakpoints
- from the parent also, but they'll be reinserted below. */
- if (has_vforked)
- detach_breakpoints (child_pid);
+ /* We're already attached to the parent, by default. */
/* Detach new forked process? */
if (detach_fork)
{
+ /* Before detaching from the child, remove all breakpoints
+ from it. If we forked, then this has already been taken
+ care of by infrun.c. If we vforked however, any
+ breakpoint inserted in the parent is visible in the
+ child, even those added while stopped in a vfork
+ catchpoint. This will remove the breakpoints from the
+ parent also, but they'll be reinserted below. */
+ if (has_vforked)
+ {
+ /* keep breakpoints list in sync. */
+ remove_breakpoints_pid (GET_PID (inferior_ptid));
+ }
+
if (info_verbose || debug_linux_nat)
{
target_terminal_ours ();
else
{
struct inferior *parent_inf, *child_inf;
- struct lwp_info *lp;
struct cleanup *old_chain;
/* Add process to GDB's tables. */
copy_terminal_info (child_inf, parent_inf);
old_chain = save_inferior_ptid ();
+ save_current_program_space ();
inferior_ptid = ptid_build (child_pid, child_pid, 0);
add_thread (inferior_ptid);
- lp = add_lwp (inferior_ptid);
- lp->stopped = 1;
+ child_lp = add_lwp (inferior_ptid);
+ child_lp->stopped = 1;
+ child_lp->resumed = 1;
+ /* If this is a vfork child, then the address-space is
+ shared with the parent. */
+ if (has_vforked)
+ {
+ child_inf->pspace = parent_inf->pspace;
+ child_inf->aspace = parent_inf->aspace;
+
+ /* The parent will be frozen until the child is done
+ with the shared region. Keep track of the
+ parent. */
+ child_inf->vfork_parent = parent_inf;
+ child_inf->pending_detach = 0;
+ parent_inf->vfork_child = child_inf;
+ parent_inf->pending_detach = 0;
+ }
+ else
+ {
+ child_inf->aspace = new_address_space ();
+ child_inf->pspace = add_program_space (child_inf->aspace);
+ child_inf->removable = 1;
+ set_current_program_space (child_inf->pspace);
+ clone_program_space (child_inf->pspace, parent_inf->pspace);
+
+ /* Let the shared library layer (solib-svr4) learn about
+ this new process, relocate the cloned exec, pull in
+ shared libraries, and install the solib event
+ breakpoint. If a "cloned-VM" event was propagated
+ better throughout the core, this wouldn't be
+ required. */
+ solib_create_inferior_hook (0);
+ }
+
+ /* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
do_cleanups (old_chain);
if (has_vforked)
{
+ struct lwp_info *lp;
+ struct inferior *parent_inf;
+
+ parent_inf = current_inferior ();
+
+ /* If we detached from the child, then we have to be careful
+ to not insert breakpoints in the parent until the child
+ is done with the shared memory region. However, if we're
+ staying attached to the child, then we can and should
+ insert breakpoints, so that we can debug it. A
+ subsequent child exec or exit is enough to know when does
+ the child stops using the parent's address space. */
+ parent_inf->waiting_for_vfork_done = detach_fork;
+ parent_inf->pspace->breakpoints_not_allowed = detach_fork;
+
+ lp = find_lwp_pid (pid_to_ptid (parent_pid));
gdb_assert (linux_supports_tracefork_flag >= 0);
if (linux_supports_tracevforkdone (0))
{
- int status;
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LCFF: waiting for VFORK_DONE on %d\n",
+ parent_pid);
- ptrace (PTRACE_CONT, parent_pid, 0, 0);
- my_waitpid (parent_pid, &status, __WALL);
- if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
- warning (_("Unexpected waitpid result %06x when waiting for "
- "vfork-done"), status);
+ lp->stopped = 1;
+ lp->resumed = 1;
+
+ /* We'll handle the VFORK_DONE event like any other
+ event, in target_wait. */
}
else
{
is only the single-step breakpoint at vfork's return
point. */
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LCFF: no VFORK_DONE support, sleeping a bit\n");
+
usleep (10000);
- }
- /* Since we vforked, breakpoints were removed in the parent
- too. Put them back. */
- reattach_breakpoints (parent_pid);
+ /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
+ and leave it pending. The next linux_nat_resume call
+ will notice a pending event, and bypasses actually
+ resuming the inferior. */
+ lp->status = 0;
+ lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
+ lp->stopped = 0;
+ lp->resumed = 1;
+
+ /* If we're in async mode, need to tell the event loop
+ there's something here to process. */
+ if (target_can_async_p ())
+ async_file_mark ();
+ }
}
}
else
{
- struct thread_info *tp;
struct inferior *parent_inf, *child_inf;
struct lwp_info *lp;
-
- /* Before detaching from the parent, remove all breakpoints from it. */
- remove_breakpoints ();
+ struct program_space *parent_pspace;
if (info_verbose || debug_linux_nat)
{
target_terminal_ours ();
- fprintf_filtered (gdb_stdlog,
- "Attaching after fork to child process %d.\n",
- child_pid);
+ if (has_vforked)
+ fprintf_filtered (gdb_stdlog, _("\
+Attaching after process %d vfork to child process %d.\n"),
+ parent_pid, child_pid);
+ else
+ fprintf_filtered (gdb_stdlog, _("\
+Attaching after process %d fork to child process %d.\n"),
+ parent_pid, child_pid);
}
/* Add the new inferior first, so that the target_detach below
child_inf->attach_flag = parent_inf->attach_flag;
copy_terminal_info (child_inf, parent_inf);
- /* If we're vforking, we may want to hold on to the parent until
- the child exits or execs. At exec time we can remove the old
- breakpoints from the parent and detach it; at exit time we
- could do the same (or even, sneakily, resume debugging it - the
- child's exec has failed, or something similar).
+ parent_pspace = parent_inf->pspace;
- This doesn't clean up "properly", because we can't call
- target_detach, but that's OK; if the current target is "child",
- then it doesn't need any further cleanups, and lin_lwp will
- generally not encounter vfork (vfork is defined to fork
- in libpthread.so).
-
- The holding part is very easy if we have VFORKDONE events;
- but keeping track of both processes is beyond GDB at the
- moment. So we don't expose the parent to the rest of GDB.
- Instead we quietly hold onto it until such time as we can
- safely resume it. */
+ /* If we're vforking, we want to hold on to the parent until the
+ child exits or execs. At child exec or exit time we can
+ remove the old breakpoints from the parent and detach or
+ resume debugging it. Otherwise, detach the parent now; we'll
+ want to reuse it's program/address spaces, but we can't set
+ them to the child before removing breakpoints from the
+ parent, otherwise, the breakpoints module could decide to
+ remove breakpoints from the wrong process (since they'd be
+ assigned to the same address space). */
if (has_vforked)
{
- struct lwp_info *parent_lwp;
-
- linux_parent_pid = parent_pid;
-
- /* Get rid of the inferior on the core side as well. */
- inferior_ptid = null_ptid;
- detach_inferior (parent_pid);
-
- /* Also get rid of all its lwps. We will detach from this
- inferior soon-ish, but, we will still get an exit event
- reported through waitpid when it exits. If we didn't get
- rid of the lwps from our list, we would end up reporting
- the inferior exit to the core, which would then try to
- mourn a non-existing (from the core's perspective)
- inferior. */
- parent_lwp = find_lwp_pid (pid_to_ptid (parent_pid));
- purge_lwp_list (GET_PID (parent_lwp->ptid));
- linux_parent_pid = parent_pid;
+ gdb_assert (child_inf->vfork_parent == NULL);
+ gdb_assert (parent_inf->vfork_child == NULL);
+ child_inf->vfork_parent = parent_inf;
+ child_inf->pending_detach = 0;
+ parent_inf->vfork_child = child_inf;
+ parent_inf->pending_detach = detach_fork;
+ parent_inf->waiting_for_vfork_done = 0;
}
else if (detach_fork)
target_detach (NULL, 0);
+ /* Note that the detach above makes PARENT_INF dangling. */
+
+ /* Add the child thread to the appropriate lists, and switch to
+ this new thread, before cloning the program space, and
+ informing the solib layer about this new process. */
+
inferior_ptid = ptid_build (child_pid, child_pid, 0);
add_thread (inferior_ptid);
lp = add_lwp (inferior_ptid);
lp->stopped = 1;
+ lp->resumed = 1;
+ /* If this is a vfork child, then the address-space is shared
+ with the parent. If we detached from the parent, then we can
+ reuse the parent's program/address spaces. */
+ if (has_vforked || detach_fork)
+ {
+ child_inf->pspace = parent_pspace;
+ child_inf->aspace = child_inf->pspace->aspace;
+ }
+ else
+ {
+ child_inf->aspace = new_address_space ();
+ child_inf->pspace = add_program_space (child_inf->aspace);
+ child_inf->removable = 1;
+ set_current_program_space (child_inf->pspace);
+ clone_program_space (child_inf->pspace, parent_pspace);
+
+ /* Let the shared library layer (solib-svr4) learn about
+ this new process, relocate the cloned exec, pull in
+ shared libraries, and install the solib event breakpoint.
+ If a "cloned-VM" event was propagated better throughout
+ the core, this wouldn't be required. */
+ solib_create_inferior_hook (0);
+ }
+
+ /* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
}
return buf;
}
-/* Initialize the list of LWPs. Note that this module, contrary to
- what GDB's generic threads layer does for its thread list,
- re-initializes the LWP lists whenever we mourn or detach (which
- doesn't involve mourning) the inferior. */
-
-static void
-init_lwp_list (void)
-{
- struct lwp_info *lp, *lpnext;
-
- for (lp = lwp_list; lp; lp = lpnext)
- {
- lpnext = lp->next;
- xfree (lp);
- }
-
- lwp_list = NULL;
-}
-
/* Remove all LWPs belong to PID from the lwp list. */
static void
lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
lp->ptid = ptid;
+ lp->core = -1;
lp->next = lwp_list;
lwp_list = lp;
return NULL;
}
-/* Returns true if PTID matches filter FILTER. FILTER can be the wild
- card MINUS_ONE_PTID (all ptid match it); can be a ptid representing
- a process (ptid_is_pid returns true), in which case, all lwps of
- that give process match, lwps of other process do not; or, it can
- represent a specific thread, in which case, only that thread will
- match true. PTID must represent an LWP, it can never be a wild
- card. */
-
-static int
-ptid_match (ptid_t ptid, ptid_t filter)
-{
- /* Since both parameters have the same type, prevent easy mistakes
- from happening. */
- gdb_assert (!ptid_equal (ptid, minus_one_ptid)
- && !ptid_equal (ptid, null_ptid));
-
- if (ptid_equal (filter, minus_one_ptid))
- return 1;
- if (ptid_is_pid (filter)
- && ptid_get_pid (ptid) == ptid_get_pid (filter))
- return 1;
- else if (ptid_equal (ptid, filter))
- return 1;
-
- return 0;
-}
-
/* Call CALLBACK with its second argument set to DATA for every LWP in
the list. If CALLBACK returns 1 for a particular LWP, return a
pointer to the structure describing that LWP immediately.
else if (non_stop && !is_executing (lp->ptid))
{
struct thread_info *tp = find_thread_ptid (lp->ptid);
+
signo = tp->stop_signal;
}
else if (!non_stop)
if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
{
struct thread_info *tp = find_thread_ptid (lp->ptid);
+
signo = tp->stop_signal;
}
}
{
int pid;
int status;
- enum target_signal sig;
struct lwp_info *main_lwp;
pid = GET_PID (inferior_ptid);
pass it along with PTRACE_DETACH. */
args = alloca (8);
sprintf (args, "%d", (int) WSTOPSIG (status));
- fprintf_unfiltered (gdb_stdlog,
- "LND: Sending signal %s to %s\n",
- args,
- target_pid_to_str (main_lwp->ptid));
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "LND: Sending signal %s to %s\n",
+ args,
+ target_pid_to_str (main_lwp->ptid));
}
delete_lwp (main_lwp->ptid);
static int
resume_callback (struct lwp_info *lp, void *data)
{
- if (lp->stopped && lp->status == 0)
+ struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
+
+ if (lp->stopped && inf->vfork_child != NULL)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "RC: Not resuming %s (vfork parent)\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else if (lp->stopped && lp->status == 0)
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
lp->stopped = 0;
lp->step = 0;
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+ lp->stopped_by_watchpoint = 0;
}
else if (lp->stopped && debug_linux_nat)
fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
resume_many = (ptid_equal (minus_one_ptid, ptid)
|| ptid_is_pid (ptid));
- if (!non_stop)
- {
- /* Mark the lwps we're resuming as resumed. */
- iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
- iterate_over_lwps (ptid, resume_set_callback, NULL);
- }
- else
- iterate_over_lwps (minus_one_ptid, resume_set_callback, NULL);
+ /* Mark the lwps we're resuming as resumed. */
+ iterate_over_lwps (ptid, resume_set_callback, NULL);
/* See if it's the current inferior that should be handled
specially. */
}
}
- if (lp->status)
+ if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
{
/* FIXME: What should we do if we are supposed to continue
this thread with a signal? */
linux_ops->to_resume (linux_ops, ptid, step, signo);
memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+ lp->stopped_by_watchpoint = 0;
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
target_async (inferior_event_handler, 0);
}
-/* Issue kill to specified lwp. */
-
-static int tkill_failed;
+/* Send a signal to an LWP. */
static int
kill_lwp (int lwpid, int signo)
{
- errno = 0;
-
-/* Use tkill, if possible, in case we are using nptl threads. If tkill
- fails, then we are not using nptl threads and we should be using kill. */
+ /* Use tkill, if possible, in case we are using nptl threads. If tkill
+ fails, then we are not using nptl threads and we should be using kill. */
#ifdef HAVE_TKILL_SYSCALL
- if (!tkill_failed)
- {
- int ret = syscall (__NR_tkill, lwpid, signo);
- if (errno != ENOSYS)
- return ret;
- errno = 0;
- tkill_failed = 1;
- }
+ {
+ static int tkill_failed;
+
+ if (!tkill_failed)
+ {
+ int ret;
+
+ errno = 0;
+ ret = syscall (__NR_tkill, lwpid, signo);
+ if (errno != ENOSYS)
+ return ret;
+ tkill_failed = 1;
+ }
+ }
#endif
return kill (lwpid, signo);
ourstatus->kind = TARGET_WAITKIND_VFORKED;
else
{
- struct cleanup *old_chain;
-
ourstatus->kind = TARGET_WAITKIND_IGNORE;
new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
new_lp->cloned = 1;
ourstatus->value.execd_pathname
= xstrdup (linux_child_pid_to_exec_file (pid));
- if (linux_parent_pid)
+ return 0;
+ }
+
+ if (event == PTRACE_EVENT_VFORK_DONE)
+ {
+ if (current_inferior ()->waiting_for_vfork_done)
{
- detach_breakpoints (linux_parent_pid);
- ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "\
+LHEW: Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping\n",
+ GET_LWP (lp->ptid));
- linux_parent_pid = 0;
+ ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
+ return 0;
}
- /* At this point, all inserted breakpoints are gone. Doing this
- as soon as we detect an exec prevents the badness of deleting
- a breakpoint writing the current "shadow contents" to lift
- the bp. That shadow is NOT valid after an exec.
-
- Note that we have to do this after the detach_breakpoints
- call above, otherwise breakpoints wouldn't be lifted from the
- parent on a vfork, because detach_breakpoints would think
- that breakpoints are not inserted. */
- mark_breakpoints_out ();
- return 0;
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "\
+LHEW: Got PTRACE_EVENT_VFORK_DONE from LWP %ld: resuming\n",
+ GET_LWP (lp->ptid));
+ ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
+ return 1;
}
internal_error (__FILE__, __LINE__,
linux_nat_has_pending_sigint (int pid)
{
sigset_t pending, blocked, ignored;
- int i;
linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
}
}
+/* Fetch the possible triggered data watchpoint info and store it in
+ LP.
+
+ On some archs, like x86, that use debug registers to set
+ watchpoints, it's possible that the way to know which watched
+ address trapped, is to check the register that is used to select
+ which address to watch. Problem is, between setting the watchpoint
+ and reading back which data address trapped, the user may change
+ the set of watchpoints, and, as a consequence, GDB changes the
+ debug registers in the inferior. To avoid reading back a stale
+ stopped-data-address when that happens, we cache in LP the fact
+ that a watchpoint trapped, and the corresponding data address, as
+ soon as we see LP stop with a SIGTRAP. If GDB changes the debug
+ registers meanwhile, we have the cached data we can rely on. */
+
+static void
+save_sigtrap (struct lwp_info *lp)
+{
+  struct cleanup *old_chain;
+
+  /* If the low target cannot report watchpoint hits at all, there is
+     nothing to cache.  */
+  if (linux_ops->to_stopped_by_watchpoint == NULL)
+    {
+      lp->stopped_by_watchpoint = 0;
+      return;
+    }
+
+  /* The to_stopped_by_watchpoint callback implicitly operates on
+     INFERIOR_PTID, so temporarily switch to LP's ptid.  */
+  old_chain = save_inferior_ptid ();
+  inferior_ptid = lp->ptid;
+
+  lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
+
+  if (lp->stopped_by_watchpoint)
+    {
+      if (linux_ops->to_stopped_data_address != NULL)
+	lp->stopped_data_address_p =
+	  linux_ops->to_stopped_data_address (&current_target,
+					      &lp->stopped_data_address);
+      else
+	lp->stopped_data_address_p = 0;
+    }
+
+  do_cleanups (old_chain);
+}
+
+/* See save_sigtrap.  Report whether the LWP corresponding to
+   INFERIOR_PTID last stopped due to a data watchpoint hit, using the
+   value cached by save_sigtrap rather than re-querying the (possibly
+   since-changed) debug registers.  */
+
+static int
+linux_nat_stopped_by_watchpoint (void)
+{
+  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+  gdb_assert (lp != NULL);
+
+  return lp->stopped_by_watchpoint;
+}
+
+/* See save_sigtrap.  Store in *ADDR_P the data address cached for the
+   LWP corresponding to INFERIOR_PTID, and return nonzero iff a valid
+   stopped-data-address was cached for it.  */
+
+static int
+linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
+{
+  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
+
+  gdb_assert (lp != NULL);
+
+  *addr_p = lp->stopped_data_address;
+
+  return lp->stopped_data_address_p;
+}
+
/* Wait until LP is stopped. */
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
+ struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
+
+ /* If this is a vfork parent, bail out, it is not going to report
+ any SIGSTOP until the vfork is done with. */
+ if (inf->vfork_child != NULL)
+ return 0;
+
if (!lp->stopped)
{
int status;
/* Save the trap's siginfo in case we need it later. */
save_siginfo (lp);
+ save_sigtrap (lp);
+
/* Now resume this LWP and get the SIGSTOP event. */
errno = 0;
ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
CORE_ADDR pc;
pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
- if (breakpoint_inserted_here_p (pc))
+ if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
return NULL;
}
- /* Save the trap's siginfo in case we need it later. */
if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
- save_siginfo (lp);
+ {
+ /* Save the trap's siginfo in case we need it later. */
+ save_siginfo (lp);
+
+ save_sigtrap (lp);
+ }
/* Check if the thread has exited. */
if ((WIFEXITED (status) || WIFSIGNALED (status))
lp = NULL;
status = 0;
- /* Make sure there is at least one LWP that has been resumed. */
- gdb_assert (iterate_over_lwps (ptid, resumed_callback, NULL));
+ /* Make sure that of those LWPs we want to get an event from, there
+ is at least one LWP that has been resumed. If there's none, just
+ bail out. The core may just be flushing asynchronously all
+ events. */
+ if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
+ {
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+
+ if (debug_linux_nat_async)
+ fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
+
+ restore_child_signals_mask (&prev_mask);
+ return minus_one_ptid;
+ }
/* First check if there is a LWP with a wait status pending. */
if (pid == -1)
&& ptid_is_pid (ptid)
&& ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
{
+ gdb_assert (lp->resumed);
+
if (debug_linux_nat)
fprintf (stderr, "LWP %ld got an event %06x, leaving pending.\n",
ptid_get_lwp (lp->ptid), status);
{
if (WSTOPSIG (lp->status) != SIGSTOP)
{
- stop_callback (lp, NULL);
-
- /* Resume in order to collect the sigstop. */
- ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
-
- stop_wait_callback (lp, NULL);
+ /* Cancel breakpoint hits. The breakpoint may
+ be removed before we fetch events from this
+ process to report to the core. It is best
+ not to assume the moribund breakpoints
+ heuristic always handles these cases --- it
+ could be too many events go through to the
+ core before this one is handled. All-stop
+ always cancels breakpoint hits in all
+ threads. */
+ if (non_stop
+ && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
+ && WSTOPSIG (lp->status) == SIGTRAP
+ && cancel_breakpoint (lp))
+ {
+ /* Throw away the SIGTRAP. */
+ lp->status = 0;
+
+ if (debug_linux_nat)
+ fprintf (stderr,
+ "LLW: LWP %ld hit a breakpoint while waiting "
+ "for another process; cancelled it\n",
+ ptid_get_lwp (lp->ptid));
+ }
+ lp->stopped = 1;
}
else
{
starvation. */
if (pid == -1)
select_event_lwp (ptid, &lp, &status);
- }
- /* Now that we've selected our final event LWP, cancel any
- breakpoints in other LWPs that have hit a GDB breakpoint. See
- the comment in cancel_breakpoints_callback to find out why. */
- iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
+ /* Now that we've selected our final event LWP, cancel any
+ breakpoints in other LWPs that have hit a GDB breakpoint.
+ See the comment in cancel_breakpoints_callback to find out
+ why. */
+ iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
+
+ /* In all-stop, from the core's perspective, all LWPs are now
+ stopped until a new resume action is sent over. */
+ iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
+ }
+ else
+ lp->resumed = 0;
if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
{
fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
restore_child_signals_mask (&prev_mask);
+
+ if (ourstatus->kind == TARGET_WAITKIND_EXITED
+ || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
+ lp->core = -1;
+ else
+ lp->core = linux_nat_core_of_thread_1 (lp->ptid);
+
return lp->ptid;
}
+/* Resume LWPs that are currently stopped without any pending status
+   to report, but are resumed from the core's perspective.
+
+   Intended as an iterate_over_lwps callback; DATA is a ptid_t filter
+   (*WAIT_PTID_P) naming the LWPs we are about to wait on.  Always
+   returns 0, so the iteration visits every LWP in the list.  */
+
+static int
+resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
+{
+  ptid_t *wait_ptid_p = data;
+
+  if (lp->stopped
+      && lp->resumed
+      && lp->status == 0
+      && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
+    {
+      gdb_assert (is_executing (lp->ptid));
+
+      /* Don't bother if there's a breakpoint at PC that we'd hit
+	 immediately, and we're not waiting for this LWP.  */
+      if (!ptid_match (lp->ptid, *wait_ptid_p))
+	{
+	  struct regcache *regcache = get_thread_regcache (lp->ptid);
+	  CORE_ADDR pc = regcache_read_pc (regcache);
+
+	  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+	    return 0;
+	}
+
+      if (debug_linux_nat)
+	fprintf_unfiltered (gdb_stdlog,
+			    "RSRL: resuming stopped-resumed LWP %s\n",
+			    target_pid_to_str (lp->ptid));
+
+      /* Resume the LWP and clear the per-stop cached state.  */
+      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
+			    lp->step, TARGET_SIGNAL_0);
+      lp->stopped = 0;
+      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
+      lp->stopped_by_watchpoint = 0;
+    }
+
+  return 0;
+}
+
static ptid_t
linux_nat_wait (struct target_ops *ops,
ptid_t ptid, struct target_waitstatus *ourstatus,
if (target_can_async_p ())
async_file_flush ();
+ /* Resume LWPs that are currently stopped without any pending status
+ to report, but are resumed from the core's perspective. LWPs get
+ in this state if we find them stopping at a time we're not
+ interested in reporting the event (target_wait on a
+ specific_process, for example, see linux_nat_wait_1), and
+ meanwhile the event became uninteresting. Don't bother resuming
+ LWPs we're not going to wait for if they'd stop immediately. */
+ if (non_stop)
+ iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
+
event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
/* If we requested any event, and something came out, assume there
else
{
ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
+
/* Stop all threads before killing them, since ptrace requires
that the thread is stopped to sucessfully PTRACE_KILL. */
iterate_over_lwps (ptid, stop_callback, NULL);
long long addr, endaddr, size, offset, inode;
char permissions[8], device[8], filename[MAXPATHLEN];
int read, write, exec;
- int ret;
struct cleanup *cleanup;
/* Compose the filename for the /proc memory map, and open it. */
if (info_verbose)
{
fprintf_filtered (gdb_stdout,
- "Save segment, %lld bytes at %s (%c%c%c)",
- size, paddress (target_gdbarch, addr),
+ "Save segment, %s bytes at %s (%c%c%c)",
+ plongest (size), paddress (target_gdbarch, addr),
read ? 'r' : ' ',
write ? 'w' : ' ', exec ? 'x' : ' ');
if (filename[0])
linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
{
struct linux_spu_corefile_data args;
+
args.obfd = obfd;
args.note_data = note_data;
args.note_size = note_size;
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
{
struct linux_nat_corefile_thread_data thread_args;
- struct cleanup *old_chain;
/* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
char fname[16] = { '\0' };
/* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
char psargs[80] = { '\0' };
char *note_data = NULL;
- ptid_t current_ptid = inferior_ptid;
ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
gdb_byte *auxv;
int auxv_len;
int cwd_f = 1;
int exe_f = 1;
int mappings_f = 0;
- int environ_f = 0;
int status_f = 0;
int stat_f = 0;
int all = 0;
if ((procfile = fopen (fname1, "r")) != NULL)
{
struct cleanup *cleanup = make_cleanup_fclose (procfile);
+
if (fgets (buffer, sizeof (buffer), procfile))
printf_filtered ("cmdline = '%s'\n", buffer);
else
if ((procfile = fopen (fname1, "r")) != NULL)
{
struct cleanup *cleanup = make_cleanup_fclose (procfile);
+
while (fgets (buffer, sizeof (buffer), procfile) != NULL)
puts_filtered (buffer);
do_cleanups (cleanup);
{
FILE *procfile;
char buffer[MAXPATHLEN], fname[MAXPATHLEN];
- int signum;
struct cleanup *cleanup;
sigemptyset (pending);
static LONGEST
linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
- const char *annex, gdb_byte *readbuf,
- const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
+ const char *annex, gdb_byte *readbuf,
+ const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
/* We make the process list snapshot when the object starts to be
read. */
if (offset == 0)
{
if (len_avail != -1 && len_avail != 0)
- obstack_free (&obstack, NULL);
+ obstack_free (&obstack, NULL);
len_avail = 0;
buf = NULL;
obstack_init (&obstack);
dirp = opendir ("/proc");
if (dirp)
- {
- struct dirent *dp;
- while ((dp = readdir (dirp)) != NULL)
- {
- struct stat statbuf;
- char procentry[sizeof ("/proc/4294967295")];
-
- if (!isdigit (dp->d_name[0])
- || NAMELEN (dp) > sizeof ("4294967295") - 1)
- continue;
-
- sprintf (procentry, "/proc/%s", dp->d_name);
- if (stat (procentry, &statbuf) == 0
- && S_ISDIR (statbuf.st_mode))
- {
- char *pathname;
- FILE *f;
- char cmd[MAXPATHLEN + 1];
- struct passwd *entry;
-
- pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
- entry = getpwuid (statbuf.st_uid);
-
- if ((f = fopen (pathname, "r")) != NULL)
- {
- size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
- if (len > 0)
- {
- int i;
- for (i = 0; i < len; i++)
- if (cmd[i] == '\0')
- cmd[i] = ' ';
- cmd[len] = '\0';
-
- obstack_xml_printf (
- &obstack,
- "<item>"
- "<column name=\"pid\">%s</column>"
- "<column name=\"user\">%s</column>"
- "<column name=\"command\">%s</column>"
- "</item>",
- dp->d_name,
- entry ? entry->pw_name : "?",
- cmd);
- }
- fclose (f);
- }
-
- xfree (pathname);
- }
- }
-
- closedir (dirp);
- }
+ {
+ struct dirent *dp;
+
+ while ((dp = readdir (dirp)) != NULL)
+ {
+ struct stat statbuf;
+ char procentry[sizeof ("/proc/4294967295")];
+
+ if (!isdigit (dp->d_name[0])
+ || NAMELEN (dp) > sizeof ("4294967295") - 1)
+ continue;
+
+ sprintf (procentry, "/proc/%s", dp->d_name);
+ if (stat (procentry, &statbuf) == 0
+ && S_ISDIR (statbuf.st_mode))
+ {
+ char *pathname;
+ FILE *f;
+ char cmd[MAXPATHLEN + 1];
+ struct passwd *entry;
+
+ pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
+ entry = getpwuid (statbuf.st_uid);
+
+ if ((f = fopen (pathname, "r")) != NULL)
+ {
+ size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
+
+ if (len > 0)
+ {
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (cmd[i] == '\0')
+ cmd[i] = ' ';
+ cmd[len] = '\0';
+
+ obstack_xml_printf (
+ &obstack,
+ "<item>"
+ "<column name=\"pid\">%s</column>"
+ "<column name=\"user\">%s</column>"
+ "<column name=\"command\">%s</column>"
+ "</item>",
+ dp->d_name,
+ entry ? entry->pw_name : "?",
+ cmd);
+ }
+ fclose (f);
+ }
+
+ xfree (pathname);
+ }
+ }
+
+ closedir (dirp);
+ }
obstack_grow_str0 (&obstack, "</osdata>\n");
buf = obstack_finish (&obstack);
LONGEST xfer;
if (object == TARGET_OBJECT_AUXV)
- return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
+ return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
offset, len);
if (object == TARGET_OBJECT_OSDATA)
{
if (!lwp->stopped)
{
- int pid, status;
ptid_t ptid = lwp->ptid;
if (debug_linux_nat)
linux_ops->to_close (quitting);
}
+/* When requests are passed down from the linux-nat layer to the
+   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
+   used.  The address space pointer is stored in the inferior object,
+   but the common code that is passed such ptid can't tell whether
+   lwpid is a "main" process id or not (it assumes so).  We reverse
+   look up the "main" process id from the lwp here.  */
+
+struct address_space *
+linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
+{
+  struct lwp_info *lwp;
+  struct inferior *inf;
+  int pid;
+
+  if (GET_LWP (ptid) == 0)
+    {
+      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
+	 tgid.  */
+      lwp = find_lwp_pid (ptid);
+      pid = GET_PID (lwp->ptid);
+    }
+  else
+    {
+      /* A (pid,lwpid,0) ptid.  */
+      pid = GET_PID (ptid);
+    }
+
+  inf = find_inferior_pid (pid);
+  gdb_assert (inf != NULL);
+  return inf->aspace;
+}
+
+/* Parse /proc/PID/task/LWP/stat to find the processor core LWP PTID
+   last ran on.  Returns the core number, or -1 if it cannot be
+   determined (e.g. the stat file is gone because the thread
+   exited).  */
+
+int
+linux_nat_core_of_thread_1 (ptid_t ptid)
+{
+  struct cleanup *back_to;
+  char *filename;
+  FILE *f;
+  char *content = NULL;
+  char *p;
+  char *ts = NULL;
+  int content_read = 0;
+  int i;
+  int core;
+
+  filename = xstrprintf ("/proc/%d/task/%ld/stat",
+			 GET_PID (ptid), GET_LWP (ptid));
+  back_to = make_cleanup (xfree, filename);
+
+  f = fopen (filename, "r");
+  if (!f)
+    {
+      do_cleanups (back_to);
+      return -1;
+    }
+
+  make_cleanup_fclose (f);
+
+  /* Slurp the whole file; it has no fixed maximum size.  */
+  for (;;)
+    {
+      int n;
+
+      content = xrealloc (content, content_read + 1024);
+      n = fread (content + content_read, 1, 1024, f);
+      content_read += n;
+      if (n < 1024)
+	{
+	  content[content_read] = '\0';
+	  break;
+	}
+    }
+
+  make_cleanup (xfree, content);
+
+  /* The command name field is parenthesized and may itself contain
+     spaces, so skip past the closing ")" before tokenizing.  */
+  p = strchr (content, '(');
+
+  /* Skip ")".  */
+  if (p != NULL)
+    p = strchr (p, ')');
+  if (p != NULL)
+    p++;
+
+  /* If the first field after program name has index 0, then core number is
+     the field with index 36.  There's no constant for that anywhere.  */
+  if (p != NULL)
+    p = strtok_r (p, " ", &ts);
+  for (i = 0; p != NULL && i != 36; ++i)
+    p = strtok_r (NULL, " ", &ts);
+
+  /* Use != 1 so that both a failed match (0) and an EOF return (-1)
+     from sscanf leave CORE well-defined.  */
+  if (p == NULL || sscanf (p, "%d", &core) != 1)
+    core = -1;
+
+  do_cleanups (back_to);
+
+  return core;
+}
+
+/* Return the cached value of the processor core for thread PTID, as
+   recorded the last time the LWP reported an event, or -1 if PTID is
+   not in the LWP list.  */
+
+int
+linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
+{
+  struct lwp_info *info = find_lwp_pid (ptid);
+
+  if (info)
+    return info->core;
+  return -1;
+}
+
void
linux_nat_add_target (struct target_ops *t)
{
t->to_thread_alive = linux_nat_thread_alive;
t->to_pid_to_str = linux_nat_pid_to_str;
t->to_has_thread_control = tc_schedlock;
+ t->to_thread_address_space = linux_nat_thread_address_space;
+ t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
+ t->to_stopped_data_address = linux_nat_stopped_data_address;
t->to_can_async_p = linux_nat_can_async_p;
t->to_is_async_p = linux_nat_is_async_p;
t->to_supports_multi_process = linux_nat_supports_multi_process;
+ t->to_core_of_thread = linux_nat_core_of_thread;
+
/* We don't change the stratum; this target will sit at
process_stratum and thread_db will set at thread_stratum. This
is a little strange, since this is a multi-threaded-capable
void
_initialize_linux_nat (void)
{
- sigset_t mask;
-
add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\