static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
- struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
+ struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
new_pid->pid = pid;
new_pid->status = status;
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
+static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
+static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
+static void complete_ongoing_step_over (void);
/* When the event-loop is doing a step-over, this points at the thread
being stepped. */
struct process_info *proc;
proc = add_process (pid, attached);
- proc->priv = xcalloc (1, sizeof (*proc->priv));
+ proc->priv = XCNEW (struct process_info_private);
if (the_low_target.new_process != NULL)
proc->priv->arch_private = the_low_target.new_process ();
child_thr->last_resume_kind = resume_stop;
child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
+ /* If we're suspending all threads, leave this one suspended
+ too. */
+ if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
+ {
+ if (debug_threads)
+ debug_printf ("HEW: leaving child suspended\n");
+ child_lwp->suspended = 1;
+ }
+
parent_proc = get_thread_process (event_thr);
child_proc->attached = parent_proc->attached;
clone_all_breakpoints (&child_proc->breakpoints,
&child_proc->raw_breakpoints,
parent_proc->breakpoints);
- tdesc = xmalloc (sizeof (struct target_desc));
+ tdesc = XNEW (struct target_desc);
copy_target_description (tdesc, parent_proc->tdesc);
child_proc->tdesc = tdesc;
{
if (siginfo.si_signo == SIGTRAP)
{
- if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
+ if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
{
if (debug_threads)
{
debug_printf ("CSBB: %s stopped by trace\n",
target_pid_to_str (ptid_of (thr)));
}
+
+ lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
}
}
}
{
struct lwp_info *lwp;
- lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
- memset (lwp, 0, sizeof (*lwp));
+ lwp = XCNEW (struct lwp_info);
+
+ lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
if (the_low_target.new_thread != NULL)
the_low_target.new_thread (lwp);
if (process == NULL)
return -1;
+ /* As there's a step over already in progress, let it finish first,
+ otherwise nesting a stabilize_threads operation on top gets real
+ messy. */
+ complete_ongoing_step_over ();
+
/* Stop all threads before detaching. First, ptrace requires that
the thread is stopped to sucessfully detach. Second, thread_db
may need to uninstall thread event breakpoints from memory, which
exited but we still haven't been able to report it to GDB, we'll
hold on to the last lwp of the dead process. */
if (lwp != NULL)
- return !lwp->dead;
+ return !lwp_is_marked_dead (lwp);
else
return 0;
}
return 0;
}
+/* Increment LWP's suspend count. */
+
+static void
+lwp_suspended_inc (struct lwp_info *lwp)
+{
+ lwp->suspended++;
+
+ /* Suspend counts should stay small (one per in-flight step-over or
+ stabilize operation).  A count above 4 most likely means an
+ unbalanced lwp_suspended_inc/lwp_suspended_decr pair somewhere,
+ so flag it in the debug log rather than failing hard.  */
+ if (debug_threads && lwp->suspended > 4)
+ {
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ debug_printf ("LWP %ld has a suspiciously high suspend count,"
+ " suspended=%d\n", lwpid_of (thread), lwp->suspended);
+ }
+}
+
+/* Decrement LWP's suspend count. */
+
+static void
+lwp_suspended_decr (struct lwp_info *lwp)
+{
+ lwp->suspended--;
+
+ /* A negative count means more decrements than increments -- that is
+ a gdbserver bug, not a target condition, so abort via
+ internal_error instead of silently continuing.  */
+ if (lwp->suspended < 0)
+ {
+ struct thread_info *thread = get_lwp_thread (lwp);
+
+ internal_error (__FILE__, __LINE__,
+ "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
+ lwp->suspended);
+ }
+}
+
/* This function should only be called if the LWP got a SIGTRAP.
Handle any tracepoint steps or hits. Return true if a tracepoint
uninsert tracepoints. To do this, we temporarily pause all
threads, unpatch away, and then unpause threads. We need to make
sure the unpausing doesn't resume LWP too. */
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
/* And we need to be sure that any all-threads-stopping doesn't try
to move threads out of the jump pads, as it could deadlock the
actions. */
tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
- lwp->suspended--;
+ lwp_suspended_decr (lwp);
gdb_assert (lwp->suspended == 0);
gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
}
}
- p_sig = xmalloc (sizeof (*p_sig));
+ p_sig = XCNEW (struct pending_signals);
p_sig->prev = lwp->pending_signals_to_report;
p_sig->signal = WSTOPSIG (*wstat);
- memset (&p_sig->info, 0, sizeof (siginfo_t));
+
ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
&p_sig->info);
/* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
or hardware watchpoint. Check which is which if we got
- TARGET_STOPPED_BY_HW_BREAKPOINT. */
+ TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
+ stepped an instruction that triggered a watchpoint. In that
+ case, on some architectures (such as x86), instead of
+ TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
+ the debug registers separately. */
if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
- && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
- || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
+ && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
check_stopped_by_watchpoint (child);
if (!have_stop_pc)
struct lwp_info *lp = get_thread_lwp (thread);
if (lp->stopped
+ && !lp->suspended
&& !lp->status_pending_p
&& thread->last_resume_kind != resume_stop
&& thread->last_status.kind == TARGET_WAITKIND_IGNORE)
if (lwp == except)
return 0;
- lwp->suspended--;
-
- gdb_assert (lwp->suspended >= 0);
+ lwp_suspended_decr (lwp);
return 0;
}
lwp = get_thread_lwp (current_thread);
/* Lock it. */
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
if (ourstatus.value.sig != GDB_SIGNAL_0
|| current_thread->last_resume_kind == resume_stop)
return null_ptid;
}
-/* Return non-zero if WAITSTATUS reflects an extended linux
- event. Otherwise, return zero. */
-
-static int
-extended_event_reported (const struct target_waitstatus *waitstatus)
-{
- if (waitstatus == NULL)
- return 0;
-
- return (waitstatus->kind == TARGET_WAITKIND_FORKED
- || waitstatus->kind == TARGET_WAITKIND_VFORKED
- || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
-}
-
/* Wait for process, returns status. */
static ptid_t
info_p = &info;
else
info_p = NULL;
- linux_resume_one_lwp (event_child, event_child->stepping,
- WSTOPSIG (w), info_p);
+
+ if (step_over_finished)
+ {
+ /* We cancelled this thread's step-over above. We still
+ need to unsuspend all other LWPs, and set them back
+ running again while the signal handler runs. */
+ unsuspend_all_lwps (event_child);
+
+ /* Enqueue the pending signal info so that proceed_all_lwps
+ doesn't lose it. */
+ enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
+
+ proceed_all_lwps ();
+ }
+ else
+ {
+ linux_resume_one_lwp (event_child, event_child->stepping,
+ WSTOPSIG (w), info_p);
+ }
return ignore_event (ourstatus);
}
do, we're be able to handle GDB breakpoints on top of internal
breakpoints, by handling the internal breakpoint and still
reporting the event to GDB. If we don't, we're out of luck, GDB
- won't see the breakpoint hit. */
+ won't see the breakpoint hit. If we see a single-step event but
+ the thread should be continuing, don't pass the trap to gdb.
+ That indicates that we had previously finished a single-step but
+ left the single-step pending -- see
+ complete_ongoing_step_over. */
report_to_gdb = (!maybe_internal_trap
|| (current_thread->last_resume_kind == resume_step
&& !in_step_range)
|| event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
- || (!step_over_finished && !in_step_range
- && !bp_explains_trap && !trace_event)
+ || (!in_step_range
+ && !bp_explains_trap
+ && !trace_event
+ && !step_over_finished
+ && !(current_thread->last_resume_kind == resume_continue
+ && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
|| (gdb_breakpoint_here (event_child->stop_pc)
&& gdb_condition_true_at_breakpoint (event_child->stop_pc)
&& gdb_no_commands_at_breakpoint (event_child->stop_pc))
- || extended_event_reported (&event_child->waitstatus));
+ || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
run_breakpoint_commands (event_child->stop_pc);
if (debug_threads)
{
- if (extended_event_reported (&event_child->waitstatus))
+ if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
{
char *str;
unstop_all_lwps (1, event_child);
}
- if (extended_event_reported (&event_child->waitstatus))
+ if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
{
- /* If the reported event is a fork, vfork or exec, let GDB know. */
- ourstatus->kind = event_child->waitstatus.kind;
- ourstatus->value = event_child->waitstatus.value;
-
+ /* If the reported event is an exit, fork, vfork or exec, let
+ GDB know. */
+ *ourstatus = event_child->waitstatus;
/* Clear the event lwp's waitstatus since we handled it already. */
event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
}
if (lwp == except)
return 0;
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
return send_sigstop_callback (entry, except);
}
static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
- /* It's dead, really. */
- lwp->dead = 1;
-
/* Store the exit status for later. */
lwp->status_pending_p = 1;
lwp->status_pending = wstat;
+ /* Store in waitstatus as well, as there's nothing else to process
+ for this event. */
+ if (WIFEXITED (wstat))
+ {
+ lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
+ lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
+ }
+ else if (WIFSIGNALED (wstat))
+ {
+ lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
+ lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
+ }
+
/* Prevent trying to stop it. */
lwp->stopped = 1;
lwp->stop_expected = 0;
}
+/* Return true if LWP has exited already, and has a pending exit event
+ to report to GDB. */
+
+static int
+lwp_is_marked_dead (struct lwp_info *lwp)
+{
+ /* mark_lwp_dead stores the raw wait status in status_pending, so an
+ exit is recognizable as a pending WIFEXITED/WIFSIGNALED status
+ (replaces the old lwp->dead flag).  */
+ return (lwp->status_pending_p
+ && (WIFEXITED (lwp->status_pending)
+ || WIFSIGNALED (lwp->status_pending)));
+}
+
/* Wait for all children to stop for the SIGSTOPs we just queued. */
static void
if (debug_threads)
debug_printf ("Previously current thread died.\n");
- if (non_stop)
- {
- /* We can't change the current inferior behind GDB's back,
- otherwise, a subsequent command may apply to the wrong
- process. */
- current_thread = NULL;
- }
- else
- {
- /* Set a valid thread as current. */
- set_desired_thread (0);
- }
+ /* We can't change the current inferior behind GDB's back,
+ otherwise, a subsequent command may apply to the wrong
+ process. */
+ current_thread = NULL;
}
}
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- gdb_assert (lwp->suspended == 0);
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld is suspended, suspended=%d\n",
+ lwpid_of (thread), lwp->suspended);
+ }
gdb_assert (lwp->stopped);
/* Allow debugging the jump pad, gdb_collect, etc.. */
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
struct thread_info *thread = (struct thread_info *) entry;
+ struct thread_info *saved_thread;
struct lwp_info *lwp = get_thread_lwp (thread);
int *wstat;
- gdb_assert (lwp->suspended == 0);
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld is suspended, suspended=%d\n",
+ lwpid_of (thread), lwp->suspended);
+ }
gdb_assert (lwp->stopped);
+ /* For gdb_breakpoint_here. */
+ saved_thread = current_thread;
+ current_thread = thread;
+
wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
/* Allow debugging the jump pad, gdb_collect, etc. */
linux_resume_one_lwp (lwp, 0, 0, NULL);
}
else
- lwp->suspended++;
+ lwp_suspended_inc (lwp);
+
+ current_thread = saved_thread;
}
static int
struct thread_info *thread = (struct thread_info *) entry;
struct lwp_info *lwp = get_thread_lwp (thread);
- if (lwp->dead)
+ if (lwp_is_marked_dead (lwp))
return 0;
if (lwp->stopped)
return 0;
}
}
+/* Enqueue one signal in the chain of signals which need to be
+ delivered to this process on next resume.  SIGNAL is the signal
+ number; INFO is the siginfo to deliver with it, or NULL to deliver
+ with an all-zero siginfo.  The new entry is pushed on the front of
+ LWP's pending_signals list and owned by that list.  */
+
+static void
+enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
+{
+ struct pending_signals *p_sig = XNEW (struct pending_signals);
+
+ p_sig->prev = lwp->pending_signals;
+ p_sig->signal = signal;
+ if (info == NULL)
+ memset (&p_sig->info, 0, sizeof (siginfo_t))
+ else
+ memcpy (&p_sig->info, info, sizeof (siginfo_t));
+ lwp->pending_signals = p_sig;
+}
+
/* Resume execution of LWP. If STEP is nonzero, single-step it. If
SIGNAL is nonzero, give it that signal. */
|| lwp->bp_reinsert != 0
|| fast_tp_collecting))
{
- struct pending_signals *p_sig;
- p_sig = xmalloc (sizeof (*p_sig));
+ struct pending_signals *p_sig = XNEW (struct pending_signals);
+
p_sig->prev = lwp->pending_signals;
p_sig->signal = signal;
if (info == NULL)
lwpid_of (thread));
stop_all_lwps (1, lwp);
- gdb_assert (lwp->suspended == 0);
+
+ if (lwp->suspended != 0)
+ {
+ internal_error (__FILE__, __LINE__,
+ "LWP %ld suspended=%d\n", lwpid_of (thread),
+ lwp->suspended);
+ }
if (debug_threads)
debug_printf ("Done stopping all threads for step-over.\n");
return 0;
}
+/* If there's a step over in progress, wait until all threads stop
+ (that is, until the stepping thread finishes its step), and
+ unsuspend all lwps. The stepping thread ends with its status
+ pending, which is processed later when we get back to processing
+ events. */
+
+static void
+complete_ongoing_step_over (void)
+{
+ /* step_over_bkpt is non-null exactly while a step-over is in
+ flight.  */
+ if (!ptid_equal (step_over_bkpt, null_ptid))
+ {
+ struct lwp_info *lwp;
+ int wstat;
+ int ret;
+
+ if (debug_threads)
+ debug_printf ("detach: step over in progress, finish it first\n");
+
+ /* Passing NULL_PTID as filter indicates we want all events to
+ be left pending. Eventually this returns when there are no
+ unwaited-for children left. */
+ ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
+ &wstat, __WALL);
+ gdb_assert (ret == -1);
+
+ lwp = find_lwp_pid (step_over_bkpt);
+ if (lwp != NULL)
+ finish_step_over (lwp);
+ step_over_bkpt = null_ptid;
+ /* NOTE(review): if the stepping LWP vanished, LWP is NULL here
+ and unsuspend_all_lwps (NULL) unsuspends every LWP with no
+ exception -- presumably intended, but worth confirming.  */
+ unsuspend_all_lwps (lwp);
+ }
+}
+
/* This function is called once per thread. We check the thread's resume
request, which will tell us whether to resume, step, or leave the thread
stopped; and what signal, if any, it should be sent.
}
/* If this thread which is about to be resumed has a pending status,
- then don't resume any threads - we can just report the pending
- status. Make sure to queue any signals that would otherwise be
- sent. In all-stop mode, we do this decision based on if *any*
- thread has a pending status. If there's a thread that needs the
- step-over-breakpoint dance, then don't resume any other thread
- but that particular one. */
- leave_pending = (lwp->status_pending_p || leave_all_stopped);
+ then don't resume it - we can just report the pending status.
+ Likewise if it is suspended, because e.g., another thread is
+ stepping past a breakpoint. Make sure to queue any signals that
+ would otherwise be sent. In all-stop mode, we do this decision
+ based on if *any* thread has a pending status. If there's a
+ thread that needs the step-over-breakpoint dance, then don't
+ resume any other thread but that particular one. */
+ leave_pending = (lwp->suspended
+ || lwp->status_pending_p
+ || leave_all_stopped);
if (!leave_pending)
{
/* If we have a new signal, enqueue the signal. */
if (lwp->resume->sig != 0)
{
- struct pending_signals *p_sig;
- p_sig = xmalloc (sizeof (*p_sig));
+ struct pending_signals *p_sig = XCNEW (struct pending_signals);
+
p_sig->prev = lwp->pending_signals;
p_sig->signal = lwp->resume->sig;
- memset (&p_sig->info, 0, sizeof (siginfo_t));
/* If this is the same signal we were previously stopped by,
make sure to queue its siginfo. We can ignore the return
send_sigstop (lwp);
}
- step = thread->last_resume_kind == resume_step;
+ if (thread->last_resume_kind == resume_step)
+ {
+ if (debug_threads)
+ debug_printf (" stepping LWP %ld, client wants it stepping\n",
+ lwpid_of (thread));
+ step = 1;
+ }
+ else if (lwp->bp_reinsert != 0)
+ {
+ if (debug_threads)
+ debug_printf (" stepping LWP %ld, reinsert set\n",
+ lwpid_of (thread));
+ step = 1;
+ }
+ else
+ step = 0;
+
linux_resume_one_lwp (lwp, step, 0, NULL);
return 0;
}
if (lwp == except)
return 0;
- lwp->suspended--;
- gdb_assert (lwp->suspended >= 0);
+ lwp_suspended_decr (lwp);
return proceed_one_lwp (entry, except);
}
count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
/ sizeof (PTRACE_XFER_TYPE));
/* Allocate buffer of that many longwords. */
- buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
+ buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
/* Read all the longwords */
errno = 0;
/ sizeof (PTRACE_XFER_TYPE);
/* Allocate buffer of that many longwords. */
- register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
- alloca (count * sizeof (PTRACE_XFER_TYPE));
+ register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
int pid = lwpid_of (current_thread);
if (debug_threads)
{
/* Dump up to four bytes. */
- unsigned int val = * (unsigned int *) myaddr;
- if (len == 1)
- val = val & 0xff;
- else if (len == 2)
- val = val & 0xffff;
- else if (len == 3)
- val = val & 0xffffff;
- debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
- 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
+ char str[4 * 2 + 1];
+ char *p = str;
+ int dump = len < 4 ? len : 4;
+
+ for (i = 0; i < dump; i++)
+ {
+ sprintf (p, "%02x", myaddr[i]);
+ p += 2;
+ }
+ *p = '\0';
+
+ debug_printf ("Writing %s to 0x%08lx in process %d\n",
+ str, (long) memaddr, pid);
}
/* Fill start and end extra bytes of buffer with existing memory data. */
if (is_elf64)
{
Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
-#ifdef DT_MIPS_RLD_MAP
+#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
union
{
Elf64_Xword map;
unsigned char buf[sizeof (Elf64_Xword)];
}
rld_map;
-
+#endif
+#ifdef DT_MIPS_RLD_MAP
if (dyn->d_tag == DT_MIPS_RLD_MAP)
{
if (linux_read_memory (dyn->d_un.d_val,
break;
}
#endif /* DT_MIPS_RLD_MAP */
+#ifdef DT_MIPS_RLD_MAP_REL
+ if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
+ {
+ if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP_REL */
if (dyn->d_tag == DT_DEBUG && map == -1)
map = dyn->d_un.d_val;
else
{
Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
-#ifdef DT_MIPS_RLD_MAP
+#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
union
{
Elf32_Word map;
unsigned char buf[sizeof (Elf32_Word)];
}
rld_map;
-
+#endif
+#ifdef DT_MIPS_RLD_MAP
if (dyn->d_tag == DT_MIPS_RLD_MAP)
{
if (linux_read_memory (dyn->d_un.d_val,
break;
}
#endif /* DT_MIPS_RLD_MAP */
+#ifdef DT_MIPS_RLD_MAP_REL
+ if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
+ {
+ if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
+ rld_map.buf, sizeof (rld_map.buf)) == 0)
+ return rld_map.map;
+ else
+ break;
+ }
+#endif /* DT_MIPS_RLD_MAP_REL */
if (dyn->d_tag == DT_DEBUG && map == -1)
map = dyn->d_un.d_val;
#ifdef HAVE_LINUX_BTRACE
-/* See to_enable_btrace target method. */
-
-static struct btrace_target_info *
-linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
-{
- struct btrace_target_info *tinfo;
-
- tinfo = linux_enable_btrace (ptid, conf);
-
- if (tinfo != NULL && tinfo->ptr_bits == 0)
- {
- struct thread_info *thread = find_thread_ptid (ptid);
- struct regcache *regcache = get_thread_regcache (thread, 0);
-
- tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
- }
-
- return tinfo;
-}
-
/* See to_disable_btrace target method. */
static int
linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
linux_supports_btrace,
- linux_low_enable_btrace,
+ linux_enable_btrace,
linux_low_disable_btrace,
linux_low_read_btrace,
linux_low_btrace_conf,