--- a/gdb/ChangeLog
+++ b/gdb/ChangeLog
+2011-06-06  Pedro Alves  <pedro@codesourcery.com>
+
+ * infcall.c (run_inferior_call): Don't mask async. Instead force
+ a synchronous wait, if the target can async.
+
+ * target.h (struct target_ops): Delete to_async_mask.
+ (target_async_mask): Delete.
+ * target.c (update_current_target): Delete references to to_async_mask.
+ * linux-nat.c (linux_nat_async_mask_value): Delete.
+ (linux_nat_is_async_p, linux_nat_can_async_p): Remove references
+ to linux_nat_async_mask_value.
+ (linux_nat_async_mask): Delete.
+ (linux_nat_async, linux_nat_close): Remove references to
+ linux_nat_async_mask_value.
+ * record.c (record_async_mask_value): Delete.
+ (record_async): Remove references to record_async_mask_value.
+ (record_async_mask): Delete.
+ (record_can_async_p, record_is_async_p): Remove references to
+ record_async_mask_value.
+ (init_record_ops, init_record_core_ops): Remove references to
+ record_async_mask.
+ * remote.c (remote_async_mask_value): Delete.
+ (init_remote_ops): Remove reference to remote_async_mask.
+ (remote_can_async_p, remote_is_async_p): Remove references to
+ remote_async_mask_value.
+ (remote_async): Remove references to remote_async_mask_value.
+ (remote_async_mask): Delete.
+
+ * infrun.c (fetch_inferior_event): Don't claim registers changed
+ if the current thread is already not executing.
+
2011-06-03  Joel Brobecker  <brobecker@adacore.com>  (obvious fix)
From Stephen Kitt <steve@sk2.org>
--- a/gdb/infcall.c
+++ b/gdb/infcall.c
run_inferior_call (struct thread_info *call_thread, CORE_ADDR real_pc)
{
volatile struct gdb_exception e;
- int saved_async = 0;
int saved_in_infcall = call_thread->control.in_infcall;
ptid_t call_thread_ptid = call_thread->ptid;
- char *saved_target_shortname = xstrdup (target_shortname);
call_thread->control.in_infcall = 1;
/* We want stop_registers, please... */
call_thread->control.proceed_to_finish = 1;
- if (target_can_async_p ())
- saved_async = target_async_mask (0);
-
TRY_CATCH (e, RETURN_MASK_ALL)
- proceed (real_pc, TARGET_SIGNAL_0, 0);
+ {
+ proceed (real_pc, TARGET_SIGNAL_0, 0);
+
+ /* Inferior function calls are always synchronous, even if the
+ target supports asynchronous execution. Do here what
+ `proceed' itself does in sync mode. */
+ if (target_can_async_p () && is_running (inferior_ptid))
+ {
+ wait_for_inferior ();
+ normal_stop ();
+ }
+ }
/* At this point the current thread may have changed. Refresh
CALL_THREAD as it could be invalid if its thread has exited. */
call_thread = find_thread_ptid (call_thread_ptid);
- /* Don't restore the async mask if the target has changed,
- saved_async is for the original target. */
- if (saved_async
- && strcmp (saved_target_shortname, target_shortname) == 0)
- target_async_mask (saved_async);
-
enable_watchpoints_after_interactive_call_stop ();
/* Call breakpoint_auto_delete on the current contents of the bpstat
if (call_thread != NULL)
call_thread->control.in_infcall = saved_in_infcall;
- xfree (saved_target_shortname);
-
return e;
}
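The hunk above is the core of the change: rather than masking async for the duration of the call, run_inferior_call now performs the wait itself. Below is a minimal sketch of the new pattern as a hypothetical standalone helper (TRY_CATCH error handling and the in_infcall bookkeeping are omitted; all functions named are the ones used in the hunk):

/* Hypothetical helper distilling the pattern above: resume the
   inferior and force a synchronous stop, even on an async-capable
   target, without resorting to target_async_mask.  */
static void
resume_and_wait_synchronously (CORE_ADDR real_pc)
{
  proceed (real_pc, TARGET_SIGNAL_0, 0);

  /* On a synchronous target, `proceed' has already waited.  On an
     async target, do the wait here, exactly as `proceed' itself
     does in sync mode.  */
  if (target_can_async_p () && is_running (inferior_ptid))
    {
      wait_for_inferior ();	/* Block until the next stop event.  */
      normal_stop ();		/* Run the usual stop-event bookkeeping.  */
    }
}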
--- a/gdb/infrun.c
+++ b/gdb/infrun.c
status mechanism. */
overlay_cache_invalid = 1;
- registers_changed ();
+
+ /* But don't do it if the current thread is already stopped (hence
+ this is either a delayed event that will result in
+ TARGET_WAITKIND_IGNORE, or an event for another thread, and we
+ always clear the register and frame caches when the user switches
+ threads anyway). If we didn't do this, a spurious delayed event
+ in all-stop mode would make the user lose the selected frame. */
+ if (non_stop || is_executing (inferior_ptid))
+ registers_changed ();
make_cleanup_restore_integer (&execution_direction);
execution_direction = target_execution_direction ();
--- a/gdb/linux-nat.c
+++ b/gdb/linux-nat.c
static int linux_supports_tracevforkdone_flag = -1;
-/* Async mode support. */
-
-/* Zero if the async mode, although enabled, is masked, which means
- linux_nat_wait should behave as if async mode was off. */
-static int linux_nat_async_mask_value = 1;
-
/* Stores the current used ptrace() options. */
static int current_ptrace_options = 0;
+/* Async mode support. */
+
/* The read/write ends of the pipe registered as waitable file in the
event loop. */
static int linux_nat_event_pipe[2] = { -1, -1 };
(enum inferior_event_type event_type,
void *context),
void *context);
-static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);
static int stop_callback (struct lwp_info *lp, void *data);
/* NOTE: palves 2008-03-21: We're only async when the user requests
it explicitly with the "set target-async" command.
Someday, linux will always be async. */
- if (!target_async_permitted)
- return 0;
-
- /* See target.h/target_async_mask. */
- return linux_nat_async_mask_value;
+ return target_async_permitted;
}
/* target_can_async_p implementation. */
/* NOTE: palves 2008-03-21: We're only async when the user requests
it explicitly with the "set target-async" command.
Someday, linux will always be async. */
- if (!target_async_permitted)
- return 0;
-
- /* See target.h/target_async_mask. */
- return linux_nat_async_mask_value;
+ return target_async_permitted;
}
static int
return linux_multi_process;
}
-/* target_async_mask implementation. */
-
-static int
-linux_nat_async_mask (int new_mask)
-{
- int curr_mask = linux_nat_async_mask_value;
-
- if (curr_mask != new_mask)
- {
- if (new_mask == 0)
- {
- linux_nat_async (NULL, 0);
- linux_nat_async_mask_value = new_mask;
- }
- else
- {
- linux_nat_async_mask_value = new_mask;
-
- /* If we're going out of async-mask in all-stop, then the
- inferior is stopped. The next resume will call
- target_async. In non-stop, the target event source
- should be always registered in the event loop. Do so
- now. */
- if (non_stop)
- linux_nat_async (inferior_event_handler, 0);
- }
- }
-
- return curr_mask;
-}
-
static int async_terminal_is_ours = 1;
/* target_terminal_inferior implementation. */
linux_nat_async (void (*callback) (enum inferior_event_type event_type,
void *context), void *context)
{
- if (linux_nat_async_mask_value == 0 || !target_async_permitted)
- internal_error (__FILE__, __LINE__,
- "Calling target_async when async is masked");
-
if (callback != NULL)
{
async_client_callback = callback;
if (target_is_async_p ())
target_async (NULL, 0);
- /* Reset the async_masking. */
- linux_nat_async_mask_value = 1;
-
if (linux_ops->to_close)
linux_ops->to_close (quitting);
}
t->to_is_async_p = linux_nat_is_async_p;
t->to_supports_non_stop = linux_nat_supports_non_stop;
t->to_async = linux_nat_async;
- t->to_async_mask = linux_nat_async_mask;
t->to_terminal_inferior = linux_nat_terminal_inferior;
t->to_terminal_ours = linux_nat_terminal_ours;
t->to_close = linux_nat_close;
--- a/gdb/record.c
+++ b/gdb/record.c
return;
}
-static int record_async_mask_value = 1;
-
static void
record_async (void (*callback) (enum inferior_event_type event_type,
void *context), void *context)
{
- if (record_async_mask_value == 0)
- internal_error (__FILE__, __LINE__,
- _("Calling record_async when async is masked"));
-
/* If we're on top of a live target (e.g., linux-nat, remote), then
set it to async mode as well. Will be NULL if we're sitting on
top of the core target, for "record restore". */
record_beneath_to_async (callback, context);
}
-static int
-record_async_mask (int new_mask)
-{
- int curr_mask = record_async_mask_value;
-
- record_async_mask_value = new_mask;
- return curr_mask;
-}
-
static int
record_can_async_p (void)
{
/* We only enable async when the user specifically asks for it. */
- if (!target_async_permitted)
- return 0;
-
- return record_async_mask_value;
+ return target_async_permitted;
}
static int
record_is_async_p (void)
{
/* We only enable async when the user specifically asks for it. */
- if (!target_async_permitted)
- return 0;
-
- return record_async_mask_value;
+ return target_async_permitted;
}
static enum exec_direction_kind
record_ops.to_async = record_async;
record_ops.to_can_async_p = record_can_async_p;
record_ops.to_is_async_p = record_is_async_p;
- record_ops.to_async_mask = record_async_mask;
record_ops.to_execution_direction = record_execution_direction;
record_ops.to_magic = OPS_MAGIC;
}
record_core_ops.to_async = record_async;
record_core_ops.to_can_async_p = record_can_async_p;
record_core_ops.to_is_async_p = record_is_async_p;
- record_core_ops.to_async_mask = record_async_mask;
record_core_ops.to_execution_direction = record_execution_direction;
record_core_ops.to_magic = OPS_MAGIC;
}
--- a/gdb/remote.c
+++ b/gdb/remote.c
static void remote_async (void (*callback) (enum inferior_event_type event_type,
void *context), void *context);
-static int remote_async_mask (int new_mask);
-
static void remote_detach (struct target_ops *ops, char *args, int from_tty);
static void remote_interrupt (int signo);
static struct target_ops extended_remote_ops;
-static int remote_async_mask_value = 1;
-
/* FIXME: cagney/1999-09-23: Even though getpkt was called with
``forever'' still use the normal timeout mechanism. This is
currently used by the ASYNC code to guarantee that target reads
remote_ops.to_can_async_p = remote_can_async_p;
remote_ops.to_is_async_p = remote_is_async_p;
remote_ops.to_async = remote_async;
- remote_ops.to_async_mask = remote_async_mask;
remote_ops.to_terminal_inferior = remote_terminal_inferior;
remote_ops.to_terminal_ours = remote_terminal_ours;
remote_ops.to_supports_non_stop = remote_supports_non_stop;
return 0;
/* We're async whenever the serial device is. */
- return remote_async_mask_value && serial_can_async_p (remote_desc);
+ return serial_can_async_p (remote_desc);
}
static int
return 0;
/* We're async whenever the serial device is. */
- return remote_async_mask_value && serial_is_async_p (remote_desc);
+ return serial_is_async_p (remote_desc);
}
/* Pass the SERIAL event on and up to the client. One day this code
remote_async (void (*callback) (enum inferior_event_type event_type,
void *context), void *context)
{
- if (remote_async_mask_value == 0)
- internal_error (__FILE__, __LINE__,
- _("Calling remote_async when async is masked"));
-
if (callback != NULL)
{
serial_async (remote_desc, remote_async_serial_handler, NULL);
serial_async (remote_desc, NULL, NULL);
}
-static int
-remote_async_mask (int new_mask)
-{
- int curr_mask = remote_async_mask_value;
-
- remote_async_mask_value = new_mask;
- return curr_mask;
-}
-
static void
set_remote_cmd (char *args, int from_tty)
{
--- a/gdb/target.c
+++ b/gdb/target.c
INHERIT (to_can_async_p, t);
INHERIT (to_is_async_p, t);
INHERIT (to_async, t);
- INHERIT (to_async_mask, t);
INHERIT (to_find_memory_regions, t);
INHERIT (to_make_corefile_notes, t);
INHERIT (to_get_bookmark, t);
de_fault (to_async,
(void (*) (void (*) (enum inferior_event_type, void*), void*))
tcomplain);
- de_fault (to_async_mask,
- (int (*) (int))
- return_one);
de_fault (to_thread_architecture,
default_thread_architecture);
current_target.to_read_description = NULL;
--- a/gdb/target.h
+++ b/gdb/target.h
int (*to_can_async_p) (void);
int (*to_is_async_p) (void);
void (*to_async) (void (*) (enum inferior_event_type, void *), void *);
- int (*to_async_mask) (int);
int (*to_supports_non_stop) (void);
/* find_memory_regions support method for gcore */
int (*to_find_memory_regions) (find_memory_region_ftype func, void *data);
#define target_async(CALLBACK,CONTEXT) \
(current_target.to_async ((CALLBACK), (CONTEXT)))
-/* This is to be used ONLY within call_function_by_hand(). It provides
- a workaround, to have inferior function calls done in sychronous
- mode, even though the target is asynchronous. After
- target_async_mask(0) is called, calls to target_can_async_p() will
- return FALSE , so that target_resume() will not try to start the
- target asynchronously. After the inferior stops, we IMMEDIATELY
- restore the previous nature of the target, by calling
- target_async_mask(1). After that, target_can_async_p() will return
- TRUE. ANY OTHER USE OF THIS FEATURE IS DEPRECATED.
-
- FIXME ezannoni 1999-12-13: we won't need this once we move
- the turning async on and off to the single execution commands,
- from where it is done currently, in remote_resume(). */
-
-#define target_async_mask(MASK) \
- (current_target.to_async_mask (MASK))
-
#define target_execution_direction() \
(current_target.to_execution_direction ())
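For reference, the workaround that the deleted target.h comment above described looked like this inside run_inferior_call before this patch (reconstructed from the removed `-' lines of the infcall.c hunk; the saved_target_shortname check that guarded the restore is omitted):

/* Old idiom, now removed: mask async around the call so that
   target_can_async_p () returns false and the resume is synchronous,
   then restore the previous mask once the inferior stops.  */
int saved_async = 0;

if (target_can_async_p ())
  saved_async = target_async_mask (0);	/* Force synchronous mode.  */

proceed (real_pc, TARGET_SIGNAL_0, 0);	/* Waits, since async is masked.  */

if (saved_async)
  target_async_mask (saved_async);	/* Restore the previous mask.  */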