/* Select target systems and architectures at runtime for GDB.
Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
- 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Free Software Foundation, Inc.
Contributed by Cygnus Support.
#include "target-descriptions.h"
#include "gdbthread.h"
#include "solib.h"
+#include "exec.h"
+#include "inline-frame.h"
+#include "tracepoint.h"
static void target_info (char *, int);
-static void kill_or_be_killed (int);
-
static void default_terminal_info (char *, int);
static int default_watchpoint_addr_within_range (struct target_ops *,
static int nosymbol (char *, CORE_ADDR *);
-static void tcomplain (void) ATTR_NORETURN;
+static void tcomplain (void) ATTRIBUTE_NORETURN;
static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
static struct target_ops *find_default_run_target (char *);
-static void nosupport_runtime (void);
-
static LONGEST default_xfer_partial (struct target_ops *ops,
enum target_object object,
const char *annex, gdb_byte *readbuf,
void *readbuf, const void *writebuf,
ULONGEST offset, LONGEST len);
+static struct gdbarch *default_thread_architecture (struct target_ops *ops,
+ ptid_t ptid);
+
static void init_dummy_target (void);
static struct target_ops debug_target;
static void debug_to_files_info (struct target_ops *);
-static int debug_to_insert_breakpoint (struct bp_target_info *);
+static int debug_to_insert_breakpoint (struct gdbarch *,
+ struct bp_target_info *);
-static int debug_to_remove_breakpoint (struct bp_target_info *);
+static int debug_to_remove_breakpoint (struct gdbarch *,
+ struct bp_target_info *);
static int debug_to_can_use_hw_breakpoint (int, int, int);
-static int debug_to_insert_hw_breakpoint (struct bp_target_info *);
+static int debug_to_insert_hw_breakpoint (struct gdbarch *,
+ struct bp_target_info *);
-static int debug_to_remove_hw_breakpoint (struct bp_target_info *);
+static int debug_to_remove_hw_breakpoint (struct gdbarch *,
+ struct bp_target_info *);
static int debug_to_insert_watchpoint (CORE_ADDR, int, int);
static void setup_target_debug (void);
-DCACHE *target_dcache;
+/* The option sets this. */
+static int stack_cache_enabled_p_1 = 1;
+/* And set_stack_cache_enabled_p updates this.
+ The reason for the separation is so that we don't flush the cache for
+ on->on transitions. */
+static int stack_cache_enabled_p = 1;
+
+/* This is called *after* the stack-cache has been set.
+ Flush the cache for off->on and on->off transitions.
+ There's no real need to flush the cache for on->off transitions,
+ except cleanliness. */
+
+static void
+set_stack_cache_enabled_p (char *args, int from_tty,
+ struct cmd_list_element *c)
+{
+ if (stack_cache_enabled_p != stack_cache_enabled_p_1)
+ target_dcache_invalidate ();
+
+ stack_cache_enabled_p = stack_cache_enabled_p_1;
+}
+
+static void
+show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
+ struct cmd_list_element *c, const char *value)
+{
+ fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
+}
+
+/* Cache of memory operations, to speed up remote access. */
+static DCACHE *target_dcache;
+
+/* Invalidate the target dcache. */
+
+void
+target_dcache_invalidate (void)
+{
+ dcache_invalidate (target_dcache);
+}
/* The user just typed 'target' without the name of a target. */
gdb_stdout);
}
+/* Default target_has_* methods for process_stratum targets. */
+
+int
+default_child_has_all_memory (struct target_ops *ops)
+{
+ /* If no inferior selected, then we can't read memory here. */
+ if (ptid_equal (inferior_ptid, null_ptid))
+ return 0;
+
+ return 1;
+}
+
+int
+default_child_has_memory (struct target_ops *ops)
+{
+ /* If no inferior selected, then we can't read memory here. */
+ if (ptid_equal (inferior_ptid, null_ptid))
+ return 0;
+
+ return 1;
+}
+
+int
+default_child_has_stack (struct target_ops *ops)
+{
+ /* If no inferior selected, there's no stack. */
+ if (ptid_equal (inferior_ptid, null_ptid))
+ return 0;
+
+ return 1;
+}
+
+int
+default_child_has_registers (struct target_ops *ops)
+{
+ /* Can't read registers from no inferior. */
+ if (ptid_equal (inferior_ptid, null_ptid))
+ return 0;
+
+ return 1;
+}
+
+int
+default_child_has_execution (struct target_ops *ops)
+{
+ /* If there's no thread selected, then we can't make it run through
+ hoops. */
+ if (ptid_equal (inferior_ptid, null_ptid))
+ return 0;
+
+ return 1;
+}
+
+
+int
+target_has_all_memory_1 (void)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ if (t->to_has_all_memory (t))
+ return 1;
+
+ return 0;
+}
+
+int
+target_has_memory_1 (void)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ if (t->to_has_memory (t))
+ return 1;
+
+ return 0;
+}
+
+int
+target_has_stack_1 (void)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ if (t->to_has_stack (t))
+ return 1;
+
+ return 0;
+}
+
+int
+target_has_registers_1 (void)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ if (t->to_has_registers (t))
+ return 1;
+
+ return 0;
+}
+
+int
+target_has_execution_1 (void)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ if (t->to_has_execution (t))
+ return 1;
+
+ return 0;
+}
+
/* Add a possible target architecture to the list. */
void
if (t->to_xfer_partial == NULL)
t->to_xfer_partial = default_xfer_partial;
+ if (t->to_has_all_memory == NULL)
+ t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
+
+ if (t->to_has_memory == NULL)
+ t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
+
+ if (t->to_has_stack == NULL)
+ t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
+
+ if (t->to_has_registers == NULL)
+ t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
+
+ if (t->to_has_execution == NULL)
+ t->to_has_execution = (int (*) (struct target_ops *)) return_zero;
+
if (!target_structs)
{
target_struct_allocsize = DEFAULT_ALLOCSIZE;
void
target_load (char *arg, int from_tty)
{
- dcache_invalidate (target_dcache);
+ target_dcache_invalidate ();
(*current_target.to_load) (arg, from_tty);
}
char **env, int from_tty)
{
struct target_ops *t;
+
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
if (t->to_create_inferior != NULL)
target_terminal_inferior (void)
{
/* A background resume (``run&'') should leave GDB in control of the
- terminal. */
- if (target_is_async_p () && !sync_execution)
+ terminal. Use target_can_async_p, not target_is_async_p, since at
+ this point the target is not async yet. However, if sync_execution
+ is not set, we know it will become async prior to resume. */
+ if (target_can_async_p () && !sync_execution)
return;
/* If GDB is resuming the inferior in the foreground, install
return 1; /* Symbol does not exist in target env */
}
-static void
-nosupport_runtime (void)
-{
- if (ptid_equal (inferior_ptid, null_ptid))
- noprocess ();
- else
- error (_("No run-time support for this"));
-}
-
-
static void
default_terminal_info (char *args, int from_tty)
{
printf_unfiltered (_("No saved terminal information.\n"));
}
-/* This is the default target_create_inferior and target_attach function.
- If the current target is executing, it asks whether to kill it off.
- If this function returns without calling error(), it has killed off
- the target, and the operation should be attempted. */
-
-static void
-kill_or_be_killed (int from_tty)
-{
- if (target_has_execution)
- {
- printf_unfiltered (_("You are already running a program:\n"));
- target_files_info ();
- if (query (_("Kill it? ")))
- {
- target_kill ();
- if (target_has_execution)
- error (_("Killing the program did not help."));
- return;
- }
- else
- {
- error (_("Program not killed."));
- }
- }
- tcomplain ();
-}
-
/* A default implementation for the to_get_ada_task_ptid target method.
This function builds the PTID by using both LWP and TID as part of
/* Do not inherit to_follow_fork. */
INHERIT (to_insert_exec_catchpoint, t);
INHERIT (to_remove_exec_catchpoint, t);
+ INHERIT (to_set_syscall_catchpoint, t);
INHERIT (to_has_exited, t);
/* Do not inherit to_mourn_inferiour. */
INHERIT (to_can_run, t);
INHERIT (to_pid_to_exec_file, t);
INHERIT (to_log_command, t);
INHERIT (to_stratum, t);
- INHERIT (to_has_all_memory, t);
- INHERIT (to_has_memory, t);
- INHERIT (to_has_stack, t);
- INHERIT (to_has_registers, t);
- INHERIT (to_has_execution, t);
+  /* Do not inherit to_has_all_memory. */
+  /* Do not inherit to_has_memory. */
+  /* Do not inherit to_has_stack. */
+  /* Do not inherit to_has_registers. */
+  /* Do not inherit to_has_execution. */
INHERIT (to_has_thread_control, t);
- INHERIT (to_sections, t);
- INHERIT (to_sections_end, t);
INHERIT (to_can_async_p, t);
INHERIT (to_is_async_p, t);
INHERIT (to_async, t);
INHERIT (to_async_mask, t);
INHERIT (to_find_memory_regions, t);
INHERIT (to_make_corefile_notes, t);
+ INHERIT (to_get_bookmark, t);
+ INHERIT (to_goto_bookmark, t);
/* Do not inherit to_get_thread_local_address. */
INHERIT (to_can_execute_reverse, t);
+ INHERIT (to_thread_architecture, t);
/* Do not inherit to_read_description. */
INHERIT (to_get_ada_task_ptid, t);
/* Do not inherit to_search_memory. */
INHERIT (to_supports_multi_process, t);
+ INHERIT (to_trace_init, t);
+ INHERIT (to_download_tracepoint, t);
+ INHERIT (to_download_trace_state_variable, t);
+ INHERIT (to_trace_set_readonly_regions, t);
+ INHERIT (to_trace_start, t);
+ INHERIT (to_get_trace_status, t);
+ INHERIT (to_trace_stop, t);
+ INHERIT (to_trace_find, t);
+ INHERIT (to_get_trace_state_variable_value, t);
+ INHERIT (to_save_trace_data, t);
+ INHERIT (to_upload_tracepoints, t);
+ INHERIT (to_upload_trace_state_variables, t);
+ INHERIT (to_get_raw_trace_data, t);
+ INHERIT (to_set_disconnected_tracing, t);
+ INHERIT (to_set_circular_trace_buffer, t);
+ INHERIT (to_get_tib_address, t);
INHERIT (to_magic, t);
/* Do not inherit to_memory_map. */
/* Do not inherit to_flash_erase. */
(int (*) (int, int, int))
return_zero);
de_fault (to_insert_hw_breakpoint,
- (int (*) (struct bp_target_info *))
+ (int (*) (struct gdbarch *, struct bp_target_info *))
return_minus_one);
de_fault (to_remove_hw_breakpoint,
- (int (*) (struct bp_target_info *))
+ (int (*) (struct gdbarch *, struct bp_target_info *))
return_minus_one);
de_fault (to_insert_watchpoint,
(int (*) (CORE_ADDR, int, int))
de_fault (to_remove_exec_catchpoint,
(int (*) (int))
tcomplain);
+ de_fault (to_set_syscall_catchpoint,
+ (int (*) (int, int, int, int, int *))
+ tcomplain);
de_fault (to_has_exited,
(int (*) (int, int, int *))
return_zero);
de_fault (to_async_mask,
(int (*) (int))
return_one);
+ de_fault (to_thread_architecture,
+ default_thread_architecture);
current_target.to_read_description = NULL;
de_fault (to_get_ada_task_ptid,
(ptid_t (*) (long, long))
de_fault (to_supports_multi_process,
(int (*) (void))
return_zero);
+ de_fault (to_trace_init,
+ (void (*) (void))
+ tcomplain);
+ de_fault (to_download_tracepoint,
+ (void (*) (struct breakpoint *))
+ tcomplain);
+ de_fault (to_download_trace_state_variable,
+ (void (*) (struct trace_state_variable *))
+ tcomplain);
+ de_fault (to_trace_set_readonly_regions,
+ (void (*) (void))
+ tcomplain);
+ de_fault (to_trace_start,
+ (void (*) (void))
+ tcomplain);
+ de_fault (to_get_trace_status,
+ (int (*) (struct trace_status *))
+ return_minus_one);
+ de_fault (to_trace_stop,
+ (void (*) (void))
+ tcomplain);
+ de_fault (to_trace_find,
+ (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
+ return_minus_one);
+ de_fault (to_get_trace_state_variable_value,
+ (int (*) (int, LONGEST *))
+ return_zero);
+ de_fault (to_save_trace_data,
+ (int (*) (const char *))
+ tcomplain);
+ de_fault (to_upload_tracepoints,
+ (int (*) (struct uploaded_tp **))
+ return_zero);
+ de_fault (to_upload_trace_state_variables,
+ (int (*) (struct uploaded_tsv **))
+ return_zero);
+ de_fault (to_get_raw_trace_data,
+ (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
+ tcomplain);
+ de_fault (to_set_disconnected_tracing,
+ (void (*) (int))
+ target_ignore);
+ de_fault (to_set_circular_trace_buffer,
+ (void (*) (int))
+ target_ignore);
+ de_fault (to_get_tib_address,
+ (int (*) (ptid_t, CORE_ADDR *))
+ tcomplain);
#undef de_fault
/* Finally, position the target-stack beneath the squashed
setup_target_debug ();
}
-/* Mark OPS as a running target. This reverses the effect
- of target_mark_exited. */
-
-void
-target_mark_running (struct target_ops *ops)
-{
- struct target_ops *t;
-
- for (t = target_stack; t != NULL; t = t->beneath)
- if (t == ops)
- break;
- if (t == NULL)
- internal_error (__FILE__, __LINE__,
- "Attempted to mark unpushed target \"%s\" as running",
- ops->to_shortname);
-
- ops->to_has_execution = 1;
- ops->to_has_all_memory = 1;
- ops->to_has_memory = 1;
- ops->to_has_stack = 1;
- ops->to_has_registers = 1;
-
- update_current_target ();
-}
-
-/* Mark OPS as a non-running target. This reverses the effect
- of target_mark_running. */
-
-void
-target_mark_exited (struct target_ops *ops)
-{
- struct target_ops *t;
-
- for (t = target_stack; t != NULL; t = t->beneath)
- if (t == ops)
- break;
- if (t == NULL)
- internal_error (__FILE__, __LINE__,
- "Attempted to mark unpushed target \"%s\" as running",
- ops->to_shortname);
-
- ops->to_has_execution = 0;
- ops->to_has_all_memory = 0;
- ops->to_has_memory = 0;
- ops->to_has_stack = 0;
- ops->to_has_registers = 0;
-
- update_current_target ();
-}
-
/* Push a new target type into the stack of the existing target accessors,
possibly superseding some of the existing accessors.
- Result is zero if the pushed target ended up on top of the stack,
- nonzero if at least one target is on top of it.
-
Rather than allow an empty stack, we always have the dummy target at
the bottom stratum, so we can call the function vectors without
checking them. */
-int
+void
push_target (struct target_ops *t)
{
struct target_ops **cur;
/* There's already something at this stratum level. Close it,
and un-hook it from the stack. */
struct target_ops *tmp = (*cur);
+
(*cur) = (*cur)->beneath;
tmp->beneath = NULL;
target_close (tmp, 0);
(*cur) = t;
update_current_target ();
-
- /* Not on top? */
- return (t != target_stack);
}
/* Remove a target_ops vector from the stack, wherever it may be.
fprintf_unfiltered (gdb_stderr,
"pop_target couldn't find target %s\n",
current_target.to_shortname);
- internal_error (__FILE__, __LINE__, _("failed internal consistency check"));
+ internal_error (__FILE__, __LINE__,
+ _("failed internal consistency check"));
}
void
if (bufptr - buffer + tlen > buffer_allocated)
{
unsigned int bytes;
+
bytes = bufptr - buffer;
buffer_allocated *= 2;
buffer = xrealloc (buffer, buffer_allocated);
return nbytes_read;
}
+struct target_section_table *
+target_get_section_table (struct target_ops *target)
+{
+ struct target_ops *t;
+
+ if (targetdebug)
+ fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
+
+ for (t = target; t != NULL; t = t->beneath)
+ if (t->to_get_section_table != NULL)
+ return (*t->to_get_section_table) (t);
+
+ return NULL;
+}
+
/* Find a section containing ADDR. */
+
struct target_section *
target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
{
+ struct target_section_table *table = target_get_section_table (target);
struct target_section *secp;
- for (secp = target->to_sections;
- secp < target->to_sections_end;
- secp++)
+
+ if (table == NULL)
+ return NULL;
+
+ for (secp = table->sections; secp < table->sections_end; secp++)
{
if (addr >= secp->addr && addr < secp->endaddr)
return secp;
return NULL;
}
-/* Perform a partial memory transfer. The arguments and return
- value are just as for target_xfer_partial. */
+/* Perform a partial memory transfer.
+ For docs see target.h, to_xfer_partial. */
static LONGEST
-memory_xfer_partial (struct target_ops *ops, void *readbuf, const void *writebuf,
- ULONGEST memaddr, LONGEST len)
+memory_xfer_partial (struct target_ops *ops, enum target_object object,
+ void *readbuf, const void *writebuf, ULONGEST memaddr,
+ LONGEST len)
{
LONGEST res;
int reg_len;
struct mem_region *region;
+ struct inferior *inf;
/* Zero length requests are ok and require no work. */
if (len == 0)
return 0;
- /* Try the executable file, if "trust-readonly-sections" is set. */
+ /* For accesses to unmapped overlay sections, read directly from
+ files. Must do this first, as MEMADDR may need adjustment. */
+ if (readbuf != NULL && overlay_debugging)
+ {
+ struct obj_section *section = find_pc_overlay (memaddr);
+
+ if (pc_in_unmapped_range (memaddr, section))
+ {
+ struct target_section_table *table
+ = target_get_section_table (ops);
+ const char *section_name = section->the_bfd_section->name;
+
+ memaddr = overlay_mapped_address (memaddr, section);
+ return section_table_xfer_memory_partial (readbuf, writebuf,
+ memaddr, len,
+ table->sections,
+ table->sections_end,
+ section_name);
+ }
+ }
+
+ /* Try the executable files, if "trust-readonly-sections" is set. */
if (readbuf != NULL && trust_readonly)
{
struct target_section *secp;
+ struct target_section_table *table;
secp = target_section_by_addr (ops, memaddr);
if (secp != NULL
&& (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
& SEC_READONLY))
- return xfer_memory (memaddr, readbuf, len, 0, NULL, ops);
- }
-
- /* Likewise for accesses to unmapped overlay sections. */
- if (readbuf != NULL && overlay_debugging)
- {
- struct obj_section *section = find_pc_overlay (memaddr);
- if (pc_in_unmapped_range (memaddr, section))
- return xfer_memory (memaddr, readbuf, len, 0, NULL, ops);
+ {
+ table = target_get_section_table (ops);
+ return section_table_xfer_memory_partial (readbuf, writebuf,
+ memaddr, len,
+ table->sections,
+ table->sections_end,
+ NULL);
+ }
}
/* Try GDB's internal data cache. */
return -1;
}
- if (region->attrib.cache)
+ if (!ptid_equal (inferior_ptid, null_ptid))
+ inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
+ else
+ inf = NULL;
+
+ if (inf != NULL
+ /* The dcache reads whole cache lines; that doesn't play well
+ with reading from a trace buffer, because reading outside of
+ the collected memory range fails. */
+ && get_traceframe_number () == -1
+ && (region->attrib.cache
+ || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
{
- /* FIXME drow/2006-08-09: This call discards OPS, so the raw
- memory request will start back at current_target. */
if (readbuf != NULL)
- res = dcache_xfer_memory (target_dcache, memaddr, readbuf,
+ res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
reg_len, 0);
else
/* FIXME drow/2006-08-09: If we're going to preserve const
correctness dcache_xfer_memory should take readbuf and
writebuf. */
- res = dcache_xfer_memory (target_dcache, memaddr,
+ res = dcache_xfer_memory (ops, target_dcache, memaddr,
(void *) writebuf,
reg_len, 1);
if (res <= 0)
/* We want to continue past core files to executables, but not
past a running target's memory. */
- if (ops->to_has_all_memory)
+ if (ops->to_has_all_memory (ops))
break;
ops = ops->beneath;
if (readbuf && !show_memory_breakpoints)
breakpoint_restore_shadows (readbuf, memaddr, reg_len);
+  /* Make sure the cache gets updated no matter what, if we are writing
+     to the stack.  Even if this write is not tagged as such, we still
+     need to update the cache. */
+
+ if (res > 0
+ && inf != NULL
+ && writebuf != NULL
+ && !region->attrib.cache
+ && stack_cache_enabled_p
+ && object != TARGET_OBJECT_STACK_MEMORY)
+ {
+ dcache_update (target_dcache, memaddr, (void *) writebuf, res);
+ }
+
/* If we still haven't got anything, return the last error. We
give up. */
return res;
make_show_memory_breakpoints_cleanup (int show)
{
int current = show_memory_breakpoints;
- show_memory_breakpoints = show;
+ show_memory_breakpoints = show;
return make_cleanup (restore_show_memory_breakpoints,
(void *) (uintptr_t) current);
}
+/* For docs see target.h, to_xfer_partial. */
+
static LONGEST
target_xfer_partial (struct target_ops *ops,
enum target_object object, const char *annex,
/* If this is a memory transfer, let the memory-specific code
have a look at it instead. Memory transfers are more
complicated. */
- if (object == TARGET_OBJECT_MEMORY)
- retval = memory_xfer_partial (ops, readbuf, writebuf, offset, len);
+ if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
+ retval = memory_xfer_partial (ops, object, readbuf,
+ writebuf, offset, len);
else
{
enum target_object raw_object = object;
int
target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
{
- if (target_read (¤t_target, TARGET_OBJECT_MEMORY, NULL,
+ /* Dispatch to the topmost target, not the flattened current_target.
+ Memory accesses check target->to_has_(all_)memory, and the
+ flattened target doesn't inherit those. */
+ if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
myaddr, memaddr, len) == len)
return 0;
else
return EIO;
}
+/* Like target_read_memory, but specify explicitly that this is a read from
+ the target's stack. This may trigger different cache behavior. */
+
+int
+target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
+{
+ /* Dispatch to the topmost target, not the flattened current_target.
+ Memory accesses check target->to_has_(all_)memory, and the
+ flattened target doesn't inherit those. */
+
+ if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
+ myaddr, memaddr, len) == len)
+ return 0;
+ else
+ return EIO;
+}
+
+/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
+ Returns either 0 for success or an errno value if any error occurs.
+ If an error occurs, no guarantee is made about how much data got written.
+ Callers that can deal with partial writes should call target_write. */
+
int
target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
{
- if (target_write (¤t_target, TARGET_OBJECT_MEMORY, NULL,
+ /* Dispatch to the topmost target, not the flattened current_target.
+ Memory accesses check target->to_has_(all_)memory, and the
+ flattened target doesn't inherit those. */
+ if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
myaddr, memaddr, len) == len)
return 0;
else
for (t = current_target.beneath; t != NULL; t = t->beneath)
if (t->to_flash_erase != NULL)
- {
- if (targetdebug)
- fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
- paddr (address), phex (length, 0));
- t->to_flash_erase (t, address, length);
- return;
- }
+ {
+ if (targetdebug)
+ fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
+ hex_string (address), phex (length, 0));
+ t->to_flash_erase (t, address, length);
+ return;
+ }
tcomplain ();
}
for (t = current_target.beneath; t != NULL; t = t->beneath)
if (t->to_flash_done != NULL)
- {
- if (targetdebug)
- fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
- t->to_flash_done (t);
- return;
- }
+ {
+ if (targetdebug)
+ fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
+ t->to_flash_done (t);
+ return;
+ }
tcomplain ();
}
"deprecated_xfer_memory" method. */
{
int xfered = -1;
+
errno = 0;
if (writebuf != NULL)
{
void *buffer = xmalloc (len);
struct cleanup *cleanup = make_cleanup (xfree, buffer);
+
memcpy (buffer, writebuf, len);
xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1/*write*/, NULL, ops);
return -1;
}
-/* Target vector read/write partial wrapper functions.
-
- NOTE: cagney/2003-10-21: I wonder if having "to_xfer_partial
- (inbuf, outbuf)", instead of separate read/write methods, make life
- easier. */
+/* Target vector read/write partial wrapper functions. */
static LONGEST
target_read_partial (struct target_ops *ops,
}
/* Wrappers to perform the full transfer. */
+
+/* For docs on target_read see target.h. */
+
LONGEST
target_read (struct target_ops *ops,
enum target_object object,
ULONGEST offset, LONGEST len)
{
LONGEST xfered = 0;
+
while (xfered < len)
{
LONGEST xfer = target_read_partial (ops, object, annex,
(gdb_byte *) buf + xfered,
offset + xfered, len - xfered);
+
/* Call an observer, notifying them of the xfer progress? */
if (xfer == 0)
return xfered;
ULONGEST offset, LONGEST len)
{
LONGEST xfered = 0;
+
while (xfered < len)
{
LONGEST xfer = target_read_partial (ops, object, annex,
(gdb_byte *) buf + xfered,
offset + xfered, len - xfered);
+
/* Call an observer, notifying them of the xfer progress? */
if (xfer == 0)
return xfered;
return len;
}
-
/* An alternative to target_write with progress callbacks. */
LONGEST
return len;
}
+/* For docs on target_write see target.h. */
+
LONGEST
target_write (struct target_ops *ops,
enum target_object object,
get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
LONGEST len)
{
- if (target_read (ops, TARGET_OBJECT_MEMORY, NULL, buf, addr, len)
+ /* This method is used to read from an alternate, non-current
+ target. This read must bypass the overlay support (as symbols
+ don't match this target), and GDB's internal cache (wrong cache
+ for this target). */
+ if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
!= len)
memory_error (EIO, addr);
}
ULONGEST
-get_target_memory_unsigned (struct target_ops *ops,
- CORE_ADDR addr, int len)
+get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
+ int len, enum bfd_endian byte_order)
{
gdb_byte buf[sizeof (ULONGEST)];
gdb_assert (len <= sizeof (buf));
get_target_memory (ops, addr, buf, len);
- return extract_unsigned_integer (buf, len);
+ return extract_unsigned_integer (buf, len, byte_order);
}
static void
for (t = target_stack; t != NULL; t = t->beneath)
{
- if (!t->to_has_memory)
+ if (!(*t->to_has_memory) (t))
continue;
if ((int) (t->to_stratum) <= (int) dummy_stratum)
printf_unfiltered (_("\tWhile running this, GDB does not access memory from...\n"));
printf_unfiltered ("%s:\n", t->to_longname);
(t->to_files_info) (t);
- has_all_mem = t->to_has_all_memory;
+ has_all_mem = (*t->to_has_all_memory) (t);
}
}
}
}
+/* Callback for iterate_over_inferiors. Gets rid of the given
+ inferior. */
+
+static int
+dispose_inferior (struct inferior *inf, void *args)
+{
+ struct thread_info *thread;
+
+ thread = any_thread_of_process (inf->pid);
+ if (thread)
+ {
+ switch_to_thread (thread->ptid);
+
+ /* Core inferiors actually should be detached, not killed. */
+ if (target_has_execution)
+ target_kill ();
+ else
+ target_detach (NULL, 0);
+ }
+
+ return 0;
+}
+
/* This is to be called by the open routine before it does
anything. */
{
dont_repeat ();
- if (target_has_execution)
+ if (have_inferiors ())
{
if (!from_tty
- || query (_("A program is being debugged already. Kill it? ")))
- target_kill ();
+ || !have_live_inferiors ()
+ || query (_("A program is being debugged already. Kill it? ")))
+ iterate_over_inferiors (dispose_inferior, NULL);
else
error (_("Program not killed."));
}
else
/* If we're in breakpoints-always-inserted mode, have to remove
them before detaching. */
- remove_breakpoints ();
+ remove_breakpoints_pid (PIDGET (inferior_ptid));
+
+ prepare_for_detach ();
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
{
struct target_ops *t;
- dcache_invalidate (target_dcache);
+ target_dcache_invalidate ();
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
step ? "step" : "continue",
target_signal_to_name (signal));
+ registers_changed_ptid (ptid);
set_executing (ptid, 1);
set_running (ptid, 1);
+ clear_inline_frame_state (ptid);
return;
}
}
if (t->to_follow_fork != NULL)
{
int retval = t->to_follow_fork (t, follow_child);
+
if (targetdebug)
fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
follow_child, retval);
target_mourn_inferior (void)
{
struct target_ops *t;
+
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
if (t->to_mourn_inferior != NULL)
if (found_ptr != NULL)
{
CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
+
*found_addrp = found_addr;
do_cleanups (old_cleanups);
return 1;
if (search_space_len >= pattern_len)
{
unsigned keep_len = search_buf_size - chunk_size;
- CORE_ADDR read_addr = start_addr + keep_len;
+ CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
int nr_to_read;
/* Copy the trailing part of the previous iteration to the front
{
/* If a special version of to_search_memory isn't available, use the
simple version. */
- found = simple_search_memory (¤t_target,
+ found = simple_search_memory (current_target.beneath,
start_addr, search_space_len,
pattern, pattern_len, found_addrp);
}
/* Do not worry about thread_stratum targets that can not
create inferiors. Assume they will be pushed again if
necessary, and continue to the process_stratum. */
- if (t->to_stratum == thread_stratum)
+ if (t->to_stratum == thread_stratum
+ || t->to_stratum == arch_stratum)
continue;
error (_("\
target_supports_non_stop (void)
{
struct target_ops *t;
+
for (t = ¤t_target; t != NULL; t = t->beneath)
if (t->to_supports_non_stop)
return t->to_supports_non_stop ();
char *
target_get_osdata (const char *type)
{
- char *document;
struct target_ops *t;
/* If we're already connected to something that can get us OS
return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
}
+/* Determine the current address space of thread PTID. */
+
+struct address_space *
+target_thread_address_space (ptid_t ptid)
+{
+ struct address_space *aspace;
+ struct inferior *inf;
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ {
+ if (t->to_thread_address_space != NULL)
+ {
+ aspace = t->to_thread_address_space (t, ptid);
+ gdb_assert (aspace);
+
+ if (targetdebug)
+ fprintf_unfiltered (gdb_stdlog,
+ "target_thread_address_space (%s) = %d\n",
+ target_pid_to_str (ptid),
+ address_space_num (aspace));
+ return aspace;
+ }
+ }
+
+ /* Fall-back to the "main" address space of the inferior. */
+ inf = find_inferior_pid (ptid_get_pid (ptid));
+
+ if (inf == NULL || inf->aspace == NULL)
+ internal_error (__FILE__, __LINE__, "\
+Can't determine the current address space of thread %s\n",
+ target_pid_to_str (ptid));
+
+ return inf->aspace;
+}
+
static int
default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
return addr >= start && addr < start + length;
}
+static struct gdbarch *
+default_thread_architecture (struct target_ops *ops, ptid_t ptid)
+{
+ return target_gdbarch;
+}
+
static int
return_zero (void)
{
return -1;
}
-/*
- * Resize the to_sections pointer. Also make sure that anyone that
- * was holding on to an old value of it gets updated.
- * Returns the old size.
- */
-
-int
-target_resize_to_sections (struct target_ops *target, int num_added)
-{
- struct target_ops **t;
- struct target_section *old_value;
- int old_count;
-
- old_value = target->to_sections;
-
- if (target->to_sections)
- {
- old_count = target->to_sections_end - target->to_sections;
- target->to_sections = (struct target_section *)
- xrealloc ((char *) target->to_sections,
- (sizeof (struct target_section)) * (num_added + old_count));
- }
- else
- {
- old_count = 0;
- target->to_sections = (struct target_section *)
- xmalloc ((sizeof (struct target_section)) * num_added);
- }
- target->to_sections_end = target->to_sections + (num_added + old_count);
-
- /* Check to see if anyone else was pointing to this structure.
- If old_value was null, then no one was. */
-
- if (old_value)
- {
- for (t = target_structs; t < target_structs + target_struct_size;
- ++t)
- {
- if ((*t)->to_sections == old_value)
- {
- (*t)->to_sections = target->to_sections;
- (*t)->to_sections_end = target->to_sections_end;
- }
- }
- /* There is a flattened view of the target stack in current_target,
- so its to_sections pointer might also need updating. */
- if (current_target.to_sections == old_value)
- {
- current_target.to_sections = target->to_sections;
- current_target.to_sections_end = target->to_sections_end;
- }
- }
-
- return old_count;
-
-}
-
-/* Remove all target sections taken from ABFD.
-
- Scan the current target stack for targets whose section tables
- refer to sections from BFD, and remove those sections. We use this
- when we notice that the inferior has unloaded a shared object, for
- example. */
-void
-remove_target_sections (bfd *abfd)
-{
- struct target_ops **t;
-
- for (t = target_structs; t < target_structs + target_struct_size; t++)
- {
- struct target_section *src, *dest;
-
- dest = (*t)->to_sections;
- for (src = (*t)->to_sections; src < (*t)->to_sections_end; src++)
- if (src->bfd != abfd)
- {
- /* Keep this section. */
- if (dest < src) *dest = *src;
- dest++;
- }
-
- /* If we've dropped any sections, resize the section table. */
- if (dest < src)
- target_resize_to_sections (*t, dest - src);
- }
-}
-
-
-
-
/* Find a single runnable target in the stack and return it. If for
some reason there is more than one, return NULL. */
if (!ptid_equal (ptid, null_ptid))
{
int pid = ptid_get_pid (ptid);
- delete_inferior (pid);
+ exit_inferior (pid);
}
breakpoint_init_inferior (inf_exited);
return normal_pid_to_str (ptid);
}
-/* Error-catcher for target_find_memory_regions */
-static int dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
+/* Error-catcher for target_find_memory_regions. */
+static int
+dummy_find_memory_regions (int (*ignore1) (), void *ignore2)
{
- error (_("No target."));
+ error (_("Command not implemented for this target."));
return 0;
}
-/* Error-catcher for target_make_corefile_notes */
-static char * dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
+/* Error-catcher for target_make_corefile_notes. */
+static char *
+dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
+{
+ error (_("Command not implemented for this target."));
+ return NULL;
+}
+
+/* Error-catcher for target_get_bookmark. */
+static gdb_byte *
+dummy_get_bookmark (char *ignore1, int ignore2)
{
- error (_("No target."));
+ tcomplain ();
return NULL;
}
+/* Error-catcher for target_goto_bookmark. */
+static void
+dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
+{
+ tcomplain ();
+}
+
/* Set up the handful of non-empty slots needed by the dummy target
vector. */
dummy_target.to_stratum = dummy_stratum;
dummy_target.to_find_memory_regions = dummy_find_memory_regions;
dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
+ dummy_target.to_get_bookmark = dummy_get_bookmark;
+ dummy_target.to_goto_bookmark = dummy_goto_bookmark;
dummy_target.to_xfer_partial = default_xfer_partial;
+ dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
+ dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
+ dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
+ dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
+ dummy_target.to_has_execution = (int (*) (struct target_ops *)) return_zero;
+ dummy_target.to_stopped_by_watchpoint = return_zero;
+ dummy_target.to_stopped_data_address =
+ (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
dummy_target.to_magic = OPS_MAGIC;
}
\f
target_attach (char *args, int from_tty)
{
struct target_ops *t;
+
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
if (t->to_attach != NULL)
target_thread_alive (ptid_t ptid)
{
struct target_ops *t;
+
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
if (t->to_thread_alive != NULL)
target_find_new_threads (void)
{
struct target_ops *t;
+
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
if (t->to_find_new_threads != NULL)
case TARGET_WAITKIND_EXECD:
return xstrprintf ("%sexecd", kind_str);
case TARGET_WAITKIND_SYSCALL_ENTRY:
- return xstrprintf ("%ssyscall-entry", kind_str);
+ return xstrprintf ("%sentered syscall", kind_str);
case TARGET_WAITKIND_SYSCALL_RETURN:
- return xstrprintf ("%ssyscall-return", kind_str);
+ return xstrprintf ("%sexited syscall", kind_str);
case TARGET_WAITKIND_SPURIOUS:
return xstrprintf ("%sspurious", kind_str);
case TARGET_WAITKIND_IGNORE:
struct regcache *regcache, int regno)
{
struct gdbarch *gdbarch = get_regcache_arch (regcache);
+
fprintf_unfiltered (gdb_stdlog, "%s ", func);
if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
&& gdbarch_register_name (gdbarch, regno) != NULL
fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
int i, size = register_size (gdbarch, regno);
unsigned char buf[MAX_REGISTER_SIZE];
+
regcache_raw_collect (regcache, regno, buf);
fprintf_unfiltered (gdb_stdlog, " = ");
for (i = 0; i < size; i++)
}
if (size <= sizeof (LONGEST))
{
- ULONGEST val = extract_unsigned_integer (buf, size);
+ ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
+
fprintf_unfiltered (gdb_stdlog, " %s %s",
core_addr_to_string_nz (val), plongest (val));
}
target_fetch_registers (struct regcache *regcache, int regno)
{
struct target_ops *t;
+
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
if (t->to_fetch_registers != NULL)
void
target_store_registers (struct regcache *regcache, int regno)
{
-
struct target_ops *t;
+
for (t = current_target.beneath; t != NULL; t = t->beneath)
{
if (t->to_store_registers != NULL)
noprocess ();
}
+int
+target_core_of_thread (ptid_t ptid)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ {
+ if (t->to_core_of_thread != NULL)
+ {
+ int retval = t->to_core_of_thread (t, ptid);
+
+ if (targetdebug)
+ fprintf_unfiltered (gdb_stdlog, "target_core_of_thread (%d) = %d\n",
+ PIDGET (ptid), retval);
+ return retval;
+ }
+ }
+
+ return -1;
+}
+
+int
+target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
+{
+ struct target_ops *t;
+
+ for (t = current_target.beneath; t != NULL; t = t->beneath)
+ {
+ if (t->to_verify_memory != NULL)
+ {
+ int retval = t->to_verify_memory (t, data, memaddr, size);
+
+ if (targetdebug)
+ fprintf_unfiltered (gdb_stdlog, "target_verify_memory (%s, %s) = %d\n",
+ paddress (target_gdbarch, memaddr),
+ pulongest (size),
+ retval);
+ return retval;
+ }
+ }
+
+ tcomplain ();
+}
+
static void
debug_to_prepare_to_store (struct regcache *regcache)
{
fprintf_unfiltered (gdb_stdlog,
"target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
- paddress (memaddr), len, write ? "write" : "read",
- retval);
+ paddress (target_gdbarch, memaddr), len,
+ write ? "write" : "read", retval);
if (retval > 0)
{
}
static int
-debug_to_insert_breakpoint (struct bp_target_info *bp_tgt)
+debug_to_insert_breakpoint (struct gdbarch *gdbarch,
+ struct bp_target_info *bp_tgt)
{
int retval;
- retval = debug_target.to_insert_breakpoint (bp_tgt);
+ retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
fprintf_unfiltered (gdb_stdlog,
"target_insert_breakpoint (0x%lx, xxx) = %ld\n",
}
static int
-debug_to_remove_breakpoint (struct bp_target_info *bp_tgt)
+debug_to_remove_breakpoint (struct gdbarch *gdbarch,
+ struct bp_target_info *bp_tgt)
{
int retval;
- retval = debug_target.to_remove_breakpoint (bp_tgt);
+ retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
fprintf_unfiltered (gdb_stdlog,
"target_remove_breakpoint (0x%lx, xxx) = %ld\n",
}
static int
-debug_to_insert_hw_breakpoint (struct bp_target_info *bp_tgt)
+debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
+ struct bp_target_info *bp_tgt)
{
int retval;
- retval = debug_target.to_insert_hw_breakpoint (bp_tgt);
+ retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
fprintf_unfiltered (gdb_stdlog,
"target_insert_hw_breakpoint (0x%lx, xxx) = %ld\n",
}
static int
-debug_to_remove_hw_breakpoint (struct bp_target_info *bp_tgt)
+debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
+ struct bp_target_info *bp_tgt)
{
int retval;
- retval = debug_target.to_remove_hw_breakpoint (bp_tgt);
+ retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
fprintf_unfiltered (gdb_stdlog,
"target_remove_hw_breakpoint (0x%lx, xxx) = %ld\n",
PIDGET (ptid));
}
+static struct gdbarch *
+debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
+{
+ struct gdbarch *retval;
+
+ retval = debug_target.to_thread_architecture (ops, ptid);
+
+ fprintf_unfiltered (gdb_stdlog, "target_thread_architecture (%s) = %s [%s]\n",
+ target_pid_to_str (ptid), host_address_to_string (retval),
+ gdbarch_bfd_arch_info (retval)->printable_name);
+ return retval;
+}
+
static void
debug_to_stop (ptid_t ptid)
{
current_target.to_stop = debug_to_stop;
current_target.to_rcmd = debug_to_rcmd;
current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
+ current_target.to_thread_architecture = debug_to_thread_architecture;
}
\f
set_maintenance_target_async_permitted (char *args, int from_tty,
struct cmd_list_element *c)
{
- if (target_has_execution)
+ if (have_live_inferiors ())
{
target_async_permitted_1 = target_async_permitted;
error (_("Cannot change this setting while the inferior is running."));
&setlist,
&showlist);
+ add_setshow_boolean_cmd ("stack-cache", class_support,
+ &stack_cache_enabled_p_1, _("\
+Set cache use for stack access."), _("\
+Show cache use for stack access."), _("\
+When on, use the data cache for all stack access, regardless of any\n\
+configured memory regions. This improves remote performance significantly.\n\
+By default, caching for stack access is on."),
+ set_stack_cache_enabled_p,
+ show_stack_cache_enabled_p,
+ &setlist, &showlist);
+
target_dcache = dcache_init ();
}