/* Low level packing and unpacking of values for GDB, the GNU Debugger.
- Copyright (C) 1986-2020 Free Software Foundation, Inc.
+ Copyright (C) 1986-2022 Free Software Foundation, Inc.
This file is part of GDB.
#include "cp-abi.h"
#include "user-regs.h"
#include <algorithm>
+#include <iterator>
+#include <utility>
+#include <vector>
#include "completer.h"
#include "gdbsupport/selftest.h"
#include "gdbsupport/array-view.h"
#include "cli/cli-style.h"
+#include "expop.h"
+#include "inferior.h"
+#include "varobj.h"
/* Definition of a user function. */
struct internal_function
care for (this is a range afterall), we need to check if the
_previous_ range overlaps the I range. E.g.,
- R
- |---|
+ R
+ |---|
|---| |---| |------| ... |--|
0 1 2 N
Then we need to check if the I range overlaps the I range itself.
E.g.,
- R
- |---|
+ R
+ |---|
|---| |---| |-------| ... |--|
0 1 2 N
lazy (1),
initialized (1),
stack (0),
+ is_zero (false),
type (type_),
enclosing_type (type_)
{
used instead of read_memory to enable extra caching. */
unsigned int stack : 1;
+ /* True if this is a zero value, created by 'value_zero'; false
+ otherwise. */
+ bool is_zero : 1;
+
/* Location of value (if lval). */
union
{
LONGEST embedded_offset = 0;
LONGEST pointed_to_offset = 0;
- /* Actual contents of the value. Target byte-order. NULL or not
- valid if lazy is nonzero. */
+ /* Actual contents of the value. Target byte-order.
+
+ May be nullptr if the value is lazy or is entirely optimized out.
+ Guaranteed to be non-nullptr otherwise. */
gdb::unique_xmalloc_ptr<gdb_byte> contents;
/* Unavailable ranges in CONTENTS. We mark unavailable ranges,
struct gdbarch *
get_value_arch (const struct value *value)
{
- return get_type_arch (value_type (value));
+ return value_type (value)->arch ();
}
int
R
|-...-|
- |--| |---| |------| ... |--|
- 0 1 2 N
+ |--| |---| |------| ... |--|
+ 0 1 2 N
I=0
R
|------------------------|
- |--| |---| |------| ... |--|
- 0 1 2 N
+ |--| |---| |------| ... |--|
+ 0 1 2 N
I=0
PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
to:
PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
- / TARGET_CHAR_BIT) */
+ / TARGET_CHAR_BIT) */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
const gdb_byte *ptr2, size_t offset2_bits,
struct cmd_list_element *c, const char *value)
{
if (max_value_size == -1)
- fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
+ gdb_printf (file, _("Maximum value size is unlimited.\n"));
else
- fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
- max_value_size);
+ gdb_printf (file, _("Maximum value size is %d bytes.\n"),
+ max_value_size);
}
/* Called before we attempt to allocate or reallocate a buffer for the
static void
check_type_length_before_alloc (const struct type *type)
{
- unsigned int length = TYPE_LENGTH (type);
+ ULONGEST length = TYPE_LENGTH (type);
if (max_value_size > -1 && length > max_value_size)
{
- if (TYPE_NAME (type) != NULL)
- error (_("value of type `%s' requires %u bytes, which is more "
- "than max-value-size"), TYPE_NAME (type), length);
+ if (type->name () != NULL)
+ error (_("value of type `%s' requires %s bytes, which is more "
+ "than max-value-size"), type->name (), pulongest (length));
else
- error (_("value requires %u bytes, which is more than "
- "max-value-size"), length);
+ error (_("value requires %s bytes, which is more than "
+ "max-value-size"), pulongest (length));
}
}
struct value *
allocate_repeat_value (struct type *type, int count)
{
- int low_bound = current_language->string_lower_bound; /* ??? */
+ /* Despite the fact that we are really creating an array of TYPE here, we
+ use the string lower bound as the array lower bound. This seems to
+ work fine for now. */
+ int low_bound = current_language->string_lower_bound ();
/* FIXME-type-allocation: need a way to free this type when we are
done with it. */
struct type *array_type
struct value *
allocate_computed_value (struct type *type,
- const struct lval_funcs *funcs,
- void *closure)
+ const struct lval_funcs *funcs,
+ void *closure)
{
struct value *v = allocate_value_lazy (type);
value->parent = value_ref_ptr::new_reference (parent);
}
-gdb_byte *
+gdb::array_view<gdb_byte>
value_contents_raw (struct value *value)
{
struct gdbarch *arch = get_value_arch (value);
int unit_size = gdbarch_addressable_memory_unit_size (arch);
allocate_value_contents (value);
- return value->contents.get () + value->embedded_offset * unit_size;
+
+ ULONGEST length = TYPE_LENGTH (value_type (value));
+ return gdb::make_array_view
+ (value->contents.get () + value->embedded_offset * unit_size, length);
}
-gdb_byte *
+gdb::array_view<gdb_byte>
value_contents_all_raw (struct value *value)
{
allocate_value_contents (value);
- return value->contents.get ();
+
+ ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
+ return gdb::make_array_view (value->contents.get (), length);
}
struct type *
{
/* If result's target type is TYPE_CODE_STRUCT, proceed to
fetch its rtti type. */
- if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
- && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
- == TYPE_CODE_STRUCT
+ if (result->is_pointer_or_reference ()
+ && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
+ == TYPE_CODE_STRUCT)
&& !value_optimized_out (value))
- {
- struct type *real_type;
-
- real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
- if (real_type)
- {
- if (real_type_found)
- *real_type_found = 1;
- result = real_type;
- }
- }
+ {
+ struct type *real_type;
+
+ real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
+ if (real_type)
+ {
+ if (real_type_found)
+ *real_type_found = 1;
+ result = real_type;
+ }
+ }
else if (resolve_simple_types)
- {
- if (real_type_found)
- *real_type_found = 1;
- result = value_enclosing_type (value);
- }
+ {
+ if (real_type_found)
+ *real_type_found = 1;
+ result = value_enclosing_type (value);
+ }
}
return result;
/* Report that a value has been optimized out.  Uses
   OPTIMIZED_OUT_ERROR so that callers can distinguish this condition
   from a generic error.  */

void
error_value_optimized_out (void)
{
  throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
}
static void
if (!value->optimized_out.empty ())
{
if (value->lval == lval_register)
- error (_("register has not been saved in frame"));
+ throw_error (OPTIMIZED_OUT_ERROR,
+ _("register has not been saved in frame"));
else
error_value_optimized_out ();
}
throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
-const gdb_byte *
+gdb::array_view<const gdb_byte>
value_contents_for_printing (struct value *value)
{
if (value->lazy)
value_fetch_lazy (value);
- return value->contents.get ();
+
+ ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
+ return gdb::make_array_view (value->contents.get (), length);
}
-const gdb_byte *
+gdb::array_view<const gdb_byte>
value_contents_for_printing_const (const struct value *value)
{
gdb_assert (!value->lazy);
- return value->contents.get ();
+
+ ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
+ return gdb::make_array_view (value->contents.get (), length);
}
-const gdb_byte *
+gdb::array_view<const gdb_byte>
value_contents_all (struct value *value)
{
- const gdb_byte *result = value_contents_for_printing (value);
+ gdb::array_view<const gdb_byte> result = value_contents_for_printing (value);
require_not_optimized_out (value);
require_available (value);
return result;
It is assumed the contents of DST in the [DST_OFFSET,
DST_OFFSET+LENGTH) range are wholly available. */
-void
+static void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
struct value *src, LONGEST src_offset, LONGEST length)
{
TARGET_CHAR_BIT * length));
/* Copy the data. */
- memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
- value_contents_all_raw (src) + src_offset * unit_size,
- length * unit_size);
+ gdb::array_view<gdb_byte> dst_contents
+ = value_contents_all_raw (dst).slice (dst_offset * unit_size,
+ length * unit_size);
+ gdb::array_view<const gdb_byte> src_contents
+ = value_contents_all_raw (src).slice (src_offset * unit_size,
+ length * unit_size);
+ copy (src_contents, dst_contents);
/* Copy the meta-data, adjusted. */
src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
value->stack = val;
}
-const gdb_byte *
+gdb::array_view<const gdb_byte>
value_contents (struct value *value)
{
- const gdb_byte *result = value_contents_writeable (value);
+ gdb::array_view<const gdb_byte> result = value_contents_writeable (value);
require_not_optimized_out (value);
require_available (value);
return result;
}
-gdb_byte *
+gdb::array_view<gdb_byte>
value_contents_writeable (struct value *value)
{
if (value->lazy)
int
value_optimized_out (struct value *value)
{
- /* We can only know if a value is optimized out once we have tried to
- fetch it. */
- if (value->optimized_out.empty () && value->lazy)
+ if (value->lazy)
{
+ /* See if we can compute the result without fetching the
+ value. */
+ if (VALUE_LVAL (value) == lval_memory)
+ return false;
+ else if (VALUE_LVAL (value) == lval_computed)
+ {
+ const struct lval_funcs *funcs = value->location.computed.funcs;
+
+ if (funcs->is_optimized_out != nullptr)
+ return funcs->is_optimized_out (value);
+ }
+
+ /* Fall back to fetching. */
try
{
value_fetch_lazy (value);
}
catch (const gdb_exception_error &ex)
{
- /* Fall back to checking value->optimized_out. */
+ switch (ex.error)
+ {
+ case MEMORY_ERROR:
+ case OPTIMIZED_OUT_ERROR:
+ case NOT_AVAILABLE_ERROR:
+ /* These can normally happen when we try to access an
+ optimized out or unavailable register, either in a
+ physical register or spilled to memory. */
+ break;
+ default:
+ throw;
+ }
}
}
but it's a different block of storage. */
struct value *
-value_copy (struct value *arg)
+value_copy (const value *arg)
{
struct type *encl_type = value_enclosing_type (arg);
struct value *val;
else
val = allocate_value (encl_type);
val->type = arg->type;
- VALUE_LVAL (val) = VALUE_LVAL (arg);
+ VALUE_LVAL (val) = arg->lval;
val->location = arg->location;
val->offset = arg->offset;
val->bitpos = arg->bitpos;
val->embedded_offset = value_embedded_offset (arg);
val->pointed_to_offset = arg->pointed_to_offset;
val->modifiable = arg->modifiable;
- if (!value_lazy (val))
- {
- memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
- TYPE_LENGTH (value_enclosing_type (arg)));
-
- }
+ val->stack = arg->stack;
+ val->is_zero = arg->is_zero;
+ val->initialized = arg->initialized;
val->unavailable = arg->unavailable;
val->optimized_out = arg->optimized_out;
+
+ if (!value_lazy (val) && !value_entirely_optimized_out (val))
+ {
+ gdb_assert (arg->contents != nullptr);
+ ULONGEST length = TYPE_LENGTH (value_enclosing_type (arg));
+ const auto &arg_view
+ = gdb::make_array_view (arg->contents.get (), length);
+ copy (arg_view, value_contents_all_raw (val));
+ }
+
val->parent = arg->parent;
if (VALUE_LVAL (val) == lval_computed)
{
const struct lval_funcs *funcs = val->location.computed.funcs;
if (funcs->copy_closure)
- val->location.computed.closure = funcs->copy_closure (val);
+ val->location.computed.closure = funcs->copy_closure (val);
}
return val;
}
struct type *enc_type = value_enclosing_type (arg);
struct value *val = allocate_value (enc_type);
- memcpy (value_contents_all_raw (val), value_contents_all (arg),
- TYPE_LENGTH (enc_type));
+ copy (value_contents_all (arg), value_contents_all_raw (val));
val->type = arg->type;
set_value_embedded_offset (val, value_embedded_offset (arg));
set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
{
gdb_assert (VALUE_LVAL (v) == not_lval);
- write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
+ write_memory (addr, value_contents_raw (v).data (), TYPE_LENGTH (value_type (v)));
v->lval = lval_memory;
v->location.address = addr;
}
const struct lval_funcs *funcs = whole->location.computed.funcs;
if (funcs->copy_closure)
- component->location.computed.closure = funcs->copy_closure (whole);
+ component->location.computed.closure = funcs->copy_closure (whole);
}
- /* If type has a dynamic resolved location property
- update it's value address. */
+ /* If the WHOLE value has a dynamically resolved location property then
+ update the address of the COMPONENT. */
type = value_type (whole);
if (NULL != TYPE_DATA_LOCATION (type)
&& TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
+
+ /* Similarly, if the COMPONENT value has a dynamically resolved location
+ property then update its address. */
+ type = value_type (component);
+ if (NULL != TYPE_DATA_LOCATION (type)
+ && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
+ {
+ /* If the COMPONENT has a dynamic location, and is an
+ lval_internalvar_component, then we change it to a lval_memory.
+
+ Usually a component of an internalvar is created non-lazy, and has
+ its content immediately copied from the parent internalvar.
+ However, for components with a dynamic location, the content of
+ the component is not contained within the parent, but is instead
+ accessed indirectly. Further, the component will be created as a
+ lazy value.
+
+ By changing the type of the component to lval_memory we ensure
+ that value_fetch_lazy can successfully load the component.
+
+ This solution isn't ideal, but a real fix would require values to
+ carry around both the parent value contents, and the contents of
+ any dynamic fields within the parent. This is a substantial
+ change to how values work in GDB. */
+ if (VALUE_LVAL (component) == lval_internalvar_component)
+ {
+ gdb_assert (value_lazy (component));
+ VALUE_LVAL (component) = lval_memory;
+ }
+ else
+ gdb_assert (VALUE_LVAL (component) == lval_memory);
+ set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
+ }
}
/* Access to the value history. */
return value_copy (value_history[absnum].get ());
}
+/* See value.h. */
+
+ULONGEST
+value_history_count ()
+{
+ return value_history.size ();
+}
+
static void
show_values (const char *num_exp, int from_tty)
{
if (num_exp)
{
/* "show values +" should print from the stored position.
- "show values <exp>" should print around value number <exp>. */
+ "show values <exp>" should print around value number <exp>. */
if (num_exp[0] != '+' || num_exp[1] != '\0')
num = parse_and_eval_long (num_exp) - 5;
}
struct value_print_options opts;
val = access_value_history (i);
- printf_filtered (("$%d = "), i);
+ gdb_printf (("$%d = "), i);
get_user_print_options (&opts);
value_print (val, gdb_stdout, &opts);
- printf_filtered (("\n"));
+ gdb_printf (("\n"));
}
/* The next "show values +" should start after what we just printed. */
static void
init_if_undefined_command (const char* args, int from_tty)
{
- struct internalvar* intvar;
+ struct internalvar *intvar = nullptr;
/* Parse the expression - this is taken from set_command(). */
expression_up expr = parse_expression (args);
/* Validate the expression.
Was the expression an assignment?
Or even an expression at all? */
- if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
+ if (expr->first_opcode () != BINOP_ASSIGN)
error (_("Init-if-undefined requires an assignment expression."));
- /* Extract the variable from the parsed expression.
- In the case of an assign the lvalue will be in elts[1] and elts[2]. */
- if (expr->elts[1].opcode != OP_INTERNALVAR)
+ /* Extract the variable from the parsed expression. */
+ expr::assign_operation *assign
+ = dynamic_cast<expr::assign_operation *> (expr->op.get ());
+ if (assign != nullptr)
+ {
+ expr::operation *lhs = assign->get_lhs ();
+ expr::internalvar_operation *ivarop
+ = dynamic_cast<expr::internalvar_operation *> (lhs);
+ if (ivarop != nullptr)
+ intvar = ivarop->get_internalvar ();
+ }
+
+ if (intvar == nullptr)
error (_("The first parameter to init-if-undefined "
"should be a GDB variable."));
- intvar = expr->elts[2].internalvar;
/* Only evaluate the expression if the lvalue is void.
This may still fail if the expression is invalid. */
{
struct type *type = check_typedef (value_type (var->u.value));
- if (TYPE_CODE (type) == TYPE_CODE_INT)
+ if (type->code () == TYPE_CODE_INT)
{
*result = value_as_long (var->u.value);
return 1;
switch (var->kind)
{
case INTERNALVAR_VALUE:
- addr = value_contents_writeable (var->u.value);
+ addr = value_contents_writeable (var->u.value).data ();
arch = get_value_arch (var->u.value);
unit_size = gdbarch_addressable_memory_unit_size (arch);
modify_field (value_type (var->u.value), addr + offset,
value_as_long (newval), bitpos, bitsize);
else
- memcpy (addr + offset * unit_size, value_contents (newval),
+ memcpy (addr + offset * unit_size, value_contents (newval).data (),
TYPE_LENGTH (value_type (newval)));
break;
error (_("Cannot overwrite convenience function %s"), var->name);
/* Prepare new contents. */
- switch (TYPE_CODE (check_typedef (value_type (val))))
+ switch (check_typedef (value_type (val))->code ())
{
case TYPE_CODE_VOID:
new_kind = INTERNALVAR_VOID;
new_data.value = release_value (copy).release ();
/* Internal variables which are created from values with a dynamic
- location don't need the location property of the origin anymore.
- The resolved dynamic location is used prior then any other address
- when accessing the value.
- If we keep it, we would still refer to the origin value.
- Remove the location property in case it exist. */
- remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
+ location don't need the location property of the origin anymore.
+ The resolved dynamic location is used prior then any other address
+ when accessing the value.
+ If we keep it, we would still refer to the origin value.
+ Remove the location property in case it exist. */
+ value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
break;
}
xfree (var->u.string);
break;
- case INTERNALVAR_MAKE_VALUE:
- if (var->u.make_value.functions->destroy != NULL)
- var->u.make_value.functions->destroy (var->u.make_value.data);
- break;
-
default:
break;
}
var->kind = INTERNALVAR_VOID;
}
-char *
+const char *
internalvar_name (const struct internalvar *var)
{
return var->name;
return ifn;
}
-char *
+const char *
value_internal_function_name (struct value *val)
{
struct internal_function *ifn;
preserve_one_value (struct value *value, struct objfile *objfile,
htab_t copied_types)
{
- if (TYPE_OBJFILE (value->type) == objfile)
- value->type = copy_type_recursive (objfile, value->type, copied_types);
+ if (value->type->objfile_owner () == objfile)
+ value->type = copy_type_recursive (value->type, copied_types);
- if (TYPE_OBJFILE (value->enclosing_type) == objfile)
- value->enclosing_type = copy_type_recursive (objfile,
- value->enclosing_type,
+ if (value->enclosing_type->objfile_owner () == objfile)
+ value->enclosing_type = copy_type_recursive (value->enclosing_type,
copied_types);
}
switch (var->kind)
{
case INTERNALVAR_INTEGER:
- if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
+ if (var->u.integer.type
+ && var->u.integer.type->objfile_owner () == objfile)
var->u.integer.type
- = copy_type_recursive (objfile, var->u.integer.type, copied_types);
+ = copy_type_recursive (var->u.integer.type, copied_types);
break;
case INTERNALVAR_VALUE:
}
}
+/* Make sure that all types and values referenced by VAROBJ are updated before
+ OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
+ duplicates. */
+
+static void
+preserve_one_varobj (struct varobj *varobj, struct objfile *objfile,
+ htab_t copied_types)
+{
+ if (varobj->type->is_objfile_owned ()
+ && varobj->type->objfile_owner () == objfile)
+ {
+ varobj->type
+ = copy_type_recursive (varobj->type, copied_types);
+ }
+
+ if (varobj->value != nullptr)
+ preserve_one_value (varobj->value.get (), objfile, copied_types);
+}
+
/* Update the internal variables and value history when OBJFILE is
discarded; we must copy the types out of the objfile. New global types
will be created for every convenience variable which currently points to
void
preserve_values (struct objfile *objfile)
{
- htab_t copied_types;
struct internalvar *var;
/* Create the hash table. We allocate on the objfile's obstack, since
it is soon to be deleted. */
- copied_types = create_copied_types_hash (objfile);
+ htab_up copied_types = create_copied_types_hash ();
for (const value_ref_ptr &item : value_history)
- preserve_one_value (item.get (), objfile, copied_types);
+ preserve_one_value (item.get (), objfile, copied_types.get ());
for (var = internalvars; var; var = var->next)
- preserve_one_internalvar (var, objfile, copied_types);
+ preserve_one_internalvar (var, objfile, copied_types.get ());
- preserve_ext_lang_values (objfile, copied_types);
+ /* For the remaining varobj, check that none has type owned by OBJFILE. */
+ all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj)
+ {
+ preserve_one_varobj (varobj, objfile,
+ copied_types.get ());
+ });
- htab_delete (copied_types);
+ preserve_ext_lang_values (objfile, copied_types.get ());
}
static void
{
varseen = 1;
}
- printf_filtered (("$%s = "), var->name);
+ gdb_printf (("$%s = "), var->name);
try
{
_("<error: %s>"), ex.what ());
}
- printf_filtered (("\n"));
+ gdb_printf (("\n"));
}
if (!varseen)
{
The user can't create them except via Python, and if Python support
is installed this message will never be printed ($_streq will
exist). */
- printf_unfiltered (_("No debugger convenience variables now defined.\n"
- "Convenience variables have "
- "names starting with \"$\";\n"
- "use \"set\" as in \"set "
- "$foo = 5\" to define them.\n"));
+ gdb_printf (_("No debugger convenience variables now defined.\n"
+ "Convenience variables have "
+ "names starting with \"$\";\n"
+ "use \"set\" as in \"set "
+ "$foo = 5\" to define them.\n"));
}
}
\f
struct type *
result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
{
- gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
+ gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
&& method->lval == lval_xcallable && !argv.empty ());
return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
struct value *
call_xmethod (struct value *method, gdb::array_view<value *> argv)
{
- gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
+ gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
&& method->lval == lval_xcallable && !argv.empty ());
return method->location.xm_worker->invoke (argv[0], argv.slice (1));
in disassemble_command). It also dereferences references, which
I suspect is the most logical thing to do. */
val = coerce_array (val);
- return unpack_long (value_type (val), value_contents (val));
+ return unpack_long (value_type (val), value_contents (val).data ());
}
/* Extract a value as a C pointer. Does not deallocate the value.
CORE_ADDR
value_as_address (struct value *val)
{
- struct gdbarch *gdbarch = get_type_arch (value_type (val));
+ struct gdbarch *gdbarch = value_type (val)->arch ();
/* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
whether we want this to be true eventually. */
The following shortcut avoids this whole mess. If VAL is a
function, just return its address directly. */
- if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
- || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
+ if (value_type (val)->code () == TYPE_CODE_FUNC
+ || value_type (val)->code () == TYPE_CODE_METHOD)
return value_address (val);
val = coerce_array (val);
converted to pointers; usually, the ABI doesn't either, but
ABI-specific code is a more reasonable place to handle it. */
- if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
- && !TYPE_IS_REFERENCE (value_type (val))
+ if (!value_type (val)->is_pointer_or_reference ()
&& gdbarch_integer_to_address_p (gdbarch))
return gdbarch_integer_to_address (gdbarch, value_type (val),
- value_contents (val));
+ value_contents (val).data ());
- return unpack_long (value_type (val), value_contents (val));
+ return unpack_long (value_type (val), value_contents (val).data ());
#endif
}
\f
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
+ if (is_fixed_point_type (type))
+ type = type->fixed_point_type_base_type ();
+
enum bfd_endian byte_order = type_byte_order (type);
- enum type_code code = TYPE_CODE (type);
+ enum type_code code = type->code ();
int len = TYPE_LENGTH (type);
- int nosign = TYPE_UNSIGNED (type);
+ int nosign = type->is_unsigned ();
switch (code)
{
case TYPE_CODE_MEMBERPTR:
{
LONGEST result;
- if (nosign)
- result = extract_unsigned_integer (valaddr, len, byte_order);
+
+ if (type->bit_size_differs_p ())
+ {
+ unsigned bit_off = type->bit_offset ();
+ unsigned bit_size = type->bit_size ();
+ if (bit_size == 0)
+ {
+ /* unpack_bits_as_long doesn't handle this case the
+ way we'd like, so handle it here. */
+ result = 0;
+ }
+ else
+ result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
+ }
else
- result = extract_signed_integer (valaddr, len, byte_order);
+ {
+ if (nosign)
+ result = extract_unsigned_integer (valaddr, len, byte_order);
+ else
+ result = extract_signed_integer (valaddr, len, byte_order);
+ }
if (code == TYPE_CODE_RANGE)
- result += TYPE_RANGE_DATA (type)->bias;
+ result += type->bounds ()->bias;
return result;
}
case TYPE_CODE_DECFLOAT:
return target_float_to_longest (valaddr, type);
+ case TYPE_CODE_FIXED_POINT:
+ {
+ gdb_mpq vq;
+ vq.read_fixed_point (gdb::make_array_view (valaddr, len),
+ byte_order, nosign,
+ type->fixed_point_scaling_factor ());
+
+ gdb_mpz vz;
+ mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
+ return vz.as_integer<LONGEST> ();
+ }
+
case TYPE_CODE_PTR:
case TYPE_CODE_REF:
case TYPE_CODE_RVALUE_REF:
/* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
- whether we want this to be true eventually. */
+ whether we want this to be true eventually. */
return extract_typed_address (valaddr, type);
default:
if (is_floating_type (type))
{
- if (!target_float_is_valid (value_contents (val), type))
+ if (!target_float_is_valid (value_contents (val).data (), type))
error (_("Invalid floating value found in program."));
return true;
}
{
struct value *retval;
- switch (TYPE_FIELD_LOC_KIND (type, fieldno))
+ switch (type->field (fieldno).loc_kind ())
{
case FIELD_LOC_KIND_PHYSADDR:
- retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
- TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
+ retval = value_at_lazy (type->field (fieldno).type (),
+ type->field (fieldno).loc_physaddr ());
break;
case FIELD_LOC_KIND_PHYSNAME:
{
- const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
- /* TYPE_FIELD_NAME (type, fieldno); */
+ const char *phys_name = type->field (fieldno).loc_physname ();
+ /* type->field (fieldno).name (); */
struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
if (sym.symbol == NULL)
reported as non-debuggable symbols. */
struct bound_minimal_symbol msym
= lookup_minimal_symbol (phys_name, NULL, NULL);
- struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
+ struct type *field_type = type->field (fieldno).type ();
if (!msym.minsym)
retval = allocate_optimized_out_value (field_type);
else
- retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
+ retval = value_at_lazy (field_type, msym.value_address ());
}
else
retval = value_of_variable (sym.symbol, sym.block);
int unit_size = gdbarch_addressable_memory_unit_size (arch);
arg_type = check_typedef (arg_type);
- type = TYPE_FIELD_TYPE (arg_type, fieldno);
+ type = arg_type->field (fieldno).type ();
/* Call check_typedef on our type to make sure that, if TYPE
is a TYPE_CODE_TYPEDEF, its length is set to the length
bit. Assume that the address, offset, and embedded offset
are sufficiently aligned. */
- LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
+ LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
v = allocate_value_lazy (type);
for references to ordinary fields of unavailable values. */
if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
boffset = baseclass_offset (arg_type, fieldno,
- value_contents (arg1),
+ value_contents (arg1).data (),
value_embedded_offset (arg1),
value_address (arg1),
arg1);
else
- boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
+ boffset = arg_type->field (fieldno).loc_bitpos () / 8;
if (value_lazy (arg1))
v = allocate_value_lazy (value_enclosing_type (arg1));
/* We expect an already resolved data location. */
gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
/* For dynamic data types defer memory allocation
- until we actual access the value. */
+ until we actual access the value. */
v = allocate_value_lazy (type);
}
else
{
/* Plain old data member */
- offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
- / (HOST_CHAR_BIT * unit_size));
+ offset += (arg_type->field (fieldno).loc_bitpos ()
+ / (HOST_CHAR_BIT * unit_size));
/* Lazy register values with offsets are not supported. */
if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
struct bound_minimal_symbol msym;
sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
- if (sym != NULL)
+ if (sym == nullptr)
{
- memset (&msym, 0, sizeof (msym));
- }
- else
- {
- gdb_assert (sym == NULL);
msym = lookup_bound_minimal_symbol (physname);
if (msym.minsym == NULL)
return NULL;
VALUE_LVAL (v) = lval_memory;
if (sym)
{
- set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
+ set_value_address (v, sym->value_block ()->entry_pc ());
}
else
{
set_value_address (v,
gdbarch_convert_from_func_ptr_addr
- (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ()));
+ (gdbarch, msym.value_address (),
+ current_inferior ()->top_target ()));
}
if (arg1p)
value_addr (*arg1p)));
/* Move the `this' pointer according to the offset.
- VALUE_OFFSET (*arg1p) += offset; */
+ VALUE_OFFSET (*arg1p) += offset; */
}
return v;
{
valmask = (((ULONGEST) 1) << bitsize) - 1;
val &= valmask;
- if (!TYPE_UNSIGNED (field_type))
+ if (!field_type->is_unsigned ())
{
if (val & (valmask ^ (valmask >> 1)))
{
LONGEST embedded_offset, int fieldno,
const struct value *val, LONGEST *result)
{
- int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
+ int bitpos = type->field (fieldno).loc_bitpos ();
int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
- struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
+ struct type *field_type = type->field (fieldno).type ();
int bit_offset;
gdb_assert (val != NULL);
LONGEST
unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
{
- int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
+ int bitpos = type->field (fieldno).loc_bitpos ();
int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
- struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
+ struct type *field_type = type->field (fieldno).type ();
return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
}
num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
bitpos, bitsize);
- store_signed_integer (value_contents_raw (dest_val),
+ store_signed_integer (value_contents_raw (dest_val).data (),
TYPE_LENGTH (field_type), byte_order, num);
}
const gdb_byte *valaddr,
LONGEST embedded_offset, const struct value *val)
{
- int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
+ int bitpos = type->field (fieldno).loc_bitpos ();
int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
- struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
+ struct value *res_val = allocate_value (type->field (fieldno).type ());
unpack_value_bitfield (res_val, bitpos, bitsize,
valaddr, embedded_offset, val);
if (0 != (fieldval & ~mask))
{
/* FIXME: would like to include fieldval in the message, but
- we don't have a sprintf_longest. */
+ we don't have a sprintf_longest. */
warning (_("Value does not fit in %s bits."), plongest (bitsize));
/* Truncate it, otherwise adjoining fields may be corrupted. */
type = check_typedef (type);
len = TYPE_LENGTH (type);
- switch (TYPE_CODE (type))
+ switch (type->code ())
{
case TYPE_CODE_RANGE:
- num -= TYPE_RANGE_DATA (type)->bias;
+ num -= type->bounds ()->bias;
/* Fall through. */
case TYPE_CODE_INT:
case TYPE_CODE_CHAR:
case TYPE_CODE_FLAGS:
case TYPE_CODE_BOOL:
case TYPE_CODE_MEMBERPTR:
+ if (type->bit_size_differs_p ())
+ {
+ unsigned bit_off = type->bit_offset ();
+ unsigned bit_size = type->bit_size ();
+ num &= ((ULONGEST) 1 << bit_size) - 1;
+ num <<= bit_off;
+ }
store_signed_integer (buf, len, byte_order, num);
break;
default:
error (_("Unexpected type (%d) encountered for integer constant."),
- TYPE_CODE (type));
+ type->code ());
}
}
len = TYPE_LENGTH (type);
byte_order = type_byte_order (type);
- switch (TYPE_CODE (type))
+ switch (type->code ())
{
case TYPE_CODE_INT:
case TYPE_CODE_CHAR:
case TYPE_CODE_BOOL:
case TYPE_CODE_RANGE:
case TYPE_CODE_MEMBERPTR:
+ if (type->bit_size_differs_p ())
+ {
+ unsigned bit_off = type->bit_offset ();
+ unsigned bit_size = type->bit_size ();
+ num &= ((ULONGEST) 1 << bit_size) - 1;
+ num <<= bit_off;
+ }
store_unsigned_integer (buf, len, byte_order, num);
break;
default:
error (_("Unexpected type (%d) encountered "
"for unsigned integer constant."),
- TYPE_CODE (type));
+ type->code ());
}
}
+/* Create a value of type TYPE that is zero, and return it.
+   The value is allocated lazy, so no contents buffer is created;
+   it is instead flagged via the is_zero bit, which value_fetch_lazy
+   checks before attempting any fetch.  */
+
+struct value *
+value_zero (struct type *type, enum lval_type lv)
+{
+  struct value *val = allocate_value_lazy (type);
+
+  /* Degrade lval_computed to not_lval -- presumably because a zero
+     value carries no computed-lvalue callbacks; other lval kinds are
+     kept as-is.  NOTE(review): confirm against lval_computed users.  */
+  VALUE_LVAL (val) = (lv == lval_computed ? not_lval : lv);
+  val->is_zero = true;
+  return val;
+}
+
/* Convert C numbers into newly allocated values. */
struct value *
{
struct value *val = allocate_value (type);
- pack_long (value_contents_raw (val), type, num);
+ pack_long (value_contents_raw (val).data (), type, num);
return val;
}
{
struct value *val = allocate_value (type);
- pack_unsigned_long (value_contents_raw (val), type, num);
+ pack_unsigned_long (value_contents_raw (val).data (), type, num);
return val;
}
{
struct value *val = allocate_value (type);
- store_typed_address (value_contents_raw (val),
+ store_typed_address (value_contents_raw (val).data (),
check_typedef (type), addr);
return val;
}
value_from_host_double (struct type *type, double d)
{
struct value *value = allocate_value (type);
- gdb_assert (TYPE_CODE (type) == TYPE_CODE_FLT);
- target_float_from_host_double (value_contents_raw (value),
+ gdb_assert (type->code () == TYPE_CODE_FLT);
+ target_float_from_host_double (value_contents_raw (value).data (),
value_type (value), d);
return value;
}
const gdb_byte *valaddr,
CORE_ADDR address)
{
- struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
+ gdb::array_view<const gdb_byte> view;
+ if (valaddr != nullptr)
+ view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
+ struct type *resolved_type = resolve_dynamic_type (type, view, address);
struct type *resolved_type_no_typedef = check_typedef (resolved_type);
struct value *v;
struct value *result;
result = allocate_value (type);
- memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
+ memcpy (value_contents_raw (result).data (), contents, TYPE_LENGTH (type));
return result;
}
struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
const struct type *original_type,
- const struct value *original_value)
+ struct value *original_value,
+ CORE_ADDR original_value_address)
{
+ gdb_assert (original_type->is_pointer_or_reference ());
+
+ struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
+ gdb::array_view<const gdb_byte> view;
+ struct type *resolved_original_target_type
+ = resolve_dynamic_type (original_target_type, view,
+ original_value_address);
+
/* Re-adjust type. */
- deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
+ deprecated_set_value_type (value, resolved_original_target_type);
/* Add embedding info. */
set_value_enclosing_type (value, enc_type);
enc_type = check_typedef (value_enclosing_type (arg));
enc_type = TYPE_TARGET_TYPE (enc_type);
- retval = value_at_lazy (enc_type,
- unpack_pointer (value_type (arg),
- value_contents (arg)));
+ CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg).data ());
+ retval = value_at_lazy (enc_type, addr);
enc_type = value_type (retval);
- return readjust_indirect_value_type (retval, enc_type,
- value_type_arg_tmp, arg);
+ return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
+ arg, addr);
}
struct value *
arg = coerce_ref (arg);
type = check_typedef (value_type (arg));
- switch (TYPE_CODE (type))
+ switch (type->code ())
{
case TYPE_CODE_ARRAY:
- if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
+ if (!type->is_vector () && current_language->c_style_arrays_p ())
arg = value_coerce_array (arg);
break;
case TYPE_CODE_FUNC:
struct_return_convention (struct gdbarch *gdbarch,
struct value *function, struct type *value_type)
{
- enum type_code code = TYPE_CODE (value_type);
+ enum type_code code = value_type->code ();
if (code == TYPE_CODE_ERROR)
error (_("Function return type unknown."));
using_struct_return (struct gdbarch *gdbarch,
struct value *function, struct type *value_type)
{
- if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
+ if (value_type->code () == TYPE_CODE_VOID)
/* A void return value is never in memory. See also corresponding
code in "print_return_value". */
return 0;
value_fetch_lazy (parent);
unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
- value_contents_for_printing (parent),
+ value_contents_for_printing (parent).data (),
value_offset (val), parent);
}
if (TYPE_LENGTH (type))
read_value_memory (val, 0, value_stack (val),
- addr, value_contents_all_raw (val),
+ addr, value_contents_all_raw (val).data (),
type_length_units (type));
}
{
struct gdbarch *gdbarch;
struct frame_info *frame;
- /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
- so that the frame level will be shown correctly. */
- frame = frame_find_by_id (VALUE_FRAME_ID (val));
+ frame = frame_find_by_id (VALUE_NEXT_FRAME_ID (val));
+ frame = get_prev_frame_always (frame);
regnum = VALUE_REGNUM (val);
gdbarch = get_frame_arch (frame);
- fprintf_unfiltered (gdb_stdlog,
- "{ value_fetch_lazy "
- "(frame=%d,regnum=%d(%s),...) ",
- frame_relative_level (frame), regnum,
- user_reg_map_regnum_to_name (gdbarch, regnum));
+ string_file debug_file;
+ gdb_printf (&debug_file,
+ "(frame=%d, regnum=%d(%s), ...) ",
+ frame_relative_level (frame), regnum,
+ user_reg_map_regnum_to_name (gdbarch, regnum));
- fprintf_unfiltered (gdb_stdlog, "->");
+ gdb_printf (&debug_file, "->");
if (value_optimized_out (new_val))
{
- fprintf_unfiltered (gdb_stdlog, " ");
- val_print_optimized_out (new_val, gdb_stdlog);
+ gdb_printf (&debug_file, " ");
+ val_print_optimized_out (new_val, &debug_file);
}
else
{
int i;
- const gdb_byte *buf = value_contents (new_val);
+ gdb::array_view<const gdb_byte> buf = value_contents (new_val);
if (VALUE_LVAL (new_val) == lval_register)
- fprintf_unfiltered (gdb_stdlog, " register=%d",
- VALUE_REGNUM (new_val));
+ gdb_printf (&debug_file, " register=%d",
+ VALUE_REGNUM (new_val));
else if (VALUE_LVAL (new_val) == lval_memory)
- fprintf_unfiltered (gdb_stdlog, " address=%s",
- paddress (gdbarch,
- value_address (new_val)));
+ gdb_printf (&debug_file, " address=%s",
+ paddress (gdbarch,
+ value_address (new_val)));
else
- fprintf_unfiltered (gdb_stdlog, " computed");
+ gdb_printf (&debug_file, " computed");
- fprintf_unfiltered (gdb_stdlog, " bytes=");
- fprintf_unfiltered (gdb_stdlog, "[");
+ gdb_printf (&debug_file, " bytes=");
+ gdb_printf (&debug_file, "[");
for (i = 0; i < register_size (gdbarch, regnum); i++)
- fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
- fprintf_unfiltered (gdb_stdlog, "]");
+ gdb_printf (&debug_file, "%02x", buf[i]);
+ gdb_printf (&debug_file, "]");
}
- fprintf_unfiltered (gdb_stdlog, " }\n");
+ frame_debug_printf ("%s", debug_file.c_str ());
}
/* Dispose of the intermediate values. This prevents
value. */
gdb_assert (val->optimized_out.empty ());
gdb_assert (val->unavailable.empty ());
- if (value_bitsize (val))
+ if (val->is_zero)
+ {
+ /* Nothing. */
+ }
+ else if (value_bitsize (val))
value_fetch_lazy_bitfield (val);
else if (VALUE_LVAL (val) == lval_memory)
value_fetch_lazy_memory (val);
if (argc != 1)
error (_("You must provide one argument for $_isvoid."));
- ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
+ ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
}
value *cval = argv[0];
type *ctype = check_typedef (value_type (cval));
- if (TYPE_CODE (ctype) != TYPE_CODE_COMPLEX)
+ if (ctype->code () != TYPE_CODE_COMPLEX)
error (_("expected a complex number"));
return value_real_part (cval);
}
value *cval = argv[0];
type *ctype = check_typedef (value_type (cval));
- if (TYPE_CODE (ctype) != TYPE_CODE_COMPLEX)
+ if (ctype->code () != TYPE_CODE_COMPLEX)
error (_("expected a complex number"));
return value_imaginary_part (cval);
}
}
}
+/* Selftest: copying an entirely-optimized-out value must not crash
+   and must preserve the optimized-out state, even though such a
+   value's contents buffer may be nullptr (never allocated).  */
+
+static void
+test_value_copy ()
+{
+  type *type = builtin_type (current_inferior ()->gdbarch)->builtin_int;
+
+  /* Verify that we can copy an entirely optimized out value, that may not have
+     its contents allocated.  */
+  value_ref_ptr val = release_value (allocate_optimized_out_value (type));
+  value_ref_ptr copy = release_value (value_copy (val.get ()));
+
+  /* Both the source and the copy must report as fully optimized out.  */
+  SELF_CHECK (value_entirely_optimized_out (val.get ()));
+  SELF_CHECK (value_entirely_optimized_out (copy.get ()));
+}
+
} /* namespace selftests */
#endif /* GDB_SELF_TEST */
void
_initialize_values ()
{
- add_cmd ("convenience", no_class, show_convenience, _("\
+ cmd_list_element *show_convenience_cmd
+ = add_cmd ("convenience", no_class, show_convenience, _("\
Debugger convenience (\"$foo\") variables and functions.\n\
Convenience variables are created when you assign them values;\n\
thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
Convenience functions are defined via the Python API."
#endif
), &showlist);
- add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
+ add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
add_cmd ("values", no_set_class, show_values, _("\
Elements of value history around item number IDX (or last ten)."),
add_prefix_cmd ("function", no_class, function_command, _("\
Placeholder command for showing help on convenience functions."),
- &functionlist, "function ", 0, &cmdlist);
+ &functionlist, 0, &cmdlist);
add_internal_function ("_isvoid", _("\
Check whether an expression is void.\n\
set_max_value_size,
show_max_value_size,
&setlist, &showlist);
+ set_show_commands vsize_limit
+ = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
+ &max_value_size, _("\
+Set the maximum number of bytes allowed in a variable-size object."), _("\
+Show the maximum number of bytes allowed in a variable-size object."), _("\
+Attempts to access an object whose size is not a compile-time constant\n\
+and exceeds this limit will cause an error."),
+ NULL, NULL, &setlist, &showlist);
+ deprecate_cmd (vsize_limit.set, "set max-value-size");
+
#if GDB_SELF_TEST
selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
selftests::register_test ("insert_into_bit_range_vector",
selftests::test_insert_into_bit_range_vector);
+ selftests::register_test ("value_copy", selftests::test_value_copy);
#endif
}