1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
3 Copyright (C) 1986-2023 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21 #include "arch-utils.h"
33 #include "target-float.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
39 #include "tracepoint.h"
41 #include "user-regs.h"
46 #include "completer.h"
47 #include "gdbsupport/selftest.h"
48 #include "gdbsupport/array-view.h"
49 #include "cli/cli-style.h"
54 /* Definition of a user function. */
55 struct internal_function
57 /* The name of the function. It is a bit odd to have this in the
58 function itself -- the user might use a differently-named
59 convenience variable to hold the function. */
63 internal_function_fn handler
;
65 /* User data for the handler. */
69 /* Returns true if the ranges defined by [offset1, offset1+len1) and
70 [offset2, offset2+len2) overlap. */
73 ranges_overlap (LONGEST offset1
, ULONGEST len1
,
74 LONGEST offset2
, ULONGEST len2
)
78 l
= std::max (offset1
, offset2
);
79 h
= std::min (offset1
+ len1
, offset2
+ len2
);
83 /* Returns true if RANGES contains any range that overlaps [OFFSET,
87 ranges_contain (const std::vector
<range
> &ranges
, LONGEST offset
,
95 /* We keep ranges sorted by offset and coalesce overlapping and
96 contiguous ranges, so to check if a range list contains a given
97 range, we can do a binary search for the position the given range
98 would be inserted if we only considered the starting OFFSET of
99 ranges. We call that position I. Since we also have LENGTH to
100 care for (this is a range afterall), we need to check if the
101 _previous_ range overlaps the I range. E.g.,
105 |---| |---| |------| ... |--|
110 In the case above, the binary search would return `I=1', meaning,
111 this OFFSET should be inserted at position 1, and the current
112 position 1 should be pushed further (and before 2). But, `0'
115 Then we need to check if the I range overlaps the I range itself.
120 |---| |---| |-------| ... |--|
127 auto i
= std::lower_bound (ranges
.begin (), ranges
.end (), what
);
129 if (i
> ranges
.begin ())
131 const struct range
&bef
= *(i
- 1);
133 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
137 if (i
< ranges
.end ())
139 const struct range
&r
= *i
;
141 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
/* Command list holding the "function" subcommands.  NOTE(review):
   presumably populated by this file's _initialize routine -- confirm
   against the rest of value.c.  */
static struct cmd_list_element *functionlist;
152 if (VALUE_LVAL (this) == lval_computed
)
154 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
156 if (funcs
->free_closure
)
157 funcs
->free_closure (this);
159 else if (VALUE_LVAL (this) == lval_xcallable
)
160 delete m_location
.xm_worker
;
168 return type ()->arch ();
172 value_bits_available (const struct value
*value
,
173 LONGEST offset
, ULONGEST length
)
175 gdb_assert (!value
->m_lazy
);
177 /* Don't pretend we have anything available there in the history beyond
178 the boundaries of the value recorded. It's not like inferior memory
179 where there is actual stuff underneath. */
180 ULONGEST val_len
= TARGET_CHAR_BIT
* value_enclosing_type (value
)->length ();
181 return !((value
->m_in_history
182 && (offset
< 0 || offset
+ length
> val_len
))
183 || ranges_contain (value
->m_unavailable
, offset
, length
));
187 value_bytes_available (const struct value
*value
,
188 LONGEST offset
, ULONGEST length
)
190 ULONGEST sign
= (1ULL << (sizeof (ULONGEST
) * 8 - 1)) / TARGET_CHAR_BIT
;
191 ULONGEST mask
= (sign
<< 1) - 1;
193 if (offset
!= ((offset
& mask
) ^ sign
) - sign
194 || length
!= ((length
& mask
) ^ sign
) - sign
195 || (length
> 0 && (~offset
& (offset
+ length
- 1) & sign
) != 0))
196 error (_("Integer overflow in data location calculation"));
198 return value_bits_available (value
,
199 offset
* TARGET_CHAR_BIT
,
200 length
* TARGET_CHAR_BIT
);
204 value_bits_any_optimized_out (const struct value
*value
, int bit_offset
, int bit_length
)
206 gdb_assert (!value
->m_lazy
);
208 return ranges_contain (value
->m_optimized_out
, bit_offset
, bit_length
);
212 value_entirely_available (struct value
*value
)
214 /* We can only tell whether the whole value is available when we try
217 value_fetch_lazy (value
);
219 if (value
->m_unavailable
.empty ())
224 /* Returns true if VALUE is entirely covered by RANGES. If the value
225 is lazy, it'll be read now. Note that RANGE is a pointer to
226 pointer because reading the value might change *RANGE. */
229 value_entirely_covered_by_range_vector (struct value
*value
,
230 const std::vector
<range
> &ranges
)
232 /* We can only tell whether the whole value is optimized out /
233 unavailable when we try to read it. */
235 value_fetch_lazy (value
);
237 if (ranges
.size () == 1)
239 const struct range
&t
= ranges
[0];
242 && t
.length
== (TARGET_CHAR_BIT
243 * value_enclosing_type (value
)->length ()))
251 value_entirely_unavailable (struct value
*value
)
253 return value_entirely_covered_by_range_vector (value
, value
->m_unavailable
);
257 value_entirely_optimized_out (struct value
*value
)
259 return value_entirely_covered_by_range_vector (value
, value
->m_optimized_out
);
262 /* Insert into the vector pointed to by VECTORP the bit range starting of
263 OFFSET bits, and extending for the next LENGTH bits. */
266 insert_into_bit_range_vector (std::vector
<range
> *vectorp
,
267 LONGEST offset
, ULONGEST length
)
271 /* Insert the range sorted. If there's overlap or the new range
272 would be contiguous with an existing range, merge. */
274 newr
.offset
= offset
;
275 newr
.length
= length
;
277 /* Do a binary search for the position the given range would be
278 inserted if we only considered the starting OFFSET of ranges.
279 Call that position I. Since we also have LENGTH to care for
280 (this is a range afterall), we need to check if the _previous_
281 range overlaps the I range. E.g., calling R the new range:
283 #1 - overlaps with previous
287 |---| |---| |------| ... |--|
292 In the case #1 above, the binary search would return `I=1',
293 meaning, this OFFSET should be inserted at position 1, and the
294 current position 1 should be pushed further (and become 2). But,
295 note that `0' overlaps with R, so we want to merge them.
297 A similar consideration needs to be taken if the new range would
298 be contiguous with the previous range:
300 #2 - contiguous with previous
304 |--| |---| |------| ... |--|
309 If there's no overlap with the previous range, as in:
311 #3 - not overlapping and not contiguous
315 |--| |---| |------| ... |--|
322 #4 - R is the range with lowest offset
326 |--| |---| |------| ... |--|
331 ... we just push the new range to I.
333 All the 4 cases above need to consider that the new range may
334 also overlap several of the ranges that follow, or that R may be
335 contiguous with the following range, and merge. E.g.,
337 #5 - overlapping following ranges
340 |------------------------|
341 |--| |---| |------| ... |--|
350 |--| |---| |------| ... |--|
357 auto i
= std::lower_bound (vectorp
->begin (), vectorp
->end (), newr
);
358 if (i
> vectorp
->begin ())
360 struct range
&bef
= *(i
- 1);
362 if (ranges_overlap (bef
.offset
, bef
.length
, offset
, length
))
365 LONGEST l
= std::min (bef
.offset
, offset
);
366 LONGEST h
= std::max (bef
.offset
+ bef
.length
, offset
+ length
);
372 else if (offset
== bef
.offset
+ bef
.length
)
375 bef
.length
+= length
;
381 i
= vectorp
->insert (i
, newr
);
387 i
= vectorp
->insert (i
, newr
);
390 /* Check whether the ranges following the one we've just added or
391 touched can be folded in (#5 above). */
392 if (i
!= vectorp
->end () && i
+ 1 < vectorp
->end ())
397 /* Get the range we just touched. */
398 struct range
&t
= *i
;
402 for (; i
< vectorp
->end (); i
++)
404 struct range
&r
= *i
;
405 if (r
.offset
<= t
.offset
+ t
.length
)
409 l
= std::min (t
.offset
, r
.offset
);
410 h
= std::max (t
.offset
+ t
.length
, r
.offset
+ r
.length
);
419 /* If we couldn't merge this one, we won't be able to
420 merge following ones either, since the ranges are
421 always sorted by OFFSET. */
427 vectorp
->erase (next
, next
+ removed
);
432 mark_value_bits_unavailable (struct value
*value
,
433 LONGEST offset
, ULONGEST length
)
435 insert_into_bit_range_vector (&value
->m_unavailable
, offset
, length
);
439 mark_value_bytes_unavailable (struct value
*value
,
440 LONGEST offset
, ULONGEST length
)
442 mark_value_bits_unavailable (value
,
443 offset
* TARGET_CHAR_BIT
,
444 length
* TARGET_CHAR_BIT
);
447 /* Find the first range in RANGES that overlaps the range defined by
448 OFFSET and LENGTH, starting at element POS in the RANGES vector,
449 Returns the index into RANGES where such overlapping range was
450 found, or -1 if none was found. */
453 find_first_range_overlap (const std::vector
<range
> *ranges
, int pos
,
454 LONGEST offset
, LONGEST length
)
458 for (i
= pos
; i
< ranges
->size (); i
++)
460 const range
&r
= (*ranges
)[i
];
461 if (ranges_overlap (r
.offset
, r
.length
, offset
, length
))
468 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
469 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
472 It must always be the case that:
473 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
475 It is assumed that memory can be accessed from:
476 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
478 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
479 / TARGET_CHAR_BIT) */
481 memcmp_with_bit_offsets (const gdb_byte
*ptr1
, size_t offset1_bits
,
482 const gdb_byte
*ptr2
, size_t offset2_bits
,
485 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
486 == offset2_bits
% TARGET_CHAR_BIT
);
488 if (offset1_bits
% TARGET_CHAR_BIT
!= 0)
491 gdb_byte mask
, b1
, b2
;
493 /* The offset from the base pointers PTR1 and PTR2 is not a complete
494 number of bytes. A number of bits up to either the next exact
495 byte boundary, or LENGTH_BITS (which ever is sooner) will be
497 bits
= TARGET_CHAR_BIT
- offset1_bits
% TARGET_CHAR_BIT
;
498 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
499 mask
= (1 << bits
) - 1;
501 if (length_bits
< bits
)
503 mask
&= ~(gdb_byte
) ((1 << (bits
- length_bits
)) - 1);
507 /* Now load the two bytes and mask off the bits we care about. */
508 b1
= *(ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
) & mask
;
509 b2
= *(ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
) & mask
;
514 /* Now update the length and offsets to take account of the bits
515 we've just compared. */
517 offset1_bits
+= bits
;
518 offset2_bits
+= bits
;
521 if (length_bits
% TARGET_CHAR_BIT
!= 0)
525 gdb_byte mask
, b1
, b2
;
527 /* The length is not an exact number of bytes. After the previous
528 IF.. block then the offsets are byte aligned, or the
529 length is zero (in which case this code is not reached). Compare
530 a number of bits at the end of the region, starting from an exact
532 bits
= length_bits
% TARGET_CHAR_BIT
;
533 o1
= offset1_bits
+ length_bits
- bits
;
534 o2
= offset2_bits
+ length_bits
- bits
;
536 gdb_assert (bits
< sizeof (mask
) * TARGET_CHAR_BIT
);
537 mask
= ((1 << bits
) - 1) << (TARGET_CHAR_BIT
- bits
);
539 gdb_assert (o1
% TARGET_CHAR_BIT
== 0);
540 gdb_assert (o2
% TARGET_CHAR_BIT
== 0);
542 b1
= *(ptr1
+ o1
/ TARGET_CHAR_BIT
) & mask
;
543 b2
= *(ptr2
+ o2
/ TARGET_CHAR_BIT
) & mask
;
553 /* We've now taken care of any stray "bits" at the start, or end of
554 the region to compare, the remainder can be covered with a simple
556 gdb_assert (offset1_bits
% TARGET_CHAR_BIT
== 0);
557 gdb_assert (offset2_bits
% TARGET_CHAR_BIT
== 0);
558 gdb_assert (length_bits
% TARGET_CHAR_BIT
== 0);
560 return memcmp (ptr1
+ offset1_bits
/ TARGET_CHAR_BIT
,
561 ptr2
+ offset2_bits
/ TARGET_CHAR_BIT
,
562 length_bits
/ TARGET_CHAR_BIT
);
565 /* Length is zero, regions match. */
569 /* Helper struct for find_first_range_overlap_and_match and
570 value_contents_bits_eq. Keep track of which slot of a given ranges
571 vector have we last looked at. */
573 struct ranges_and_idx
576 const std::vector
<range
> *ranges
;
578 /* The range we've last found in RANGES. Given ranges are sorted,
579 we can start the next lookup here. */
583 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
584 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
585 ranges starting at OFFSET2 bits. Return true if the ranges match
586 and fill in *L and *H with the overlapping window relative to
587 (both) OFFSET1 or OFFSET2. */
590 find_first_range_overlap_and_match (struct ranges_and_idx
*rp1
,
591 struct ranges_and_idx
*rp2
,
592 LONGEST offset1
, LONGEST offset2
,
593 ULONGEST length
, ULONGEST
*l
, ULONGEST
*h
)
595 rp1
->idx
= find_first_range_overlap (rp1
->ranges
, rp1
->idx
,
597 rp2
->idx
= find_first_range_overlap (rp2
->ranges
, rp2
->idx
,
600 if (rp1
->idx
== -1 && rp2
->idx
== -1)
606 else if (rp1
->idx
== -1 || rp2
->idx
== -1)
610 const range
*r1
, *r2
;
614 r1
= &(*rp1
->ranges
)[rp1
->idx
];
615 r2
= &(*rp2
->ranges
)[rp2
->idx
];
617 /* Get the unavailable windows intersected by the incoming
618 ranges. The first and last ranges that overlap the argument
619 range may be wider than said incoming arguments ranges. */
620 l1
= std::max (offset1
, r1
->offset
);
621 h1
= std::min (offset1
+ length
, r1
->offset
+ r1
->length
);
623 l2
= std::max (offset2
, r2
->offset
);
624 h2
= std::min (offset2
+ length
, offset2
+ r2
->length
);
626 /* Make them relative to the respective start offsets, so we can
627 compare them for equality. */
634 /* Different ranges, no match. */
635 if (l1
!= l2
|| h1
!= h2
)
644 /* Helper function for value_contents_eq. The only difference is that
645 this function is bit rather than byte based.
647 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
648 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
649 Return true if the available bits match. */
652 value_contents_bits_eq (const struct value
*val1
, int offset1
,
653 const struct value
*val2
, int offset2
,
656 /* Each array element corresponds to a ranges source (unavailable,
657 optimized out). '1' is for VAL1, '2' for VAL2. */
658 struct ranges_and_idx rp1
[2], rp2
[2];
660 /* See function description in value.h. */
661 gdb_assert (!val1
->m_lazy
&& !val2
->m_lazy
);
663 /* We shouldn't be trying to compare past the end of the values. */
664 gdb_assert (offset1
+ length
665 <= val1
->m_enclosing_type
->length () * TARGET_CHAR_BIT
);
666 gdb_assert (offset2
+ length
667 <= val2
->m_enclosing_type
->length () * TARGET_CHAR_BIT
);
669 memset (&rp1
, 0, sizeof (rp1
));
670 memset (&rp2
, 0, sizeof (rp2
));
671 rp1
[0].ranges
= &val1
->m_unavailable
;
672 rp2
[0].ranges
= &val2
->m_unavailable
;
673 rp1
[1].ranges
= &val1
->m_optimized_out
;
674 rp2
[1].ranges
= &val2
->m_optimized_out
;
678 ULONGEST l
= 0, h
= 0; /* init for gcc -Wall */
681 for (i
= 0; i
< 2; i
++)
683 ULONGEST l_tmp
, h_tmp
;
685 /* The contents only match equal if the invalid/unavailable
686 contents ranges match as well. */
687 if (!find_first_range_overlap_and_match (&rp1
[i
], &rp2
[i
],
688 offset1
, offset2
, length
,
692 /* We're interested in the lowest/first range found. */
693 if (i
== 0 || l_tmp
< l
)
700 /* Compare the available/valid contents. */
701 if (memcmp_with_bit_offsets (val1
->m_contents
.get (), offset1
,
702 val2
->m_contents
.get (), offset2
, l
) != 0)
714 value_contents_eq (const struct value
*val1
, LONGEST offset1
,
715 const struct value
*val2
, LONGEST offset2
,
718 return value_contents_bits_eq (val1
, offset1
* TARGET_CHAR_BIT
,
719 val2
, offset2
* TARGET_CHAR_BIT
,
720 length
* TARGET_CHAR_BIT
);
726 value_contents_eq (const struct value
*val1
, const struct value
*val2
)
728 ULONGEST len1
= check_typedef (value_enclosing_type (val1
))->length ();
729 ULONGEST len2
= check_typedef (value_enclosing_type (val2
))->length ();
732 return value_contents_eq (val1
, 0, val2
, 0, len1
);
735 /* The value-history records all the values printed by print commands
736 during this session. */
738 static std::vector
<value_ref_ptr
> value_history
;
741 /* List of all value objects currently allocated
742 (except for those released by calls to release_value)
743 This is so they can be freed after each command. */
745 static std::vector
<value_ref_ptr
> all_values
;
747 /* Allocate a lazy value for type TYPE. Its actual content is
748 "lazily" allocated too: the content field of the return value is
749 NULL; it will be allocated when it is fetched from the target. */
752 allocate_value_lazy (struct type
*type
)
756 /* Call check_typedef on our type to make sure that, if TYPE
757 is a TYPE_CODE_TYPEDEF, its length is set to the length
758 of the target type instead of zero. However, we do not
759 replace the typedef type by the target type, because we want
760 to keep the typedef in order to be able to set the VAL's type
761 description correctly. */
762 check_typedef (type
);
764 val
= new struct value (type
);
766 /* Values start out on the all_values chain. */
767 all_values
.emplace_back (val
);
772 /* The maximum size, in bytes, that GDB will try to allocate for a value.
773 The initial value of 64k was not selected for any specific reason, it is
774 just a reasonable starting point. */
776 static int max_value_size
= 65536; /* 64k bytes */
778 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
779 LONGEST, otherwise GDB will not be able to parse integer values from the
780 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
781 be unable to parse "set max-value-size 2".
783 As we want a consistent GDB experience across hosts with different sizes
784 of LONGEST, this arbitrary minimum value was selected, so long as this
785 is bigger than LONGEST on all GDB supported hosts we're fine. */
787 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
788 gdb_static_assert (sizeof (LONGEST
) <= MIN_VALUE_FOR_MAX_VALUE_SIZE
);
790 /* Implement the "set max-value-size" command. */
793 set_max_value_size (const char *args
, int from_tty
,
794 struct cmd_list_element
*c
)
796 gdb_assert (max_value_size
== -1 || max_value_size
>= 0);
798 if (max_value_size
> -1 && max_value_size
< MIN_VALUE_FOR_MAX_VALUE_SIZE
)
800 max_value_size
= MIN_VALUE_FOR_MAX_VALUE_SIZE
;
801 error (_("max-value-size set too low, increasing to %d bytes"),
806 /* Implement the "show max-value-size" command. */
809 show_max_value_size (struct ui_file
*file
, int from_tty
,
810 struct cmd_list_element
*c
, const char *value
)
812 if (max_value_size
== -1)
813 gdb_printf (file
, _("Maximum value size is unlimited.\n"));
815 gdb_printf (file
, _("Maximum value size is %d bytes.\n"),
819 /* Called before we attempt to allocate or reallocate a buffer for the
820 contents of a value. TYPE is the type of the value for which we are
821 allocating the buffer. If the buffer is too large (based on the user
822 controllable setting) then throw an error. If this function returns
823 then we should attempt to allocate the buffer. */
826 check_type_length_before_alloc (const struct type
*type
)
828 ULONGEST length
= type
->length ();
830 if (max_value_size
> -1 && length
> max_value_size
)
832 if (type
->name () != NULL
)
833 error (_("value of type `%s' requires %s bytes, which is more "
834 "than max-value-size"), type
->name (), pulongest (length
));
836 error (_("value requires %s bytes, which is more than "
837 "max-value-size"), pulongest (length
));
841 /* When this has a value, it is used to limit the number of array elements
842 of an array that are loaded into memory when an array value is made
844 static gdb::optional
<int> array_length_limiting_element_count
;
847 scoped_array_length_limiting::scoped_array_length_limiting (int elements
)
849 m_old_value
= array_length_limiting_element_count
;
850 array_length_limiting_element_count
.emplace (elements
);
854 scoped_array_length_limiting::~scoped_array_length_limiting ()
856 array_length_limiting_element_count
= m_old_value
;
859 /* Find the inner element type for ARRAY_TYPE. */
862 find_array_element_type (struct type
*array_type
)
864 array_type
= check_typedef (array_type
);
865 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
867 if (current_language
->la_language
== language_fortran
)
868 while (array_type
->code () == TYPE_CODE_ARRAY
)
870 array_type
= array_type
->target_type ();
871 array_type
= check_typedef (array_type
);
875 array_type
= array_type
->target_type ();
876 array_type
= check_typedef (array_type
);
882 /* Return the limited length of ARRAY_TYPE, which must be of
883 TYPE_CODE_ARRAY. This function can only be called when the global
884 ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.
886 The limited length of an array is the smallest of either (1) the total
887 size of the array type, or (2) the array target type multiplies by the
888 array_length_limiting_element_count. */
891 calculate_limited_array_length (struct type
*array_type
)
893 gdb_assert (array_length_limiting_element_count
.has_value ());
895 array_type
= check_typedef (array_type
);
896 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
898 struct type
*elm_type
= find_array_element_type (array_type
);
899 ULONGEST len
= (elm_type
->length ()
900 * (*array_length_limiting_element_count
));
901 len
= std::min (len
, array_type
->length ());
906 /* Try to limit ourselves to only fetching the limited number of
907 elements. However, if this limited number of elements still
908 puts us over max_value_size, then we still refuse it and
909 return failure here, which will ultimately throw an error. */
912 set_limited_array_length (struct value
*val
)
914 ULONGEST limit
= val
->m_limited_length
;
915 ULONGEST len
= val
->type ()->length ();
917 if (array_length_limiting_element_count
.has_value ())
918 len
= calculate_limited_array_length (val
->type ());
920 if (limit
!= 0 && len
> limit
)
922 if (len
> max_value_size
)
925 val
->m_limited_length
= max_value_size
;
929 /* Allocate the contents of VAL if it has not been allocated yet.
930 If CHECK_SIZE is true, then apply the usual max-value-size checks. */
933 allocate_value_contents (struct value
*val
, bool check_size
)
935 if (!val
->m_contents
)
937 struct type
*enclosing_type
= value_enclosing_type (val
);
938 ULONGEST len
= enclosing_type
->length ();
942 /* If we are allocating the contents of an array, which
943 is greater in size than max_value_size, and there is
944 an element limit in effect, then we can possibly try
945 to load only a sub-set of the array contents into
947 if (val
->type () == enclosing_type
948 && val
->type ()->code () == TYPE_CODE_ARRAY
949 && len
> max_value_size
950 && set_limited_array_length (val
))
951 len
= val
->m_limited_length
;
953 check_type_length_before_alloc (enclosing_type
);
956 val
->m_contents
.reset ((gdb_byte
*) xzalloc (len
));
960 /* Allocate a value and its contents for type TYPE. If CHECK_SIZE is true,
961 then apply the usual max-value-size checks. */
963 static struct value
*
964 allocate_value (struct type
*type
, bool check_size
)
966 struct value
*val
= allocate_value_lazy (type
);
968 allocate_value_contents (val
, check_size
);
973 /* Allocate a value and its contents for type TYPE. */
976 allocate_value (struct type
*type
)
978 return allocate_value (type
, true);
981 /* Allocate a value that has the correct length
982 for COUNT repetitions of type TYPE. */
985 allocate_repeat_value (struct type
*type
, int count
)
987 /* Despite the fact that we are really creating an array of TYPE here, we
988 use the string lower bound as the array lower bound. This seems to
989 work fine for now. */
990 int low_bound
= current_language
->string_lower_bound ();
991 /* FIXME-type-allocation: need a way to free this type when we are
993 struct type
*array_type
994 = lookup_array_range_type (type
, low_bound
, count
+ low_bound
- 1);
996 return allocate_value (array_type
);
1000 allocate_computed_value (struct type
*type
,
1001 const struct lval_funcs
*funcs
,
1004 struct value
*v
= allocate_value_lazy (type
);
1006 VALUE_LVAL (v
) = lval_computed
;
1007 v
->m_location
.computed
.funcs
= funcs
;
1008 v
->m_location
.computed
.closure
= closure
;
1013 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1016 allocate_optimized_out_value (struct type
*type
)
1018 struct value
*retval
= allocate_value_lazy (type
);
1020 mark_value_bytes_optimized_out (retval
, 0, type
->length ());
1021 set_value_lazy (retval
, 0);
1025 /* Accessor methods. */
1027 gdb::array_view
<gdb_byte
>
1028 value_contents_raw (struct value
*value
)
1030 struct gdbarch
*arch
= value
->arch ();
1031 int unit_size
= gdbarch_addressable_memory_unit_size (arch
);
1033 allocate_value_contents (value
, true);
1035 ULONGEST length
= value
->type ()->length ();
1036 return gdb::make_array_view
1037 (value
->m_contents
.get () + value
->m_embedded_offset
* unit_size
, length
);
1040 gdb::array_view
<gdb_byte
>
1041 value_contents_all_raw (struct value
*value
)
1043 allocate_value_contents (value
, true);
1045 ULONGEST length
= value_enclosing_type (value
)->length ();
1046 return gdb::make_array_view (value
->m_contents
.get (), length
);
1050 value_enclosing_type (const struct value
*value
)
1052 return value
->m_enclosing_type
;
1055 /* Look at value.h for description. */
1058 value_actual_type (struct value
*value
, int resolve_simple_types
,
1059 int *real_type_found
)
1061 struct value_print_options opts
;
1062 struct type
*result
;
1064 get_user_print_options (&opts
);
1066 if (real_type_found
)
1067 *real_type_found
= 0;
1068 result
= value
->type ();
1069 if (opts
.objectprint
)
1071 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1072 fetch its rtti type. */
1073 if (result
->is_pointer_or_reference ()
1074 && (check_typedef (result
->target_type ())->code ()
1075 == TYPE_CODE_STRUCT
)
1076 && !value_optimized_out (value
))
1078 struct type
*real_type
;
1080 real_type
= value_rtti_indirect_type (value
, NULL
, NULL
, NULL
);
1083 if (real_type_found
)
1084 *real_type_found
= 1;
1088 else if (resolve_simple_types
)
1090 if (real_type_found
)
1091 *real_type_found
= 1;
1092 result
= value_enclosing_type (value
);
1100 error_value_optimized_out (void)
1102 throw_error (OPTIMIZED_OUT_ERROR
, _("value has been optimized out"));
1106 require_not_optimized_out (const struct value
*value
)
1108 if (!value
->m_optimized_out
.empty ())
1110 if (value
->m_lval
== lval_register
)
1111 throw_error (OPTIMIZED_OUT_ERROR
,
1112 _("register has not been saved in frame"));
1114 error_value_optimized_out ();
1119 require_available (const struct value
*value
)
1121 if (!value
->m_unavailable
.empty ())
1122 throw_error (NOT_AVAILABLE_ERROR
, _("value is not available"));
1125 gdb::array_view
<const gdb_byte
>
1126 value_contents_for_printing (struct value
*value
)
1129 value_fetch_lazy (value
);
1131 ULONGEST length
= value_enclosing_type (value
)->length ();
1132 return gdb::make_array_view (value
->m_contents
.get (), length
);
1135 gdb::array_view
<const gdb_byte
>
1136 value_contents_for_printing_const (const struct value
*value
)
1138 gdb_assert (!value
->m_lazy
);
1140 ULONGEST length
= value_enclosing_type (value
)->length ();
1141 return gdb::make_array_view (value
->m_contents
.get (), length
);
1144 gdb::array_view
<const gdb_byte
>
1145 value_contents_all (struct value
*value
)
1147 gdb::array_view
<const gdb_byte
> result
= value_contents_for_printing (value
);
1148 require_not_optimized_out (value
);
1149 require_available (value
);
1153 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1154 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1157 ranges_copy_adjusted (std::vector
<range
> *dst_range
, int dst_bit_offset
,
1158 const std::vector
<range
> &src_range
, int src_bit_offset
,
1159 unsigned int bit_length
)
1161 for (const range
&r
: src_range
)
1165 l
= std::max (r
.offset
, (LONGEST
) src_bit_offset
);
1166 h
= std::min ((LONGEST
) (r
.offset
+ r
.length
),
1167 (LONGEST
) src_bit_offset
+ bit_length
);
1170 insert_into_bit_range_vector (dst_range
,
1171 dst_bit_offset
+ (l
- src_bit_offset
),
1176 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1177 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1180 value_ranges_copy_adjusted (struct value
*dst
, int dst_bit_offset
,
1181 const struct value
*src
, int src_bit_offset
,
1184 ranges_copy_adjusted (&dst
->m_unavailable
, dst_bit_offset
,
1185 src
->m_unavailable
, src_bit_offset
,
1187 ranges_copy_adjusted (&dst
->m_optimized_out
, dst_bit_offset
,
1188 src
->m_optimized_out
, src_bit_offset
,
1192 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1193 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1194 contents, starting at DST_OFFSET. If unavailable contents are
1195 being copied from SRC, the corresponding DST contents are marked
1196 unavailable accordingly. Neither DST nor SRC may be lazy
1199 It is assumed the contents of DST in the [DST_OFFSET,
1200 DST_OFFSET+LENGTH) range are wholly available. */
1203 value_contents_copy_raw (struct value
*dst
, LONGEST dst_offset
,
1204 struct value
*src
, LONGEST src_offset
, LONGEST length
)
1206 LONGEST src_bit_offset
, dst_bit_offset
, bit_length
;
1207 struct gdbarch
*arch
= src
->arch ();
1208 int unit_size
= gdbarch_addressable_memory_unit_size (arch
);
1210 /* A lazy DST would make that this copy operation useless, since as
1211 soon as DST's contents were un-lazied (by a later value_contents
1212 call, say), the contents would be overwritten. A lazy SRC would
1213 mean we'd be copying garbage. */
1214 gdb_assert (!dst
->m_lazy
&& !src
->m_lazy
);
1216 /* The overwritten DST range gets unavailability ORed in, not
1217 replaced. Make sure to remember to implement replacing if it
1218 turns out actually necessary. */
1219 gdb_assert (value_bytes_available (dst
, dst_offset
, length
));
1220 gdb_assert (!value_bits_any_optimized_out (dst
,
1221 TARGET_CHAR_BIT
* dst_offset
,
1222 TARGET_CHAR_BIT
* length
));
1224 /* Copy the data. */
1225 gdb::array_view
<gdb_byte
> dst_contents
1226 = value_contents_all_raw (dst
).slice (dst_offset
* unit_size
,
1227 length
* unit_size
);
1228 gdb::array_view
<const gdb_byte
> src_contents
1229 = value_contents_all_raw (src
).slice (src_offset
* unit_size
,
1230 length
* unit_size
);
1231 copy (src_contents
, dst_contents
);
1233 /* Copy the meta-data, adjusted. */
1234 src_bit_offset
= src_offset
* unit_size
* HOST_CHAR_BIT
;
1235 dst_bit_offset
= dst_offset
* unit_size
* HOST_CHAR_BIT
;
1236 bit_length
= length
* unit_size
* HOST_CHAR_BIT
;
1238 value_ranges_copy_adjusted (dst
, dst_bit_offset
,
1239 src
, src_bit_offset
,
1243 /* A helper for value_from_component_bitsize that copies bits from SRC
1247 value_contents_copy_raw_bitwise (struct value
*dst
, LONGEST dst_bit_offset
,
1248 struct value
*src
, LONGEST src_bit_offset
,
1251 /* A lazy DST would make that this copy operation useless, since as
1252 soon as DST's contents were un-lazied (by a later value_contents
1253 call, say), the contents would be overwritten. A lazy SRC would
1254 mean we'd be copying garbage. */
1255 gdb_assert (!dst
->m_lazy
&& !src
->m_lazy
);
1257 /* The overwritten DST range gets unavailability ORed in, not
1258 replaced. Make sure to remember to implement replacing if it
1259 turns out actually necessary. */
1260 LONGEST dst_offset
= dst_bit_offset
/ TARGET_CHAR_BIT
;
1261 LONGEST length
= bit_length
/ TARGET_CHAR_BIT
;
1262 gdb_assert (value_bytes_available (dst
, dst_offset
, length
));
1263 gdb_assert (!value_bits_any_optimized_out (dst
, dst_bit_offset
,
1266 /* Copy the data. */
1267 gdb::array_view
<gdb_byte
> dst_contents
= value_contents_all_raw (dst
);
1268 gdb::array_view
<const gdb_byte
> src_contents
= value_contents_all_raw (src
);
1269 copy_bitwise (dst_contents
.data (), dst_bit_offset
,
1270 src_contents
.data (), src_bit_offset
,
1272 type_byte_order (src
->type ()) == BFD_ENDIAN_BIG
);
1274 /* Copy the meta-data. */
1275 value_ranges_copy_adjusted (dst
, dst_bit_offset
,
1276 src
, src_bit_offset
,
1280 /* Copy LENGTH bytes of SRC value's (all) contents
1281 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1282 (all) contents, starting at DST_OFFSET. If unavailable contents
1283 are being copied from SRC, the corresponding DST contents are
1284 marked unavailable accordingly. DST must not be lazy. If SRC is
1285 lazy, it will be fetched now.
1287 It is assumed the contents of DST in the [DST_OFFSET,
1288 DST_OFFSET+LENGTH) range are wholly available. */
1291 value_contents_copy (struct value
*dst
, LONGEST dst_offset
,
1292 struct value
*src
, LONGEST src_offset
, LONGEST length
)
1295 value_fetch_lazy (src
);
1297 value_contents_copy_raw (dst
, dst_offset
, src
, src_offset
, length
);
1301 value_lazy (const struct value
*value
)
1303 return value
->m_lazy
;
1307 set_value_lazy (struct value
*value
, int val
)
1309 value
->m_lazy
= val
;
1313 value_stack (const struct value
*value
)
1315 return value
->m_stack
;
1319 set_value_stack (struct value
*value
, int val
)
1321 value
->m_stack
= val
;
1324 gdb::array_view
<const gdb_byte
>
1325 value_contents (struct value
*value
)
1327 gdb::array_view
<const gdb_byte
> result
= value_contents_writeable (value
);
1328 require_not_optimized_out (value
);
1329 require_available (value
);
1333 gdb::array_view
<gdb_byte
>
1334 value_contents_writeable (struct value
*value
)
1337 value_fetch_lazy (value
);
1338 return value_contents_raw (value
);
1342 value_optimized_out (struct value
*value
)
1346 /* See if we can compute the result without fetching the
1348 if (VALUE_LVAL (value
) == lval_memory
)
1350 else if (VALUE_LVAL (value
) == lval_computed
)
1352 const struct lval_funcs
*funcs
= value
->m_location
.computed
.funcs
;
1354 if (funcs
->is_optimized_out
!= nullptr)
1355 return funcs
->is_optimized_out (value
);
1358 /* Fall back to fetching. */
1361 value_fetch_lazy (value
);
1363 catch (const gdb_exception_error
&ex
)
1368 case OPTIMIZED_OUT_ERROR
:
1369 case NOT_AVAILABLE_ERROR
:
1370 /* These can normally happen when we try to access an
1371 optimized out or unavailable register, either in a
1372 physical register or spilled to memory. */
1380 return !value
->m_optimized_out
.empty ();
1383 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1384 the following LENGTH bytes. */
1387 mark_value_bytes_optimized_out (struct value
*value
, int offset
, int length
)
1389 mark_value_bits_optimized_out (value
,
1390 offset
* TARGET_CHAR_BIT
,
1391 length
* TARGET_CHAR_BIT
);
1397 mark_value_bits_optimized_out (struct value
*value
,
1398 LONGEST offset
, LONGEST length
)
1400 insert_into_bit_range_vector (&value
->m_optimized_out
, offset
, length
);
1404 value_bits_synthetic_pointer (const struct value
*value
,
1405 LONGEST offset
, LONGEST length
)
1407 if (value
->m_lval
!= lval_computed
1408 || !value
->m_location
.computed
.funcs
->check_synthetic_pointer
)
1410 return value
->m_location
.computed
.funcs
->check_synthetic_pointer (value
,
1416 value_embedded_offset (const struct value
*value
)
1418 return value
->m_embedded_offset
;
1422 set_value_embedded_offset (struct value
*value
, LONGEST val
)
1424 value
->m_embedded_offset
= val
;
1428 value_pointed_to_offset (const struct value
*value
)
1430 return value
->m_pointed_to_offset
;
1434 set_value_pointed_to_offset (struct value
*value
, LONGEST val
)
1436 value
->m_pointed_to_offset
= val
;
1439 const struct lval_funcs
*
1440 value_computed_funcs (const struct value
*v
)
1442 gdb_assert (value_lval_const (v
) == lval_computed
);
1444 return v
->m_location
.computed
.funcs
;
1448 value_computed_closure (const struct value
*v
)
1450 gdb_assert (v
->m_lval
== lval_computed
);
1452 return v
->m_location
.computed
.closure
;
1456 deprecated_value_lval_hack (struct value
*value
)
1458 return &value
->m_lval
;
1462 value_lval_const (const struct value
*value
)
1464 return value
->m_lval
;
1468 value_address (const struct value
*value
)
1470 if (value
->m_lval
!= lval_memory
)
1472 if (value
->m_parent
!= NULL
)
1473 return value_address (value
->m_parent
.get ()) + value
->m_offset
;
1474 if (NULL
!= TYPE_DATA_LOCATION (value
->type ()))
1476 gdb_assert (PROP_CONST
== TYPE_DATA_LOCATION_KIND (value
->type ()));
1477 return TYPE_DATA_LOCATION_ADDR (value
->type ());
1480 return value
->m_location
.address
+ value
->m_offset
;
1484 value_raw_address (const struct value
*value
)
1486 if (value
->m_lval
!= lval_memory
)
1488 return value
->m_location
.address
;
1492 set_value_address (struct value
*value
, CORE_ADDR addr
)
1494 gdb_assert (value
->m_lval
== lval_memory
);
1495 value
->m_location
.address
= addr
;
1498 struct internalvar
**
1499 deprecated_value_internalvar_hack (struct value
*value
)
1501 return &value
->m_location
.internalvar
;
1505 deprecated_value_next_frame_id_hack (struct value
*value
)
1507 gdb_assert (value
->m_lval
== lval_register
);
1508 return &value
->m_location
.reg
.next_frame_id
;
1512 deprecated_value_regnum_hack (struct value
*value
)
1514 gdb_assert (value
->m_lval
== lval_register
);
1515 return &value
->m_location
.reg
.regnum
;
1519 /* Return a mark in the value chain. All values allocated after the
1520 mark is obtained (except for those released) are subject to being freed
1521 if a subsequent value_free_to_mark is passed the mark. */
1525 if (all_values
.empty ())
1527 return all_values
.back ().get ();
1533 value_incref (struct value
*val
)
1535 val
->m_reference_count
++;
1538 /* Release a reference to VAL, which was acquired with value_incref.
1539 This function is also called to deallocate values from the value
1543 value_decref (struct value
*val
)
1547 gdb_assert (val
->m_reference_count
> 0);
1548 val
->m_reference_count
--;
1549 if (val
->m_reference_count
== 0)
1554 /* Free all values allocated since MARK was obtained by value_mark
1555 (except for those released). */
1557 value_free_to_mark (const struct value
*mark
)
1559 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1560 if (iter
== all_values
.end ())
1561 all_values
.clear ();
1563 all_values
.erase (iter
+ 1, all_values
.end ());
1566 /* Remove VAL from the chain all_values
1567 so it will not be freed automatically. */
1570 release_value (struct value
*val
)
1573 return value_ref_ptr ();
1575 std::vector
<value_ref_ptr
>::reverse_iterator iter
;
1576 for (iter
= all_values
.rbegin (); iter
!= all_values
.rend (); ++iter
)
1580 value_ref_ptr result
= *iter
;
1581 all_values
.erase (iter
.base () - 1);
1586 /* We must always return an owned reference. Normally this happens
1587 because we transfer the reference from the value chain, but in
1588 this case the value was not on the chain. */
1589 return value_ref_ptr::new_reference (val
);
1594 std::vector
<value_ref_ptr
>
1595 value_release_to_mark (const struct value
*mark
)
1597 std::vector
<value_ref_ptr
> result
;
1599 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1600 if (iter
== all_values
.end ())
1601 std::swap (result
, all_values
);
1604 std::move (iter
+ 1, all_values
.end (), std::back_inserter (result
));
1605 all_values
.erase (iter
+ 1, all_values
.end ());
1607 std::reverse (result
.begin (), result
.end ());
1611 /* Return a copy of the value ARG. It contains the same contents,
1612 for the same memory address, but it's a different block of storage. */
1615 value_copy (const value
*arg
)
1617 struct type
*encl_type
= value_enclosing_type (arg
);
1620 val
= allocate_value_lazy (encl_type
);
1621 val
->m_type
= arg
->m_type
;
1622 VALUE_LVAL (val
) = arg
->m_lval
;
1623 val
->m_location
= arg
->m_location
;
1624 val
->m_offset
= arg
->m_offset
;
1625 val
->m_bitpos
= arg
->m_bitpos
;
1626 val
->m_bitsize
= arg
->m_bitsize
;
1627 val
->m_lazy
= arg
->m_lazy
;
1628 val
->m_embedded_offset
= value_embedded_offset (arg
);
1629 val
->m_pointed_to_offset
= arg
->m_pointed_to_offset
;
1630 val
->m_modifiable
= arg
->m_modifiable
;
1631 val
->m_stack
= arg
->m_stack
;
1632 val
->m_is_zero
= arg
->m_is_zero
;
1633 val
->m_in_history
= arg
->m_in_history
;
1634 val
->m_initialized
= arg
->m_initialized
;
1635 val
->m_unavailable
= arg
->m_unavailable
;
1636 val
->m_optimized_out
= arg
->m_optimized_out
;
1637 val
->m_parent
= arg
->m_parent
;
1638 val
->m_limited_length
= arg
->m_limited_length
;
1640 if (!value_lazy (val
)
1641 && !(value_entirely_optimized_out (val
)
1642 || value_entirely_unavailable (val
)))
1644 ULONGEST length
= val
->m_limited_length
;
1646 length
= value_enclosing_type (val
)->length ();
1648 gdb_assert (arg
->m_contents
!= nullptr);
1649 const auto &arg_view
1650 = gdb::make_array_view (arg
->m_contents
.get (), length
);
1652 allocate_value_contents (val
, false);
1653 gdb::array_view
<gdb_byte
> val_contents
1654 = value_contents_all_raw (val
).slice (0, length
);
1656 copy (arg_view
, val_contents
);
1659 if (VALUE_LVAL (val
) == lval_computed
)
1661 const struct lval_funcs
*funcs
= val
->m_location
.computed
.funcs
;
1663 if (funcs
->copy_closure
)
1664 val
->m_location
.computed
.closure
= funcs
->copy_closure (val
);
1669 /* Return a "const" and/or "volatile" qualified version of the value V.
1670 If CNST is true, then the returned value will be qualified with
1672 if VOLTL is true, then the returned value will be qualified with
1676 make_cv_value (int cnst
, int voltl
, struct value
*v
)
1678 struct type
*val_type
= v
->type ();
1679 struct type
*m_enclosing_type
= value_enclosing_type (v
);
1680 struct value
*cv_val
= value_copy (v
);
1682 cv_val
->deprecated_set_type (make_cv_type (cnst
, voltl
, val_type
, NULL
));
1683 set_value_enclosing_type (cv_val
,
1684 make_cv_type (cnst
, voltl
, m_enclosing_type
, NULL
));
1689 /* Return a version of ARG that is non-lvalue. */
1692 value_non_lval (struct value
*arg
)
1694 if (VALUE_LVAL (arg
) != not_lval
)
1696 struct type
*enc_type
= value_enclosing_type (arg
);
1697 struct value
*val
= allocate_value (enc_type
);
1699 copy (value_contents_all (arg
), value_contents_all_raw (val
));
1700 val
->m_type
= arg
->m_type
;
1701 set_value_embedded_offset (val
, value_embedded_offset (arg
));
1702 set_value_pointed_to_offset (val
, value_pointed_to_offset (arg
));
1708 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1711 value_force_lval (struct value
*v
, CORE_ADDR addr
)
1713 gdb_assert (VALUE_LVAL (v
) == not_lval
);
1715 write_memory (addr
, value_contents_raw (v
).data (), v
->type ()->length ());
1716 v
->m_lval
= lval_memory
;
1717 v
->m_location
.address
= addr
;
1721 set_value_component_location (struct value
*component
,
1722 const struct value
*whole
)
1726 gdb_assert (whole
->m_lval
!= lval_xcallable
);
1728 if (whole
->m_lval
== lval_internalvar
)
1729 VALUE_LVAL (component
) = lval_internalvar_component
;
1731 VALUE_LVAL (component
) = whole
->m_lval
;
1733 component
->m_location
= whole
->m_location
;
1734 if (whole
->m_lval
== lval_computed
)
1736 const struct lval_funcs
*funcs
= whole
->m_location
.computed
.funcs
;
1738 if (funcs
->copy_closure
)
1739 component
->m_location
.computed
.closure
= funcs
->copy_closure (whole
);
1742 /* If the WHOLE value has a dynamically resolved location property then
1743 update the address of the COMPONENT. */
1744 type
= whole
->type ();
1745 if (NULL
!= TYPE_DATA_LOCATION (type
)
1746 && TYPE_DATA_LOCATION_KIND (type
) == PROP_CONST
)
1747 set_value_address (component
, TYPE_DATA_LOCATION_ADDR (type
));
1749 /* Similarly, if the COMPONENT value has a dynamically resolved location
1750 property then update its address. */
1751 type
= component
->type ();
1752 if (NULL
!= TYPE_DATA_LOCATION (type
)
1753 && TYPE_DATA_LOCATION_KIND (type
) == PROP_CONST
)
1755 /* If the COMPONENT has a dynamic location, and is an
1756 lval_internalvar_component, then we change it to a lval_memory.
1758 Usually a component of an internalvar is created non-lazy, and has
1759 its content immediately copied from the parent internalvar.
1760 However, for components with a dynamic location, the content of
1761 the component is not contained within the parent, but is instead
1762 accessed indirectly. Further, the component will be created as a
1765 By changing the type of the component to lval_memory we ensure
1766 that value_fetch_lazy can successfully load the component.
1768 This solution isn't ideal, but a real fix would require values to
1769 carry around both the parent value contents, and the contents of
1770 any dynamic fields within the parent. This is a substantial
1771 change to how values work in GDB. */
1772 if (VALUE_LVAL (component
) == lval_internalvar_component
)
1774 gdb_assert (value_lazy (component
));
1775 VALUE_LVAL (component
) = lval_memory
;
1778 gdb_assert (VALUE_LVAL (component
) == lval_memory
);
1779 set_value_address (component
, TYPE_DATA_LOCATION_ADDR (type
));
1783 /* Access to the value history. */
1785 /* Record a new value in the value history.
1786 Returns the absolute history index of the entry. */
1789 record_latest_value (struct value
*val
)
1791 struct type
*enclosing_type
= value_enclosing_type (val
);
1792 struct type
*type
= val
->type ();
1794 /* We don't want this value to have anything to do with the inferior anymore.
1795 In particular, "set $1 = 50" should not affect the variable from which
1796 the value was taken, and fast watchpoints should be able to assume that
1797 a value on the value history never changes. */
1798 if (value_lazy (val
))
1800 /* We know that this is a _huge_ array, any attempt to fetch this
1801 is going to cause GDB to throw an error. However, to allow
1802 the array to still be displayed we fetch its contents up to
1803 `max_value_size' and mark anything beyond "unavailable" in
1805 if (type
->code () == TYPE_CODE_ARRAY
1806 && type
->length () > max_value_size
1807 && array_length_limiting_element_count
.has_value ()
1808 && enclosing_type
== type
1809 && calculate_limited_array_length (type
) <= max_value_size
)
1810 val
->m_limited_length
= max_value_size
;
1812 value_fetch_lazy (val
);
1815 ULONGEST limit
= val
->m_limited_length
;
1817 mark_value_bytes_unavailable (val
, limit
,
1818 enclosing_type
->length () - limit
);
1820 /* Mark the value as recorded in the history for the availability check. */
1821 val
->m_in_history
= true;
1823 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1824 from. This is a bit dubious, because then *&$1 does not just return $1
1825 but the current contents of that location. c'est la vie... */
1826 val
->m_modifiable
= 0;
1828 value_history
.push_back (release_value (val
));
1830 return value_history
.size ();
1833 /* Return a copy of the value in the history with sequence number NUM. */
1836 access_value_history (int num
)
1841 absnum
+= value_history
.size ();
1846 error (_("The history is empty."));
1848 error (_("There is only one value in the history."));
1850 error (_("History does not go back to $$%d."), -num
);
1852 if (absnum
> value_history
.size ())
1853 error (_("History has not yet reached $%d."), absnum
);
1857 return value_copy (value_history
[absnum
].get ());
1863 value_history_count ()
1865 return value_history
.size ();
1869 show_values (const char *num_exp
, int from_tty
)
1877 /* "show values +" should print from the stored position.
1878 "show values <exp>" should print around value number <exp>. */
1879 if (num_exp
[0] != '+' || num_exp
[1] != '\0')
1880 num
= parse_and_eval_long (num_exp
) - 5;
1884 /* "show values" means print the last 10 values. */
1885 num
= value_history
.size () - 9;
1891 for (i
= num
; i
< num
+ 10 && i
<= value_history
.size (); i
++)
1893 struct value_print_options opts
;
1895 val
= access_value_history (i
);
1896 gdb_printf (("$%d = "), i
);
1897 get_user_print_options (&opts
);
1898 value_print (val
, gdb_stdout
, &opts
);
1899 gdb_printf (("\n"));
1902 /* The next "show values +" should start after what we just printed. */
1905 /* Hitting just return after this command should do the same thing as
1906 "show values +". If num_exp is null, this is unnecessary, since
1907 "show values +" is not useful after "show values". */
1908 if (from_tty
&& num_exp
)
1909 set_repeat_arguments ("+");
1912 enum internalvar_kind
1914 /* The internal variable is empty. */
1917 /* The value of the internal variable is provided directly as
1918 a GDB value object. */
1921 /* A fresh value is computed via a call-back routine on every
1922 access to the internal variable. */
1923 INTERNALVAR_MAKE_VALUE
,
1925 /* The internal variable holds a GDB internal convenience function. */
1926 INTERNALVAR_FUNCTION
,
1928 /* The variable holds an integer value. */
1929 INTERNALVAR_INTEGER
,
1931 /* The variable holds a GDB-provided string. */
1935 union internalvar_data
1937 /* A value object used with INTERNALVAR_VALUE. */
1938 struct value
*value
;
1940 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1943 /* The functions to call. */
1944 const struct internalvar_funcs
*functions
;
1946 /* The function's user-data. */
1950 /* The internal function used with INTERNALVAR_FUNCTION. */
1953 struct internal_function
*function
;
1954 /* True if this is the canonical name for the function. */
1958 /* An integer value used with INTERNALVAR_INTEGER. */
1961 /* If type is non-NULL, it will be used as the type to generate
1962 a value for this internal variable. If type is NULL, a default
1963 integer type for the architecture is used. */
1968 /* A string value used with INTERNALVAR_STRING. */
1972 /* Internal variables. These are variables within the debugger
1973 that hold values assigned by debugger commands.
1974 The user refers to them with a '$' prefix
1975 that does not appear in the variable names stored internally. */
1979 struct internalvar
*next
;
1982 /* We support various different kinds of content of an internal variable.
1983 enum internalvar_kind specifies the kind, and union internalvar_data
1984 provides the data associated with this particular kind. */
1986 enum internalvar_kind kind
;
1988 union internalvar_data u
;
1991 static struct internalvar
*internalvars
;
1993 /* If the variable does not already exist create it and give it the
1994 value given. If no value is given then the default is zero. */
1996 init_if_undefined_command (const char* args
, int from_tty
)
1998 struct internalvar
*intvar
= nullptr;
2000 /* Parse the expression - this is taken from set_command(). */
2001 expression_up expr
= parse_expression (args
);
2003 /* Validate the expression.
2004 Was the expression an assignment?
2005 Or even an expression at all? */
2006 if (expr
->first_opcode () != BINOP_ASSIGN
)
2007 error (_("Init-if-undefined requires an assignment expression."));
2009 /* Extract the variable from the parsed expression. */
2010 expr::assign_operation
*assign
2011 = dynamic_cast<expr::assign_operation
*> (expr
->op
.get ());
2012 if (assign
!= nullptr)
2014 expr::operation
*lhs
= assign
->get_lhs ();
2015 expr::internalvar_operation
*ivarop
2016 = dynamic_cast<expr::internalvar_operation
*> (lhs
);
2017 if (ivarop
!= nullptr)
2018 intvar
= ivarop
->get_internalvar ();
2021 if (intvar
== nullptr)
2022 error (_("The first parameter to init-if-undefined "
2023 "should be a GDB variable."));
2025 /* Only evaluate the expression if the lvalue is void.
2026 This may still fail if the expression is invalid. */
2027 if (intvar
->kind
== INTERNALVAR_VOID
)
2028 evaluate_expression (expr
.get ());
2032 /* Look up an internal variable with name NAME. NAME should not
2033 normally include a dollar sign.
2035 If the specified internal variable does not exist,
2036 the return value is NULL. */
2038 struct internalvar
*
2039 lookup_only_internalvar (const char *name
)
2041 struct internalvar
*var
;
2043 for (var
= internalvars
; var
; var
= var
->next
)
2044 if (strcmp (var
->name
, name
) == 0)
2050 /* Complete NAME by comparing it to the names of internal
2054 complete_internalvar (completion_tracker
&tracker
, const char *name
)
2056 struct internalvar
*var
;
2059 len
= strlen (name
);
2061 for (var
= internalvars
; var
; var
= var
->next
)
2062 if (strncmp (var
->name
, name
, len
) == 0)
2063 tracker
.add_completion (make_unique_xstrdup (var
->name
));
2066 /* Create an internal variable with name NAME and with a void value.
2067 NAME should not normally include a dollar sign. */
2069 struct internalvar
*
2070 create_internalvar (const char *name
)
2072 struct internalvar
*var
= XNEW (struct internalvar
);
2074 var
->name
= xstrdup (name
);
2075 var
->kind
= INTERNALVAR_VOID
;
2076 var
->next
= internalvars
;
2081 /* Create an internal variable with name NAME and register FUN as the
2082 function that value_of_internalvar uses to create a value whenever
2083 this variable is referenced. NAME should not normally include a
2084 dollar sign. DATA is passed uninterpreted to FUN when it is
2085 called. CLEANUP, if not NULL, is called when the internal variable
2086 is destroyed. It is passed DATA as its only argument. */
2088 struct internalvar
*
2089 create_internalvar_type_lazy (const char *name
,
2090 const struct internalvar_funcs
*funcs
,
2093 struct internalvar
*var
= create_internalvar (name
);
2095 var
->kind
= INTERNALVAR_MAKE_VALUE
;
2096 var
->u
.make_value
.functions
= funcs
;
2097 var
->u
.make_value
.data
= data
;
2101 /* See documentation in value.h. */
2104 compile_internalvar_to_ax (struct internalvar
*var
,
2105 struct agent_expr
*expr
,
2106 struct axs_value
*value
)
2108 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2109 || var
->u
.make_value
.functions
->compile_to_ax
== NULL
)
2112 var
->u
.make_value
.functions
->compile_to_ax (var
, expr
, value
,
2113 var
->u
.make_value
.data
);
2117 /* Look up an internal variable with name NAME. NAME should not
2118 normally include a dollar sign.
2120 If the specified internal variable does not exist,
2121 one is created, with a void value. */
2123 struct internalvar
*
2124 lookup_internalvar (const char *name
)
2126 struct internalvar
*var
;
2128 var
= lookup_only_internalvar (name
);
2132 return create_internalvar (name
);
2135 /* Return current value of internal variable VAR. For variables that
2136 are not inherently typed, use a value type appropriate for GDBARCH. */
2139 value_of_internalvar (struct gdbarch
*gdbarch
, struct internalvar
*var
)
2142 struct trace_state_variable
*tsv
;
2144 /* If there is a trace state variable of the same name, assume that
2145 is what we really want to see. */
2146 tsv
= find_trace_state_variable (var
->name
);
2149 tsv
->value_known
= target_get_trace_state_variable_value (tsv
->number
,
2151 if (tsv
->value_known
)
2152 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int64
,
2155 val
= allocate_value (builtin_type (gdbarch
)->builtin_void
);
2161 case INTERNALVAR_VOID
:
2162 val
= allocate_value (builtin_type (gdbarch
)->builtin_void
);
2165 case INTERNALVAR_FUNCTION
:
2166 val
= allocate_value (builtin_type (gdbarch
)->internal_fn
);
2169 case INTERNALVAR_INTEGER
:
2170 if (!var
->u
.integer
.type
)
2171 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int
,
2172 var
->u
.integer
.val
);
2174 val
= value_from_longest (var
->u
.integer
.type
, var
->u
.integer
.val
);
2177 case INTERNALVAR_STRING
:
2178 val
= value_cstring (var
->u
.string
, strlen (var
->u
.string
),
2179 builtin_type (gdbarch
)->builtin_char
);
2182 case INTERNALVAR_VALUE
:
2183 val
= value_copy (var
->u
.value
);
2184 if (value_lazy (val
))
2185 value_fetch_lazy (val
);
2188 case INTERNALVAR_MAKE_VALUE
:
2189 val
= (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2190 var
->u
.make_value
.data
);
2194 internal_error (_("bad kind"));
2197 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2198 on this value go back to affect the original internal variable.
2200 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2201 no underlying modifiable state in the internal variable.
2203 Likewise, if the variable's value is a computed lvalue, we want
2204 references to it to produce another computed lvalue, where
2205 references and assignments actually operate through the
2206 computed value's functions.
2208 This means that internal variables with computed values
2209 behave a little differently from other internal variables:
2210 assignments to them don't just replace the previous value
2211 altogether. At the moment, this seems like the behavior we
2214 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2215 && val
->m_lval
!= lval_computed
)
2217 VALUE_LVAL (val
) = lval_internalvar
;
2218 VALUE_INTERNALVAR (val
) = var
;
2225 get_internalvar_integer (struct internalvar
*var
, LONGEST
*result
)
2227 if (var
->kind
== INTERNALVAR_INTEGER
)
2229 *result
= var
->u
.integer
.val
;
2233 if (var
->kind
== INTERNALVAR_VALUE
)
2235 struct type
*type
= check_typedef (var
->u
.value
->type ());
2237 if (type
->code () == TYPE_CODE_INT
)
2239 *result
= value_as_long (var
->u
.value
);
2248 get_internalvar_function (struct internalvar
*var
,
2249 struct internal_function
**result
)
2253 case INTERNALVAR_FUNCTION
:
2254 *result
= var
->u
.fn
.function
;
2263 set_internalvar_component (struct internalvar
*var
,
2264 LONGEST offset
, LONGEST bitpos
,
2265 LONGEST bitsize
, struct value
*newval
)
2268 struct gdbarch
*arch
;
2273 case INTERNALVAR_VALUE
:
2274 addr
= value_contents_writeable (var
->u
.value
).data ();
2275 arch
= var
->u
.value
->arch ();
2276 unit_size
= gdbarch_addressable_memory_unit_size (arch
);
2279 modify_field (var
->u
.value
->type (), addr
+ offset
,
2280 value_as_long (newval
), bitpos
, bitsize
);
2282 memcpy (addr
+ offset
* unit_size
, value_contents (newval
).data (),
2283 newval
->type ()->length ());
2287 /* We can never get a component of any other kind. */
2288 internal_error (_("set_internalvar_component"));
2293 set_internalvar (struct internalvar
*var
, struct value
*val
)
2295 enum internalvar_kind new_kind
;
2296 union internalvar_data new_data
= { 0 };
2298 if (var
->kind
== INTERNALVAR_FUNCTION
&& var
->u
.fn
.canonical
)
2299 error (_("Cannot overwrite convenience function %s"), var
->name
);
2301 /* Prepare new contents. */
2302 switch (check_typedef (val
->type ())->code ())
2304 case TYPE_CODE_VOID
:
2305 new_kind
= INTERNALVAR_VOID
;
2308 case TYPE_CODE_INTERNAL_FUNCTION
:
2309 gdb_assert (VALUE_LVAL (val
) == lval_internalvar
);
2310 new_kind
= INTERNALVAR_FUNCTION
;
2311 get_internalvar_function (VALUE_INTERNALVAR (val
),
2312 &new_data
.fn
.function
);
2313 /* Copies created here are never canonical. */
2317 new_kind
= INTERNALVAR_VALUE
;
2318 struct value
*copy
= value_copy (val
);
2319 copy
->m_modifiable
= 1;
2321 /* Force the value to be fetched from the target now, to avoid problems
2322 later when this internalvar is referenced and the target is gone or
2324 if (value_lazy (copy
))
2325 value_fetch_lazy (copy
);
2327 /* Release the value from the value chain to prevent it from being
2328 deleted by free_all_values. From here on this function should not
2329 call error () until new_data is installed into the var->u to avoid
2331 new_data
.value
= release_value (copy
).release ();
2333 /* Internal variables which are created from values with a dynamic
2334 location don't need the location property of the origin anymore.
2335 The resolved dynamic location is used prior then any other address
2336 when accessing the value.
2337 If we keep it, we would still refer to the origin value.
2338 Remove the location property in case it exist. */
2339 new_data
.value
->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION
);
2344 /* Clean up old contents. */
2345 clear_internalvar (var
);
2348 var
->kind
= new_kind
;
2350 /* End code which must not call error(). */
2354 set_internalvar_integer (struct internalvar
*var
, LONGEST l
)
2356 /* Clean up old contents. */
2357 clear_internalvar (var
);
2359 var
->kind
= INTERNALVAR_INTEGER
;
2360 var
->u
.integer
.type
= NULL
;
2361 var
->u
.integer
.val
= l
;
2365 set_internalvar_string (struct internalvar
*var
, const char *string
)
2367 /* Clean up old contents. */
2368 clear_internalvar (var
);
2370 var
->kind
= INTERNALVAR_STRING
;
2371 var
->u
.string
= xstrdup (string
);
2375 set_internalvar_function (struct internalvar
*var
, struct internal_function
*f
)
2377 /* Clean up old contents. */
2378 clear_internalvar (var
);
2380 var
->kind
= INTERNALVAR_FUNCTION
;
2381 var
->u
.fn
.function
= f
;
2382 var
->u
.fn
.canonical
= 1;
2383 /* Variables installed here are always the canonical version. */
2387 clear_internalvar (struct internalvar
*var
)
2389 /* Clean up old contents. */
2392 case INTERNALVAR_VALUE
:
2393 value_decref (var
->u
.value
);
2396 case INTERNALVAR_STRING
:
2397 xfree (var
->u
.string
);
2404 /* Reset to void kind. */
2405 var
->kind
= INTERNALVAR_VOID
;
2409 internalvar_name (const struct internalvar
*var
)
2414 static struct internal_function
*
2415 create_internal_function (const char *name
,
2416 internal_function_fn handler
, void *cookie
)
2418 struct internal_function
*ifn
= XNEW (struct internal_function
);
2420 ifn
->name
= xstrdup (name
);
2421 ifn
->handler
= handler
;
2422 ifn
->cookie
= cookie
;
2427 value_internal_function_name (struct value
*val
)
2429 struct internal_function
*ifn
;
2432 gdb_assert (VALUE_LVAL (val
) == lval_internalvar
);
2433 result
= get_internalvar_function (VALUE_INTERNALVAR (val
), &ifn
);
2434 gdb_assert (result
);
2440 call_internal_function (struct gdbarch
*gdbarch
,
2441 const struct language_defn
*language
,
2442 struct value
*func
, int argc
, struct value
**argv
)
2444 struct internal_function
*ifn
;
2447 gdb_assert (VALUE_LVAL (func
) == lval_internalvar
);
2448 result
= get_internalvar_function (VALUE_INTERNALVAR (func
), &ifn
);
2449 gdb_assert (result
);
2451 return (*ifn
->handler
) (gdbarch
, language
, ifn
->cookie
, argc
, argv
);
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (const char *command, int from_tty)
{
  /* Do nothing.  */
}
2464 /* Helper function that does the work for add_internal_function. */
2466 static struct cmd_list_element
*
2467 do_add_internal_function (const char *name
, const char *doc
,
2468 internal_function_fn handler
, void *cookie
)
2470 struct internal_function
*ifn
;
2471 struct internalvar
*var
= lookup_internalvar (name
);
2473 ifn
= create_internal_function (name
, handler
, cookie
);
2474 set_internalvar_function (var
, ifn
);
2476 return add_cmd (name
, no_class
, function_command
, doc
, &functionlist
);
2482 add_internal_function (const char *name
, const char *doc
,
2483 internal_function_fn handler
, void *cookie
)
2485 do_add_internal_function (name
, doc
, handler
, cookie
);
2491 add_internal_function (gdb::unique_xmalloc_ptr
<char> &&name
,
2492 gdb::unique_xmalloc_ptr
<char> &&doc
,
2493 internal_function_fn handler
, void *cookie
)
2495 struct cmd_list_element
*cmd
2496 = do_add_internal_function (name
.get (), doc
.get (), handler
, cookie
);
2498 cmd
->doc_allocated
= 1;
2500 cmd
->name_allocated
= 1;
2503 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2504 prevent cycles / duplicates. */
2507 preserve_one_value (struct value
*value
, struct objfile
*objfile
,
2508 htab_t copied_types
)
2510 if (value
->m_type
->objfile_owner () == objfile
)
2511 value
->m_type
= copy_type_recursive (value
->m_type
, copied_types
);
2513 if (value
->m_enclosing_type
->objfile_owner () == objfile
)
2514 value
->m_enclosing_type
= copy_type_recursive (value
->m_enclosing_type
,
2518 /* Likewise for internal variable VAR. */
2521 preserve_one_internalvar (struct internalvar
*var
, struct objfile
*objfile
,
2522 htab_t copied_types
)
2526 case INTERNALVAR_INTEGER
:
2527 if (var
->u
.integer
.type
2528 && var
->u
.integer
.type
->objfile_owner () == objfile
)
2530 = copy_type_recursive (var
->u
.integer
.type
, copied_types
);
2533 case INTERNALVAR_VALUE
:
2534 preserve_one_value (var
->u
.value
, objfile
, copied_types
);
2539 /* Make sure that all types and values referenced by VAROBJ are updated before
2540 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2544 preserve_one_varobj (struct varobj
*varobj
, struct objfile
*objfile
,
2545 htab_t copied_types
)
2547 if (varobj
->type
->is_objfile_owned ()
2548 && varobj
->type
->objfile_owner () == objfile
)
2551 = copy_type_recursive (varobj
->type
, copied_types
);
2554 if (varobj
->value
!= nullptr)
2555 preserve_one_value (varobj
->value
.get (), objfile
, copied_types
);
2558 /* Update the internal variables and value history when OBJFILE is
2559 discarded; we must copy the types out of the objfile. New global types
2560 will be created for every convenience variable which currently points to
2561 this objfile's types, and the convenience variables will be adjusted to
2562 use the new global types. */
2565 preserve_values (struct objfile
*objfile
)
2567 struct internalvar
*var
;
2569 /* Create the hash table. We allocate on the objfile's obstack, since
2570 it is soon to be deleted. */
2571 htab_up copied_types
= create_copied_types_hash ();
2573 for (const value_ref_ptr
&item
: value_history
)
2574 preserve_one_value (item
.get (), objfile
, copied_types
.get ());
2576 for (var
= internalvars
; var
; var
= var
->next
)
2577 preserve_one_internalvar (var
, objfile
, copied_types
.get ());
2579 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2580 all_root_varobjs ([&copied_types
, objfile
] (struct varobj
*varobj
)
2582 preserve_one_varobj (varobj
, objfile
,
2583 copied_types
.get ());
2586 preserve_ext_lang_values (objfile
, copied_types
.get ());
2590 show_convenience (const char *ignore
, int from_tty
)
2592 struct gdbarch
*gdbarch
= get_current_arch ();
2593 struct internalvar
*var
;
2595 struct value_print_options opts
;
2597 get_user_print_options (&opts
);
2598 for (var
= internalvars
; var
; var
= var
->next
)
2605 gdb_printf (("$%s = "), var
->name
);
2611 val
= value_of_internalvar (gdbarch
, var
);
2612 value_print (val
, gdb_stdout
, &opts
);
2614 catch (const gdb_exception_error
&ex
)
2616 fprintf_styled (gdb_stdout
, metadata_style
.style (),
2617 _("<error: %s>"), ex
.what ());
2620 gdb_printf (("\n"));
2624 /* This text does not mention convenience functions on purpose.
2625 The user can't create them except via Python, and if Python support
2626 is installed this message will never be printed ($_streq will
2628 gdb_printf (_("No debugger convenience variables now defined.\n"
2629 "Convenience variables have "
2630 "names starting with \"$\";\n"
2631 "use \"set\" as in \"set "
2632 "$foo = 5\" to define them.\n"));
2640 value_from_xmethod (xmethod_worker_up
&&worker
)
2644 v
= allocate_value (builtin_type (target_gdbarch ())->xmethod
);
2645 v
->m_lval
= lval_xcallable
;
2646 v
->m_location
.xm_worker
= worker
.release ();
2647 v
->m_modifiable
= 0;
2652 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2655 result_type_of_xmethod (struct value
*method
, gdb::array_view
<value
*> argv
)
2657 gdb_assert (method
->type ()->code () == TYPE_CODE_XMETHOD
2658 && method
->m_lval
== lval_xcallable
&& !argv
.empty ());
2660 return method
->m_location
.xm_worker
->get_result_type (argv
[0], argv
.slice (1));
2663 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2666 call_xmethod (struct value
*method
, gdb::array_view
<value
*> argv
)
2668 gdb_assert (method
->type ()->code () == TYPE_CODE_XMETHOD
2669 && method
->m_lval
== lval_xcallable
&& !argv
.empty ());
2671 return method
->m_location
.xm_worker
->invoke (argv
[0], argv
.slice (1));
2674 /* Extract a value as a C number (either long or double).
2675 Knows how to convert fixed values to double, or
2676 floating values to long.
2677 Does not deallocate the value. */
2680 value_as_long (struct value
*val
)
2682 /* This coerces arrays and functions, which is necessary (e.g.
2683 in disassemble_command). It also dereferences references, which
2684 I suspect is the most logical thing to do. */
2685 val
= coerce_array (val
);
2686 return unpack_long (val
->type (), value_contents (val
).data ());
2689 /* Extract a value as a C pointer. Does not deallocate the value.
2690 Note that val's type may not actually be a pointer; value_as_long
2691 handles all the cases. */
2693 value_as_address (struct value
*val
)
2695 struct gdbarch
*gdbarch
= val
->type ()->arch ();
2697 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2698 whether we want this to be true eventually. */
2700 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2701 non-address (e.g. argument to "signal", "info break", etc.), or
2702 for pointers to char, in which the low bits *are* significant. */
2703 return gdbarch_addr_bits_remove (gdbarch
, value_as_long (val
));
2706 /* There are several targets (IA-64, PowerPC, and others) which
2707 don't represent pointers to functions as simply the address of
2708 the function's entry point. For example, on the IA-64, a
2709 function pointer points to a two-word descriptor, generated by
2710 the linker, which contains the function's entry point, and the
2711 value the IA-64 "global pointer" register should have --- to
2712 support position-independent code. The linker generates
2713 descriptors only for those functions whose addresses are taken.
2715 On such targets, it's difficult for GDB to convert an arbitrary
2716 function address into a function pointer; it has to either find
2717 an existing descriptor for that function, or call malloc and
2718 build its own. On some targets, it is impossible for GDB to
2719 build a descriptor at all: the descriptor must contain a jump
2720 instruction; data memory cannot be executed; and code memory
2723 Upon entry to this function, if VAL is a value of type `function'
2724 (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
2725 value_address (val) is the address of the function. This is what
2726 you'll get if you evaluate an expression like `main'. The call
2727 to COERCE_ARRAY below actually does all the usual unary
2728 conversions, which includes converting values of type `function'
2729 to `pointer to function'. This is the challenging conversion
2730 discussed above. Then, `unpack_long' will convert that pointer
2731 back into an address.
2733 So, suppose the user types `disassemble foo' on an architecture
2734 with a strange function pointer representation, on which GDB
2735 cannot build its own descriptors, and suppose further that `foo'
2736 has no linker-built descriptor. The address->pointer conversion
2737 will signal an error and prevent the command from running, even
2738 though the next step would have been to convert the pointer
2739 directly back into the same address.
2741 The following shortcut avoids this whole mess. If VAL is a
2742 function, just return its address directly. */
2743 if (val
->type ()->code () == TYPE_CODE_FUNC
2744 || val
->type ()->code () == TYPE_CODE_METHOD
)
2745 return value_address (val
);
2747 val
= coerce_array (val
);
2749 /* Some architectures (e.g. Harvard), map instruction and data
2750 addresses onto a single large unified address space. For
2751 instance: An architecture may consider a large integer in the
2752 range 0x10000000 .. 0x1000ffff to already represent a data
2753 addresses (hence not need a pointer to address conversion) while
2754 a small integer would still need to be converted integer to
2755 pointer to address. Just assume such architectures handle all
2756 integer conversions in a single function. */
2760 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2761 must admonish GDB hackers to make sure its behavior matches the
2762 compiler's, whenever possible.
2764 In general, I think GDB should evaluate expressions the same way
2765 the compiler does. When the user copies an expression out of
2766 their source code and hands it to a `print' command, they should
2767 get the same value the compiler would have computed. Any
2768 deviation from this rule can cause major confusion and annoyance,
2769 and needs to be justified carefully. In other words, GDB doesn't
2770 really have the freedom to do these conversions in clever and
2773 AndrewC pointed out that users aren't complaining about how GDB
2774 casts integers to pointers; they are complaining that they can't
2775 take an address from a disassembly listing and give it to `x/i'.
2776 This is certainly important.
2778 Adding an architecture method like integer_to_address() certainly
2779 makes it possible for GDB to "get it right" in all circumstances
2780 --- the target has complete control over how things get done, so
2781 people can Do The Right Thing for their target without breaking
2782 anyone else. The standard doesn't specify how integers get
2783 converted to pointers; usually, the ABI doesn't either, but
2784 ABI-specific code is a more reasonable place to handle it. */
2786 if (!val
->type ()->is_pointer_or_reference ()
2787 && gdbarch_integer_to_address_p (gdbarch
))
2788 return gdbarch_integer_to_address (gdbarch
, val
->type (),
2789 value_contents (val
).data ());
2791 return unpack_long (val
->type (), value_contents (val
).data ());
2795 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2796 as a long, or as a double, assuming the raw data is described
2797 by type TYPE. Knows how to convert different sizes of values
2798 and can convert between fixed and floating point. We don't assume
2799 any alignment for the raw data. Return value is in host byte order.
2801 If you want functions and arrays to be coerced to pointers, and
2802 references to be dereferenced, call value_as_long() instead.
2804 C++: It is assumed that the front-end has taken care of
2805 all matters concerning pointers to members. A pointer
2806 to member which reaches here is considered to be equivalent
2807 to an INT (or some size). After all, it is only an offset. */
2810 unpack_long (struct type
*type
, const gdb_byte
*valaddr
)
2812 if (is_fixed_point_type (type
))
2813 type
= type
->fixed_point_type_base_type ();
2815 enum bfd_endian byte_order
= type_byte_order (type
);
2816 enum type_code code
= type
->code ();
2817 int len
= type
->length ();
2818 int nosign
= type
->is_unsigned ();
2822 case TYPE_CODE_TYPEDEF
:
2823 return unpack_long (check_typedef (type
), valaddr
);
2824 case TYPE_CODE_ENUM
:
2825 case TYPE_CODE_FLAGS
:
2826 case TYPE_CODE_BOOL
:
2828 case TYPE_CODE_CHAR
:
2829 case TYPE_CODE_RANGE
:
2830 case TYPE_CODE_MEMBERPTR
:
2834 if (type
->bit_size_differs_p ())
2836 unsigned bit_off
= type
->bit_offset ();
2837 unsigned bit_size
= type
->bit_size ();
2840 /* unpack_bits_as_long doesn't handle this case the
2841 way we'd like, so handle it here. */
2845 result
= unpack_bits_as_long (type
, valaddr
, bit_off
, bit_size
);
2850 result
= extract_unsigned_integer (valaddr
, len
, byte_order
);
2852 result
= extract_signed_integer (valaddr
, len
, byte_order
);
2854 if (code
== TYPE_CODE_RANGE
)
2855 result
+= type
->bounds ()->bias
;
2860 case TYPE_CODE_DECFLOAT
:
2861 return target_float_to_longest (valaddr
, type
);
2863 case TYPE_CODE_FIXED_POINT
:
2866 vq
.read_fixed_point (gdb::make_array_view (valaddr
, len
),
2868 type
->fixed_point_scaling_factor ());
2871 mpz_tdiv_q (vz
.val
, mpq_numref (vq
.val
), mpq_denref (vq
.val
));
2872 return vz
.as_integer
<LONGEST
> ();
2877 case TYPE_CODE_RVALUE_REF
:
2878 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2879 whether we want this to be true eventually. */
2880 return extract_typed_address (valaddr
, type
);
2883 error (_("Value can't be converted to integer."));
2887 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2888 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2889 We don't assume any alignment for the raw data. Return value is in
2892 If you want functions and arrays to be coerced to pointers, and
2893 references to be dereferenced, call value_as_address() instead.
2895 C++: It is assumed that the front-end has taken care of
2896 all matters concerning pointers to members. A pointer
2897 to member which reaches here is considered to be equivalent
2898 to an INT (or some size). After all, it is only an offset. */
2901 unpack_pointer (struct type
*type
, const gdb_byte
*valaddr
)
2903 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2904 whether we want this to be true eventually. */
2905 return unpack_long (type
, valaddr
);
2909 is_floating_value (struct value
*val
)
2911 struct type
*type
= check_typedef (val
->type ());
2913 if (is_floating_type (type
))
2915 if (!target_float_is_valid (value_contents (val
).data (), type
))
2916 error (_("Invalid floating value found in program."));
2924 /* Get the value of the FIELDNO'th field (which must be static) of
2928 value_static_field (struct type
*type
, int fieldno
)
2930 struct value
*retval
;
2932 switch (type
->field (fieldno
).loc_kind ())
2934 case FIELD_LOC_KIND_PHYSADDR
:
2935 retval
= value_at_lazy (type
->field (fieldno
).type (),
2936 type
->field (fieldno
).loc_physaddr ());
2938 case FIELD_LOC_KIND_PHYSNAME
:
2940 const char *phys_name
= type
->field (fieldno
).loc_physname ();
2941 /* type->field (fieldno).name (); */
2942 struct block_symbol sym
= lookup_symbol (phys_name
, 0, VAR_DOMAIN
, 0);
2944 if (sym
.symbol
== NULL
)
2946 /* With some compilers, e.g. HP aCC, static data members are
2947 reported as non-debuggable symbols. */
2948 struct bound_minimal_symbol msym
2949 = lookup_minimal_symbol (phys_name
, NULL
, NULL
);
2950 struct type
*field_type
= type
->field (fieldno
).type ();
2953 retval
= allocate_optimized_out_value (field_type
);
2955 retval
= value_at_lazy (field_type
, msym
.value_address ());
2958 retval
= value_of_variable (sym
.symbol
, sym
.block
);
2962 gdb_assert_not_reached ("unexpected field location kind");
2968 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2969 You have to be careful here, since the size of the data area for the value
2970 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2971 than the old enclosing type, you have to allocate more space for the
2975 set_value_enclosing_type (struct value
*val
, struct type
*new_encl_type
)
2977 if (new_encl_type
->length () > value_enclosing_type (val
)->length ())
2979 check_type_length_before_alloc (new_encl_type
);
2981 .reset ((gdb_byte
*) xrealloc (val
->m_contents
.release (),
2982 new_encl_type
->length ()));
2985 val
->m_enclosing_type
= new_encl_type
;
2988 /* Given a value ARG1 (offset by OFFSET bytes)
2989 of a struct or union type ARG_TYPE,
2990 extract and return the value of one of its (non-static) fields.
2991 FIELDNO says which field. */
2994 value_primitive_field (struct value
*arg1
, LONGEST offset
,
2995 int fieldno
, struct type
*arg_type
)
2999 struct gdbarch
*arch
= arg1
->arch ();
3000 int unit_size
= gdbarch_addressable_memory_unit_size (arch
);
3002 arg_type
= check_typedef (arg_type
);
3003 type
= arg_type
->field (fieldno
).type ();
3005 /* Call check_typedef on our type to make sure that, if TYPE
3006 is a TYPE_CODE_TYPEDEF, its length is set to the length
3007 of the target type instead of zero. However, we do not
3008 replace the typedef type by the target type, because we want
3009 to keep the typedef in order to be able to print the type
3010 description correctly. */
3011 check_typedef (type
);
3013 if (TYPE_FIELD_BITSIZE (arg_type
, fieldno
))
3015 /* Handle packed fields.
3017 Create a new value for the bitfield, with bitpos and bitsize
3018 set. If possible, arrange offset and bitpos so that we can
3019 do a single aligned read of the size of the containing type.
3020 Otherwise, adjust offset to the byte containing the first
3021 bit. Assume that the address, offset, and embedded offset
3022 are sufficiently aligned. */
3024 LONGEST bitpos
= arg_type
->field (fieldno
).loc_bitpos ();
3025 LONGEST container_bitsize
= type
->length () * 8;
3027 v
= allocate_value_lazy (type
);
3028 v
->m_bitsize
= TYPE_FIELD_BITSIZE (arg_type
, fieldno
);
3029 if ((bitpos
% container_bitsize
) + v
->m_bitsize
<= container_bitsize
3030 && type
->length () <= (int) sizeof (LONGEST
))
3031 v
->m_bitpos
= bitpos
% container_bitsize
;
3033 v
->m_bitpos
= bitpos
% 8;
3034 v
->m_offset
= (value_embedded_offset (arg1
)
3036 + (bitpos
- v
->m_bitpos
) / 8);
3037 v
->set_parent (arg1
);
3038 if (!value_lazy (arg1
))
3039 value_fetch_lazy (v
);
3041 else if (fieldno
< TYPE_N_BASECLASSES (arg_type
))
3043 /* This field is actually a base subobject, so preserve the
3044 entire object's contents for later references to virtual
3048 /* Lazy register values with offsets are not supported. */
3049 if (VALUE_LVAL (arg1
) == lval_register
&& value_lazy (arg1
))
3050 value_fetch_lazy (arg1
);
3052 /* We special case virtual inheritance here because this
3053 requires access to the contents, which we would rather avoid
3054 for references to ordinary fields of unavailable values. */
3055 if (BASETYPE_VIA_VIRTUAL (arg_type
, fieldno
))
3056 boffset
= baseclass_offset (arg_type
, fieldno
,
3057 value_contents (arg1
).data (),
3058 value_embedded_offset (arg1
),
3059 value_address (arg1
),
3062 boffset
= arg_type
->field (fieldno
).loc_bitpos () / 8;
3064 if (value_lazy (arg1
))
3065 v
= allocate_value_lazy (value_enclosing_type (arg1
));
3068 v
= allocate_value (value_enclosing_type (arg1
));
3069 value_contents_copy_raw (v
, 0, arg1
, 0,
3070 value_enclosing_type (arg1
)->length ());
3073 v
->m_offset
= arg1
->offset ();
3074 v
->m_embedded_offset
= offset
+ value_embedded_offset (arg1
) + boffset
;
3076 else if (NULL
!= TYPE_DATA_LOCATION (type
))
3078 /* Field is a dynamic data member. */
3080 gdb_assert (0 == offset
);
3081 /* We expect an already resolved data location. */
3082 gdb_assert (PROP_CONST
== TYPE_DATA_LOCATION_KIND (type
));
3083 /* For dynamic data types defer memory allocation
3084 until we actual access the value. */
3085 v
= allocate_value_lazy (type
);
3089 /* Plain old data member */
3090 offset
+= (arg_type
->field (fieldno
).loc_bitpos ()
3091 / (HOST_CHAR_BIT
* unit_size
));
3093 /* Lazy register values with offsets are not supported. */
3094 if (VALUE_LVAL (arg1
) == lval_register
&& value_lazy (arg1
))
3095 value_fetch_lazy (arg1
);
3097 if (value_lazy (arg1
))
3098 v
= allocate_value_lazy (type
);
3101 v
= allocate_value (type
);
3102 value_contents_copy_raw (v
, value_embedded_offset (v
),
3103 arg1
, value_embedded_offset (arg1
) + offset
,
3104 type_length_units (type
));
3106 v
->m_offset
= (arg1
->offset () + offset
3107 + value_embedded_offset (arg1
));
3109 set_value_component_location (v
, arg1
);
3113 /* Given a value ARG1 of a struct or union type,
3114 extract and return the value of one of its (non-static) fields.
3115 FIELDNO says which field. */
3118 value_field (struct value
*arg1
, int fieldno
)
3120 return value_primitive_field (arg1
, 0, fieldno
, arg1
->type ());
3123 /* Return a non-virtual function as a value.
3124 F is the list of member functions which contains the desired method.
3125 J is an index into F which provides the desired method.
3127 We only use the symbol for its address, so be happy with either a
3128 full symbol or a minimal symbol. */
3131 value_fn_field (struct value
**arg1p
, struct fn_field
*f
,
3132 int j
, struct type
*type
,
3136 struct type
*ftype
= TYPE_FN_FIELD_TYPE (f
, j
);
3137 const char *physname
= TYPE_FN_FIELD_PHYSNAME (f
, j
);
3139 struct bound_minimal_symbol msym
;
3141 sym
= lookup_symbol (physname
, 0, VAR_DOMAIN
, 0).symbol
;
3144 msym
= lookup_bound_minimal_symbol (physname
);
3145 if (msym
.minsym
== NULL
)
3149 v
= allocate_value (ftype
);
3150 VALUE_LVAL (v
) = lval_memory
;
3153 set_value_address (v
, sym
->value_block ()->entry_pc ());
3157 /* The minimal symbol might point to a function descriptor;
3158 resolve it to the actual code address instead. */
3159 struct objfile
*objfile
= msym
.objfile
;
3160 struct gdbarch
*gdbarch
= objfile
->arch ();
3162 set_value_address (v
,
3163 gdbarch_convert_from_func_ptr_addr
3164 (gdbarch
, msym
.value_address (),
3165 current_inferior ()->top_target ()));
3170 if (type
!= (*arg1p
)->type ())
3171 *arg1p
= value_ind (value_cast (lookup_pointer_type (type
),
3172 value_addr (*arg1p
)));
3174 /* Move the `this' pointer according to the offset.
3175 (*arg1p)->offset () += offset; */
3186 unpack_bits_as_long (struct type
*field_type
, const gdb_byte
*valaddr
,
3187 LONGEST bitpos
, LONGEST bitsize
)
3189 enum bfd_endian byte_order
= type_byte_order (field_type
);
3194 LONGEST read_offset
;
3196 /* Read the minimum number of bytes required; there may not be
3197 enough bytes to read an entire ULONGEST. */
3198 field_type
= check_typedef (field_type
);
3200 bytes_read
= ((bitpos
% 8) + bitsize
+ 7) / 8;
3203 bytes_read
= field_type
->length ();
3204 bitsize
= 8 * bytes_read
;
3207 read_offset
= bitpos
/ 8;
3209 val
= extract_unsigned_integer (valaddr
+ read_offset
,
3210 bytes_read
, byte_order
);
3212 /* Extract bits. See comment above. */
3214 if (byte_order
== BFD_ENDIAN_BIG
)
3215 lsbcount
= (bytes_read
* 8 - bitpos
% 8 - bitsize
);
3217 lsbcount
= (bitpos
% 8);
3220 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3221 If the field is signed, and is negative, then sign extend. */
3223 if (bitsize
< 8 * (int) sizeof (val
))
3225 valmask
= (((ULONGEST
) 1) << bitsize
) - 1;
3227 if (!field_type
->is_unsigned ())
3229 if (val
& (valmask
^ (valmask
>> 1)))
3239 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3240 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3241 ORIGINAL_VALUE, which must not be NULL. See
3242 unpack_value_bits_as_long for more details. */
3245 unpack_value_field_as_long (struct type
*type
, const gdb_byte
*valaddr
,
3246 LONGEST embedded_offset
, int fieldno
,
3247 const struct value
*val
, LONGEST
*result
)
3249 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3250 int bitsize
= TYPE_FIELD_BITSIZE (type
, fieldno
);
3251 struct type
*field_type
= type
->field (fieldno
).type ();
3254 gdb_assert (val
!= NULL
);
3256 bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3257 if (value_bits_any_optimized_out (val
, bit_offset
, bitsize
)
3258 || !value_bits_available (val
, bit_offset
, bitsize
))
3261 *result
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3266 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3267 object at VALADDR. See unpack_bits_as_long for more details. */
3270 unpack_field_as_long (struct type
*type
, const gdb_byte
*valaddr
, int fieldno
)
3272 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3273 int bitsize
= TYPE_FIELD_BITSIZE (type
, fieldno
);
3274 struct type
*field_type
= type
->field (fieldno
).type ();
3276 return unpack_bits_as_long (field_type
, valaddr
, bitpos
, bitsize
);
3279 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3280 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3281 the contents in DEST_VAL, zero or sign extending if the type of
3282 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3283 VAL. If the VAL's contents required to extract the bitfield from
3284 are unavailable/optimized out, DEST_VAL is correspondingly
3285 marked unavailable/optimized out. */
3288 unpack_value_bitfield (struct value
*dest_val
,
3289 LONGEST bitpos
, LONGEST bitsize
,
3290 const gdb_byte
*valaddr
, LONGEST embedded_offset
,
3291 const struct value
*val
)
3293 enum bfd_endian byte_order
;
3296 struct type
*field_type
= dest_val
->type ();
3298 byte_order
= type_byte_order (field_type
);
3300 /* First, unpack and sign extend the bitfield as if it was wholly
3301 valid. Optimized out/unavailable bits are read as zero, but
3302 that's OK, as they'll end up marked below. If the VAL is
3303 wholly-invalid we may have skipped allocating its contents,
3304 though. See allocate_optimized_out_value. */
3305 if (valaddr
!= NULL
)
3309 num
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3311 store_signed_integer (value_contents_raw (dest_val
).data (),
3312 field_type
->length (), byte_order
, num
);
3315 /* Now copy the optimized out / unavailability ranges to the right
3317 src_bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3318 if (byte_order
== BFD_ENDIAN_BIG
)
3319 dst_bit_offset
= field_type
->length () * TARGET_CHAR_BIT
- bitsize
;
3322 value_ranges_copy_adjusted (dest_val
, dst_bit_offset
,
3323 val
, src_bit_offset
, bitsize
);
3326 /* Return a new value with type TYPE, which is FIELDNO field of the
3327 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3328 of VAL. If the VAL's contents required to extract the bitfield
3329 from are unavailable/optimized out, the new value is
3330 correspondingly marked unavailable/optimized out. */
3333 value_field_bitfield (struct type
*type
, int fieldno
,
3334 const gdb_byte
*valaddr
,
3335 LONGEST embedded_offset
, const struct value
*val
)
3337 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3338 int bitsize
= TYPE_FIELD_BITSIZE (type
, fieldno
);
3339 struct value
*res_val
= allocate_value (type
->field (fieldno
).type ());
3341 unpack_value_bitfield (res_val
, bitpos
, bitsize
,
3342 valaddr
, embedded_offset
, val
);
3347 /* Modify the value of a bitfield. ADDR points to a block of memory in
3348 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3349 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3350 indicate which bits (in target bit order) comprise the bitfield.
3351 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3352 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3355 modify_field (struct type
*type
, gdb_byte
*addr
,
3356 LONGEST fieldval
, LONGEST bitpos
, LONGEST bitsize
)
3358 enum bfd_endian byte_order
= type_byte_order (type
);
3360 ULONGEST mask
= (ULONGEST
) -1 >> (8 * sizeof (ULONGEST
) - bitsize
);
3363 /* Normalize BITPOS. */
3367 /* If a negative fieldval fits in the field in question, chop
3368 off the sign extension bits. */
3369 if ((~fieldval
& ~(mask
>> 1)) == 0)
3372 /* Warn if value is too big to fit in the field in question. */
3373 if (0 != (fieldval
& ~mask
))
3375 /* FIXME: would like to include fieldval in the message, but
3376 we don't have a sprintf_longest. */
3377 warning (_("Value does not fit in %s bits."), plongest (bitsize
));
3379 /* Truncate it, otherwise adjoining fields may be corrupted. */
3383 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3384 false valgrind reports. */
3386 bytesize
= (bitpos
+ bitsize
+ 7) / 8;
3387 oword
= extract_unsigned_integer (addr
, bytesize
, byte_order
);
3389 /* Shifting for bit field depends on endianness of the target machine. */
3390 if (byte_order
== BFD_ENDIAN_BIG
)
3391 bitpos
= bytesize
* 8 - bitpos
- bitsize
;
3393 oword
&= ~(mask
<< bitpos
);
3394 oword
|= fieldval
<< bitpos
;
3396 store_unsigned_integer (addr
, bytesize
, byte_order
, oword
);
3399 /* Pack NUM into BUF using a target format of TYPE. */
3402 pack_long (gdb_byte
*buf
, struct type
*type
, LONGEST num
)
3404 enum bfd_endian byte_order
= type_byte_order (type
);
3407 type
= check_typedef (type
);
3408 len
= type
->length ();
3410 switch (type
->code ())
3412 case TYPE_CODE_RANGE
:
3413 num
-= type
->bounds ()->bias
;
3416 case TYPE_CODE_CHAR
:
3417 case TYPE_CODE_ENUM
:
3418 case TYPE_CODE_FLAGS
:
3419 case TYPE_CODE_BOOL
:
3420 case TYPE_CODE_MEMBERPTR
:
3421 if (type
->bit_size_differs_p ())
3423 unsigned bit_off
= type
->bit_offset ();
3424 unsigned bit_size
= type
->bit_size ();
3425 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3428 store_signed_integer (buf
, len
, byte_order
, num
);
3432 case TYPE_CODE_RVALUE_REF
:
3434 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3438 case TYPE_CODE_DECFLOAT
:
3439 target_float_from_longest (buf
, type
, num
);
3443 error (_("Unexpected type (%d) encountered for integer constant."),
3449 /* Pack NUM into BUF using a target format of TYPE. */
3452 pack_unsigned_long (gdb_byte
*buf
, struct type
*type
, ULONGEST num
)
3455 enum bfd_endian byte_order
;
3457 type
= check_typedef (type
);
3458 len
= type
->length ();
3459 byte_order
= type_byte_order (type
);
3461 switch (type
->code ())
3464 case TYPE_CODE_CHAR
:
3465 case TYPE_CODE_ENUM
:
3466 case TYPE_CODE_FLAGS
:
3467 case TYPE_CODE_BOOL
:
3468 case TYPE_CODE_RANGE
:
3469 case TYPE_CODE_MEMBERPTR
:
3470 if (type
->bit_size_differs_p ())
3472 unsigned bit_off
= type
->bit_offset ();
3473 unsigned bit_size
= type
->bit_size ();
3474 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3477 store_unsigned_integer (buf
, len
, byte_order
, num
);
3481 case TYPE_CODE_RVALUE_REF
:
3483 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3487 case TYPE_CODE_DECFLOAT
:
3488 target_float_from_ulongest (buf
, type
, num
);
3492 error (_("Unexpected type (%d) encountered "
3493 "for unsigned integer constant."),
3499 /* Create a value of type TYPE that is zero, and return it. */
3502 value_zero (struct type
*type
, enum lval_type lv
)
3504 struct value
*val
= allocate_value_lazy (type
);
3506 VALUE_LVAL (val
) = (lv
== lval_computed
? not_lval
: lv
);
3507 val
->m_is_zero
= true;
3511 /* Convert C numbers into newly allocated values. */
3514 value_from_longest (struct type
*type
, LONGEST num
)
3516 struct value
*val
= allocate_value (type
);
3518 pack_long (value_contents_raw (val
).data (), type
, num
);
3523 /* Convert C unsigned numbers into newly allocated values. */
3526 value_from_ulongest (struct type
*type
, ULONGEST num
)
3528 struct value
*val
= allocate_value (type
);
3530 pack_unsigned_long (value_contents_raw (val
).data (), type
, num
);
3536 /* Create a value representing a pointer of type TYPE to the address
3540 value_from_pointer (struct type
*type
, CORE_ADDR addr
)
3542 struct value
*val
= allocate_value (type
);
3544 store_typed_address (value_contents_raw (val
).data (),
3545 check_typedef (type
), addr
);
3549 /* Create and return a value object of TYPE containing the value D. The
3550 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3551 it is converted to target format. */
3554 value_from_host_double (struct type
*type
, double d
)
3556 struct value
*value
= allocate_value (type
);
3557 gdb_assert (type
->code () == TYPE_CODE_FLT
);
3558 target_float_from_host_double (value_contents_raw (value
).data (),
3563 /* Create a value of type TYPE whose contents come from VALADDR, if it
3564 is non-null, and whose memory address (in the inferior) is
3565 ADDRESS. The type of the created value may differ from the passed
3566 type TYPE. Make sure to retrieve values new type after this call.
3567 Note that TYPE is not passed through resolve_dynamic_type; this is
3568 a special API intended for use only by Ada. */
3571 value_from_contents_and_address_unresolved (struct type
*type
,
3572 const gdb_byte
*valaddr
,
3577 if (valaddr
== NULL
)
3578 v
= allocate_value_lazy (type
);
3580 v
= value_from_contents (type
, valaddr
);
3581 VALUE_LVAL (v
) = lval_memory
;
3582 set_value_address (v
, address
);
3586 /* Create a value of type TYPE whose contents come from VALADDR, if it
3587 is non-null, and whose memory address (in the inferior) is
3588 ADDRESS. The type of the created value may differ from the passed
3589 type TYPE. Make sure to retrieve values new type after this call. */
3592 value_from_contents_and_address (struct type
*type
,
3593 const gdb_byte
*valaddr
,
3596 gdb::array_view
<const gdb_byte
> view
;
3597 if (valaddr
!= nullptr)
3598 view
= gdb::make_array_view (valaddr
, type
->length ());
3599 struct type
*resolved_type
= resolve_dynamic_type (type
, view
, address
);
3600 struct type
*resolved_type_no_typedef
= check_typedef (resolved_type
);
3603 if (valaddr
== NULL
)
3604 v
= allocate_value_lazy (resolved_type
);
3606 v
= value_from_contents (resolved_type
, valaddr
);
3607 if (TYPE_DATA_LOCATION (resolved_type_no_typedef
) != NULL
3608 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef
) == PROP_CONST
)
3609 address
= TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef
);
3610 VALUE_LVAL (v
) = lval_memory
;
3611 set_value_address (v
, address
);
3615 /* Create a value of type TYPE holding the contents CONTENTS.
3616 The new value is `not_lval'. */
3619 value_from_contents (struct type
*type
, const gdb_byte
*contents
)
3621 struct value
*result
;
3623 result
= allocate_value (type
);
3624 memcpy (value_contents_raw (result
).data (), contents
, type
->length ());
3628 /* Extract a value from the history file. Input will be of the form
3629 $digits or $$digits. See block comment above 'write_dollar_variable'
3633 value_from_history_ref (const char *h
, const char **endp
)
3645 /* Find length of numeral string. */
3646 for (; isdigit (h
[len
]); len
++)
3649 /* Make sure numeral string is not part of an identifier. */
3650 if (h
[len
] == '_' || isalpha (h
[len
]))
3653 /* Now collect the index value. */
3658 /* For some bizarre reason, "$$" is equivalent to "$$1",
3659 rather than to "$$0" as it ought to be! */
3667 index
= -strtol (&h
[2], &local_end
, 10);
3675 /* "$" is equivalent to "$0". */
3683 index
= strtol (&h
[1], &local_end
, 10);
3688 return access_value_history (index
);
3691 /* Get the component value (offset by OFFSET bytes) of a struct or
3692 union WHOLE. Component's type is TYPE. */
3695 value_from_component (struct value
*whole
, struct type
*type
, LONGEST offset
)
3699 if (VALUE_LVAL (whole
) == lval_memory
&& value_lazy (whole
))
3700 v
= allocate_value_lazy (type
);
3703 v
= allocate_value (type
);
3704 value_contents_copy (v
, value_embedded_offset (v
),
3705 whole
, value_embedded_offset (whole
) + offset
,
3706 type_length_units (type
));
3708 v
->m_offset
= whole
->offset () + offset
+ value_embedded_offset (whole
);
3709 set_value_component_location (v
, whole
);
3717 value_from_component_bitsize (struct value
*whole
, struct type
*type
,
3718 LONGEST bit_offset
, LONGEST bit_length
)
3720 gdb_assert (!value_lazy (whole
));
3722 /* Preserve lvalue-ness if possible. This is needed to avoid
3723 array-printing failures (including crashes) when printing Ada
3724 arrays in programs compiled with -fgnat-encodings=all. */
3725 if ((bit_offset
% TARGET_CHAR_BIT
) == 0
3726 && (bit_length
% TARGET_CHAR_BIT
) == 0
3727 && bit_length
== TARGET_CHAR_BIT
* type
->length ())
3728 return value_from_component (whole
, type
, bit_offset
/ TARGET_CHAR_BIT
);
3730 struct value
*v
= allocate_value (type
);
3732 LONGEST dst_offset
= TARGET_CHAR_BIT
* value_embedded_offset (v
);
3733 if (is_scalar_type (type
) && type_byte_order (type
) == BFD_ENDIAN_BIG
)
3734 dst_offset
+= TARGET_CHAR_BIT
* type
->length () - bit_length
;
3736 value_contents_copy_raw_bitwise (v
, dst_offset
,
3739 * value_embedded_offset (whole
)
3746 coerce_ref_if_computed (const struct value
*arg
)
3748 const struct lval_funcs
*funcs
;
3750 if (!TYPE_IS_REFERENCE (check_typedef (arg
->type ())))
3753 if (value_lval_const (arg
) != lval_computed
)
3756 funcs
= value_computed_funcs (arg
);
3757 if (funcs
->coerce_ref
== NULL
)
3760 return funcs
->coerce_ref (arg
);
3763 /* Look at value.h for description. */
3766 readjust_indirect_value_type (struct value
*value
, struct type
*enc_type
,
3767 const struct type
*original_type
,
3768 struct value
*original_value
,
3769 CORE_ADDR original_value_address
)
3771 gdb_assert (original_type
->is_pointer_or_reference ());
3773 struct type
*original_target_type
= original_type
->target_type ();
3774 gdb::array_view
<const gdb_byte
> view
;
3775 struct type
*resolved_original_target_type
3776 = resolve_dynamic_type (original_target_type
, view
,
3777 original_value_address
);
3779 /* Re-adjust type. */
3780 value
->deprecated_set_type (resolved_original_target_type
);
3782 /* Add embedding info. */
3783 set_value_enclosing_type (value
, enc_type
);
3784 set_value_embedded_offset (value
, value_pointed_to_offset (original_value
));
3786 /* We may be pointing to an object of some derived type. */
3787 return value_full_object (value
, NULL
, 0, 0, 0);
3791 coerce_ref (struct value
*arg
)
3793 struct type
*value_type_arg_tmp
= check_typedef (arg
->type ());
3794 struct value
*retval
;
3795 struct type
*enc_type
;
3797 retval
= coerce_ref_if_computed (arg
);
3801 if (!TYPE_IS_REFERENCE (value_type_arg_tmp
))
3804 enc_type
= check_typedef (value_enclosing_type (arg
));
3805 enc_type
= enc_type
->target_type ();
3807 CORE_ADDR addr
= unpack_pointer (arg
->type (), value_contents (arg
).data ());
3808 retval
= value_at_lazy (enc_type
, addr
);
3809 enc_type
= retval
->type ();
3810 return readjust_indirect_value_type (retval
, enc_type
, value_type_arg_tmp
,
3815 coerce_array (struct value
*arg
)
3819 arg
= coerce_ref (arg
);
3820 type
= check_typedef (arg
->type ());
3822 switch (type
->code ())
3824 case TYPE_CODE_ARRAY
:
3825 if (!type
->is_vector () && current_language
->c_style_arrays_p ())
3826 arg
= value_coerce_array (arg
);
3828 case TYPE_CODE_FUNC
:
3829 arg
= value_coerce_function (arg
);
3836 /* Return the return value convention that will be used for the
3839 enum return_value_convention
3840 struct_return_convention (struct gdbarch
*gdbarch
,
3841 struct value
*function
, struct type
*value_type
)
3843 enum type_code code
= value_type
->code ();
3845 if (code
== TYPE_CODE_ERROR
)
3846 error (_("Function return type unknown."));
3848 /* Probe the architecture for the return-value convention. */
3849 return gdbarch_return_value_as_value (gdbarch
, function
, value_type
,
3853 /* Return true if the function returning the specified type is using
3854 the convention of returning structures in memory (passing in the
3855 address as a hidden first parameter). */
3858 using_struct_return (struct gdbarch
*gdbarch
,
3859 struct value
*function
, struct type
*value_type
)
3861 if (value_type
->code () == TYPE_CODE_VOID
)
3862 /* A void return value is never in memory. See also corresponding
3863 code in "print_return_value". */
3866 return (struct_return_convention (gdbarch
, function
, value_type
)
3867 != RETURN_VALUE_REGISTER_CONVENTION
);
3870 /* Set the initialized field in a value struct. */
3873 set_value_initialized (struct value
*val
, int status
)
3875 val
->m_initialized
= status
;
3878 /* Return the initialized field in a value struct. */
3881 value_initialized (const struct value
*val
)
3883 return val
->m_initialized
;
3886 /* Helper for value_fetch_lazy when the value is a bitfield. */
3889 value_fetch_lazy_bitfield (struct value
*val
)
3891 gdb_assert (val
->bitsize () != 0);
3893 /* To read a lazy bitfield, read the entire enclosing value. This
3894 prevents reading the same block of (possibly volatile) memory once
3895 per bitfield. It would be even better to read only the containing
3896 word, but we have no way to record that just specific bits of a
3897 value have been fetched. */
3898 struct value
*parent
= val
->parent ();
3900 if (value_lazy (parent
))
3901 value_fetch_lazy (parent
);
3903 unpack_value_bitfield (val
, val
->bitpos (), val
->bitsize (),
3904 value_contents_for_printing (parent
).data (),
3905 val
->offset (), parent
);
3908 /* Helper for value_fetch_lazy when the value is in memory. */
3911 value_fetch_lazy_memory (struct value
*val
)
3913 gdb_assert (VALUE_LVAL (val
) == lval_memory
);
3915 CORE_ADDR addr
= value_address (val
);
3916 struct type
*type
= check_typedef (value_enclosing_type (val
));
3918 /* Figure out how much we should copy from memory. Usually, this is just
3919 the size of the type, but, for arrays, we might only be loading a
3920 small part of the array (this is only done for very large arrays). */
3922 if (val
->m_limited_length
> 0)
3924 gdb_assert (val
->type ()->code () == TYPE_CODE_ARRAY
);
3925 len
= val
->m_limited_length
;
3927 else if (type
->length () > 0)
3928 len
= type_length_units (type
);
3930 gdb_assert (len
>= 0);
3933 read_value_memory (val
, 0, value_stack (val
), addr
,
3934 value_contents_all_raw (val
).data (), len
);
3937 /* Helper for value_fetch_lazy when the value is in a register. */
3940 value_fetch_lazy_register (struct value
*val
)
3942 frame_info_ptr next_frame
;
3944 struct type
*type
= check_typedef (val
->type ());
3945 struct value
*new_val
= val
, *mark
= value_mark ();
3947 /* Offsets are not supported here; lazy register values must
3948 refer to the entire register. */
3949 gdb_assert (val
->offset () == 0);
3951 while (VALUE_LVAL (new_val
) == lval_register
&& value_lazy (new_val
))
3953 struct frame_id next_frame_id
= VALUE_NEXT_FRAME_ID (new_val
);
3955 next_frame
= frame_find_by_id (next_frame_id
);
3956 regnum
= VALUE_REGNUM (new_val
);
3958 gdb_assert (next_frame
!= NULL
);
3960 /* Convertible register routines are used for multi-register
3961 values and for interpretation in different types
3962 (e.g. float or int from a double register). Lazy
3963 register values should have the register's natural type,
3964 so they do not apply. */
3965 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame
),
3968 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3969 Since a "->next" operation was performed when setting
3970 this field, we do not need to perform a "next" operation
3971 again when unwinding the register. That's why
3972 frame_unwind_register_value() is called here instead of
3973 get_frame_register_value(). */
3974 new_val
= frame_unwind_register_value (next_frame
, regnum
);
3976 /* If we get another lazy lval_register value, it means the
3977 register is found by reading it from NEXT_FRAME's next frame.
3978 frame_unwind_register_value should never return a value with
3979 the frame id pointing to NEXT_FRAME. If it does, it means we
3980 either have two consecutive frames with the same frame id
3981 in the frame chain, or some code is trying to unwind
3982 behind get_prev_frame's back (e.g., a frame unwind
3983 sniffer trying to unwind), bypassing its validations. In
3984 any case, it should always be an internal error to end up
3985 in this situation. */
3986 if (VALUE_LVAL (new_val
) == lval_register
3987 && value_lazy (new_val
)
3988 && VALUE_NEXT_FRAME_ID (new_val
) == next_frame_id
)
3989 internal_error (_("infinite loop while fetching a register"));
3992 /* If it's still lazy (for instance, a saved register on the
3993 stack), fetch it. */
3994 if (value_lazy (new_val
))
3995 value_fetch_lazy (new_val
);
3997 /* Copy the contents and the unavailability/optimized-out
3998 meta-data from NEW_VAL to VAL. */
3999 set_value_lazy (val
, 0);
4000 value_contents_copy (val
, value_embedded_offset (val
),
4001 new_val
, value_embedded_offset (new_val
),
4002 type_length_units (type
));
4006 struct gdbarch
*gdbarch
;
4007 frame_info_ptr frame
;
4008 frame
= frame_find_by_id (VALUE_NEXT_FRAME_ID (val
));
4009 frame
= get_prev_frame_always (frame
);
4010 regnum
= VALUE_REGNUM (val
);
4011 gdbarch
= get_frame_arch (frame
);
4013 string_file debug_file
;
4014 gdb_printf (&debug_file
,
4015 "(frame=%d, regnum=%d(%s), ...) ",
4016 frame_relative_level (frame
), regnum
,
4017 user_reg_map_regnum_to_name (gdbarch
, regnum
));
4019 gdb_printf (&debug_file
, "->");
4020 if (value_optimized_out (new_val
))
4022 gdb_printf (&debug_file
, " ");
4023 val_print_optimized_out (new_val
, &debug_file
);
4028 gdb::array_view
<const gdb_byte
> buf
= value_contents (new_val
);
4030 if (VALUE_LVAL (new_val
) == lval_register
)
4031 gdb_printf (&debug_file
, " register=%d",
4032 VALUE_REGNUM (new_val
));
4033 else if (VALUE_LVAL (new_val
) == lval_memory
)
4034 gdb_printf (&debug_file
, " address=%s",
4036 value_address (new_val
)));
4038 gdb_printf (&debug_file
, " computed");
4040 gdb_printf (&debug_file
, " bytes=");
4041 gdb_printf (&debug_file
, "[");
4042 for (i
= 0; i
< register_size (gdbarch
, regnum
); i
++)
4043 gdb_printf (&debug_file
, "%02x", buf
[i
]);
4044 gdb_printf (&debug_file
, "]");
4047 frame_debug_printf ("%s", debug_file
.c_str ());
4050 /* Dispose of the intermediate values. This prevents
4051 watchpoints from trying to watch the saved frame pointer. */
4052 value_free_to_mark (mark
);
4055 /* Load the actual content of a lazy value. Fetch the data from the
4056 user's process and clear the lazy flag to indicate that the data in
4057 the buffer is valid.
4059 If the value is zero-length, we avoid calling read_memory, which
4060 would abort. We mark the value as fetched anyway -- all 0 bytes of
4064 value_fetch_lazy (struct value
*val
)
4066 gdb_assert (value_lazy (val
));
4067 allocate_value_contents (val
, true);
4068 /* A value is either lazy, or fully fetched. The
4069 availability/validity is only established as we try to fetch a
4071 gdb_assert (val
->m_optimized_out
.empty ());
4072 gdb_assert (val
->m_unavailable
.empty ());
4077 else if (val
->bitsize ())
4078 value_fetch_lazy_bitfield (val
);
4079 else if (VALUE_LVAL (val
) == lval_memory
)
4080 value_fetch_lazy_memory (val
);
4081 else if (VALUE_LVAL (val
) == lval_register
)
4082 value_fetch_lazy_register (val
);
4083 else if (VALUE_LVAL (val
) == lval_computed
4084 && value_computed_funcs (val
)->read
!= NULL
)
4085 value_computed_funcs (val
)->read (val
);
4087 internal_error (_("Unexpected lazy value type."));
4089 set_value_lazy (val
, 0);
4092 /* Implementation of the convenience function $_isvoid. */
4094 static struct value
*
4095 isvoid_internal_fn (struct gdbarch
*gdbarch
,
4096 const struct language_defn
*language
,
4097 void *cookie
, int argc
, struct value
**argv
)
4102 error (_("You must provide one argument for $_isvoid."));
4104 ret
= argv
[0]->type ()->code () == TYPE_CODE_VOID
;
4106 return value_from_longest (builtin_type (gdbarch
)->builtin_int
, ret
);
4109 /* Implementation of the convenience function $_creal. Extracts the
4110 real part from a complex number. */
4112 static struct value
*
4113 creal_internal_fn (struct gdbarch
*gdbarch
,
4114 const struct language_defn
*language
,
4115 void *cookie
, int argc
, struct value
**argv
)
4118 error (_("You must provide one argument for $_creal."));
4120 value
*cval
= argv
[0];
4121 type
*ctype
= check_typedef (cval
->type ());
4122 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4123 error (_("expected a complex number"));
4124 return value_real_part (cval
);
4127 /* Implementation of the convenience function $_cimag. Extracts the
4128 imaginary part from a complex number. */
4130 static struct value
*
4131 cimag_internal_fn (struct gdbarch
*gdbarch
,
4132 const struct language_defn
*language
,
4133 void *cookie
, int argc
,
4134 struct value
**argv
)
4137 error (_("You must provide one argument for $_cimag."));
4139 value
*cval
= argv
[0];
4140 type
*ctype
= check_typedef (cval
->type ());
4141 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4142 error (_("expected a complex number"));
4143 return value_imaginary_part (cval
);
4150 /* Test the ranges_contain function. */
4153 test_ranges_contain ()
4155 std::vector
<range
> ranges
;
4161 ranges
.push_back (r
);
4166 ranges
.push_back (r
);
4169 SELF_CHECK (!ranges_contain (ranges
, 2, 5));
4171 SELF_CHECK (ranges_contain (ranges
, 9, 5));
4173 SELF_CHECK (ranges_contain (ranges
, 10, 2));
4175 SELF_CHECK (ranges_contain (ranges
, 10, 5));
4177 SELF_CHECK (ranges_contain (ranges
, 13, 6));
4179 SELF_CHECK (ranges_contain (ranges
, 14, 5));
4181 SELF_CHECK (!ranges_contain (ranges
, 15, 4));
4183 SELF_CHECK (!ranges_contain (ranges
, 16, 4));
4185 SELF_CHECK (ranges_contain (ranges
, 16, 6));
4187 SELF_CHECK (ranges_contain (ranges
, 21, 1));
4189 SELF_CHECK (ranges_contain (ranges
, 21, 5));
4191 SELF_CHECK (!ranges_contain (ranges
, 26, 3));
4194 /* Check that RANGES contains the same ranges as EXPECTED. */
4197 check_ranges_vector (gdb::array_view
<const range
> ranges
,
4198 gdb::array_view
<const range
> expected
)
4200 return ranges
== expected
;
4203 /* Test the insert_into_bit_range_vector function. */
4206 test_insert_into_bit_range_vector ()
4208 std::vector
<range
> ranges
;
4212 insert_into_bit_range_vector (&ranges
, 10, 5);
4213 static const range expected
[] = {
4216 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4221 insert_into_bit_range_vector (&ranges
, 11, 4);
4222 static const range expected
= {10, 5};
4223 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4226 /* [10, 14] [20, 24] */
4228 insert_into_bit_range_vector (&ranges
, 20, 5);
4229 static const range expected
[] = {
4233 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4236 /* [10, 14] [17, 24] */
4238 insert_into_bit_range_vector (&ranges
, 17, 5);
4239 static const range expected
[] = {
4243 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4246 /* [2, 8] [10, 14] [17, 24] */
4248 insert_into_bit_range_vector (&ranges
, 2, 7);
4249 static const range expected
[] = {
4254 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4257 /* [2, 14] [17, 24] */
4259 insert_into_bit_range_vector (&ranges
, 9, 1);
4260 static const range expected
[] = {
4264 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4267 /* [2, 14] [17, 24] */
4269 insert_into_bit_range_vector (&ranges
, 9, 1);
4270 static const range expected
[] = {
4274 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4279 insert_into_bit_range_vector (&ranges
, 4, 30);
4280 static const range expected
= {2, 32};
4281 SELF_CHECK (check_ranges_vector (ranges
, expected
));
4288 type
*type
= builtin_type (current_inferior ()->gdbarch
)->builtin_int
;
4290 /* Verify that we can copy an entirely optimized out value, that may not have
4291 its contents allocated. */
4292 value_ref_ptr val
= release_value (allocate_optimized_out_value (type
));
4293 value_ref_ptr copy
= release_value (value_copy (val
.get ()));
4295 SELF_CHECK (value_entirely_optimized_out (val
.get ()));
4296 SELF_CHECK (value_entirely_optimized_out (copy
.get ()));
4299 } /* namespace selftests */
4300 #endif /* GDB_SELF_TEST */
4302 void _initialize_values ();
4304 _initialize_values ()
4306 cmd_list_element
*show_convenience_cmd
4307 = add_cmd ("convenience", no_class
, show_convenience
, _("\
4308 Debugger convenience (\"$foo\") variables and functions.\n\
4309 Convenience variables are created when you assign them values;\n\
4310 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4312 A few convenience variables are given values automatically:\n\
4313 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4314 \"$__\" holds the contents of the last address examined with \"x\"."
4317 Convenience functions are defined via the Python API."
4320 add_alias_cmd ("conv", show_convenience_cmd
, no_class
, 1, &showlist
);
4322 add_cmd ("values", no_set_class
, show_values
, _("\
4323 Elements of value history around item number IDX (or last ten)."),
4326 add_com ("init-if-undefined", class_vars
, init_if_undefined_command
, _("\
4327 Initialize a convenience variable if necessary.\n\
4328 init-if-undefined VARIABLE = EXPRESSION\n\
4329 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4330 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4331 VARIABLE is already initialized."));
4333 add_prefix_cmd ("function", no_class
, function_command
, _("\
4334 Placeholder command for showing help on convenience functions."),
4335 &functionlist
, 0, &cmdlist
);
4337 add_internal_function ("_isvoid", _("\
4338 Check whether an expression is void.\n\
4339 Usage: $_isvoid (expression)\n\
4340 Return 1 if the expression is void, zero otherwise."),
4341 isvoid_internal_fn
, NULL
);
4343 add_internal_function ("_creal", _("\
4344 Extract the real part of a complex number.\n\
4345 Usage: $_creal (expression)\n\
4346 Return the real part of a complex number, the type depends on the\n\
4347 type of a complex number."),
4348 creal_internal_fn
, NULL
);
4350 add_internal_function ("_cimag", _("\
4351 Extract the imaginary part of a complex number.\n\
4352 Usage: $_cimag (expression)\n\
4353 Return the imaginary part of a complex number, the type depends on the\n\
4354 type of a complex number."),
4355 cimag_internal_fn
, NULL
);
4357 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4358 class_support
, &max_value_size
, _("\
4359 Set maximum sized value gdb will load from the inferior."), _("\
4360 Show maximum sized value gdb will load from the inferior."), _("\
4361 Use this to control the maximum size, in bytes, of a value that gdb\n\
4362 will load from the inferior. Setting this value to 'unlimited'\n\
4363 disables checking.\n\
4364 Setting this does not invalidate already allocated values, it only\n\
4365 prevents future values, larger than this size, from being allocated."),
4367 show_max_value_size
,
4368 &setlist
, &showlist
);
4369 set_show_commands vsize_limit
4370 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support
,
4371 &max_value_size
, _("\
4372 Set the maximum number of bytes allowed in a variable-size object."), _("\
4373 Show the maximum number of bytes allowed in a variable-size object."), _("\
4374 Attempts to access an object whose size is not a compile-time constant\n\
4375 and exceeds this limit will cause an error."),
4376 NULL
, NULL
, &setlist
, &showlist
);
4377 deprecate_cmd (vsize_limit
.set
, "set max-value-size");
4380 selftests::register_test ("ranges_contain", selftests::test_ranges_contain
);
4381 selftests::register_test ("insert_into_bit_range_vector",
4382 selftests::test_insert_into_bit_range_vector
);
4383 selftests::register_test ("value_copy", selftests::test_value_copy
);
4392 all_values
.clear ();