gdb/value.c (binutils-gdb.git)
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "gdbsupport/selftest.h"
45 #include "gdbsupport/array-view.h"
46 #include "cli/cli-style.h"
47 #include "expop.h"
48 #include "inferior.h"
49
50 /* Definition of a user function. */
51 struct internal_function
52 {
53 /* The name of the function. It is a bit odd to have this in the
54 function itself -- the user might use a differently-named
55 convenience variable to hold the function. */
56 char *name;
57
58 /* The handler. */
59 internal_function_fn handler;
60
61 /* User data for the handler. */
62 void *cookie;
63 };
64
65 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
66
67 struct range
68 {
69 /* Lowest offset in the range. */
70 LONGEST offset;
71
72 /* Length of the range. */
73 LONGEST length;
74
75 /* Returns true if THIS is strictly less than OTHER, useful for
76 searching. We keep ranges sorted by offset and coalesce
77 overlapping and contiguous ranges, so this just compares the
78 starting offset. */
79
80 bool operator< (const range &other) const
81 {
82 return offset < other.offset;
83 }
84
85 /* Returns true if THIS is equal to OTHER. */
86 bool operator== (const range &other) const
87 {
88 return offset == other.offset && length == other.length;
89 }
90 };
91
92 /* Returns true if the ranges defined by [offset1, offset1+len1) and
93 [offset2, offset2+len2) overlap. */
94
95 static int
96 ranges_overlap (LONGEST offset1, LONGEST len1,
97 LONGEST offset2, LONGEST len2)
98 {
99 ULONGEST h, l;
100
101 l = std::max (offset1, offset2);
102 h = std::min (offset1 + len1, offset2 + len2);
103 return (l < h);
104 }
105
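/* For example, with the half-open convention above:

     ranges_overlap (0, 4, 2, 6)   => l = 2, h = 4, 2 < 4  => 1
     ranges_overlap (0, 4, 4, 2)   => l = 4, h = 4, 4 < 4  => 0

   i.e., [0, 4) and [2, 8) overlap, while [0, 4) and [4, 6) are merely
   contiguous and thus do not.  */
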
106 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 OFFSET+LENGTH). */
108
109 static int
110 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
111 LONGEST length)
112 {
113 range what;
114
115 what.offset = offset;
116 what.length = length;
117
118 /* We keep ranges sorted by offset and coalesce overlapping and
119 contiguous ranges, so to check if a range list contains a given
120 range, we can do a binary search for the position the given range
121 would be inserted if we only considered the starting OFFSET of
122 ranges. We call that position I. Since we also have LENGTH to
123 care for (this is a range after all), we need to check if the
124 _previous_ range overlaps the I range. E.g.,
125
126 R
127 |---|
128 |---| |---| |------| ... |--|
129 0 1 2 N
130
131 I=1
132
133 In the case above, the binary search would return `I=1', meaning,
134 this OFFSET should be inserted at position 1, and the current
135 position 1 should be pushed further (and become 2). But, `0'
136 overlaps with R.
137
138 Then we also need to check whether the range at position I itself
139 overlaps R. E.g.,
140
141 R
142 |---|
143 |---| |---| |-------| ... |--|
144 0 1 2 N
145
146 I=1
147 */
148
149
150 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
151
152 if (i > ranges.begin ())
153 {
154 const struct range &bef = *(i - 1);
155
156 if (ranges_overlap (bef.offset, bef.length, offset, length))
157 return 1;
158 }
159
160 if (i < ranges.end ())
161 {
162 const struct range &r = *i;
163
164 if (ranges_overlap (r.offset, r.length, offset, length))
165 return 1;
166 }
167
168 return 0;
169 }
170
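/* As a worked example: with RANGES holding the two coalesced entries
   [0, 4) and [8, 12):

     ranges_contain (v, 5, 2)   => 0   ([5, 7) falls in the gap)
     ranges_contain (v, 3, 6)   => 1   ([3, 9) overlaps [0, 4))

   The second query is caught by the "previous range" check described
   above: the binary search lands on the [8, 12) entry, but the entry
   before it already overlaps the queried range.  */
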
171 static struct cmd_list_element *functionlist;
172
173 /* Note that the fields in this structure are arranged to save a bit
174 of memory. */
175
176 struct value
177 {
178 explicit value (struct type *type_)
179 : modifiable (1),
180 lazy (1),
181 initialized (1),
182 stack (0),
183 is_zero (false),
184 type (type_),
185 enclosing_type (type_)
186 {
187 }
188
189 ~value ()
190 {
191 if (VALUE_LVAL (this) == lval_computed)
192 {
193 const struct lval_funcs *funcs = location.computed.funcs;
194
195 if (funcs->free_closure)
196 funcs->free_closure (this);
197 }
198 else if (VALUE_LVAL (this) == lval_xcallable)
199 delete location.xm_worker;
200 }
201
202 DISABLE_COPY_AND_ASSIGN (value);
203
204 /* Type of value; either not an lval, or one of the various
205 different possible kinds of lval. */
206 enum lval_type lval = not_lval;
207
208 /* Is it modifiable? Only relevant if lval != not_lval. */
209 unsigned int modifiable : 1;
210
211 /* If zero, contents of this value are in the contents field. If
212 nonzero, contents are in inferior. If the lval field is lval_memory,
213 the contents are in inferior memory at location.address plus offset.
214 The lval field may also be lval_register.
215
216 WARNING: This field is used by the code which handles watchpoints
217 (see breakpoint.c) to decide whether a particular value can be
218 watched by hardware watchpoints. If the lazy flag is set for
219 some member of a value chain, it is assumed that this member of
220 the chain doesn't need to be watched as part of watching the
221 value itself. This is how GDB avoids watching the entire struct
222 or array when the user wants to watch a single struct member or
223 array element. If you ever change the way lazy flag is set and
224 reset, be sure to consider this use as well! */
225 unsigned int lazy : 1;
226
227 /* If value is a variable, is it initialized or not. */
228 unsigned int initialized : 1;
229
230 /* If value is from the stack. If this is set, read_stack will be
231 used instead of read_memory to enable extra caching. */
232 unsigned int stack : 1;
233
234 /* True if this is a zero value, created by 'value_zero'; false
235 otherwise. */
236 bool is_zero : 1;
237
238 /* Location of value (if lval). */
239 union
240 {
241 /* If lval == lval_memory, this is the address in the inferior. */
242 CORE_ADDR address;
243
244 /* If lval == lval_register, the value is from a register. */
245 struct
246 {
247 /* Register number. */
248 int regnum;
249 /* Frame ID of "next" frame to which a register value is relative.
250 If the register value is found relative to frame F, then the
251 frame id of F->next will be stored in next_frame_id. */
252 struct frame_id next_frame_id;
253 } reg;
254
255 /* Pointer to internal variable. */
256 struct internalvar *internalvar;
257
258 /* Pointer to xmethod worker. */
259 struct xmethod_worker *xm_worker;
260
261 /* If lval == lval_computed, this is a set of function pointers
262 to use to access and describe the value, and a closure pointer
263 for them to use. */
264 struct
265 {
266 /* Functions to call. */
267 const struct lval_funcs *funcs;
268
269 /* Closure for those functions to use. */
270 void *closure;
271 } computed;
272 } location {};
273
274 /* Describes offset of a value within lval of a structure in target
275 addressable memory units. Note also the member embedded_offset
276 below. */
277 LONGEST offset = 0;
278
279 /* Only used for bitfields; number of bits contained in them. */
280 LONGEST bitsize = 0;
281
282 /* Only used for bitfields; position of start of field. For
283 little-endian targets, it is the position of the LSB. For
284 big-endian targets, it is the position of the MSB. */
285 LONGEST bitpos = 0;
286
287 /* The number of references to this value. When a value is created,
288 the value chain holds a reference, so REFERENCE_COUNT is 1. If
289 release_value is called, this value is removed from the chain but
290 the caller of release_value now has a reference to this value.
291 The caller must arrange for a call to value_free later. */
292 int reference_count = 1;
293
294 /* Only used for bitfields; the containing value. This allows a
295 single read from the target when displaying multiple
296 bitfields. */
297 value_ref_ptr parent;
298
299 /* Type of the value. */
300 struct type *type;
301
302 /* If a value represents a C++ object, then the `type' field gives
303 the object's compile-time type. If the object actually belongs
304 to some class derived from `type', perhaps with other base
305 classes and additional members, then `type' is just a subobject
306 of the real thing, and the full object is probably larger than
307 `type' would suggest.
308
309 If `type' is a dynamic class (i.e. one with a vtable), then GDB
310 can actually determine the object's run-time type by looking at
311 the run-time type information in the vtable. When this
312 information is available, we may elect to read in the entire
313 object, for several reasons:
314
315 - When printing the value, the user would probably rather see the
316 full object, not just the limited portion apparent from the
317 compile-time type.
318
319 - If `type' has virtual base classes, then even printing `type'
320 alone may require reaching outside the `type' portion of the
321 object to wherever the virtual base class has been stored.
322
323 When we store the entire object, `enclosing_type' is the run-time
324 type -- the complete object -- and `embedded_offset' is the
325 offset of `type' within that larger type, in target addressable memory
326 units. The value_contents() macro takes `embedded_offset' into account,
327 so most GDB code continues to see the `type' portion of the value, just
328 as the inferior would.
329
330 If `type' is a pointer to an object, then `enclosing_type' is a
331 pointer to the object's run-time type, and `pointed_to_offset' is
332 the offset in target addressable memory units from the full object
333 to the pointed-to object -- that is, the value `embedded_offset' would
334 have if we followed the pointer and fetched the complete object.
335 (I don't really see the point. Why not just determine the
336 run-time type when you indirect, and avoid the special case? The
337 contents don't matter until you indirect anyway.)
338
339 If we're not doing anything fancy, `enclosing_type' is equal to
340 `type', and `embedded_offset' is zero, so everything works
341 normally. */
342 struct type *enclosing_type;
343 LONGEST embedded_offset = 0;
344 LONGEST pointed_to_offset = 0;
345
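/* As an illustration of the fields above (class names here are
   hypothetical): given

     struct Base { virtual ~Base (); int b; };
     struct Derived : Base { int d; };

   and a Derived object printed through a `Base *' with `set print
   object on', GDB may read the full object, in which case `type' is
   Base, `enclosing_type' is Derived, and `embedded_offset' is the
   offset of the Base subobject within Derived.  */
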
346 /* Actual contents of the value. Target byte-order.
347
348 May be nullptr if the value is lazy or is entirely optimized out.
349 Guaranteed to be non-nullptr otherwise. */
350 gdb::unique_xmalloc_ptr<gdb_byte> contents;
351
352 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
353 rather than available, since the common and default case is for a
354 value to be available. This is filled in at value read time.
355 The unavailable ranges are tracked in bits. Note that a contents
356 bit that has been optimized out doesn't really exist in the
357 program, so it can't be marked unavailable either. */
358 std::vector<range> unavailable;
359
360 /* Likewise, but for optimized out contents (a chunk of the value of
361 a variable that does not actually exist in the program). If LVAL
362 is lval_register, this is a register ($pc, $sp, etc., never a
363 program variable) that has not been saved in the frame. Not
364 saved registers and optimized-out program variables values are
365 treated pretty much the same, except not-saved registers have a
366 different string representation and related error strings. */
367 std::vector<range> optimized_out;
368 };
369
370 /* See value.h. */
371
372 struct gdbarch *
373 get_value_arch (const struct value *value)
374 {
375 return value_type (value)->arch ();
376 }
377
378 int
379 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
380 {
381 gdb_assert (!value->lazy);
382
383 return !ranges_contain (value->unavailable, offset, length);
384 }
385
386 int
387 value_bytes_available (const struct value *value,
388 LONGEST offset, LONGEST length)
389 {
390 return value_bits_available (value,
391 offset * TARGET_CHAR_BIT,
392 length * TARGET_CHAR_BIT);
393 }
394
395 int
396 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
397 {
398 gdb_assert (!value->lazy);
399
400 return ranges_contain (value->optimized_out, bit_offset, bit_length);
401 }
402
403 int
404 value_entirely_available (struct value *value)
405 {
406 /* We can only tell whether the whole value is available when we try
407 to read it. */
408 if (value->lazy)
409 value_fetch_lazy (value);
410
411 if (value->unavailable.empty ())
412 return 1;
413 return 0;
414 }
415
416 /* Returns true if VALUE is entirely covered by RANGES. If the value
417 is lazy, it'll be read now. Note that RANGES refers to a vector
418 owned by VALUE, so reading a lazy value may modify it. */
419
420 static int
421 value_entirely_covered_by_range_vector (struct value *value,
422 const std::vector<range> &ranges)
423 {
424 /* We can only tell whether the whole value is optimized out /
425 unavailable when we try to read it. */
426 if (value->lazy)
427 value_fetch_lazy (value);
428
429 if (ranges.size () == 1)
430 {
431 const struct range &t = ranges[0];
432
433 if (t.offset == 0
434 && t.length == (TARGET_CHAR_BIT
435 * TYPE_LENGTH (value_enclosing_type (value))))
436 return 1;
437 }
438
439 return 0;
440 }
441
442 int
443 value_entirely_unavailable (struct value *value)
444 {
445 return value_entirely_covered_by_range_vector (value, value->unavailable);
446 }
447
448 int
449 value_entirely_optimized_out (struct value *value)
450 {
451 return value_entirely_covered_by_range_vector (value, value->optimized_out);
452 }
453
454 /* Insert into the vector pointed to by VECTORP the bit range starting at
455 OFFSET bits, and extending for the next LENGTH bits. */
456
457 static void
458 insert_into_bit_range_vector (std::vector<range> *vectorp,
459 LONGEST offset, LONGEST length)
460 {
461 range newr;
462
463 /* Insert the range sorted. If there's overlap or the new range
464 would be contiguous with an existing range, merge. */
465
466 newr.offset = offset;
467 newr.length = length;
468
469 /* Do a binary search for the position the given range would be
470 inserted if we only considered the starting OFFSET of ranges.
471 Call that position I. Since we also have LENGTH to care for
472 (this is a range after all), we need to check if the _previous_
473 range overlaps the I range. E.g., calling R the new range:
474
475 #1 - overlaps with previous
476
477 R
478 |-...-|
479 |---| |---| |------| ... |--|
480 0 1 2 N
481
482 I=1
483
484 In the case #1 above, the binary search would return `I=1',
485 meaning, this OFFSET should be inserted at position 1, and the
486 current position 1 should be pushed further (and become 2). But,
487 note that `0' overlaps with R, so we want to merge them.
488
489 A similar consideration needs to be taken if the new range would
490 be contiguous with the previous range:
491
492 #2 - contiguous with previous
493
494 R
495 |-...-|
496 |--| |---| |------| ... |--|
497 0 1 2 N
498
499 I=1
500
501 If there's no overlap with the previous range, as in:
502
503 #3 - not overlapping and not contiguous
504
505 R
506 |-...-|
507 |--| |---| |------| ... |--|
508 0 1 2 N
509
510 I=1
511
512 or if I is 0:
513
514 #4 - R is the range with lowest offset
515
516 R
517 |-...-|
518 |--| |---| |------| ... |--|
519 0 1 2 N
520
521 I=0
522
523 ... we just push the new range to I.
524
525 All the 4 cases above need to consider that the new range may
526 also overlap several of the ranges that follow, or that R may be
527 contiguous with the following range, and merge. E.g.,
528
529 #5 - overlapping following ranges
530
531 R
532 |------------------------|
533 |--| |---| |------| ... |--|
534 0 1 2 N
535
536 I=0
537
538 or:
539
540 R
541 |-------|
542 |--| |---| |------| ... |--|
543 0 1 2 N
544
545 I=1
546
547 */
548
549 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
550 if (i > vectorp->begin ())
551 {
552 struct range &bef = *(i - 1);
553
554 if (ranges_overlap (bef.offset, bef.length, offset, length))
555 {
556 /* #1 */
557 ULONGEST l = std::min (bef.offset, offset);
558 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
559
560 bef.offset = l;
561 bef.length = h - l;
562 i--;
563 }
564 else if (offset == bef.offset + bef.length)
565 {
566 /* #2 */
567 bef.length += length;
568 i--;
569 }
570 else
571 {
572 /* #3 */
573 i = vectorp->insert (i, newr);
574 }
575 }
576 else
577 {
578 /* #4 */
579 i = vectorp->insert (i, newr);
580 }
581
582 /* Check whether the ranges following the one we've just added or
583 touched can be folded in (#5 above). */
584 if (i != vectorp->end () && i + 1 < vectorp->end ())
585 {
586 int removed = 0;
587 auto next = i + 1;
588
589 /* Get the range we just touched. */
590 struct range &t = *i;
591 removed = 0;
592
593 i = next;
594 for (; i < vectorp->end (); i++)
595 {
596 struct range &r = *i;
597 if (r.offset <= t.offset + t.length)
598 {
599 ULONGEST l, h;
600
601 l = std::min (t.offset, r.offset);
602 h = std::max (t.offset + t.length, r.offset + r.length);
603
604 t.offset = l;
605 t.length = h - l;
606
607 removed++;
608 }
609 else
610 {
611 /* If we couldn't merge this one, we won't be able to
612 merge following ones either, since the ranges are
613 always sorted by OFFSET. */
614 break;
615 }
616 }
617
618 if (removed != 0)
619 vectorp->erase (next, next + removed);
620 }
621 }
622
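/* A short example of the coalescing behavior, using the wrappers
   defined below (offsets and lengths in bits):

     mark_value_bits_unavailable (val, 0, 4);   // {[0, 4)}
     mark_value_bits_unavailable (val, 8, 4);   // {[0, 4), [8, 12)}
     mark_value_bits_unavailable (val, 2, 8);   // {[0, 12)}

   The third call overlaps the first entry (case #1 above), and the
   merged range then swallows the following [8, 12) entry (case #5),
   leaving a single entry with offset 0 and length 12.  */
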
623 void
624 mark_value_bits_unavailable (struct value *value,
625 LONGEST offset, LONGEST length)
626 {
627 insert_into_bit_range_vector (&value->unavailable, offset, length);
628 }
629
630 void
631 mark_value_bytes_unavailable (struct value *value,
632 LONGEST offset, LONGEST length)
633 {
634 mark_value_bits_unavailable (value,
635 offset * TARGET_CHAR_BIT,
636 length * TARGET_CHAR_BIT);
637 }
638
639 /* Find the first range in RANGES that overlaps the range defined by
640 OFFSET and LENGTH, starting at element POS in the RANGES vector.
641 Returns the index into RANGES where such an overlapping range was
642 found, or -1 if none was found. */
643
644 static int
645 find_first_range_overlap (const std::vector<range> *ranges, int pos,
646 LONGEST offset, LONGEST length)
647 {
648 int i;
649
650 for (i = pos; i < ranges->size (); i++)
651 {
652 const range &r = (*ranges)[i];
653 if (ranges_overlap (r.offset, r.length, offset, length))
654 return i;
655 }
656
657 return -1;
658 }
659
660 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
661 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
662 return non-zero.
663
664 It must always be the case that:
665 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
666
667 It is assumed that memory can be accessed from:
668 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
669 to:
670 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
671 / TARGET_CHAR_BIT) */
672 static int
673 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
674 const gdb_byte *ptr2, size_t offset2_bits,
675 size_t length_bits)
676 {
677 gdb_assert (offset1_bits % TARGET_CHAR_BIT
678 == offset2_bits % TARGET_CHAR_BIT);
679
680 if (offset1_bits % TARGET_CHAR_BIT != 0)
681 {
682 size_t bits;
683 gdb_byte mask, b1, b2;
684
685 /* The offset from the base pointers PTR1 and PTR2 is not a complete
686 number of bytes. A number of bits up to either the next exact
687 byte boundary, or LENGTH_BITS (whichever is sooner) will be
688 compared. */
689 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
690 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
691 mask = (1 << bits) - 1;
692
693 if (length_bits < bits)
694 {
695 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
696 bits = length_bits;
697 }
698
699 /* Now load the two bytes and mask off the bits we care about. */
700 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
701 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
702
703 if (b1 != b2)
704 return 1;
705
706 /* Now update the length and offsets to take account of the bits
707 we've just compared. */
708 length_bits -= bits;
709 offset1_bits += bits;
710 offset2_bits += bits;
711 }
712
713 if (length_bits % TARGET_CHAR_BIT != 0)
714 {
715 size_t bits;
716 size_t o1, o2;
717 gdb_byte mask, b1, b2;
718
719 /* The length is not an exact number of bytes. After the previous
720 `if' block, either the offsets are byte aligned, or the
721 length is zero (in which case this code is not reached). Compare
722 a number of bits at the end of the region, starting from an exact
723 byte boundary. */
724 bits = length_bits % TARGET_CHAR_BIT;
725 o1 = offset1_bits + length_bits - bits;
726 o2 = offset2_bits + length_bits - bits;
727
728 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
729 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
730
731 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
732 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
733
734 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
735 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
736
737 if (b1 != b2)
738 return 1;
739
740 length_bits -= bits;
741 }
742
743 if (length_bits > 0)
744 {
745 /* We've now taken care of any stray "bits" at the start or end of
746 the region to compare; the remainder can be covered with a simple
747 memcmp. */
748 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
749 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
750 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
751
752 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
753 ptr2 + offset2_bits / TARGET_CHAR_BIT,
754 length_bits / TARGET_CHAR_BIT);
755 }
756
757 /* Length is zero, regions match. */
758 return 0;
759 }
760
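/* For example, memcmp_with_bit_offsets (p1, 4, p2, 4, 12) compares the
   low-order 4 bits of p1[0] and p2[0] (the partial leading byte) and
   then falls through to a plain memcmp of p1[1] and p2[1].  With a
   length of 10 instead, the 6 bits left after the leading byte are
   compared by the middle block, masked to the high-order 6 bits of
   p1[1] and p2[1], and the final memcmp is skipped.  */
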
761 /* Helper struct for find_first_range_overlap_and_match and
762 value_contents_bits_eq. Keep track of which slot of a given ranges
763 vector we last looked at. */
764
765 struct ranges_and_idx
766 {
767 /* The ranges. */
768 const std::vector<range> *ranges;
769
770 /* The range we've last found in RANGES. Given ranges are sorted,
771 we can start the next lookup here. */
772 int idx;
773 };
774
775 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
776 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
777 ranges starting at OFFSET2 bits. Return true if the ranges match
778 and fill in *L and *H with the overlapping window relative to
779 (both) OFFSET1 or OFFSET2. */
780
781 static int
782 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
783 struct ranges_and_idx *rp2,
784 LONGEST offset1, LONGEST offset2,
785 LONGEST length, ULONGEST *l, ULONGEST *h)
786 {
787 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
788 offset1, length);
789 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
790 offset2, length);
791
792 if (rp1->idx == -1 && rp2->idx == -1)
793 {
794 *l = length;
795 *h = length;
796 return 1;
797 }
798 else if (rp1->idx == -1 || rp2->idx == -1)
799 return 0;
800 else
801 {
802 const range *r1, *r2;
803 ULONGEST l1, h1;
804 ULONGEST l2, h2;
805
806 r1 = &(*rp1->ranges)[rp1->idx];
807 r2 = &(*rp2->ranges)[rp2->idx];
808
809 /* Get the unavailable windows intersected by the incoming
810 ranges. The first and last ranges that overlap the argument
811 range may be wider than the incoming argument ranges. */
812 l1 = std::max (offset1, r1->offset);
813 h1 = std::min (offset1 + length, r1->offset + r1->length);
814
815 l2 = std::max (offset2, r2->offset);
816 h2 = std::min (offset2 + length, offset2 + r2->length);
817
818 /* Make them relative to the respective start offsets, so we can
819 compare them for equality. */
820 l1 -= offset1;
821 h1 -= offset1;
822
823 l2 -= offset2;
824 h2 -= offset2;
825
826 /* Different ranges, no match. */
827 if (l1 != l2 || h1 != h2)
828 return 0;
829
830 *h = h1;
831 *l = l1;
832 return 1;
833 }
834 }
835
836 /* Helper function for value_contents_eq. The only difference is that
837 this function is bit rather than byte based.
838
839 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
840 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
841 Return true if the available bits match. */
842
843 static bool
844 value_contents_bits_eq (const struct value *val1, int offset1,
845 const struct value *val2, int offset2,
846 int length)
847 {
848 /* Each array element corresponds to a ranges source (unavailable,
849 optimized out). '1' is for VAL1, '2' for VAL2. */
850 struct ranges_and_idx rp1[2], rp2[2];
851
852 /* See function description in value.h. */
853 gdb_assert (!val1->lazy && !val2->lazy);
854
855 /* We shouldn't be trying to compare past the end of the values. */
856 gdb_assert (offset1 + length
857 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
858 gdb_assert (offset2 + length
859 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
860
861 memset (&rp1, 0, sizeof (rp1));
862 memset (&rp2, 0, sizeof (rp2));
863 rp1[0].ranges = &val1->unavailable;
864 rp2[0].ranges = &val2->unavailable;
865 rp1[1].ranges = &val1->optimized_out;
866 rp2[1].ranges = &val2->optimized_out;
867
868 while (length > 0)
869 {
870 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
871 int i;
872
873 for (i = 0; i < 2; i++)
874 {
875 ULONGEST l_tmp, h_tmp;
876
877 /* The contents only compare equal if the invalid/unavailable
878 contents ranges match as well. */
879 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
880 offset1, offset2, length,
881 &l_tmp, &h_tmp))
882 return false;
883
884 /* We're interested in the lowest/first range found. */
885 if (i == 0 || l_tmp < l)
886 {
887 l = l_tmp;
888 h = h_tmp;
889 }
890 }
891
892 /* Compare the available/valid contents. */
893 if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
894 val2->contents.get (), offset2, l) != 0)
895 return false;
896
897 length -= h;
898 offset1 += h;
899 offset2 += h;
900 }
901
902 return true;
903 }
904
905 bool
906 value_contents_eq (const struct value *val1, LONGEST offset1,
907 const struct value *val2, LONGEST offset2,
908 LONGEST length)
909 {
910 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
911 val2, offset2 * TARGET_CHAR_BIT,
912 length * TARGET_CHAR_BIT);
913 }
914
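/* For instance, given two fully fetched 4-byte values whose first byte
   is marked unavailable in both, value_contents_eq (v1, 0, v2, 0, 4)
   compares only bytes 1..3 and additionally requires the unavailable
   ranges themselves to line up; if only one of the values had the
   unavailable byte, the result would be false regardless of the
   remaining contents.  */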
915
916 /* The value-history records all the values printed by print commands
917 during this session. */
918
919 static std::vector<value_ref_ptr> value_history;
920
921 \f
922 /* List of all value objects currently allocated
923 (except for those released by calls to release_value).
924 This is so they can be freed after each command. */
925
926 static std::vector<value_ref_ptr> all_values;
927
928 /* Allocate a lazy value for type TYPE. Its actual content is
929 "lazily" allocated too: the content field of the return value is
930 NULL; it will be allocated when it is fetched from the target. */
931
932 struct value *
933 allocate_value_lazy (struct type *type)
934 {
935 struct value *val;
936
937 /* Call check_typedef on our type to make sure that, if TYPE
938 is a TYPE_CODE_TYPEDEF, its length is set to the length
939 of the target type instead of zero. However, we do not
940 replace the typedef type by the target type, because we want
941 to keep the typedef in order to be able to set the VAL's type
942 description correctly. */
943 check_typedef (type);
944
945 val = new struct value (type);
946
947 /* Values start out on the all_values chain. */
948 all_values.emplace_back (val);
949
950 return val;
951 }
952
953 /* The maximum size, in bytes, that GDB will try to allocate for a value.
954 The initial value of 64k was not selected for any specific reason; it is
955 just a reasonable starting point. */
956
957 static int max_value_size = 65536; /* 64k bytes */
958
959 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
960 LONGEST, otherwise GDB will not be able to parse integer values from the
961 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
962 be unable to parse "set max-value-size 2".
963
964 As we want a consistent GDB experience across hosts with different sizes
965 of LONGEST, this arbitrary minimum value was selected; as long as it
966 is bigger than LONGEST on all GDB supported hosts, we're fine. */
967
968 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
969 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
970
971 /* Implement the "set max-value-size" command. */
972
973 static void
974 set_max_value_size (const char *args, int from_tty,
975 struct cmd_list_element *c)
976 {
977 gdb_assert (max_value_size == -1 || max_value_size >= 0);
978
979 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
980 {
981 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
982 error (_("max-value-size set too low, increasing to %d bytes"),
983 max_value_size);
984 }
985 }
986
987 /* Implement the "show max-value-size" command. */
988
989 static void
990 show_max_value_size (struct ui_file *file, int from_tty,
991 struct cmd_list_element *c, const char *value)
992 {
993 if (max_value_size == -1)
994 gdb_printf (file, _("Maximum value size is unlimited.\n"));
995 else
996 gdb_printf (file, _("Maximum value size is %d bytes.\n"),
997 max_value_size);
998 }
999
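/* The user-facing side of this setting looks roughly like the
   following session (the wording comes from the handlers above):

     (gdb) show max-value-size
     Maximum value size is 65536 bytes.
     (gdb) set max-value-size 8
     max-value-size set too low, increasing to 16 bytes
     (gdb) set max-value-size unlimited

   Any request below MIN_VALUE_FOR_MAX_VALUE_SIZE is bumped back up,
   and "unlimited" corresponds to max_value_size == -1.  */
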
1000 /* Called before we attempt to allocate or reallocate a buffer for the
1001 contents of a value. TYPE is the type of the value for which we are
1002 allocating the buffer. If the buffer is too large (based on the user
1003 controllable setting) then throw an error. If this function returns
1004 then we should attempt to allocate the buffer. */
1005
1006 static void
1007 check_type_length_before_alloc (const struct type *type)
1008 {
1009 ULONGEST length = TYPE_LENGTH (type);
1010
1011 if (max_value_size > -1 && length > max_value_size)
1012 {
1013 if (type->name () != NULL)
1014 error (_("value of type `%s' requires %s bytes, which is more "
1015 "than max-value-size"), type->name (), pulongest (length));
1016 else
1017 error (_("value requires %s bytes, which is more than "
1018 "max-value-size"), pulongest (length));
1019 }
1020 }
1021
1022 /* Allocate the contents of VAL if it has not been allocated yet. */
1023
1024 static void
1025 allocate_value_contents (struct value *val)
1026 {
1027 if (!val->contents)
1028 {
1029 check_type_length_before_alloc (val->enclosing_type);
1030 val->contents.reset
1031 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1032 }
1033 }
1034
1035 /* Allocate a value and its contents for type TYPE. */
1036
1037 struct value *
1038 allocate_value (struct type *type)
1039 {
1040 struct value *val = allocate_value_lazy (type);
1041
1042 allocate_value_contents (val);
1043 val->lazy = 0;
1044 return val;
1045 }
1046
1047 /* Allocate a value that has the correct length
1048 for COUNT repetitions of type TYPE. */
1049
1050 struct value *
1051 allocate_repeat_value (struct type *type, int count)
1052 {
1053 /* Despite the fact that we are really creating an array of TYPE here, we
1054 use the string lower bound as the array lower bound. This seems to
1055 work fine for now. */
1056 int low_bound = current_language->string_lower_bound ();
1057 /* FIXME-type-allocation: need a way to free this type when we are
1058 done with it. */
1059 struct type *array_type
1060 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1061
1062 return allocate_value (array_type);
1063 }
1064
1065 struct value *
1066 allocate_computed_value (struct type *type,
1067 const struct lval_funcs *funcs,
1068 void *closure)
1069 {
1070 struct value *v = allocate_value_lazy (type);
1071
1072 VALUE_LVAL (v) = lval_computed;
1073 v->location.computed.funcs = funcs;
1074 v->location.computed.closure = closure;
1075
1076 return v;
1077 }
1078
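/* A typical use of allocate_computed_value looks roughly like this
   (MY_FUNCS and MY_CLOSURE are hypothetical names):

     static const struct lval_funcs my_funcs = { ... };
     ...
     value *v = allocate_computed_value (type, &my_funcs, my_closure);

   The closure is handed back to each callback.  As the code in this
   file shows, copy_closure is invoked from value_copy and
   set_value_component_location, and free_closure from the value
   destructor, so both should be supplied whenever the closure owns
   resources.  */
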
1079 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1080
1081 struct value *
1082 allocate_optimized_out_value (struct type *type)
1083 {
1084 struct value *retval = allocate_value_lazy (type);
1085
1086 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1087 set_value_lazy (retval, 0);
1088 return retval;
1089 }
1090
1091 /* Accessor methods. */
1092
1093 struct type *
1094 value_type (const struct value *value)
1095 {
1096 return value->type;
1097 }
1098 void
1099 deprecated_set_value_type (struct value *value, struct type *type)
1100 {
1101 value->type = type;
1102 }
1103
1104 LONGEST
1105 value_offset (const struct value *value)
1106 {
1107 return value->offset;
1108 }
1109 void
1110 set_value_offset (struct value *value, LONGEST offset)
1111 {
1112 value->offset = offset;
1113 }
1114
1115 LONGEST
1116 value_bitpos (const struct value *value)
1117 {
1118 return value->bitpos;
1119 }
1120 void
1121 set_value_bitpos (struct value *value, LONGEST bit)
1122 {
1123 value->bitpos = bit;
1124 }
1125
1126 LONGEST
1127 value_bitsize (const struct value *value)
1128 {
1129 return value->bitsize;
1130 }
1131 void
1132 set_value_bitsize (struct value *value, LONGEST bit)
1133 {
1134 value->bitsize = bit;
1135 }
1136
1137 struct value *
1138 value_parent (const struct value *value)
1139 {
1140 return value->parent.get ();
1141 }
1142
1143 /* See value.h. */
1144
1145 void
1146 set_value_parent (struct value *value, struct value *parent)
1147 {
1148 value->parent = value_ref_ptr::new_reference (parent);
1149 }
1150
1151 gdb::array_view<gdb_byte>
1152 value_contents_raw (struct value *value)
1153 {
1154 struct gdbarch *arch = get_value_arch (value);
1155 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1156
1157 allocate_value_contents (value);
1158
1159 ULONGEST length = TYPE_LENGTH (value_type (value));
1160 return gdb::make_array_view
1161 (value->contents.get () + value->embedded_offset * unit_size, length);
1162 }
1163
1164 gdb::array_view<gdb_byte>
1165 value_contents_all_raw (struct value *value)
1166 {
1167 allocate_value_contents (value);
1168
1169 ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
1170 return gdb::make_array_view (value->contents.get (), length);
1171 }
1172
1173 struct type *
1174 value_enclosing_type (const struct value *value)
1175 {
1176 return value->enclosing_type;
1177 }
1178
1179 /* Look at value.h for description. */
1180
1181 struct type *
1182 value_actual_type (struct value *value, int resolve_simple_types,
1183 int *real_type_found)
1184 {
1185 struct value_print_options opts;
1186 struct type *result;
1187
1188 get_user_print_options (&opts);
1189
1190 if (real_type_found)
1191 *real_type_found = 0;
1192 result = value_type (value);
1193 if (opts.objectprint)
1194 {
1195 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1196 fetch its rtti type. */
1197 if (result->is_pointer_or_reference ()
1198 && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
1199 == TYPE_CODE_STRUCT)
1200 && !value_optimized_out (value))
1201 {
1202 struct type *real_type;
1203
1204 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1205 if (real_type)
1206 {
1207 if (real_type_found)
1208 *real_type_found = 1;
1209 result = real_type;
1210 }
1211 }
1212 else if (resolve_simple_types)
1213 {
1214 if (real_type_found)
1215 *real_type_found = 1;
1216 result = value_enclosing_type (value);
1217 }
1218 }
1219
1220 return result;
1221 }
1222
1223 void
1224 error_value_optimized_out (void)
1225 {
1226 throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
1227 }
1228
1229 static void
1230 require_not_optimized_out (const struct value *value)
1231 {
1232 if (!value->optimized_out.empty ())
1233 {
1234 if (value->lval == lval_register)
1235 throw_error (OPTIMIZED_OUT_ERROR,
1236 _("register has not been saved in frame"));
1237 else
1238 error_value_optimized_out ();
1239 }
1240 }
1241
1242 static void
1243 require_available (const struct value *value)
1244 {
1245 if (!value->unavailable.empty ())
1246 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1247 }
1248
1249 gdb::array_view<const gdb_byte>
1250 value_contents_for_printing (struct value *value)
1251 {
1252 if (value->lazy)
1253 value_fetch_lazy (value);
1254
1255 ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
1256 return gdb::make_array_view (value->contents.get (), length);
1257 }
1258
1259 gdb::array_view<const gdb_byte>
1260 value_contents_for_printing_const (const struct value *value)
1261 {
1262 gdb_assert (!value->lazy);
1263
1264 ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
1265 return gdb::make_array_view (value->contents.get (), length);
1266 }
1267
1268 gdb::array_view<const gdb_byte>
1269 value_contents_all (struct value *value)
1270 {
1271 gdb::array_view<const gdb_byte> result = value_contents_for_printing (value);
1272 require_not_optimized_out (value);
1273 require_available (value);
1274 return result;
1275 }
1276
1277 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1278 SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted. */
1279
1280 static void
1281 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1282 const std::vector<range> &src_range, int src_bit_offset,
1283 int bit_length)
1284 {
1285 for (const range &r : src_range)
1286 {
1287 ULONGEST h, l;
1288
1289 l = std::max (r.offset, (LONGEST) src_bit_offset);
1290 h = std::min (r.offset + r.length,
1291 (LONGEST) src_bit_offset + bit_length);
1292
1293 if (l < h)
1294 insert_into_bit_range_vector (dst_range,
1295 dst_bit_offset + (l - src_bit_offset),
1296 h - l);
1297 }
1298 }
1299
1300 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1301 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1302
1303 static void
1304 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1305 const struct value *src, int src_bit_offset,
1306 int bit_length)
1307 {
1308 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1309 src->unavailable, src_bit_offset,
1310 bit_length);
1311 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1312 src->optimized_out, src_bit_offset,
1313 bit_length);
1314 }
1315
1316 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1317 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1318 contents, starting at DST_OFFSET. If unavailable contents are
1319 being copied from SRC, the corresponding DST contents are marked
1320 unavailable accordingly. Neither DST nor SRC may be lazy
1321 values.
1322
1323 It is assumed the contents of DST in the [DST_OFFSET,
1324 DST_OFFSET+LENGTH) range are wholly available. */
1325
1326 static void
1327 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1328 struct value *src, LONGEST src_offset, LONGEST length)
1329 {
1330 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1331 struct gdbarch *arch = get_value_arch (src);
1332 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1333
1334 /* A lazy DST would make this copy operation useless, since as
1335 soon as DST's contents were un-lazied (by a later value_contents
1336 call, say), the contents would be overwritten. A lazy SRC would
1337 mean we'd be copying garbage. */
1338 gdb_assert (!dst->lazy && !src->lazy);
1339
1340 /* The overwritten DST range gets unavailability ORed in, not
1341 replaced. Make sure to remember to implement replacing if it
1342 turns out actually necessary. */
1343 gdb_assert (value_bytes_available (dst, dst_offset, length));
1344 gdb_assert (!value_bits_any_optimized_out (dst,
1345 TARGET_CHAR_BIT * dst_offset,
1346 TARGET_CHAR_BIT * length));
1347
1348 /* Copy the data. */
1349 gdb::array_view<gdb_byte> dst_contents
1350 = value_contents_all_raw (dst).slice (dst_offset * unit_size,
1351 length * unit_size);
1352 gdb::array_view<const gdb_byte> src_contents
1353 = value_contents_all_raw (src).slice (src_offset * unit_size,
1354 length * unit_size);
1355 copy (src_contents, dst_contents);
1356
1357 /* Copy the meta-data, adjusted. */
1358 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1359 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1360 bit_length = length * unit_size * HOST_CHAR_BIT;
1361
1362 value_ranges_copy_adjusted (dst, dst_bit_offset,
1363 src, src_bit_offset,
1364 bit_length);
1365 }
1366
1367 /* Copy LENGTH bytes of SRC value's (all) contents
1368 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1369 (all) contents, starting at DST_OFFSET. If unavailable contents
1370 are being copied from SRC, the corresponding DST contents are
1371 marked unavailable accordingly. DST must not be lazy. If SRC is
1372 lazy, it will be fetched now.
1373
1374 It is assumed the contents of DST in the [DST_OFFSET,
1375 DST_OFFSET+LENGTH) range are wholly available. */
1376
1377 void
1378 value_contents_copy (struct value *dst, LONGEST dst_offset,
1379 struct value *src, LONGEST src_offset, LONGEST length)
1380 {
1381 if (src->lazy)
1382 value_fetch_lazy (src);
1383
1384 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1385 }
1386
1387 int
1388 value_lazy (const struct value *value)
1389 {
1390 return value->lazy;
1391 }
1392
1393 void
1394 set_value_lazy (struct value *value, int val)
1395 {
1396 value->lazy = val;
1397 }
1398
1399 int
1400 value_stack (const struct value *value)
1401 {
1402 return value->stack;
1403 }
1404
1405 void
1406 set_value_stack (struct value *value, int val)
1407 {
1408 value->stack = val;
1409 }
1410
1411 gdb::array_view<const gdb_byte>
1412 value_contents (struct value *value)
1413 {
1414 gdb::array_view<const gdb_byte> result = value_contents_writeable (value);
1415 require_not_optimized_out (value);
1416 require_available (value);
1417 return result;
1418 }
1419
1420 gdb::array_view<gdb_byte>
1421 value_contents_writeable (struct value *value)
1422 {
1423 if (value->lazy)
1424 value_fetch_lazy (value);
1425 return value_contents_raw (value);
1426 }
1427
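/* A quick map of the contents accessors defined above and earlier in
   this file:

     value_contents_raw       - no laziness check; view of the `type'
                                portion (embedded_offset applied).
     value_contents_all_raw   - no laziness check; view of the whole
                                enclosing_type buffer.
     value_contents_writeable - fetches a lazy value, then behaves as
                                value_contents_raw.
     value_contents           - fetches, then errors out if any part
                                is optimized out or unavailable.
     value_contents_all       - like value_contents, but for the whole
                                enclosing_type buffer.  */
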
1428 int
1429 value_optimized_out (struct value *value)
1430 {
1431 if (value->lazy)
1432 {
1433 /* See if we can compute the result without fetching the
1434 value. */
1435 if (VALUE_LVAL (value) == lval_memory)
1436 return false;
1437 else if (VALUE_LVAL (value) == lval_computed)
1438 {
1439 const struct lval_funcs *funcs = value->location.computed.funcs;
1440
1441 if (funcs->is_optimized_out != nullptr)
1442 return funcs->is_optimized_out (value);
1443 }
1444
1445 /* Fall back to fetching. */
1446 try
1447 {
1448 value_fetch_lazy (value);
1449 }
1450 catch (const gdb_exception_error &ex)
1451 {
1452 switch (ex.error)
1453 {
1454 case MEMORY_ERROR:
1455 case OPTIMIZED_OUT_ERROR:
1456 case NOT_AVAILABLE_ERROR:
1457 /* These can normally happen when we try to access an
1458 optimized out or unavailable register, either in a
1459 physical register or spilled to memory. */
1460 break;
1461 default:
1462 throw;
1463 }
1464 }
1465 }
1466
1467 return !value->optimized_out.empty ();
1468 }
1469
1470 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes and
1471 extending for the following LENGTH bytes. */
1472
1473 void
1474 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1475 {
1476 mark_value_bits_optimized_out (value,
1477 offset * TARGET_CHAR_BIT,
1478 length * TARGET_CHAR_BIT);
1479 }
1480
1481 /* See value.h. */
1482
1483 void
1484 mark_value_bits_optimized_out (struct value *value,
1485 LONGEST offset, LONGEST length)
1486 {
1487 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1488 }
1489
1490 int
1491 value_bits_synthetic_pointer (const struct value *value,
1492 LONGEST offset, LONGEST length)
1493 {
1494 if (value->lval != lval_computed
1495 || !value->location.computed.funcs->check_synthetic_pointer)
1496 return 0;
1497 return value->location.computed.funcs->check_synthetic_pointer (value,
1498 offset,
1499 length);
1500 }
1501
1502 LONGEST
1503 value_embedded_offset (const struct value *value)
1504 {
1505 return value->embedded_offset;
1506 }
1507
1508 void
1509 set_value_embedded_offset (struct value *value, LONGEST val)
1510 {
1511 value->embedded_offset = val;
1512 }
1513
1514 LONGEST
1515 value_pointed_to_offset (const struct value *value)
1516 {
1517 return value->pointed_to_offset;
1518 }
1519
1520 void
1521 set_value_pointed_to_offset (struct value *value, LONGEST val)
1522 {
1523 value->pointed_to_offset = val;
1524 }
1525
1526 const struct lval_funcs *
1527 value_computed_funcs (const struct value *v)
1528 {
1529 gdb_assert (value_lval_const (v) == lval_computed);
1530
1531 return v->location.computed.funcs;
1532 }
1533
1534 void *
1535 value_computed_closure (const struct value *v)
1536 {
1537 gdb_assert (v->lval == lval_computed);
1538
1539 return v->location.computed.closure;
1540 }
1541
1542 enum lval_type *
1543 deprecated_value_lval_hack (struct value *value)
1544 {
1545 return &value->lval;
1546 }
1547
1548 enum lval_type
1549 value_lval_const (const struct value *value)
1550 {
1551 return value->lval;
1552 }
1553
1554 CORE_ADDR
1555 value_address (const struct value *value)
1556 {
1557 if (value->lval != lval_memory)
1558 return 0;
1559 if (value->parent != NULL)
1560 return value_address (value->parent.get ()) + value->offset;
1561 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1562 {
1563 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1564 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1565 }
1566
1567 return value->location.address + value->offset;
1568 }
1569
1570 CORE_ADDR
1571 value_raw_address (const struct value *value)
1572 {
1573 if (value->lval != lval_memory)
1574 return 0;
1575 return value->location.address;
1576 }
1577
1578 void
1579 set_value_address (struct value *value, CORE_ADDR addr)
1580 {
1581 gdb_assert (value->lval == lval_memory);
1582 value->location.address = addr;
1583 }
1584
1585 struct internalvar **
1586 deprecated_value_internalvar_hack (struct value *value)
1587 {
1588 return &value->location.internalvar;
1589 }
1590
1591 struct frame_id *
1592 deprecated_value_next_frame_id_hack (struct value *value)
1593 {
1594 gdb_assert (value->lval == lval_register);
1595 return &value->location.reg.next_frame_id;
1596 }
1597
1598 int *
1599 deprecated_value_regnum_hack (struct value *value)
1600 {
1601 gdb_assert (value->lval == lval_register);
1602 return &value->location.reg.regnum;
1603 }
1604
1605 int
1606 deprecated_value_modifiable (const struct value *value)
1607 {
1608 return value->modifiable;
1609 }
1610 \f
1611 /* Return a mark in the value chain. All values allocated after the
1612 mark is obtained (except for those released) are subject to being freed
1613 if a subsequent value_free_to_mark is passed the mark. */
1614 struct value *
1615 value_mark (void)
1616 {
1617 if (all_values.empty ())
1618 return nullptr;
1619 return all_values.back ().get ();
1620 }
1621
1622 /* See value.h. */
1623
1624 void
1625 value_incref (struct value *val)
1626 {
1627 val->reference_count++;
1628 }
1629
1630 /* Release a reference to VAL, which was acquired with value_incref.
1631 This function is also called to deallocate values from the value
1632 chain. */
1633
1634 void
1635 value_decref (struct value *val)
1636 {
1637 if (val != nullptr)
1638 {
1639 gdb_assert (val->reference_count > 0);
1640 val->reference_count--;
1641 if (val->reference_count == 0)
1642 delete val;
1643 }
1644 }
1645
1646 /* Free all values allocated since MARK was obtained by value_mark
1647 (except for those released). */
1648 void
1649 value_free_to_mark (const struct value *mark)
1650 {
1651 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1652 if (iter == all_values.end ())
1653 all_values.clear ();
1654 else
1655 all_values.erase (iter + 1, all_values.end ());
1656 }
1657
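/* The usual pattern for bounding the lifetime of temporary values
   created while evaluating something is:

     struct value *mark = value_mark ();
     ... code that allocates values ...
     value_free_to_mark (mark);

   A value that must outlive the region is instead passed to
   release_value (or returned by value_release_to_mark below), which
   removes it from the chain and hands the reference to the caller.  */
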
1658 /* Remove VAL from the chain all_values
1659 so it will not be freed automatically. */
1660
1661 value_ref_ptr
1662 release_value (struct value *val)
1663 {
1664 if (val == nullptr)
1665 return value_ref_ptr ();
1666
1667 std::vector<value_ref_ptr>::reverse_iterator iter;
1668 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1669 {
1670 if (*iter == val)
1671 {
1672 value_ref_ptr result = *iter;
1673 all_values.erase (iter.base () - 1);
1674 return result;
1675 }
1676 }
1677
1678 /* We must always return an owned reference. Normally this happens
1679 because we transfer the reference from the value chain, but in
1680 this case the value was not on the chain. */
1681 return value_ref_ptr::new_reference (val);
1682 }
1683
1684 /* See value.h. */
1685
1686 std::vector<value_ref_ptr>
1687 value_release_to_mark (const struct value *mark)
1688 {
1689 std::vector<value_ref_ptr> result;
1690
1691 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1692 if (iter == all_values.end ())
1693 std::swap (result, all_values);
1694 else
1695 {
1696 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1697 all_values.erase (iter + 1, all_values.end ());
1698 }
1699 std::reverse (result.begin (), result.end ());
1700 return result;
1701 }
1702
1703 /* Return a copy of the value ARG.
1704 It contains the same contents, for the same memory address,
1705 but it's a different block of storage. */
1706
1707 struct value *
1708 value_copy (const value *arg)
1709 {
1710 struct type *encl_type = value_enclosing_type (arg);
1711 struct value *val;
1712
1713 if (value_lazy (arg))
1714 val = allocate_value_lazy (encl_type);
1715 else
1716 val = allocate_value (encl_type);
1717 val->type = arg->type;
1718 VALUE_LVAL (val) = arg->lval;
1719 val->location = arg->location;
1720 val->offset = arg->offset;
1721 val->bitpos = arg->bitpos;
1722 val->bitsize = arg->bitsize;
1723 val->lazy = arg->lazy;
1724 val->embedded_offset = value_embedded_offset (arg);
1725 val->pointed_to_offset = arg->pointed_to_offset;
1726 val->modifiable = arg->modifiable;
1727 val->stack = arg->stack;
1728 val->is_zero = arg->is_zero;
1729 val->initialized = arg->initialized;
1730 val->unavailable = arg->unavailable;
1731 val->optimized_out = arg->optimized_out;
1732
1733 if (!value_lazy (val) && !value_entirely_optimized_out (val))
1734 {
1735 gdb_assert (arg->contents != nullptr);
1736 ULONGEST length = TYPE_LENGTH (value_enclosing_type (arg));
1737 const auto &arg_view
1738 = gdb::make_array_view (arg->contents.get (), length);
1739 copy (arg_view, value_contents_all_raw (val));
1740 }
1741
1742 val->parent = arg->parent;
1743 if (VALUE_LVAL (val) == lval_computed)
1744 {
1745 const struct lval_funcs *funcs = val->location.computed.funcs;
1746
1747 if (funcs->copy_closure)
1748 val->location.computed.closure = funcs->copy_closure (val);
1749 }
1750 return val;
1751 }
1752
1753 /* Return a "const" and/or "volatile" qualified version of the value V.
1754 If CNST is true, then the returned value will be qualified with
1755 "const".
1756 If VOLTL is true, then the returned value will be qualified with
1757 "volatile". */
1758
1759 struct value *
1760 make_cv_value (int cnst, int voltl, struct value *v)
1761 {
1762 struct type *val_type = value_type (v);
1763 struct type *enclosing_type = value_enclosing_type (v);
1764 struct value *cv_val = value_copy (v);
1765
1766 deprecated_set_value_type (cv_val,
1767 make_cv_type (cnst, voltl, val_type, NULL));
1768 set_value_enclosing_type (cv_val,
1769 make_cv_type (cnst, voltl, enclosing_type, NULL));
1770
1771 return cv_val;
1772 }
1773
1774 /* Return a version of ARG that is non-lvalue. */
1775
1776 struct value *
1777 value_non_lval (struct value *arg)
1778 {
1779 if (VALUE_LVAL (arg) != not_lval)
1780 {
1781 struct type *enc_type = value_enclosing_type (arg);
1782 struct value *val = allocate_value (enc_type);
1783
1784 copy (value_contents_all (arg), value_contents_all_raw (val));
1785 val->type = arg->type;
1786 set_value_embedded_offset (val, value_embedded_offset (arg));
1787 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1788 return val;
1789 }
1790 return arg;
1791 }
1792
1793 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1794
1795 void
1796 value_force_lval (struct value *v, CORE_ADDR addr)
1797 {
1798 gdb_assert (VALUE_LVAL (v) == not_lval);
1799
1800 write_memory (addr, value_contents_raw (v).data (), TYPE_LENGTH (value_type (v)));
1801 v->lval = lval_memory;
1802 v->location.address = addr;
1803 }
1804
1805 void
1806 set_value_component_location (struct value *component,
1807 const struct value *whole)
1808 {
1809 struct type *type;
1810
1811 gdb_assert (whole->lval != lval_xcallable);
1812
1813 if (whole->lval == lval_internalvar)
1814 VALUE_LVAL (component) = lval_internalvar_component;
1815 else
1816 VALUE_LVAL (component) = whole->lval;
1817
1818 component->location = whole->location;
1819 if (whole->lval == lval_computed)
1820 {
1821 const struct lval_funcs *funcs = whole->location.computed.funcs;
1822
1823 if (funcs->copy_closure)
1824 component->location.computed.closure = funcs->copy_closure (whole);
1825 }
1826
1827 /* If the WHOLE value has a dynamically resolved location property then
1828 update the address of the COMPONENT. */
1829 type = value_type (whole);
1830 if (NULL != TYPE_DATA_LOCATION (type)
1831 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1832 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1833
1834 /* Similarly, if the COMPONENT value has a dynamically resolved location
1835 property then update its address. */
1836 type = value_type (component);
1837 if (NULL != TYPE_DATA_LOCATION (type)
1838 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1839 {
1840 /* If the COMPONENT has a dynamic location, and is an
1841 lval_internalvar_component, then we change it to a lval_memory.
1842
1843 Usually a component of an internalvar is created non-lazy, and has
1844 its content immediately copied from the parent internalvar.
1845 However, for components with a dynamic location, the content of
1846 the component is not contained within the parent, but is instead
1847 accessed indirectly. Further, the component will be created as a
1848 lazy value.
1849
1850 By changing the type of the component to lval_memory we ensure
1851 that value_fetch_lazy can successfully load the component.
1852
1853 This solution isn't ideal, but a real fix would require values to
1854 carry around both the parent value contents, and the contents of
1855 any dynamic fields within the parent. This is a substantial
1856 change to how values work in GDB. */
1857 if (VALUE_LVAL (component) == lval_internalvar_component)
1858 {
1859 gdb_assert (value_lazy (component));
1860 VALUE_LVAL (component) = lval_memory;
1861 }
1862 else
1863 gdb_assert (VALUE_LVAL (component) == lval_memory);
1864 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1865 }
1866 }
1867
1868 /* Access to the value history. */
1869
1870 /* Record a new value in the value history.
1871 Returns the absolute history index of the entry. */
1872
1873 int
1874 record_latest_value (struct value *val)
1875 {
1876 /* We don't want this value to have anything to do with the inferior anymore.
1877 In particular, "set $1 = 50" should not affect the variable from which
1878 the value was taken, and fast watchpoints should be able to assume that
1879 a value on the value history never changes. */
1880 if (value_lazy (val))
1881 value_fetch_lazy (val);
1882 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1883 from. This is a bit dubious, because then *&$1 does not just return $1
1884 but the current contents of that location. c'est la vie... */
1885 val->modifiable = 0;
1886
1887 value_history.push_back (release_value (val));
1888
1889 return value_history.size ();
1890 }
1891
1892 /* Return a copy of the value in the history with sequence number NUM. */
1893
1894 struct value *
1895 access_value_history (int num)
1896 {
1897 int absnum = num;
1898
1899 if (absnum <= 0)
1900 absnum += value_history.size ();
1901
1902 if (absnum <= 0)
1903 {
1904 if (num == 0)
1905 error (_("The history is empty."));
1906 else if (num == 1)
1907 error (_("There is only one value in the history."));
1908 else
1909 error (_("History does not go back to $$%d."), -num);
1910 }
1911 if (absnum > value_history.size ())
1912 error (_("History has not yet reached $%d."), absnum);
1913
1914 absnum--;
1915
1916 return value_copy (value_history[absnum].get ());
1917 }
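
/* An illustrative sketch of the index convention used above, assuming
   the history currently holds three values $1, $2 and $3 (the numbers
   are hypothetical):

     access_value_history (2);    // a copy of $2
     access_value_history (0);    // NUM == 0 means the latest value, $3
     access_value_history (-1);   // one before the latest, i.e. $2

   A non-positive NUM is biased by the history size, which is how the
   "$$n" style of reference counts backwards from the end.  */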
1918
1919 /* See value.h. */
1920
1921 ULONGEST
1922 value_history_count ()
1923 {
1924 return value_history.size ();
1925 }
1926
1927 static void
1928 show_values (const char *num_exp, int from_tty)
1929 {
1930 int i;
1931 struct value *val;
1932 static int num = 1;
1933
1934 if (num_exp)
1935 {
1936 /* "show values +" should print from the stored position.
1937 "show values <exp>" should print around value number <exp>. */
1938 if (num_exp[0] != '+' || num_exp[1] != '\0')
1939 num = parse_and_eval_long (num_exp) - 5;
1940 }
1941 else
1942 {
1943 /* "show values" means print the last 10 values. */
1944 num = value_history.size () - 9;
1945 }
1946
1947 if (num <= 0)
1948 num = 1;
1949
1950 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1951 {
1952 struct value_print_options opts;
1953
1954 val = access_value_history (i);
1955 gdb_printf (("$%d = "), i);
1956 get_user_print_options (&opts);
1957 value_print (val, gdb_stdout, &opts);
1958 gdb_printf (("\n"));
1959 }
1960
1961 /* The next "show values +" should start after what we just printed. */
1962 num += 10;
1963
1964 /* Hitting just return after this command should do the same thing as
1965 "show values +". If num_exp is null, this is unnecessary, since
1966 "show values +" is not useful after "show values". */
1967 if (from_tty && num_exp)
1968 set_repeat_arguments ("+");
1969 }
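
/* An illustrative sketch of the paging behaviour above, assuming the
   history currently holds 30 values:

     (gdb) show values        // prints $21 ... $30 (the last ten)
     (gdb) show values 15     // prints $10 ... $19 (around number 15)
     (gdb) show values +      // continues with $20 ... $29

   Each invocation leaves NUM pointing just past what was printed.  */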
1970 \f
1971 enum internalvar_kind
1972 {
1973 /* The internal variable is empty. */
1974 INTERNALVAR_VOID,
1975
1976 /* The value of the internal variable is provided directly as
1977 a GDB value object. */
1978 INTERNALVAR_VALUE,
1979
1980 /* A fresh value is computed via a call-back routine on every
1981 access to the internal variable. */
1982 INTERNALVAR_MAKE_VALUE,
1983
1984 /* The internal variable holds a GDB internal convenience function. */
1985 INTERNALVAR_FUNCTION,
1986
1987 /* The variable holds an integer value. */
1988 INTERNALVAR_INTEGER,
1989
1990 /* The variable holds a GDB-provided string. */
1991 INTERNALVAR_STRING,
1992 };
1993
1994 union internalvar_data
1995 {
1996 /* A value object used with INTERNALVAR_VALUE. */
1997 struct value *value;
1998
1999 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
2000 struct
2001 {
2002 /* The functions to call. */
2003 const struct internalvar_funcs *functions;
2004
2005 /* The function's user-data. */
2006 void *data;
2007 } make_value;
2008
2009 /* The internal function used with INTERNALVAR_FUNCTION. */
2010 struct
2011 {
2012 struct internal_function *function;
2013 /* True if this is the canonical name for the function. */
2014 int canonical;
2015 } fn;
2016
2017 /* An integer value used with INTERNALVAR_INTEGER. */
2018 struct
2019 {
2020 /* If type is non-NULL, it will be used as the type to generate
2021 a value for this internal variable. If type is NULL, a default
2022 integer type for the architecture is used. */
2023 struct type *type;
2024 LONGEST val;
2025 } integer;
2026
2027 /* A string value used with INTERNALVAR_STRING. */
2028 char *string;
2029 };
2030
2031 /* Internal variables. These are variables within the debugger
2032 that hold values assigned by debugger commands.
2033 The user refers to them with a '$' prefix
2034 that does not appear in the variable names stored internally. */
2035
2036 struct internalvar
2037 {
2038 struct internalvar *next;
2039 char *name;
2040
2041 /* We support various different kinds of content of an internal variable.
2042 enum internalvar_kind specifies the kind, and union internalvar_data
2043 provides the data associated with this particular kind. */
2044
2045 enum internalvar_kind kind;
2046
2047 union internalvar_data u;
2048 };
2049
2050 static struct internalvar *internalvars;
2051
2052 /* If the variable does not already exist, create it and give it the
2053 value given. If no value is given then the default is zero. */
2054 static void
2055 init_if_undefined_command (const char *args, int from_tty)
2056 {
2057 struct internalvar *intvar = nullptr;
2058
2059 /* Parse the expression - this is taken from set_command(). */
2060 expression_up expr = parse_expression (args);
2061
2062 /* Validate the expression.
2063 Was the expression an assignment?
2064 Or even an expression at all? */
2065 if (expr->first_opcode () != BINOP_ASSIGN)
2066 error (_("Init-if-undefined requires an assignment expression."));
2067
2068 /* Extract the variable from the parsed expression. */
2069 expr::assign_operation *assign
2070 = dynamic_cast<expr::assign_operation *> (expr->op.get ());
2071 if (assign != nullptr)
2072 {
2073 expr::operation *lhs = assign->get_lhs ();
2074 expr::internalvar_operation *ivarop
2075 = dynamic_cast<expr::internalvar_operation *> (lhs);
2076 if (ivarop != nullptr)
2077 intvar = ivarop->get_internalvar ();
2078 }
2079
2080 if (intvar == nullptr)
2081 error (_("The first parameter to init-if-undefined "
2082 "should be a GDB variable."));
2083
2084 /* Only evaluate the expression if the lvalue is void.
2085 This may still fail if the expression is invalid. */
2086 if (intvar->kind == INTERNALVAR_VOID)
2087 evaluate_expression (expr.get ());
2088 }
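
/* A usage sketch for the command implemented above (the variable name
   is hypothetical):

     (gdb) init-if-undefined $answer = 42     // $answer was void: assign
     (gdb) init-if-undefined $answer = 0      // already set: no effect
     (gdb) print $answer
     $1 = 42
*/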
2089
2090
2091 /* Look up an internal variable with name NAME. NAME should not
2092 normally include a dollar sign.
2093
2094 If the specified internal variable does not exist,
2095 the return value is NULL. */
2096
2097 struct internalvar *
2098 lookup_only_internalvar (const char *name)
2099 {
2100 struct internalvar *var;
2101
2102 for (var = internalvars; var; var = var->next)
2103 if (strcmp (var->name, name) == 0)
2104 return var;
2105
2106 return NULL;
2107 }
2108
2109 /* Complete NAME by comparing it to the names of internal
2110 variables. */
2111
2112 void
2113 complete_internalvar (completion_tracker &tracker, const char *name)
2114 {
2115 struct internalvar *var;
2116 int len;
2117
2118 len = strlen (name);
2119
2120 for (var = internalvars; var; var = var->next)
2121 if (strncmp (var->name, name, len) == 0)
2122 tracker.add_completion (make_unique_xstrdup (var->name));
2123 }
2124
2125 /* Create an internal variable with name NAME and with a void value.
2126 NAME should not normally include a dollar sign. */
2127
2128 struct internalvar *
2129 create_internalvar (const char *name)
2130 {
2131 struct internalvar *var = XNEW (struct internalvar);
2132
2133 var->name = xstrdup (name);
2134 var->kind = INTERNALVAR_VOID;
2135 var->next = internalvars;
2136 internalvars = var;
2137 return var;
2138 }
2139
2140 /* Create an internal variable with name NAME and register FUNCS as
2141 the set of callbacks that value_of_internalvar uses to create a
2142 value whenever this variable is referenced. NAME should not
2143 normally include a dollar sign. DATA is passed uninterpreted to
2144 the FUNCS callbacks whenever they are called; in particular it is
2145 the last argument passed to the make_value callback. */
2146
2147 struct internalvar *
2148 create_internalvar_type_lazy (const char *name,
2149 const struct internalvar_funcs *funcs,
2150 void *data)
2151 {
2152 struct internalvar *var = create_internalvar (name);
2153
2154 var->kind = INTERNALVAR_MAKE_VALUE;
2155 var->u.make_value.functions = funcs;
2156 var->u.make_value.data = data;
2157 return var;
2158 }
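
/* An illustrative sketch of registering a lazily computed convenience
   variable through the function above.  The names are hypothetical and
   the member order of internalvar_funcs is assumed to match value.h:

     static struct value *
     example_make_value (struct gdbarch *gdbarch, struct internalvar *var,
                         void *data)
     {
       // Recomputed on every access to $_example.
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     }

     static const struct internalvar_funcs example_funcs =
     {
       example_make_value,
       nullptr,                 // no compile_to_ax support
     };

     // Typically called from an _initialize_* function:
     create_internalvar_type_lazy ("_example", &example_funcs, nullptr);
*/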
2159
2160 /* See documentation in value.h. */
2161
2162 int
2163 compile_internalvar_to_ax (struct internalvar *var,
2164 struct agent_expr *expr,
2165 struct axs_value *value)
2166 {
2167 if (var->kind != INTERNALVAR_MAKE_VALUE
2168 || var->u.make_value.functions->compile_to_ax == NULL)
2169 return 0;
2170
2171 var->u.make_value.functions->compile_to_ax (var, expr, value,
2172 var->u.make_value.data);
2173 return 1;
2174 }
2175
2176 /* Look up an internal variable with name NAME. NAME should not
2177 normally include a dollar sign.
2178
2179 If the specified internal variable does not exist,
2180 one is created, with a void value. */
2181
2182 struct internalvar *
2183 lookup_internalvar (const char *name)
2184 {
2185 struct internalvar *var;
2186
2187 var = lookup_only_internalvar (name);
2188 if (var)
2189 return var;
2190
2191 return create_internalvar (name);
2192 }
2193
2194 /* Return current value of internal variable VAR. For variables that
2195 are not inherently typed, use a value type appropriate for GDBARCH. */
2196
2197 struct value *
2198 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2199 {
2200 struct value *val;
2201 struct trace_state_variable *tsv;
2202
2203 /* If there is a trace state variable of the same name, assume that
2204 is what we really want to see. */
2205 tsv = find_trace_state_variable (var->name);
2206 if (tsv)
2207 {
2208 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2209 &(tsv->value));
2210 if (tsv->value_known)
2211 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2212 tsv->value);
2213 else
2214 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2215 return val;
2216 }
2217
2218 switch (var->kind)
2219 {
2220 case INTERNALVAR_VOID:
2221 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2222 break;
2223
2224 case INTERNALVAR_FUNCTION:
2225 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2226 break;
2227
2228 case INTERNALVAR_INTEGER:
2229 if (!var->u.integer.type)
2230 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2231 var->u.integer.val);
2232 else
2233 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2234 break;
2235
2236 case INTERNALVAR_STRING:
2237 val = value_cstring (var->u.string, strlen (var->u.string),
2238 builtin_type (gdbarch)->builtin_char);
2239 break;
2240
2241 case INTERNALVAR_VALUE:
2242 val = value_copy (var->u.value);
2243 if (value_lazy (val))
2244 value_fetch_lazy (val);
2245 break;
2246
2247 case INTERNALVAR_MAKE_VALUE:
2248 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2249 var->u.make_value.data);
2250 break;
2251
2252 default:
2253 internal_error (__FILE__, __LINE__, _("bad kind"));
2254 }
2255
2256 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2257 on this value go back to affect the original internal variable.
2258
2259 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2260 no underlying modifiable state in the internal variable.
2261
2262 Likewise, if the variable's value is a computed lvalue, we want
2263 references to it to produce another computed lvalue, where
2264 references and assignments actually operate through the
2265 computed value's functions.
2266
2267 This means that internal variables with computed values
2268 behave a little differently from other internal variables:
2269 assignments to them don't just replace the previous value
2270 altogether. At the moment, this seems like the behavior we
2271 want. */
2272
2273 if (var->kind != INTERNALVAR_MAKE_VALUE
2274 && val->lval != lval_computed)
2275 {
2276 VALUE_LVAL (val) = lval_internalvar;
2277 VALUE_INTERNALVAR (val) = var;
2278 }
2279
2280 return val;
2281 }
2282
2283 int
2284 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2285 {
2286 if (var->kind == INTERNALVAR_INTEGER)
2287 {
2288 *result = var->u.integer.val;
2289 return 1;
2290 }
2291
2292 if (var->kind == INTERNALVAR_VALUE)
2293 {
2294 struct type *type = check_typedef (value_type (var->u.value));
2295
2296 if (type->code () == TYPE_CODE_INT)
2297 {
2298 *result = value_as_long (var->u.value);
2299 return 1;
2300 }
2301 }
2302
2303 return 0;
2304 }
2305
2306 static int
2307 get_internalvar_function (struct internalvar *var,
2308 struct internal_function **result)
2309 {
2310 switch (var->kind)
2311 {
2312 case INTERNALVAR_FUNCTION:
2313 *result = var->u.fn.function;
2314 return 1;
2315
2316 default:
2317 return 0;
2318 }
2319 }
2320
2321 void
2322 set_internalvar_component (struct internalvar *var,
2323 LONGEST offset, LONGEST bitpos,
2324 LONGEST bitsize, struct value *newval)
2325 {
2326 gdb_byte *addr;
2327 struct gdbarch *arch;
2328 int unit_size;
2329
2330 switch (var->kind)
2331 {
2332 case INTERNALVAR_VALUE:
2333 addr = value_contents_writeable (var->u.value).data ();
2334 arch = get_value_arch (var->u.value);
2335 unit_size = gdbarch_addressable_memory_unit_size (arch);
2336
2337 if (bitsize)
2338 modify_field (value_type (var->u.value), addr + offset,
2339 value_as_long (newval), bitpos, bitsize);
2340 else
2341 memcpy (addr + offset * unit_size, value_contents (newval).data (),
2342 TYPE_LENGTH (value_type (newval)));
2343 break;
2344
2345 default:
2346 /* We can never get a component of any other kind. */
2347 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2348 }
2349 }
2350
2351 void
2352 set_internalvar (struct internalvar *var, struct value *val)
2353 {
2354 enum internalvar_kind new_kind;
2355 union internalvar_data new_data = { 0 };
2356
2357 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2358 error (_("Cannot overwrite convenience function %s"), var->name);
2359
2360 /* Prepare new contents. */
2361 switch (check_typedef (value_type (val))->code ())
2362 {
2363 case TYPE_CODE_VOID:
2364 new_kind = INTERNALVAR_VOID;
2365 break;
2366
2367 case TYPE_CODE_INTERNAL_FUNCTION:
2368 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2369 new_kind = INTERNALVAR_FUNCTION;
2370 get_internalvar_function (VALUE_INTERNALVAR (val),
2371 &new_data.fn.function);
2372 /* Copies created here are never canonical. */
2373 break;
2374
2375 default:
2376 new_kind = INTERNALVAR_VALUE;
2377 struct value *copy = value_copy (val);
2378 copy->modifiable = 1;
2379
2380 /* Force the value to be fetched from the target now, to avoid problems
2381 later when this internalvar is referenced and the target is gone or
2382 has changed. */
2383 if (value_lazy (copy))
2384 value_fetch_lazy (copy);
2385
2386 /* Release the value from the value chain to prevent it from being
2387 deleted by free_all_values. From here on this function should not
2388 call error () until new_data is installed into the var->u to avoid
2389 leaking memory. */
2390 new_data.value = release_value (copy).release ();
2391
2392 /* Internal variables which are created from values with a dynamic
2393 location don't need the location property of the origin anymore.
2394 The resolved dynamic location takes precedence over any other
2395 address when accessing the value.
2396 If we kept the property, we would still refer to the origin value.
2397 Remove the location property if it exists. */
2398 value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
2399
2400 break;
2401 }
2402
2403 /* Clean up old contents. */
2404 clear_internalvar (var);
2405
2406 /* Switch over. */
2407 var->kind = new_kind;
2408 var->u = new_data;
2409 /* End code which must not call error(). */
2410 }
2411
2412 void
2413 set_internalvar_integer (struct internalvar *var, LONGEST l)
2414 {
2415 /* Clean up old contents. */
2416 clear_internalvar (var);
2417
2418 var->kind = INTERNALVAR_INTEGER;
2419 var->u.integer.type = NULL;
2420 var->u.integer.val = l;
2421 }
2422
2423 void
2424 set_internalvar_string (struct internalvar *var, const char *string)
2425 {
2426 /* Clean up old contents. */
2427 clear_internalvar (var);
2428
2429 var->kind = INTERNALVAR_STRING;
2430 var->u.string = xstrdup (string);
2431 }
2432
2433 static void
2434 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2435 {
2436 /* Clean up old contents. */
2437 clear_internalvar (var);
2438
2439 var->kind = INTERNALVAR_FUNCTION;
2440 var->u.fn.function = f;
2441 var->u.fn.canonical = 1;
2442 /* Variables installed here are always the canonical version. */
2443 }
2444
2445 void
2446 clear_internalvar (struct internalvar *var)
2447 {
2448 /* Clean up old contents. */
2449 switch (var->kind)
2450 {
2451 case INTERNALVAR_VALUE:
2452 value_decref (var->u.value);
2453 break;
2454
2455 case INTERNALVAR_STRING:
2456 xfree (var->u.string);
2457 break;
2458
2459 default:
2460 break;
2461 }
2462
2463 /* Reset to void kind. */
2464 var->kind = INTERNALVAR_VOID;
2465 }
2466
2467 const char *
2468 internalvar_name (const struct internalvar *var)
2469 {
2470 return var->name;
2471 }
2472
2473 static struct internal_function *
2474 create_internal_function (const char *name,
2475 internal_function_fn handler, void *cookie)
2476 {
2477 struct internal_function *ifn = XNEW (struct internal_function);
2478
2479 ifn->name = xstrdup (name);
2480 ifn->handler = handler;
2481 ifn->cookie = cookie;
2482 return ifn;
2483 }
2484
2485 const char *
2486 value_internal_function_name (struct value *val)
2487 {
2488 struct internal_function *ifn;
2489 int result;
2490
2491 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2492 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2493 gdb_assert (result);
2494
2495 return ifn->name;
2496 }
2497
2498 struct value *
2499 call_internal_function (struct gdbarch *gdbarch,
2500 const struct language_defn *language,
2501 struct value *func, int argc, struct value **argv)
2502 {
2503 struct internal_function *ifn;
2504 int result;
2505
2506 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2507 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2508 gdb_assert (result);
2509
2510 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2511 }
2512
2513 /* The 'function' command. This does nothing -- it is just a
2514 placeholder to let "help function NAME" work. This is also used as
2515 the implementation of the sub-command that is created when
2516 registering an internal function. */
2517 static void
2518 function_command (const char *command, int from_tty)
2519 {
2520 /* Do nothing. */
2521 }
2522
2523 /* Helper function that does the work for add_internal_function. */
2524
2525 static struct cmd_list_element *
2526 do_add_internal_function (const char *name, const char *doc,
2527 internal_function_fn handler, void *cookie)
2528 {
2529 struct internal_function *ifn;
2530 struct internalvar *var = lookup_internalvar (name);
2531
2532 ifn = create_internal_function (name, handler, cookie);
2533 set_internalvar_function (var, ifn);
2534
2535 return add_cmd (name, no_class, function_command, doc, &functionlist);
2536 }
2537
2538 /* See value.h. */
2539
2540 void
2541 add_internal_function (const char *name, const char *doc,
2542 internal_function_fn handler, void *cookie)
2543 {
2544 do_add_internal_function (name, doc, handler, cookie);
2545 }
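
/* An illustrative sketch of registering a convenience function with
   the overload above.  The handler and names are hypothetical:

     static struct value *
     example_fn_handler (struct gdbarch *gdbarch,
                         const struct language_defn *language,
                         void *cookie, int argc, struct value **argv)
     {
       // $_example_fn (...) simply reports how many arguments it got.
       return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
     }

     add_internal_function ("_example_fn", "Count the arguments.",
                            example_fn_handler, nullptr);

   The resulting $_example_fn can then be called in expressions, and
   "help function _example_fn" prints the documentation string.  */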
2546
2547 /* See value.h. */
2548
2549 void
2550 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2551 gdb::unique_xmalloc_ptr<char> &&doc,
2552 internal_function_fn handler, void *cookie)
2553 {
2554 struct cmd_list_element *cmd
2555 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2556 doc.release ();
2557 cmd->doc_allocated = 1;
2558 name.release ();
2559 cmd->name_allocated = 1;
2560 }
2561
2562 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2563 prevent cycles / duplicates. */
2564
2565 void
2566 preserve_one_value (struct value *value, struct objfile *objfile,
2567 htab_t copied_types)
2568 {
2569 if (value->type->objfile_owner () == objfile)
2570 value->type = copy_type_recursive (objfile, value->type, copied_types);
2571
2572 if (value->enclosing_type->objfile_owner () == objfile)
2573 value->enclosing_type = copy_type_recursive (objfile,
2574 value->enclosing_type,
2575 copied_types);
2576 }
2577
2578 /* Likewise for internal variable VAR. */
2579
2580 static void
2581 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2582 htab_t copied_types)
2583 {
2584 switch (var->kind)
2585 {
2586 case INTERNALVAR_INTEGER:
2587 if (var->u.integer.type
2588 && var->u.integer.type->objfile_owner () == objfile)
2589 var->u.integer.type
2590 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2591 break;
2592
2593 case INTERNALVAR_VALUE:
2594 preserve_one_value (var->u.value, objfile, copied_types);
2595 break;
2596 }
2597 }
2598
2599 /* Update the internal variables and value history when OBJFILE is
2600 discarded; we must copy the types out of the objfile. New global types
2601 will be created for every convenience variable which currently points to
2602 this objfile's types, and the convenience variables will be adjusted to
2603 use the new global types. */
2604
2605 void
2606 preserve_values (struct objfile *objfile)
2607 {
2608 struct internalvar *var;
2609
2610 /* Create the hash table. We allocate on the objfile's obstack, since
2611 it is soon to be deleted. */
2612 htab_up copied_types = create_copied_types_hash (objfile);
2613
2614 for (const value_ref_ptr &item : value_history)
2615 preserve_one_value (item.get (), objfile, copied_types.get ());
2616
2617 for (var = internalvars; var; var = var->next)
2618 preserve_one_internalvar (var, objfile, copied_types.get ());
2619
2620 preserve_ext_lang_values (objfile, copied_types.get ());
2621 }
2622
2623 static void
2624 show_convenience (const char *ignore, int from_tty)
2625 {
2626 struct gdbarch *gdbarch = get_current_arch ();
2627 struct internalvar *var;
2628 int varseen = 0;
2629 struct value_print_options opts;
2630
2631 get_user_print_options (&opts);
2632 for (var = internalvars; var; var = var->next)
2633 {
2634
2635 if (!varseen)
2636 {
2637 varseen = 1;
2638 }
2639 gdb_printf (("$%s = "), var->name);
2640
2641 try
2642 {
2643 struct value *val;
2644
2645 val = value_of_internalvar (gdbarch, var);
2646 value_print (val, gdb_stdout, &opts);
2647 }
2648 catch (const gdb_exception_error &ex)
2649 {
2650 fprintf_styled (gdb_stdout, metadata_style.style (),
2651 _("<error: %s>"), ex.what ());
2652 }
2653
2654 gdb_printf (("\n"));
2655 }
2656 if (!varseen)
2657 {
2658 /* This text does not mention convenience functions on purpose.
2659 The user can't create them except via Python, and if Python support
2660 is installed this message will never be printed ($_streq will
2661 exist). */
2662 gdb_printf (_("No debugger convenience variables now defined.\n"
2663 "Convenience variables have "
2664 "names starting with \"$\";\n"
2665 "use \"set\" as in \"set "
2666 "$foo = 5\" to define them.\n"));
2667 }
2668 }
2669 \f
2670
2671 /* See value.h. */
2672
2673 struct value *
2674 value_from_xmethod (xmethod_worker_up &&worker)
2675 {
2676 struct value *v;
2677
2678 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2679 v->lval = lval_xcallable;
2680 v->location.xm_worker = worker.release ();
2681 v->modifiable = 0;
2682
2683 return v;
2684 }
2685
2686 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2687
2688 struct type *
2689 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2690 {
2691 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2692 && method->lval == lval_xcallable && !argv.empty ());
2693
2694 return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2695 }
2696
2697 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2698
2699 struct value *
2700 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2701 {
2702 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2703 && method->lval == lval_xcallable && !argv.empty ());
2704
2705 return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2706 }
2707 \f
2708 /* Extract a value as a C number (either long or double).
2709 Knows how to convert fixed values to double, or
2710 floating values to long.
2711 Does not deallocate the value. */
2712
2713 LONGEST
2714 value_as_long (struct value *val)
2715 {
2716 /* This coerces arrays and functions, which is necessary (e.g.
2717 in disassemble_command). It also dereferences references, which
2718 I suspect is the most logical thing to do. */
2719 val = coerce_array (val);
2720 return unpack_long (value_type (val), value_contents (val).data ());
2721 }
2722
2723 /* Extract a value as a C pointer. Does not deallocate the value.
2724 Note that val's type may not actually be a pointer; value_as_long
2725 handles all the cases. */
2726 CORE_ADDR
2727 value_as_address (struct value *val)
2728 {
2729 struct gdbarch *gdbarch = value_type (val)->arch ();
2730
2731 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2732 whether we want this to be true eventually. */
2733 #if 0
2734 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2735 non-address (e.g. argument to "signal", "info break", etc.), or
2736 for pointers to char, in which the low bits *are* significant. */
2737 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2738 #else
2739
2740 /* There are several targets (IA-64, PowerPC, and others) which
2741 don't represent pointers to functions as simply the address of
2742 the function's entry point. For example, on the IA-64, a
2743 function pointer points to a two-word descriptor, generated by
2744 the linker, which contains the function's entry point, and the
2745 value the IA-64 "global pointer" register should have --- to
2746 support position-independent code. The linker generates
2747 descriptors only for those functions whose addresses are taken.
2748
2749 On such targets, it's difficult for GDB to convert an arbitrary
2750 function address into a function pointer; it has to either find
2751 an existing descriptor for that function, or call malloc and
2752 build its own. On some targets, it is impossible for GDB to
2753 build a descriptor at all: the descriptor must contain a jump
2754 instruction; data memory cannot be executed; and code memory
2755 cannot be modified.
2756
2757 Upon entry to this function, if VAL is a value of type `function'
2758 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2759 value_address (val) is the address of the function. This is what
2760 you'll get if you evaluate an expression like `main'. The call
2761 to COERCE_ARRAY below actually does all the usual unary
2762 conversions, which includes converting values of type `function'
2763 to `pointer to function'. This is the challenging conversion
2764 discussed above. Then, `unpack_long' will convert that pointer
2765 back into an address.
2766
2767 So, suppose the user types `disassemble foo' on an architecture
2768 with a strange function pointer representation, on which GDB
2769 cannot build its own descriptors, and suppose further that `foo'
2770 has no linker-built descriptor. The address->pointer conversion
2771 will signal an error and prevent the command from running, even
2772 though the next step would have been to convert the pointer
2773 directly back into the same address.
2774
2775 The following shortcut avoids this whole mess. If VAL is a
2776 function, just return its address directly. */
2777 if (value_type (val)->code () == TYPE_CODE_FUNC
2778 || value_type (val)->code () == TYPE_CODE_METHOD)
2779 return value_address (val);
2780
2781 val = coerce_array (val);
2782
2783 /* Some architectures (e.g. Harvard) map instruction and data
2784 addresses onto a single large unified address space. For
2785 instance, an architecture may consider a large integer in the
2786 range 0x10000000 .. 0x1000ffff to already represent a data
2787 address (hence not need an integer-to-pointer-to-address
2788 conversion) while a small integer would still need that
2789 conversion. Just assume such architectures handle all
2790 integer conversions in a single function.
2791
2792 /* JimB writes:
2793
2794 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2795 must admonish GDB hackers to make sure its behavior matches the
2796 compiler's, whenever possible.
2797
2798 In general, I think GDB should evaluate expressions the same way
2799 the compiler does. When the user copies an expression out of
2800 their source code and hands it to a `print' command, they should
2801 get the same value the compiler would have computed. Any
2802 deviation from this rule can cause major confusion and annoyance,
2803 and needs to be justified carefully. In other words, GDB doesn't
2804 really have the freedom to do these conversions in clever and
2805 useful ways.
2806
2807 AndrewC pointed out that users aren't complaining about how GDB
2808 casts integers to pointers; they are complaining that they can't
2809 take an address from a disassembly listing and give it to `x/i'.
2810 This is certainly important.
2811
2812 Adding an architecture method like integer_to_address() certainly
2813 makes it possible for GDB to "get it right" in all circumstances
2814 --- the target has complete control over how things get done, so
2815 people can Do The Right Thing for their target without breaking
2816 anyone else. The standard doesn't specify how integers get
2817 converted to pointers; usually, the ABI doesn't either, but
2818 ABI-specific code is a more reasonable place to handle it. */
2819
2820 if (!value_type (val)->is_pointer_or_reference ()
2821 && gdbarch_integer_to_address_p (gdbarch))
2822 return gdbarch_integer_to_address (gdbarch, value_type (val),
2823 value_contents (val).data ());
2824
2825 return unpack_long (value_type (val), value_contents (val).data ());
2826 #endif
2827 }
2828 \f
2829 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2830 as a long, or as a double, assuming the raw data is described
2831 by type TYPE. Knows how to convert different sizes of values
2832 and can convert between fixed and floating point. We don't assume
2833 any alignment for the raw data. Return value is in host byte order.
2834
2835 If you want functions and arrays to be coerced to pointers, and
2836 references to be dereferenced, call value_as_long() instead.
2837
2838 C++: It is assumed that the front-end has taken care of
2839 all matters concerning pointers to members. A pointer
2840 to member which reaches here is considered to be equivalent
2841 to an INT (or some size). After all, it is only an offset. */
2842
2843 LONGEST
2844 unpack_long (struct type *type, const gdb_byte *valaddr)
2845 {
2846 if (is_fixed_point_type (type))
2847 type = type->fixed_point_type_base_type ();
2848
2849 enum bfd_endian byte_order = type_byte_order (type);
2850 enum type_code code = type->code ();
2851 int len = TYPE_LENGTH (type);
2852 int nosign = type->is_unsigned ();
2853
2854 switch (code)
2855 {
2856 case TYPE_CODE_TYPEDEF:
2857 return unpack_long (check_typedef (type), valaddr);
2858 case TYPE_CODE_ENUM:
2859 case TYPE_CODE_FLAGS:
2860 case TYPE_CODE_BOOL:
2861 case TYPE_CODE_INT:
2862 case TYPE_CODE_CHAR:
2863 case TYPE_CODE_RANGE:
2864 case TYPE_CODE_MEMBERPTR:
2865 {
2866 LONGEST result;
2867
2868 if (type->bit_size_differs_p ())
2869 {
2870 unsigned bit_off = type->bit_offset ();
2871 unsigned bit_size = type->bit_size ();
2872 if (bit_size == 0)
2873 {
2874 /* unpack_bits_as_long doesn't handle this case the
2875 way we'd like, so handle it here. */
2876 result = 0;
2877 }
2878 else
2879 result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
2880 }
2881 else
2882 {
2883 if (nosign)
2884 result = extract_unsigned_integer (valaddr, len, byte_order);
2885 else
2886 result = extract_signed_integer (valaddr, len, byte_order);
2887 }
2888 if (code == TYPE_CODE_RANGE)
2889 result += type->bounds ()->bias;
2890 return result;
2891 }
2892
2893 case TYPE_CODE_FLT:
2894 case TYPE_CODE_DECFLOAT:
2895 return target_float_to_longest (valaddr, type);
2896
2897 case TYPE_CODE_FIXED_POINT:
2898 {
2899 gdb_mpq vq;
2900 vq.read_fixed_point (gdb::make_array_view (valaddr, len),
2901 byte_order, nosign,
2902 type->fixed_point_scaling_factor ());
2903
2904 gdb_mpz vz;
2905 mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
2906 return vz.as_integer<LONGEST> ();
2907 }
2908
2909 case TYPE_CODE_PTR:
2910 case TYPE_CODE_REF:
2911 case TYPE_CODE_RVALUE_REF:
2912 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2913 whether we want this to be true eventually. */
2914 return extract_typed_address (valaddr, type);
2915
2916 default:
2917 error (_("Value can't be converted to integer."));
2918 }
2919 }
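
/* An illustrative sketch of the conversion above, assuming a
   little-endian target whose built-in int is 4 bytes wide:

     struct type *int_type = builtin_type (gdbarch)->builtin_int;
     const gdb_byte raw[4] = { 0xfe, 0xff, 0xff, 0xff };

     LONGEST v = unpack_long (int_type, raw);   // v == -2, sign extended

   The same bytes unpacked through an unsigned 4-byte type would yield
   0xfffffffe instead.  */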
2920
2921 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2922 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2923 We don't assume any alignment for the raw data. Return value is in
2924 host byte order.
2925
2926 If you want functions and arrays to be coerced to pointers, and
2927 references to be dereferenced, call value_as_address() instead.
2928
2929 C++: It is assumed that the front-end has taken care of
2930 all matters concerning pointers to members. A pointer
2931 to member which reaches here is considered to be equivalent
2932 to an INT (or some size). After all, it is only an offset. */
2933
2934 CORE_ADDR
2935 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2936 {
2937 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2938 whether we want this to be true eventually. */
2939 return unpack_long (type, valaddr);
2940 }
2941
2942 bool
2943 is_floating_value (struct value *val)
2944 {
2945 struct type *type = check_typedef (value_type (val));
2946
2947 if (is_floating_type (type))
2948 {
2949 if (!target_float_is_valid (value_contents (val).data (), type))
2950 error (_("Invalid floating value found in program."));
2951 return true;
2952 }
2953
2954 return false;
2955 }
2956
2957 \f
2958 /* Get the value of the FIELDNO'th field (which must be static) of
2959 TYPE. */
2960
2961 struct value *
2962 value_static_field (struct type *type, int fieldno)
2963 {
2964 struct value *retval;
2965
2966 switch (type->field (fieldno).loc_kind ())
2967 {
2968 case FIELD_LOC_KIND_PHYSADDR:
2969 retval = value_at_lazy (type->field (fieldno).type (),
2970 type->field (fieldno).loc_physaddr ());
2971 break;
2972 case FIELD_LOC_KIND_PHYSNAME:
2973 {
2974 const char *phys_name = type->field (fieldno).loc_physname ();
2975 /* type->field (fieldno).name (); */
2976 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2977
2978 if (sym.symbol == NULL)
2979 {
2980 /* With some compilers, e.g. HP aCC, static data members are
2981 reported as non-debuggable symbols. */
2982 struct bound_minimal_symbol msym
2983 = lookup_minimal_symbol (phys_name, NULL, NULL);
2984 struct type *field_type = type->field (fieldno).type ();
2985
2986 if (!msym.minsym)
2987 retval = allocate_optimized_out_value (field_type);
2988 else
2989 retval = value_at_lazy (field_type, msym.value_address ());
2990 }
2991 else
2992 retval = value_of_variable (sym.symbol, sym.block);
2993 break;
2994 }
2995 default:
2996 gdb_assert_not_reached ("unexpected field location kind");
2997 }
2998
2999 return retval;
3000 }
3001
3002 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3003 You have to be careful here, since the size of the data area for the value
3004 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3005 than the old enclosing type, you have to allocate more space for the
3006 data. */
3007
3008 void
3009 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3010 {
3011 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3012 {
3013 check_type_length_before_alloc (new_encl_type);
3014 val->contents
3015 .reset ((gdb_byte *) xrealloc (val->contents.release (),
3016 TYPE_LENGTH (new_encl_type)));
3017 }
3018
3019 val->enclosing_type = new_encl_type;
3020 }
3021
3022 /* Given a value ARG1 (offset by OFFSET bytes)
3023 of a struct or union type ARG_TYPE,
3024 extract and return the value of one of its (non-static) fields.
3025 FIELDNO says which field. */
3026
3027 struct value *
3028 value_primitive_field (struct value *arg1, LONGEST offset,
3029 int fieldno, struct type *arg_type)
3030 {
3031 struct value *v;
3032 struct type *type;
3033 struct gdbarch *arch = get_value_arch (arg1);
3034 int unit_size = gdbarch_addressable_memory_unit_size (arch);
3035
3036 arg_type = check_typedef (arg_type);
3037 type = arg_type->field (fieldno).type ();
3038
3039 /* Call check_typedef on our type to make sure that, if TYPE
3040 is a TYPE_CODE_TYPEDEF, its length is set to the length
3041 of the target type instead of zero. However, we do not
3042 replace the typedef type by the target type, because we want
3043 to keep the typedef in order to be able to print the type
3044 description correctly. */
3045 check_typedef (type);
3046
3047 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3048 {
3049 /* Handle packed fields.
3050
3051 Create a new value for the bitfield, with bitpos and bitsize
3052 set. If possible, arrange offset and bitpos so that we can
3053 do a single aligned read of the size of the containing type.
3054 Otherwise, adjust offset to the byte containing the first
3055 bit. Assume that the address, offset, and embedded offset
3056 are sufficiently aligned. */
3057
3058 LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
3059 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3060
3061 v = allocate_value_lazy (type);
3062 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3063 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3064 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3065 v->bitpos = bitpos % container_bitsize;
3066 else
3067 v->bitpos = bitpos % 8;
3068 v->offset = (value_embedded_offset (arg1)
3069 + offset
3070 + (bitpos - v->bitpos) / 8);
3071 set_value_parent (v, arg1);
3072 if (!value_lazy (arg1))
3073 value_fetch_lazy (v);
3074 }
3075 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3076 {
3077 /* This field is actually a base subobject, so preserve the
3078 entire object's contents for later references to virtual
3079 bases, etc. */
3080 LONGEST boffset;
3081
3082 /* Lazy register values with offsets are not supported. */
3083 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3084 value_fetch_lazy (arg1);
3085
3086 /* We special case virtual inheritance here because this
3087 requires access to the contents, which we would rather avoid
3088 for references to ordinary fields of unavailable values. */
3089 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3090 boffset = baseclass_offset (arg_type, fieldno,
3091 value_contents (arg1).data (),
3092 value_embedded_offset (arg1),
3093 value_address (arg1),
3094 arg1);
3095 else
3096 boffset = arg_type->field (fieldno).loc_bitpos () / 8;
3097
3098 if (value_lazy (arg1))
3099 v = allocate_value_lazy (value_enclosing_type (arg1));
3100 else
3101 {
3102 v = allocate_value (value_enclosing_type (arg1));
3103 value_contents_copy_raw (v, 0, arg1, 0,
3104 TYPE_LENGTH (value_enclosing_type (arg1)));
3105 }
3106 v->type = type;
3107 v->offset = value_offset (arg1);
3108 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3109 }
3110 else if (NULL != TYPE_DATA_LOCATION (type))
3111 {
3112 /* Field is a dynamic data member. */
3113
3114 gdb_assert (0 == offset);
3115 /* We expect an already resolved data location. */
3116 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3117 /* For dynamic data types, defer memory allocation
3118 until we actually access the value. */
3119 v = allocate_value_lazy (type);
3120 }
3121 else
3122 {
3123 /* Plain old data member */
3124 offset += (arg_type->field (fieldno).loc_bitpos ()
3125 / (HOST_CHAR_BIT * unit_size));
3126
3127 /* Lazy register values with offsets are not supported. */
3128 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3129 value_fetch_lazy (arg1);
3130
3131 if (value_lazy (arg1))
3132 v = allocate_value_lazy (type);
3133 else
3134 {
3135 v = allocate_value (type);
3136 value_contents_copy_raw (v, value_embedded_offset (v),
3137 arg1, value_embedded_offset (arg1) + offset,
3138 type_length_units (type));
3139 }
3140 v->offset = (value_offset (arg1) + offset
3141 + value_embedded_offset (arg1));
3142 }
3143 set_value_component_location (v, arg1);
3144 return v;
3145 }
3146
3147 /* Given a value ARG1 of a struct or union type,
3148 extract and return the value of one of its (non-static) fields.
3149 FIELDNO says which field. */
3150
3151 struct value *
3152 value_field (struct value *arg1, int fieldno)
3153 {
3154 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3155 }
3156
3157 /* Return a non-virtual function as a value.
3158 F is the list of member functions which contains the desired method.
3159 J is an index into F which provides the desired method.
3160
3161 We only use the symbol for its address, so be happy with either a
3162 full symbol or a minimal symbol. */
3163
3164 struct value *
3165 value_fn_field (struct value **arg1p, struct fn_field *f,
3166 int j, struct type *type,
3167 LONGEST offset)
3168 {
3169 struct value *v;
3170 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3171 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3172 struct symbol *sym;
3173 struct bound_minimal_symbol msym;
3174
3175 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3176 if (sym == nullptr)
3177 {
3178 msym = lookup_bound_minimal_symbol (physname);
3179 if (msym.minsym == NULL)
3180 return NULL;
3181 }
3182
3183 v = allocate_value (ftype);
3184 VALUE_LVAL (v) = lval_memory;
3185 if (sym)
3186 {
3187 set_value_address (v, sym->value_block ()->entry_pc ());
3188 }
3189 else
3190 {
3191 /* The minimal symbol might point to a function descriptor;
3192 resolve it to the actual code address instead. */
3193 struct objfile *objfile = msym.objfile;
3194 struct gdbarch *gdbarch = objfile->arch ();
3195
3196 set_value_address (v,
3197 gdbarch_convert_from_func_ptr_addr
3198 (gdbarch, msym.value_address (),
3199 current_inferior ()->top_target ()));
3200 }
3201
3202 if (arg1p)
3203 {
3204 if (type != value_type (*arg1p))
3205 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3206 value_addr (*arg1p)));
3207
3208 /* Move the `this' pointer according to the offset.
3209 VALUE_OFFSET (*arg1p) += offset; */
3210 }
3211
3212 return v;
3213 }
3214
3215 \f
3216
3217 /* See value.h. */
3218
3219 LONGEST
3220 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3221 LONGEST bitpos, LONGEST bitsize)
3222 {
3223 enum bfd_endian byte_order = type_byte_order (field_type);
3224 ULONGEST val;
3225 ULONGEST valmask;
3226 int lsbcount;
3227 LONGEST bytes_read;
3228 LONGEST read_offset;
3229
3230 /* Read the minimum number of bytes required; there may not be
3231 enough bytes to read an entire ULONGEST. */
3232 field_type = check_typedef (field_type);
3233 if (bitsize)
3234 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3235 else
3236 {
3237 bytes_read = TYPE_LENGTH (field_type);
3238 bitsize = 8 * bytes_read;
3239 }
3240
3241 read_offset = bitpos / 8;
3242
3243 val = extract_unsigned_integer (valaddr + read_offset,
3244 bytes_read, byte_order);
3245
3246 /* Extract bits. See comment above. */
3247
3248 if (byte_order == BFD_ENDIAN_BIG)
3249 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3250 else
3251 lsbcount = (bitpos % 8);
3252 val >>= lsbcount;
3253
3254 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3255 If the field is signed, and is negative, then sign extend. */
3256
3257 if (bitsize < 8 * (int) sizeof (val))
3258 {
3259 valmask = (((ULONGEST) 1) << bitsize) - 1;
3260 val &= valmask;
3261 if (!field_type->is_unsigned ())
3262 {
3263 if (val & (valmask ^ (valmask >> 1)))
3264 {
3265 val |= ~valmask;
3266 }
3267 }
3268 }
3269
3270 return val;
3271 }
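
/* A worked example of the extraction above: an unsigned 3-bit field at
   BITPOS 5 on a little-endian target, with valaddr[0] == 0xE7
   (binary 1110 0111):

     bytes_read = (5 % 8 + 3 + 7) / 8 = 1
     val = 0xE7;  lsbcount = 5;  val >>= 5       ->  0x7
     valmask = (1 << 3) - 1 = 0x7;  val &= valmask  ->  7

   Had the field type been signed, the set top bit (0x4) would trigger
   the sign extension step and the result would be -1.  */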
3272
3273 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3274 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3275 ORIGINAL_VALUE, which must not be NULL. See
3276 unpack_value_bits_as_long for more details. */
3277
3278 int
3279 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3280 LONGEST embedded_offset, int fieldno,
3281 const struct value *val, LONGEST *result)
3282 {
3283 int bitpos = type->field (fieldno).loc_bitpos ();
3284 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3285 struct type *field_type = type->field (fieldno).type ();
3286 int bit_offset;
3287
3288 gdb_assert (val != NULL);
3289
3290 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3291 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3292 || !value_bits_available (val, bit_offset, bitsize))
3293 return 0;
3294
3295 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3296 bitpos, bitsize);
3297 return 1;
3298 }
3299
3300 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3301 object at VALADDR. See unpack_bits_as_long for more details. */
3302
3303 LONGEST
3304 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3305 {
3306 int bitpos = type->field (fieldno).loc_bitpos ();
3307 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3308 struct type *field_type = type->field (fieldno).type ();
3309
3310 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3311 }
3312
3313 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3314 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3315 the contents in DEST_VAL, zero or sign extending if the type of
3316 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3317 VAL. If the parts of VAL's contents needed to extract the bitfield
3318 are unavailable/optimized out, DEST_VAL is correspondingly
3319 marked unavailable/optimized out. */
3320
3321 void
3322 unpack_value_bitfield (struct value *dest_val,
3323 LONGEST bitpos, LONGEST bitsize,
3324 const gdb_byte *valaddr, LONGEST embedded_offset,
3325 const struct value *val)
3326 {
3327 enum bfd_endian byte_order;
3328 int src_bit_offset;
3329 int dst_bit_offset;
3330 struct type *field_type = value_type (dest_val);
3331
3332 byte_order = type_byte_order (field_type);
3333
3334 /* First, unpack and sign extend the bitfield as if it was wholly
3335 valid. Optimized out/unavailable bits are read as zero, but
3336 that's OK, as they'll end up marked below. If the VAL is
3337 wholly-invalid we may have skipped allocating its contents,
3338 though. See allocate_optimized_out_value. */
3339 if (valaddr != NULL)
3340 {
3341 LONGEST num;
3342
3343 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3344 bitpos, bitsize);
3345 store_signed_integer (value_contents_raw (dest_val).data (),
3346 TYPE_LENGTH (field_type), byte_order, num);
3347 }
3348
3349 /* Now copy the optimized out / unavailability ranges to the right
3350 bits. */
3351 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3352 if (byte_order == BFD_ENDIAN_BIG)
3353 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3354 else
3355 dst_bit_offset = 0;
3356 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3357 val, src_bit_offset, bitsize);
3358 }
3359
3360 /* Return a new value with type TYPE, which is FIELDNO field of the
3361 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3362 of VAL. If the parts of VAL's contents needed to extract the
3363 bitfield are unavailable/optimized out, the new value is
3364 correspondingly marked unavailable/optimized out. */
3365
3366 struct value *
3367 value_field_bitfield (struct type *type, int fieldno,
3368 const gdb_byte *valaddr,
3369 LONGEST embedded_offset, const struct value *val)
3370 {
3371 int bitpos = type->field (fieldno).loc_bitpos ();
3372 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3373 struct value *res_val = allocate_value (type->field (fieldno).type ());
3374
3375 unpack_value_bitfield (res_val, bitpos, bitsize,
3376 valaddr, embedded_offset, val);
3377
3378 return res_val;
3379 }
3380
3381 /* Modify the value of a bitfield. ADDR points to a block of memory in
3382 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3383 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3384 indicate which bits (in target bit order) comprise the bitfield.
3385 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3386 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3387
3388 void
3389 modify_field (struct type *type, gdb_byte *addr,
3390 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3391 {
3392 enum bfd_endian byte_order = type_byte_order (type);
3393 ULONGEST oword;
3394 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3395 LONGEST bytesize;
3396
3397 /* Normalize BITPOS. */
3398 addr += bitpos / 8;
3399 bitpos %= 8;
3400
3401 /* If a negative fieldval fits in the field in question, chop
3402 off the sign extension bits. */
3403 if ((~fieldval & ~(mask >> 1)) == 0)
3404 fieldval &= mask;
3405
3406 /* Warn if value is too big to fit in the field in question. */
3407 if (0 != (fieldval & ~mask))
3408 {
3409 /* FIXME: would like to include fieldval in the message, but
3410 we don't have a sprintf_longest. */
3411 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3412
3413 /* Truncate it, otherwise adjoining fields may be corrupted. */
3414 fieldval &= mask;
3415 }
3416
3417 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3418 false valgrind reports. */
3419
3420 bytesize = (bitpos + bitsize + 7) / 8;
3421 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3422
3423 /* Shifting for bit field depends on endianness of the target machine. */
3424 if (byte_order == BFD_ENDIAN_BIG)
3425 bitpos = bytesize * 8 - bitpos - bitsize;
3426
3427 oword &= ~(mask << bitpos);
3428 oword |= fieldval << bitpos;
3429
3430 store_unsigned_integer (addr, bytesize, byte_order, oword);
3431 }
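
/* A worked example of the read-modify-write above: storing FIELDVAL 5
   into a 3-bit field at BITPOS 4 on a little-endian target, where the
   addressed byte initially holds 1111 1111:

     mask     = 0x7
     bytesize = (4 + 3 + 7) / 8 = 1
     oword   &= ~(0x7 << 4)    ->  1000 1111
     oword   |=  (5   << 4)    ->  1101 1111

   Only the single byte covered by the field is read back and rewritten,
   which is what keeps the surrounding bytes untouched.  */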
3432 \f
3433 /* Pack NUM into BUF using a target format of TYPE. */
3434
3435 void
3436 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3437 {
3438 enum bfd_endian byte_order = type_byte_order (type);
3439 LONGEST len;
3440
3441 type = check_typedef (type);
3442 len = TYPE_LENGTH (type);
3443
3444 switch (type->code ())
3445 {
3446 case TYPE_CODE_RANGE:
3447 num -= type->bounds ()->bias;
3448 /* Fall through. */
3449 case TYPE_CODE_INT:
3450 case TYPE_CODE_CHAR:
3451 case TYPE_CODE_ENUM:
3452 case TYPE_CODE_FLAGS:
3453 case TYPE_CODE_BOOL:
3454 case TYPE_CODE_MEMBERPTR:
3455 if (type->bit_size_differs_p ())
3456 {
3457 unsigned bit_off = type->bit_offset ();
3458 unsigned bit_size = type->bit_size ();
3459 num &= ((ULONGEST) 1 << bit_size) - 1;
3460 num <<= bit_off;
3461 }
3462 store_signed_integer (buf, len, byte_order, num);
3463 break;
3464
3465 case TYPE_CODE_REF:
3466 case TYPE_CODE_RVALUE_REF:
3467 case TYPE_CODE_PTR:
3468 store_typed_address (buf, type, (CORE_ADDR) num);
3469 break;
3470
3471 case TYPE_CODE_FLT:
3472 case TYPE_CODE_DECFLOAT:
3473 target_float_from_longest (buf, type, num);
3474 break;
3475
3476 default:
3477 error (_("Unexpected type (%d) encountered for integer constant."),
3478 type->code ());
3479 }
3480 }
3481
3482
3483 /* Pack NUM into BUF using a target format of TYPE. */
3484
3485 static void
3486 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3487 {
3488 LONGEST len;
3489 enum bfd_endian byte_order;
3490
3491 type = check_typedef (type);
3492 len = TYPE_LENGTH (type);
3493 byte_order = type_byte_order (type);
3494
3495 switch (type->code ())
3496 {
3497 case TYPE_CODE_INT:
3498 case TYPE_CODE_CHAR:
3499 case TYPE_CODE_ENUM:
3500 case TYPE_CODE_FLAGS:
3501 case TYPE_CODE_BOOL:
3502 case TYPE_CODE_RANGE:
3503 case TYPE_CODE_MEMBERPTR:
3504 if (type->bit_size_differs_p ())
3505 {
3506 unsigned bit_off = type->bit_offset ();
3507 unsigned bit_size = type->bit_size ();
3508 num &= ((ULONGEST) 1 << bit_size) - 1;
3509 num <<= bit_off;
3510 }
3511 store_unsigned_integer (buf, len, byte_order, num);
3512 break;
3513
3514 case TYPE_CODE_REF:
3515 case TYPE_CODE_RVALUE_REF:
3516 case TYPE_CODE_PTR:
3517 store_typed_address (buf, type, (CORE_ADDR) num);
3518 break;
3519
3520 case TYPE_CODE_FLT:
3521 case TYPE_CODE_DECFLOAT:
3522 target_float_from_ulongest (buf, type, num);
3523 break;
3524
3525 default:
3526 error (_("Unexpected type (%d) encountered "
3527 "for unsigned integer constant."),
3528 type->code ());
3529 }
3530 }
3531
3532
3533 /* Create a value of type TYPE that is zero, and return it. */
3534
3535 struct value *
3536 value_zero (struct type *type, enum lval_type lv)
3537 {
3538 struct value *val = allocate_value_lazy (type);
3539
3540 VALUE_LVAL (val) = (lv == lval_computed ? not_lval : lv);
3541 val->is_zero = true;
3542 return val;
3543 }
3544
3545 /* Convert C numbers into newly allocated values. */
3546
3547 struct value *
3548 value_from_longest (struct type *type, LONGEST num)
3549 {
3550 struct value *val = allocate_value (type);
3551
3552 pack_long (value_contents_raw (val).data (), type, num);
3553 return val;
3554 }
3555
3556
3557 /* Convert C unsigned numbers into newly allocated values. */
3558
3559 struct value *
3560 value_from_ulongest (struct type *type, ULONGEST num)
3561 {
3562 struct value *val = allocate_value (type);
3563
3564 pack_unsigned_long (value_contents_raw (val).data (), type, num);
3565
3566 return val;
3567 }
3568
3569
3570 /* Create a value representing a pointer of type TYPE to the address
3571 ADDR. */
3572
3573 struct value *
3574 value_from_pointer (struct type *type, CORE_ADDR addr)
3575 {
3576 struct value *val = allocate_value (type);
3577
3578 store_typed_address (value_contents_raw (val).data (),
3579 check_typedef (type), addr);
3580 return val;
3581 }
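
/* An illustrative sketch of the constructors above (int_type and
   ptr_type are hypothetical names):

     struct type *int_type = builtin_type (gdbarch)->builtin_int;
     struct type *ptr_type = lookup_pointer_type (int_type);

     struct value *v1 = value_from_longest (int_type, -1);
     struct value *v2 = value_from_pointer (ptr_type, 0x1000);

   Both results are not_lval: they carry target-format contents but have
   no associated location in the inferior.  */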
3582
3583 /* Create and return a value object of TYPE containing the value D. The
3584 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3585 it is converted to target format. */
3586
3587 struct value *
3588 value_from_host_double (struct type *type, double d)
3589 {
3590 struct value *value = allocate_value (type);
3591 gdb_assert (type->code () == TYPE_CODE_FLT);
3592 target_float_from_host_double (value_contents_raw (value).data (),
3593 value_type (value), d);
3594 return value;
3595 }
3596
3597 /* Create a value of type TYPE whose contents come from VALADDR, if it
3598 is non-null, and whose memory address (in the inferior) is
3599 ADDRESS. The type of the created value may differ from the passed
3600 type TYPE. Make sure to retrieve the value's new type after this call.
3601 Note that TYPE is not passed through resolve_dynamic_type; this is
3602 a special API intended for use only by Ada. */
3603
3604 struct value *
3605 value_from_contents_and_address_unresolved (struct type *type,
3606 const gdb_byte *valaddr,
3607 CORE_ADDR address)
3608 {
3609 struct value *v;
3610
3611 if (valaddr == NULL)
3612 v = allocate_value_lazy (type);
3613 else
3614 v = value_from_contents (type, valaddr);
3615 VALUE_LVAL (v) = lval_memory;
3616 set_value_address (v, address);
3617 return v;
3618 }
3619
3620 /* Create a value of type TYPE whose contents come from VALADDR, if it
3621 is non-null, and whose memory address (in the inferior) is
3622 ADDRESS. The type of the created value may differ from the passed
3623 type TYPE. Make sure to retrieve the value's new type after this call. */
3624
3625 struct value *
3626 value_from_contents_and_address (struct type *type,
3627 const gdb_byte *valaddr,
3628 CORE_ADDR address)
3629 {
3630 gdb::array_view<const gdb_byte> view;
3631 if (valaddr != nullptr)
3632 view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
3633 struct type *resolved_type = resolve_dynamic_type (type, view, address);
3634 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3635 struct value *v;
3636
3637 if (valaddr == NULL)
3638 v = allocate_value_lazy (resolved_type);
3639 else
3640 v = value_from_contents (resolved_type, valaddr);
3641 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3642 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3643 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3644 VALUE_LVAL (v) = lval_memory;
3645 set_value_address (v, address);
3646 return v;
3647 }
3648
3649 /* Create a value of type TYPE holding the contents CONTENTS.
3650 The new value is `not_lval'. */
3651
3652 struct value *
3653 value_from_contents (struct type *type, const gdb_byte *contents)
3654 {
3655 struct value *result;
3656
3657 result = allocate_value (type);
3658 memcpy (value_contents_raw (result).data (), contents, TYPE_LENGTH (type));
3659 return result;
3660 }
3661
3662 /* Extract a value from the history file. Input will be of the form
3663 $digits or $$digits. See block comment above 'write_dollar_variable'
3664 for details. */
3665
3666 struct value *
3667 value_from_history_ref (const char *h, const char **endp)
3668 {
3669 int index, len;
3670
3671 if (h[0] == '$')
3672 len = 1;
3673 else
3674 return NULL;
3675
3676 if (h[1] == '$')
3677 len = 2;
3678
3679 /* Find length of numeral string. */
3680 for (; isdigit (h[len]); len++)
3681 ;
3682
3683 /* Make sure numeral string is not part of an identifier. */
3684 if (h[len] == '_' || isalpha (h[len]))
3685 return NULL;
3686
3687 /* Now collect the index value. */
3688 if (h[1] == '$')
3689 {
3690 if (len == 2)
3691 {
3692 /* For some bizarre reason, "$$" is equivalent to "$$1",
3693 rather than to "$$0" as it ought to be! */
3694 index = -1;
3695 *endp += len;
3696 }
3697 else
3698 {
3699 char *local_end;
3700
3701 index = -strtol (&h[2], &local_end, 10);
3702 *endp = local_end;
3703 }
3704 }
3705 else
3706 {
3707 if (len == 1)
3708 {
3709 /* "$" is equivalent to "$0". */
3710 index = 0;
3711 *endp += len;
3712 }
3713 else
3714 {
3715 char *local_end;
3716
3717 index = strtol (&h[1], &local_end, 10);
3718 *endp = local_end;
3719 }
3720 }
3721
3722 return access_value_history (index);
3723 }
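
/* Illustrative sketch, not part of the original file: how a caller
   might scan a history reference out of an expression string.  The
   input text and the surrounding error handling are hypothetical.  */

static struct value *
example_scan_history_ref ()
{
  const char *text = "$$2 + 1";
  const char *rest = text;
  struct value *v = value_from_history_ref (text, &rest);

  /* Here V is the history item two entries back from the most recent
     one, and REST points just past "$$2", i.e. at " + 1".  A string
     that does not start with '$' would have yielded NULL instead.  */
  if (v == NULL)
    error (_("not a value history reference"));

  return v;
}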
3724
3725 /* Get the component value (offset by OFFSET bytes) of a struct or
3726 union WHOLE. Component's type is TYPE. */
3727
3728 struct value *
3729 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3730 {
3731 struct value *v;
3732
3733 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3734 v = allocate_value_lazy (type);
3735 else
3736 {
3737 v = allocate_value (type);
3738 value_contents_copy (v, value_embedded_offset (v),
3739 whole, value_embedded_offset (whole) + offset,
3740 type_length_units (type));
3741 }
3742 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3743 set_value_component_location (v, whole);
3744
3745 return v;
3746 }
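
/* Illustrative sketch, not part of the original file: extracting a
   component from a struct value assuming a layout of two consecutive
   ints.  The layout and helper name are hypothetical; real callers
   derive TYPE and OFFSET from the parent type's field information.  */

static struct value *
example_second_int_member (struct gdbarch *gdbarch, struct value *whole)
{
  struct type *int_type = builtin_type (gdbarch)->builtin_int;
  LONGEST offset = TYPE_LENGTH (int_type);	/* Skip the first member.  */

  /* The component shares WHOLE's location, so if WHOLE is a lazy
     memory value the component stays lazy as well.  */
  return value_from_component (whole, int_type, offset);
}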
3747
3748 struct value *
3749 coerce_ref_if_computed (const struct value *arg)
3750 {
3751 const struct lval_funcs *funcs;
3752
3753 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3754 return NULL;
3755
3756 if (value_lval_const (arg) != lval_computed)
3757 return NULL;
3758
3759 funcs = value_computed_funcs (arg);
3760 if (funcs->coerce_ref == NULL)
3761 return NULL;
3762
3763 return funcs->coerce_ref (arg);
3764 }
3765
3766 /* Look at value.h for description. */
3767
3768 struct value *
3769 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3770 const struct type *original_type,
3771 struct value *original_value,
3772 CORE_ADDR original_value_address)
3773 {
3774 gdb_assert (original_type->is_pointer_or_reference ());
3775
3776 struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
3777 gdb::array_view<const gdb_byte> view;
3778 struct type *resolved_original_target_type
3779 = resolve_dynamic_type (original_target_type, view,
3780 original_value_address);
3781
3782 /* Re-adjust type. */
3783 deprecated_set_value_type (value, resolved_original_target_type);
3784
3785 /* Add embedding info. */
3786 set_value_enclosing_type (value, enc_type);
3787 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3788
3789 /* We may be pointing to an object of some derived type. */
3790 return value_full_object (value, NULL, 0, 0, 0);
3791 }
3792
3793 struct value *
3794 coerce_ref (struct value *arg)
3795 {
3796 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3797 struct value *retval;
3798 struct type *enc_type;
3799
3800 retval = coerce_ref_if_computed (arg);
3801 if (retval)
3802 return retval;
3803
3804 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3805 return arg;
3806
3807 enc_type = check_typedef (value_enclosing_type (arg));
3808 enc_type = TYPE_TARGET_TYPE (enc_type);
3809
3810 CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg).data ());
3811 retval = value_at_lazy (enc_type, addr);
3812 enc_type = value_type (retval);
3813 return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3814 arg, addr);
3815 }
3816
3817 struct value *
3818 coerce_array (struct value *arg)
3819 {
3820 struct type *type;
3821
3822 arg = coerce_ref (arg);
3823 type = check_typedef (value_type (arg));
3824
3825 switch (type->code ())
3826 {
3827 case TYPE_CODE_ARRAY:
3828 if (!type->is_vector () && current_language->c_style_arrays_p ())
3829 arg = value_coerce_array (arg);
3830 break;
3831 case TYPE_CODE_FUNC:
3832 arg = value_coerce_function (arg);
3833 break;
3834 }
3835 return arg;
3836 }
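
/* Illustrative sketch, not part of the original file: the common
   pattern of coercing an argument before operating on it, so that
   references are dereferenced and C-style arrays and functions decay
   to pointers.  The helper name is hypothetical.  */

static struct value *
example_coerce_argument (struct value *arg)
{
  /* coerce_array starts with coerce_ref, so a single call handles
     references, arrays and functions.  */
  return coerce_array (arg);
}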
3837 \f
3838
3839 /* Return the return value convention that will be used for the
3840 specified type. */
3841
3842 enum return_value_convention
3843 struct_return_convention (struct gdbarch *gdbarch,
3844 struct value *function, struct type *value_type)
3845 {
3846 enum type_code code = value_type->code ();
3847
3848 if (code == TYPE_CODE_ERROR)
3849 error (_("Function return type unknown."));
3850
3851 /* Probe the architecture for the return-value convention. */
3852 return gdbarch_return_value (gdbarch, function, value_type,
3853 NULL, NULL, NULL);
3854 }
3855
3856 /* Return true if the function returning the specified type is using
3857 the convention of returning structures in memory (passing in the
3858 address as a hidden first parameter). */
3859
3860 int
3861 using_struct_return (struct gdbarch *gdbarch,
3862 struct value *function, struct type *value_type)
3863 {
3864 if (value_type->code () == TYPE_CODE_VOID)
3865 /* A void return value is never in memory. See also corresponding
3866 code in "print_return_value". */
3867 return 0;
3868
3869 return (struct_return_convention (gdbarch, function, value_type)
3870 != RETURN_VALUE_REGISTER_CONVENTION);
3871 }
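
/* Illustrative sketch, not part of the original file: asking whether a
   call must pass a hidden address for its return value.  FUNCTION is
   the callee value when one is available; the helper name is
   hypothetical.  */

static int
example_needs_return_buffer (struct gdbarch *gdbarch,
			     struct value *function,
			     struct type *ret_type)
{
  /* Zero for void and register-convention returns; non-zero when the
     ABI returns the object in caller-provided memory.  */
  return using_struct_return (gdbarch, function, ret_type);
}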
3872
3873 /* Set the initialized field in a value struct. */
3874
3875 void
3876 set_value_initialized (struct value *val, int status)
3877 {
3878 val->initialized = status;
3879 }
3880
3881 /* Return the initialized field in a value struct. */
3882
3883 int
3884 value_initialized (const struct value *val)
3885 {
3886 return val->initialized;
3887 }
3888
3889 /* Helper for value_fetch_lazy when the value is a bitfield. */
3890
3891 static void
3892 value_fetch_lazy_bitfield (struct value *val)
3893 {
3894 gdb_assert (value_bitsize (val) != 0);
3895
3896 /* To read a lazy bitfield, read the entire enclosing value. This
3897 avoids reading the same block of (possibly volatile) memory once
3898 per bitfield. It would be even better to read only the containing
3899 word, but we have no way to record that just specific bits of a
3900 value have been fetched. */
3901 struct value *parent = value_parent (val);
3902
3903 if (value_lazy (parent))
3904 value_fetch_lazy (parent);
3905
3906 unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3907 value_contents_for_printing (parent).data (),
3908 value_offset (val), parent);
3909 }
3910
3911 /* Helper for value_fetch_lazy when the value is in memory. */
3912
3913 static void
3914 value_fetch_lazy_memory (struct value *val)
3915 {
3916 gdb_assert (VALUE_LVAL (val) == lval_memory);
3917
3918 CORE_ADDR addr = value_address (val);
3919 struct type *type = check_typedef (value_enclosing_type (val));
3920
3921 if (TYPE_LENGTH (type))
3922 read_value_memory (val, 0, value_stack (val),
3923 addr, value_contents_all_raw (val).data (),
3924 type_length_units (type));
3925 }
3926
3927 /* Helper for value_fetch_lazy when the value is in a register. */
3928
3929 static void
3930 value_fetch_lazy_register (struct value *val)
3931 {
3932 struct frame_info *next_frame;
3933 int regnum;
3934 struct type *type = check_typedef (value_type (val));
3935 struct value *new_val = val, *mark = value_mark ();
3936
3937 /* Offsets are not supported here; lazy register values must
3938 refer to the entire register. */
3939 gdb_assert (value_offset (val) == 0);
3940
3941 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3942 {
3943 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3944
3945 next_frame = frame_find_by_id (next_frame_id);
3946 regnum = VALUE_REGNUM (new_val);
3947
3948 gdb_assert (next_frame != NULL);
3949
3950 /* Convertible register routines are used for multi-register
3951 values and for interpretation in different types
3952 (e.g. float or int from a double register). Lazy
3953 register values should have the register's natural type,
3954 so they do not apply. */
3955 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3956 regnum, type));
3957
3958 /* NEXT_FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3959 Since a "->next" operation was performed when setting
3960 this field, we do not need to perform a "next" operation
3961 again when unwinding the register. That's why
3962 frame_unwind_register_value() is called here instead of
3963 get_frame_register_value(). */
3964 new_val = frame_unwind_register_value (next_frame, regnum);
3965
3966 /* If we get another lazy lval_register value, it means the
3967 register is found by reading it from NEXT_FRAME's next frame.
3968 frame_unwind_register_value should never return a value with
3969 the frame id pointing to NEXT_FRAME. If it does, it means we
3970 either have two consecutive frames with the same frame id
3971 in the frame chain, or some code is trying to unwind
3972 behind get_prev_frame's back (e.g., a frame unwind
3973 sniffer trying to unwind), bypassing its validations. In
3974 any case, it should always be an internal error to end up
3975 in this situation. */
3976 if (VALUE_LVAL (new_val) == lval_register
3977 && value_lazy (new_val)
3978 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3979 internal_error (__FILE__, __LINE__,
3980 _("infinite loop while fetching a register"));
3981 }
3982
3983 /* If it's still lazy (for instance, a saved register on the
3984 stack), fetch it. */
3985 if (value_lazy (new_val))
3986 value_fetch_lazy (new_val);
3987
3988 /* Copy the contents and the unavailability/optimized-out
3989 meta-data from NEW_VAL to VAL. */
3990 set_value_lazy (val, 0);
3991 value_contents_copy (val, value_embedded_offset (val),
3992 new_val, value_embedded_offset (new_val),
3993 type_length_units (type));
3994
3995 if (frame_debug)
3996 {
3997 struct gdbarch *gdbarch;
3998 struct frame_info *frame;
3999 frame = frame_find_by_id (VALUE_NEXT_FRAME_ID (val));
4000 frame = get_prev_frame_always (frame);
4001 regnum = VALUE_REGNUM (val);
4002 gdbarch = get_frame_arch (frame);
4003
4004 string_file debug_file;
4005 gdb_printf (&debug_file,
4006 "(frame=%d, regnum=%d(%s), ...) ",
4007 frame_relative_level (frame), regnum,
4008 user_reg_map_regnum_to_name (gdbarch, regnum));
4009
4010 gdb_printf (&debug_file, "->");
4011 if (value_optimized_out (new_val))
4012 {
4013 gdb_printf (&debug_file, " ");
4014 val_print_optimized_out (new_val, &debug_file);
4015 }
4016 else
4017 {
4018 int i;
4019 gdb::array_view<const gdb_byte> buf = value_contents (new_val);
4020
4021 if (VALUE_LVAL (new_val) == lval_register)
4022 gdb_printf (&debug_file, " register=%d",
4023 VALUE_REGNUM (new_val));
4024 else if (VALUE_LVAL (new_val) == lval_memory)
4025 gdb_printf (&debug_file, " address=%s",
4026 paddress (gdbarch,
4027 value_address (new_val)));
4028 else
4029 gdb_printf (&debug_file, " computed");
4030
4031 gdb_printf (&debug_file, " bytes=");
4032 gdb_printf (&debug_file, "[");
4033 for (i = 0; i < register_size (gdbarch, regnum); i++)
4034 gdb_printf (&debug_file, "%02x", buf[i]);
4035 gdb_printf (&debug_file, "]");
4036 }
4037
4038 frame_debug_printf ("%s", debug_file.c_str ());
4039 }
4040
4041 /* Dispose of the intermediate values. This prevents
4042 watchpoints from trying to watch the saved frame pointer. */
4043 value_free_to_mark (mark);
4044 }
4045
4046 /* Load the actual content of a lazy value. Fetch the data from the
4047 user's process and clear the lazy flag to indicate that the data in
4048 the buffer is valid.
4049
4050 If the value is zero-length, we avoid calling read_memory, which
4051 would abort. We mark the value as fetched anyway -- all 0 bytes of
4052 it. */
4053
4054 void
4055 value_fetch_lazy (struct value *val)
4056 {
4057 gdb_assert (value_lazy (val));
4058 allocate_value_contents (val);
4059 /* A value is either lazy, or fully fetched. The
4060 availability/validity is only established as we try to fetch a
4061 value. */
4062 gdb_assert (val->optimized_out.empty ());
4063 gdb_assert (val->unavailable.empty ());
4064 if (val->is_zero)
4065 {
4066 /* Nothing. */
4067 }
4068 else if (value_bitsize (val))
4069 value_fetch_lazy_bitfield (val);
4070 else if (VALUE_LVAL (val) == lval_memory)
4071 value_fetch_lazy_memory (val);
4072 else if (VALUE_LVAL (val) == lval_register)
4073 value_fetch_lazy_register (val);
4074 else if (VALUE_LVAL (val) == lval_computed
4075 && value_computed_funcs (val)->read != NULL)
4076 value_computed_funcs (val)->read (val);
4077 else
4078 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4079
4080 set_value_lazy (val, 0);
4081 }
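
/* Illustrative sketch, not part of the original file: explicitly
   forcing a lazy value in before looking at its bytes.  In practice
   value_contents-style accessors perform this fetch themselves; the
   helper name is hypothetical.  */

static const gdb_byte *
example_force_and_read (struct value *val)
{
  if (value_lazy (val))
    value_fetch_lazy (val);

  /* The buffer is now valid; any unavailable or optimized-out ranges
     discovered during the fetch are recorded alongside it.  */
  return value_contents (val).data ();
}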
4082
4083 /* Implementation of the convenience function $_isvoid. */
4084
4085 static struct value *
4086 isvoid_internal_fn (struct gdbarch *gdbarch,
4087 const struct language_defn *language,
4088 void *cookie, int argc, struct value **argv)
4089 {
4090 int ret;
4091
4092 if (argc != 1)
4093 error (_("You must provide one argument for $_isvoid."));
4094
4095 ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
4096
4097 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4098 }
4099
4100 /* Implementation of the convenience function $_creal. Extracts the
4101 real part from a complex number. */
4102
4103 static struct value *
4104 creal_internal_fn (struct gdbarch *gdbarch,
4105 const struct language_defn *language,
4106 void *cookie, int argc, struct value **argv)
4107 {
4108 if (argc != 1)
4109 error (_("You must provide one argument for $_creal."));
4110
4111 value *cval = argv[0];
4112 type *ctype = check_typedef (value_type (cval));
4113 if (ctype->code () != TYPE_CODE_COMPLEX)
4114 error (_("expected a complex number"));
4115 return value_real_part (cval);
4116 }
4117
4118 /* Implementation of the convenience function $_cimag. Extracts the
4119 imaginary part from a complex number. */
4120
4121 static struct value *
4122 cimag_internal_fn (struct gdbarch *gdbarch,
4123 const struct language_defn *language,
4124 void *cookie, int argc,
4125 struct value **argv)
4126 {
4127 if (argc != 1)
4128 error (_("You must provide one argument for $_cimag."));
4129
4130 value *cval = argv[0];
4131 type *ctype = check_typedef (value_type (cval));
4132 if (ctype->code () != TYPE_CODE_COMPLEX)
4133 error (_("expected a complex number"));
4134 return value_imaginary_part (cval);
4135 }
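
/* Illustrative sketch, not part of the original file: the shape of an
   additional convenience function.  $_example_argc and this handler
   are hypothetical; they would be registered with add_internal_function
   from _initialize_values below, just as $_isvoid is.  */

static struct value *
example_argc_internal_fn (struct gdbarch *gdbarch,
			  const struct language_defn *language,
			  void *cookie, int argc, struct value **argv)
{
  /* Report how many arguments the user passed to the function.  */
  return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
}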
4136
4137 #if GDB_SELF_TEST
4138 namespace selftests
4139 {
4140
4141 /* Test the ranges_contain function. */
4142
4143 static void
4144 test_ranges_contain ()
4145 {
4146 std::vector<range> ranges;
4147 range r;
4148
4149 /* [10, 14] */
4150 r.offset = 10;
4151 r.length = 5;
4152 ranges.push_back (r);
4153
4154 /* [20, 24] */
4155 r.offset = 20;
4156 r.length = 5;
4157 ranges.push_back (r);
4158
4159 /* [2, 6] */
4160 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4161 /* [9, 13] */
4162 SELF_CHECK (ranges_contain (ranges, 9, 5));
4163 /* [10, 11] */
4164 SELF_CHECK (ranges_contain (ranges, 10, 2));
4165 /* [10, 14] */
4166 SELF_CHECK (ranges_contain (ranges, 10, 5));
4167 /* [13, 18] */
4168 SELF_CHECK (ranges_contain (ranges, 13, 6));
4169 /* [14, 18] */
4170 SELF_CHECK (ranges_contain (ranges, 14, 5));
4171 /* [15, 18] */
4172 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4173 /* [16, 19] */
4174 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4175 /* [16, 21] */
4176 SELF_CHECK (ranges_contain (ranges, 16, 6));
4177 /* [21, 21] */
4178 SELF_CHECK (ranges_contain (ranges, 21, 1));
4179 /* [21, 25] */
4180 SELF_CHECK (ranges_contain (ranges, 21, 5));
4181 /* [26, 28] */
4182 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4183 }
4184
4185 /* Check that RANGES contains the same ranges as EXPECTED. */
4186
4187 static bool
4188 check_ranges_vector (gdb::array_view<const range> ranges,
4189 gdb::array_view<const range> expected)
4190 {
4191 return ranges == expected;
4192 }
4193
4194 /* Test the insert_into_bit_range_vector function. */
4195
4196 static void
4197 test_insert_into_bit_range_vector ()
4198 {
4199 std::vector<range> ranges;
4200
4201 /* [10, 14] */
4202 {
4203 insert_into_bit_range_vector (&ranges, 10, 5);
4204 static const range expected[] = {
4205 {10, 5}
4206 };
4207 SELF_CHECK (check_ranges_vector (ranges, expected));
4208 }
4209
4210 /* [10, 14] */
4211 {
4212 insert_into_bit_range_vector (&ranges, 11, 4);
4213 static const range expected = {10, 5};
4214 SELF_CHECK (check_ranges_vector (ranges, expected));
4215 }
4216
4217 /* [10, 14] [20, 24] */
4218 {
4219 insert_into_bit_range_vector (&ranges, 20, 5);
4220 static const range expected[] = {
4221 {10, 5},
4222 {20, 5},
4223 };
4224 SELF_CHECK (check_ranges_vector (ranges, expected));
4225 }
4226
4227 /* [10, 14] [17, 24] */
4228 {
4229 insert_into_bit_range_vector (&ranges, 17, 5);
4230 static const range expected[] = {
4231 {10, 5},
4232 {17, 8},
4233 };
4234 SELF_CHECK (check_ranges_vector (ranges, expected));
4235 }
4236
4237 /* [2, 8] [10, 14] [17, 24] */
4238 {
4239 insert_into_bit_range_vector (&ranges, 2, 7);
4240 static const range expected[] = {
4241 {2, 7},
4242 {10, 5},
4243 {17, 8},
4244 };
4245 SELF_CHECK (check_ranges_vector (ranges, expected));
4246 }
4247
4248 /* [2, 14] [17, 24] */
4249 {
4250 insert_into_bit_range_vector (&ranges, 9, 1);
4251 static const range expected[] = {
4252 {2, 13},
4253 {17, 8},
4254 };
4255 SELF_CHECK (check_ranges_vector (ranges, expected));
4256 }
4257
4258 /* [2, 14] [17, 24] */
4259 {
4260 insert_into_bit_range_vector (&ranges, 9, 1);
4261 static const range expected[] = {
4262 {2, 13},
4263 {17, 8},
4264 };
4265 SELF_CHECK (check_ranges_vector (ranges, expected));
4266 }
4267
4268 /* [2, 33] */
4269 {
4270 insert_into_bit_range_vector (&ranges, 4, 30);
4271 static const range expected = {2, 32};
4272 SELF_CHECK (check_ranges_vector (ranges, expected));
4273 }
4274 }
4275
4276 static void
4277 test_value_copy ()
4278 {
4279 type *type = builtin_type (current_inferior ()->gdbarch)->builtin_int;
4280
4281 /* Verify that we can copy an entirely optimized out value, which may not have
4282 its contents allocated. */
4283 value_ref_ptr val = release_value (allocate_optimized_out_value (type));
4284 value_ref_ptr copy = release_value (value_copy (val.get ()));
4285
4286 SELF_CHECK (value_entirely_optimized_out (val.get ()));
4287 SELF_CHECK (value_entirely_optimized_out (copy.get ()));
4288 }
4289
4290 } /* namespace selftests */
4291 #endif /* GDB_SELF_TEST */
4292
4293 void _initialize_values ();
4294 void
4295 _initialize_values ()
4296 {
4297 cmd_list_element *show_convenience_cmd
4298 = add_cmd ("convenience", no_class, show_convenience, _("\
4299 Debugger convenience (\"$foo\") variables and functions.\n\
4300 Convenience variables are created when you assign them values;\n\
4301 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4302 \n\
4303 A few convenience variables are given values automatically:\n\
4304 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4305 \"$__\" holds the contents of the last address examined with \"x\"."
4306 #ifdef HAVE_PYTHON
4307 "\n\n\
4308 Convenience functions are defined via the Python API."
4309 #endif
4310 ), &showlist);
4311 add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4312
4313 add_cmd ("values", no_set_class, show_values, _("\
4314 Elements of value history around item number IDX (or last ten)."),
4315 &showlist);
4316
4317 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4318 Initialize a convenience variable if necessary.\n\
4319 init-if-undefined VARIABLE = EXPRESSION\n\
4320 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4321 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4322 VARIABLE is already initialized."));
4323
4324 add_prefix_cmd ("function", no_class, function_command, _("\
4325 Placeholder command for showing help on convenience functions."),
4326 &functionlist, 0, &cmdlist);
4327
4328 add_internal_function ("_isvoid", _("\
4329 Check whether an expression is void.\n\
4330 Usage: $_isvoid (expression)\n\
4331 Return 1 if the expression is void, 0 otherwise."),
4332 isvoid_internal_fn, NULL);
4333
4334 add_internal_function ("_creal", _("\
4335 Extract the real part of a complex number.\n\
4336 Usage: $_creal (expression)\n\
4337 Return the real part of a complex number; the result's type depends\n\
4338 on the type of the complex number."),
4339 creal_internal_fn, NULL);
4340
4341 add_internal_function ("_cimag", _("\
4342 Extract the imaginary part of a complex number.\n\
4343 Usage: $_cimag (expression)\n\
4344 Return the imaginary part of a complex number; the result's type\n\
4345 depends on the type of the complex number."),
4346 cimag_internal_fn, NULL);
4347
4348 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4349 class_support, &max_value_size, _("\
4350 Set the maximum size of a value gdb will load from the inferior."), _("\
4351 Show the maximum size of a value gdb will load from the inferior."), _("\
4352 Use this to control the maximum size, in bytes, of a value that gdb\n\
4353 will load from the inferior. Setting this value to 'unlimited'\n\
4354 disables checking.\n\
4355 Setting this does not invalidate already allocated values, it only\n\
4356 prevents future values, larger than this size, from being allocated."),
4357 set_max_value_size,
4358 show_max_value_size,
4359 &setlist, &showlist);
4360 set_show_commands vsize_limit
4361 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4362 &max_value_size, _("\
4363 Set the maximum number of bytes allowed in a variable-size object."), _("\
4364 Show the maximum number of bytes allowed in a variable-size object."), _("\
4365 Attempts to access an object whose size is not a compile-time constant\n\
4366 and exceeds this limit will cause an error."),
4367 NULL, NULL, &setlist, &showlist);
4368 deprecate_cmd (vsize_limit.set, "set max-value-size");
4369
4370 #if GDB_SELF_TEST
4371 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4372 selftests::register_test ("insert_into_bit_range_vector",
4373 selftests::test_insert_into_bit_range_vector);
4374 selftests::register_test ("value_copy", selftests::test_value_copy);
4375 #endif
4376 }
4377
4378 /* See value.h. */
4379
4380 void
4381 finalize_values ()
4382 {
4383 all_values.clear ();
4384 }