[gdb/testsuite] Prevent timeout in gdb.ada/float-bits.exp
[binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include <iterator>
44 #include <utility>
45 #include <vector>
46 #include "completer.h"
47 #include "gdbsupport/selftest.h"
48 #include "gdbsupport/array-view.h"
49 #include "cli/cli-style.h"
50 #include "expop.h"
51 #include "inferior.h"
52 #include "varobj.h"
53
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.
     NOTE(review): ownership/lifetime of this string is not visible
     here -- presumably heap-allocated by the creator; confirm before
     freeing.  */
  char *name;

  /* The handler invoked when the function is called.  */
  internal_function_fn handler;

  /* User data passed back to the handler on each call.  */
  void *cookie;
};
68
69 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
70
71 struct range
72 {
73 /* Lowest offset in the range. */
74 LONGEST offset;
75
76 /* Length of the range. */
77 LONGEST length;
78
79 /* Returns true if THIS is strictly less than OTHER, useful for
80 searching. We keep ranges sorted by offset and coalesce
81 overlapping and contiguous ranges, so this just compares the
82 starting offset. */
83
84 bool operator< (const range &other) const
85 {
86 return offset < other.offset;
87 }
88
89 /* Returns true if THIS is equal to OTHER. */
90 bool operator== (const range &other) const
91 {
92 return offset == other.offset && length == other.length;
93 }
94 };
95
96 /* Returns true if the ranges defined by [offset1, offset1+len1) and
97 [offset2, offset2+len2) overlap. */
98
99 static int
100 ranges_overlap (LONGEST offset1, LONGEST len1,
101 LONGEST offset2, LONGEST len2)
102 {
103 ULONGEST h, l;
104
105 l = std::max (offset1, offset2);
106 h = std::min (offset1 + len1, offset2 + len2);
107 return (l < h);
108 }
109
110 /* Returns true if RANGES contains any range that overlaps [OFFSET,
111 OFFSET+LENGTH). */
112
113 static int
114 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
115 LONGEST length)
116 {
117 range what;
118
119 what.offset = offset;
120 what.length = length;
121
122 /* We keep ranges sorted by offset and coalesce overlapping and
123 contiguous ranges, so to check if a range list contains a given
124 range, we can do a binary search for the position the given range
125 would be inserted if we only considered the starting OFFSET of
126 ranges. We call that position I. Since we also have LENGTH to
127 care for (this is a range afterall), we need to check if the
128 _previous_ range overlaps the I range. E.g.,
129
130 R
131 |---|
132 |---| |---| |------| ... |--|
133 0 1 2 N
134
135 I=1
136
137 In the case above, the binary search would return `I=1', meaning,
138 this OFFSET should be inserted at position 1, and the current
139 position 1 should be pushed further (and before 2). But, `0'
140 overlaps with R.
141
142 Then we need to check if the I range overlaps the I range itself.
143 E.g.,
144
145 R
146 |---|
147 |---| |---| |-------| ... |--|
148 0 1 2 N
149
150 I=1
151 */
152
153
154 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
155
156 if (i > ranges.begin ())
157 {
158 const struct range &bef = *(i - 1);
159
160 if (ranges_overlap (bef.offset, bef.length, offset, length))
161 return 1;
162 }
163
164 if (i < ranges.end ())
165 {
166 const struct range &r = *i;
167
168 if (ranges_overlap (r.offset, r.length, offset, length))
169 return 1;
170 }
171
172 return 0;
173 }
174
175 static struct cmd_list_element *functionlist;
176
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* A freshly constructed value is modifiable, lazy (no contents
     fetched yet), considered initialized, not on the stack, and its
     enclosing type starts out identical to its declared type.  */
  explicit value (struct type *type_)
    : modifiable (1),
      lazy (1),
      initialized (1),
      stack (0),
      is_zero (false),
      type (type_),
      enclosing_type (type_)
  {
  }

  /* Destruction releases lval-specific resources: a computed lval's
     closure (via its free_closure callback, if any) or an xmethod
     worker.  Other lval kinds own nothing beyond the members below.  */
  ~value ()
  {
    if (VALUE_LVAL (this) == lval_computed)
      {
	const struct lval_funcs *funcs = location.computed.funcs;

	if (funcs->free_closure)
	  funcs->free_closure (this);
      }
    else if (VALUE_LVAL (this) == lval_xcallable)
      delete location.xm_worker;
  }

  /* Values are reference counted (see reference_count below), never
     copied.  */
  DISABLE_COPY_AND_ASSIGN (value);

  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval = not_lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* True if this is a zero value, created by 'value_zero'; false
     otherwise.  */
  bool is_zero : 1;

  /* Location of value (if lval).  Which union member is active is
     determined by LVAL above.  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.  */
    CORE_ADDR address;

    /* If lval == lval_register, the value is from a register.  */
    struct
    {
      /* Register number.  */
      int regnum;
      /* Frame ID of "next" frame to which a register value is relative.
	 If the register value is found relative to frame F, then the
	 frame id of F->next will be stored in next_frame_id.  */
      struct frame_id next_frame_id;
    } reg;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location {};

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  Note also the member embedded_offset
     below.  */
  LONGEST offset = 0;

  /* Only used for bitfields; number of bits contained in them.  */
  LONGEST bitsize = 0;

  /* Only used for bitfields; position of start of field.  For
     little-endian targets, it is the position of the LSB.  For
     big-endian targets, it is the position of the MSB.  */
  LONGEST bitpos = 0;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count = 1;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  value_ref_ptr parent;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
       full object, not just the limited portion apparent from the
       compile-time type.

     - If `type' has virtual base classes, then even printing `type'
       alone may require reaching outside the `type' portion of the
       object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  LONGEST embedded_offset = 0;
  LONGEST pointed_to_offset = 0;

  /* Actual contents of the value.  Target byte-order.

     May be nullptr if the value is lazy or is entirely optimized out.
     Guaranteed to be non-nullptr otherwise.  */
  gdb::unique_xmalloc_ptr<gdb_byte> contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  std::vector<range> unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  std::vector<range> optimized_out;
};
373
374 /* See value.h. */
375
376 struct gdbarch *
377 get_value_arch (const struct value *value)
378 {
379 return value_type (value)->arch ();
380 }
381
382 int
383 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
384 {
385 gdb_assert (!value->lazy);
386
387 return !ranges_contain (value->unavailable, offset, length);
388 }
389
390 int
391 value_bytes_available (const struct value *value,
392 LONGEST offset, LONGEST length)
393 {
394 return value_bits_available (value,
395 offset * TARGET_CHAR_BIT,
396 length * TARGET_CHAR_BIT);
397 }
398
399 int
400 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
401 {
402 gdb_assert (!value->lazy);
403
404 return ranges_contain (value->optimized_out, bit_offset, bit_length);
405 }
406
407 int
408 value_entirely_available (struct value *value)
409 {
410 /* We can only tell whether the whole value is available when we try
411 to read it. */
412 if (value->lazy)
413 value_fetch_lazy (value);
414
415 if (value->unavailable.empty ())
416 return 1;
417 return 0;
418 }
419
420 /* Returns true if VALUE is entirely covered by RANGES. If the value
421 is lazy, it'll be read now. Note that RANGE is a pointer to
422 pointer because reading the value might change *RANGE. */
423
424 static int
425 value_entirely_covered_by_range_vector (struct value *value,
426 const std::vector<range> &ranges)
427 {
428 /* We can only tell whether the whole value is optimized out /
429 unavailable when we try to read it. */
430 if (value->lazy)
431 value_fetch_lazy (value);
432
433 if (ranges.size () == 1)
434 {
435 const struct range &t = ranges[0];
436
437 if (t.offset == 0
438 && t.length == (TARGET_CHAR_BIT
439 * value_enclosing_type (value)->length ()))
440 return 1;
441 }
442
443 return 0;
444 }
445
/* Return non-zero if every bit of VALUE is unavailable.  Fetches the
   value if still lazy.  */

int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->unavailable);
}
451
/* Return non-zero if every bit of VALUE is optimized out.  Fetches
   the value if still lazy.  */

int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->optimized_out);
}
457
458 /* Insert into the vector pointed to by VECTORP the bit range starting of
459 OFFSET bits, and extending for the next LENGTH bits. */
460
461 static void
462 insert_into_bit_range_vector (std::vector<range> *vectorp,
463 LONGEST offset, LONGEST length)
464 {
465 range newr;
466
467 /* Insert the range sorted. If there's overlap or the new range
468 would be contiguous with an existing range, merge. */
469
470 newr.offset = offset;
471 newr.length = length;
472
473 /* Do a binary search for the position the given range would be
474 inserted if we only considered the starting OFFSET of ranges.
475 Call that position I. Since we also have LENGTH to care for
476 (this is a range afterall), we need to check if the _previous_
477 range overlaps the I range. E.g., calling R the new range:
478
479 #1 - overlaps with previous
480
481 R
482 |-...-|
483 |---| |---| |------| ... |--|
484 0 1 2 N
485
486 I=1
487
488 In the case #1 above, the binary search would return `I=1',
489 meaning, this OFFSET should be inserted at position 1, and the
490 current position 1 should be pushed further (and become 2). But,
491 note that `0' overlaps with R, so we want to merge them.
492
493 A similar consideration needs to be taken if the new range would
494 be contiguous with the previous range:
495
496 #2 - contiguous with previous
497
498 R
499 |-...-|
500 |--| |---| |------| ... |--|
501 0 1 2 N
502
503 I=1
504
505 If there's no overlap with the previous range, as in:
506
507 #3 - not overlapping and not contiguous
508
509 R
510 |-...-|
511 |--| |---| |------| ... |--|
512 0 1 2 N
513
514 I=1
515
516 or if I is 0:
517
518 #4 - R is the range with lowest offset
519
520 R
521 |-...-|
522 |--| |---| |------| ... |--|
523 0 1 2 N
524
525 I=0
526
527 ... we just push the new range to I.
528
529 All the 4 cases above need to consider that the new range may
530 also overlap several of the ranges that follow, or that R may be
531 contiguous with the following range, and merge. E.g.,
532
533 #5 - overlapping following ranges
534
535 R
536 |------------------------|
537 |--| |---| |------| ... |--|
538 0 1 2 N
539
540 I=0
541
542 or:
543
544 R
545 |-------|
546 |--| |---| |------| ... |--|
547 0 1 2 N
548
549 I=1
550
551 */
552
553 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
554 if (i > vectorp->begin ())
555 {
556 struct range &bef = *(i - 1);
557
558 if (ranges_overlap (bef.offset, bef.length, offset, length))
559 {
560 /* #1 */
561 ULONGEST l = std::min (bef.offset, offset);
562 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
563
564 bef.offset = l;
565 bef.length = h - l;
566 i--;
567 }
568 else if (offset == bef.offset + bef.length)
569 {
570 /* #2 */
571 bef.length += length;
572 i--;
573 }
574 else
575 {
576 /* #3 */
577 i = vectorp->insert (i, newr);
578 }
579 }
580 else
581 {
582 /* #4 */
583 i = vectorp->insert (i, newr);
584 }
585
586 /* Check whether the ranges following the one we've just added or
587 touched can be folded in (#5 above). */
588 if (i != vectorp->end () && i + 1 < vectorp->end ())
589 {
590 int removed = 0;
591 auto next = i + 1;
592
593 /* Get the range we just touched. */
594 struct range &t = *i;
595 removed = 0;
596
597 i = next;
598 for (; i < vectorp->end (); i++)
599 {
600 struct range &r = *i;
601 if (r.offset <= t.offset + t.length)
602 {
603 ULONGEST l, h;
604
605 l = std::min (t.offset, r.offset);
606 h = std::max (t.offset + t.length, r.offset + r.length);
607
608 t.offset = l;
609 t.length = h - l;
610
611 removed++;
612 }
613 else
614 {
615 /* If we couldn't merge this one, we won't be able to
616 merge following ones either, since the ranges are
617 always sorted by OFFSET. */
618 break;
619 }
620 }
621
622 if (removed != 0)
623 vectorp->erase (next, next + removed);
624 }
625 }
626
/* Mark VALUE's bits [OFFSET, OFFSET+LENGTH) as unavailable.  */

void
mark_value_bits_unavailable (struct value *value,
			     LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->unavailable, offset, length);
}
633
634 void
635 mark_value_bytes_unavailable (struct value *value,
636 LONGEST offset, LONGEST length)
637 {
638 mark_value_bits_unavailable (value,
639 offset * TARGET_CHAR_BIT,
640 length * TARGET_CHAR_BIT);
641 }
642
643 /* Find the first range in RANGES that overlaps the range defined by
644 OFFSET and LENGTH, starting at element POS in the RANGES vector,
645 Returns the index into RANGES where such overlapping range was
646 found, or -1 if none was found. */
647
648 static int
649 find_first_range_overlap (const std::vector<range> *ranges, int pos,
650 LONGEST offset, LONGEST length)
651 {
652 int i;
653
654 for (i = pos; i < ranges->size (); i++)
655 {
656 const range &r = (*ranges)[i];
657 if (ranges_overlap (r.offset, r.length, offset, length))
658 return i;
659 }
660
661 return -1;
662 }
663
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)

   Strategy: compare the (at most one) partial byte at the start, then
   the (at most one) partial byte at the end, and finish the aligned
   middle with a plain memcmp.  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  /* Both offsets must share the same sub-byte phase, otherwise the
     byte-wise masking below would not line up.  */
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The compared region ends inside this same byte; trim the
	     mask's low bits accordingly.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
764
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
778
779 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
780 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
781 ranges starting at OFFSET2 bits. Return true if the ranges match
782 and fill in *L and *H with the overlapping window relative to
783 (both) OFFSET1 or OFFSET2. */
784
785 static int
786 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
787 struct ranges_and_idx *rp2,
788 LONGEST offset1, LONGEST offset2,
789 LONGEST length, ULONGEST *l, ULONGEST *h)
790 {
791 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
792 offset1, length);
793 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
794 offset2, length);
795
796 if (rp1->idx == -1 && rp2->idx == -1)
797 {
798 *l = length;
799 *h = length;
800 return 1;
801 }
802 else if (rp1->idx == -1 || rp2->idx == -1)
803 return 0;
804 else
805 {
806 const range *r1, *r2;
807 ULONGEST l1, h1;
808 ULONGEST l2, h2;
809
810 r1 = &(*rp1->ranges)[rp1->idx];
811 r2 = &(*rp2->ranges)[rp2->idx];
812
813 /* Get the unavailable windows intersected by the incoming
814 ranges. The first and last ranges that overlap the argument
815 range may be wider than said incoming arguments ranges. */
816 l1 = std::max (offset1, r1->offset);
817 h1 = std::min (offset1 + length, r1->offset + r1->length);
818
819 l2 = std::max (offset2, r2->offset);
820 h2 = std::min (offset2 + length, offset2 + r2->length);
821
822 /* Make them relative to the respective start offsets, so we can
823 compare them for equality. */
824 l1 -= offset1;
825 h1 -= offset1;
826
827 l2 -= offset2;
828 h2 -= offset2;
829
830 /* Different ranges, no match. */
831 if (l1 != l2 || h1 != h2)
832 return 0;
833
834 *h = h1;
835 *l = l1;
836 return 1;
837 }
838 }
839
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  Equality requires both
   that the valid bits compare equal and that the invalid
   (unavailable / optimized-out) windows line up identically.  */

static bool
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= val1->enclosing_type->length () * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= val2->enclosing_type->length () * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &val1->unavailable;
  rp2[0].ranges = &val2->unavailable;
  rp1[1].ranges = &val1->optimized_out;
  rp2[1].ranges = &val2->optimized_out;

  /* Walk the window in chunks: each iteration compares the valid
     bits up to the next invalid range, then skips past it.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return false;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
				   val2->contents.get (), offset2, l) != 0)
	return false;

      /* Advance past both the compared valid bits and the matching
	 invalid window.  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}
908
909 bool
910 value_contents_eq (const struct value *val1, LONGEST offset1,
911 const struct value *val2, LONGEST offset2,
912 LONGEST length)
913 {
914 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
915 val2, offset2 * TARGET_CHAR_BIT,
916 length * TARGET_CHAR_BIT);
917 }
918
919 /* See value.h. */
920
921 bool
922 value_contents_eq (const struct value *val1, const struct value *val2)
923 {
924 ULONGEST len1 = check_typedef (value_enclosing_type (val1))->length ();
925 ULONGEST len2 = check_typedef (value_enclosing_type (val2))->length ();
926 if (len1 != len2)
927 return false;
928 return value_contents_eq (val1, 0, val2, 0, len1);
929 }
930
931 /* The value-history records all the values printed by print commands
932 during this session. */
933
934 static std::vector<value_ref_ptr> value_history;
935
936 \f
937 /* List of all value objects currently allocated
938 (except for those released by calls to release_value)
939 This is so they can be freed after each command. */
940
941 static std::vector<value_ref_ptr> all_values;
942
943 /* Allocate a lazy value for type TYPE. Its actual content is
944 "lazily" allocated too: the content field of the return value is
945 NULL; it will be allocated when it is fetched from the target. */
946
947 struct value *
948 allocate_value_lazy (struct type *type)
949 {
950 struct value *val;
951
952 /* Call check_typedef on our type to make sure that, if TYPE
953 is a TYPE_CODE_TYPEDEF, its length is set to the length
954 of the target type instead of zero. However, we do not
955 replace the typedef type by the target type, because we want
956 to keep the typedef in order to be able to set the VAL's type
957 description correctly. */
958 check_typedef (type);
959
960 val = new struct value (type);
961
962 /* Values start out on the all_values chain. */
963 all_values.emplace_back (val);
964
965 return val;
966 }
967
968 /* The maximum size, in bytes, that GDB will try to allocate for a value.
969 The initial value of 64k was not selected for any specific reason, it is
970 just a reasonable starting point. */
971
972 static int max_value_size = 65536; /* 64k bytes */
973
974 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
975 LONGEST, otherwise GDB will not be able to parse integer values from the
976 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
977 be unable to parse "set max-value-size 2".
978
979 As we want a consistent GDB experience across hosts with different sizes
980 of LONGEST, this arbitrary minimum value was selected, so long as this
981 is bigger than LONGEST on all GDB supported hosts we're fine. */
982
983 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
984 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
985
986 /* Implement the "set max-value-size" command. */
987
988 static void
989 set_max_value_size (const char *args, int from_tty,
990 struct cmd_list_element *c)
991 {
992 gdb_assert (max_value_size == -1 || max_value_size >= 0);
993
994 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
995 {
996 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
997 error (_("max-value-size set too low, increasing to %d bytes"),
998 max_value_size);
999 }
1000 }
1001
1002 /* Implement the "show max-value-size" command. */
1003
1004 static void
1005 show_max_value_size (struct ui_file *file, int from_tty,
1006 struct cmd_list_element *c, const char *value)
1007 {
1008 if (max_value_size == -1)
1009 gdb_printf (file, _("Maximum value size is unlimited.\n"));
1010 else
1011 gdb_printf (file, _("Maximum value size is %d bytes.\n"),
1012 max_value_size);
1013 }
1014
1015 /* Called before we attempt to allocate or reallocate a buffer for the
1016 contents of a value. TYPE is the type of the value for which we are
1017 allocating the buffer. If the buffer is too large (based on the user
1018 controllable setting) then throw an error. If this function returns
1019 then we should attempt to allocate the buffer. */
1020
1021 static void
1022 check_type_length_before_alloc (const struct type *type)
1023 {
1024 ULONGEST length = type->length ();
1025
1026 if (max_value_size > -1 && length > max_value_size)
1027 {
1028 if (type->name () != NULL)
1029 error (_("value of type `%s' requires %s bytes, which is more "
1030 "than max-value-size"), type->name (), pulongest (length));
1031 else
1032 error (_("value requires %s bytes, which is more than "
1033 "max-value-size"), pulongest (length));
1034 }
1035 }
1036
1037 /* Allocate the contents of VAL if it has not been allocated yet. */
1038
1039 static void
1040 allocate_value_contents (struct value *val)
1041 {
1042 if (!val->contents)
1043 {
1044 check_type_length_before_alloc (val->enclosing_type);
1045 val->contents.reset
1046 ((gdb_byte *) xzalloc (val->enclosing_type->length ()));
1047 }
1048 }
1049
/* Allocate a value and its contents for type TYPE.  */

struct value *
allocate_value (struct type *type)
{
  struct value *val = allocate_value_lazy (type);

  allocate_value_contents (val);
  /* Contents are present now, so the value is no longer lazy.  */
  val->lazy = 0;
  return val;
}
1061
1062 /* Allocate a value that has the correct length
1063 for COUNT repetitions of type TYPE. */
1064
1065 struct value *
1066 allocate_repeat_value (struct type *type, int count)
1067 {
1068 /* Despite the fact that we are really creating an array of TYPE here, we
1069 use the string lower bound as the array lower bound. This seems to
1070 work fine for now. */
1071 int low_bound = current_language->string_lower_bound ();
1072 /* FIXME-type-allocation: need a way to free this type when we are
1073 done with it. */
1074 struct type *array_type
1075 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1076
1077 return allocate_value (array_type);
1078 }
1079
1080 struct value *
1081 allocate_computed_value (struct type *type,
1082 const struct lval_funcs *funcs,
1083 void *closure)
1084 {
1085 struct value *v = allocate_value_lazy (type);
1086
1087 VALUE_LVAL (v) = lval_computed;
1088 v->location.computed.funcs = funcs;
1089 v->location.computed.closure = closure;
1090
1091 return v;
1092 }
1093
1094 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1095
1096 struct value *
1097 allocate_optimized_out_value (struct type *type)
1098 {
1099 struct value *retval = allocate_value_lazy (type);
1100
1101 mark_value_bytes_optimized_out (retval, 0, type->length ());
1102 set_value_lazy (retval, 0);
1103 return retval;
1104 }
1105
1106 /* Accessor methods. */
1107
/* Return the declared type of VALUE.  */

struct type *
value_type (const struct value *value)
{
  return value->type;
}
/* Overwrite VALUE's type with TYPE.  Deprecated: callers should
   normally create a new value instead of mutating the type.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->type = type;
}
1118
/* Return VALUE's offset within its lvalue location (or parent).  */
LONGEST
value_offset (const struct value *value)
{
  return value->offset;
}
/* Set VALUE's offset within its lvalue location to OFFSET.  */
void
set_value_offset (struct value *value, LONGEST offset)
{
  value->offset = offset;
}
1129
/* Return the bit position of VALUE if it describes a bitfield,
   otherwise 0.  */
LONGEST
value_bitpos (const struct value *value)
{
  return value->bitpos;
}
/* Set the bitfield bit position of VALUE to BIT.  */
void
set_value_bitpos (struct value *value, LONGEST bit)
{
  value->bitpos = bit;
}
1140
/* Return the bit size of VALUE if it describes a bitfield,
   otherwise 0.  */
LONGEST
value_bitsize (const struct value *value)
{
  return value->bitsize;
}
/* Set the bitfield bit size of VALUE to BIT.  */
void
set_value_bitsize (struct value *value, LONGEST bit)
{
  value->bitsize = bit;
}
1151
/* Return VALUE's parent value (the whole object this value is a
   component of), or NULL if it has none.  Does not add a reference.  */
struct value *
value_parent (const struct value *value)
{
  return value->parent.get ();
}
1157
1158 /* See value.h. */
1159
/* See value.h.  Takes a new reference to PARENT; any previous parent
   reference is released.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  value->parent = value_ref_ptr::new_reference (parent);
}
1165
1166 gdb::array_view<gdb_byte>
1167 value_contents_raw (struct value *value)
1168 {
1169 struct gdbarch *arch = get_value_arch (value);
1170 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1171
1172 allocate_value_contents (value);
1173
1174 ULONGEST length = value_type (value)->length ();
1175 return gdb::make_array_view
1176 (value->contents.get () + value->embedded_offset * unit_size, length);
1177 }
1178
1179 gdb::array_view<gdb_byte>
1180 value_contents_all_raw (struct value *value)
1181 {
1182 allocate_value_contents (value);
1183
1184 ULONGEST length = value_enclosing_type (value)->length ();
1185 return gdb::make_array_view (value->contents.get (), length);
1186 }
1187
/* Return the enclosing type of VALUE — the type of the complete
   object the contents buffer actually holds, which may be larger than
   the declared type.  */
struct type *
value_enclosing_type (const struct value *value)
{
  return value->enclosing_type;
}
1193
1194 /* Look at value.h for description. */
1195
struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default answer: the declared type.  */
  result = value_type (value);
  /* Dynamic type resolution is only attempted when the user has "set
     print object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if (result->is_pointer_or_reference ()
	  && (check_typedef (result->target_type ())->code ()
	      == TYPE_CODE_STRUCT)
	  && !value_optimized_out (value))
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  /* If RTTI lookup fails, silently keep the declared type.  */
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* For non-struct-pointer values the "actual" type is the
	     enclosing type recorded at fetch time.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1237
/* Throw the standard "value has been optimized out" error.  Never
   returns.  */
void
error_value_optimized_out (void)
{
  throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
}
1243
1244 static void
1245 require_not_optimized_out (const struct value *value)
1246 {
1247 if (!value->optimized_out.empty ())
1248 {
1249 if (value->lval == lval_register)
1250 throw_error (OPTIMIZED_OUT_ERROR,
1251 _("register has not been saved in frame"));
1252 else
1253 error_value_optimized_out ();
1254 }
1255 }
1256
1257 static void
1258 require_available (const struct value *value)
1259 {
1260 if (!value->unavailable.empty ())
1261 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1262 }
1263
1264 gdb::array_view<const gdb_byte>
1265 value_contents_for_printing (struct value *value)
1266 {
1267 if (value->lazy)
1268 value_fetch_lazy (value);
1269
1270 ULONGEST length = value_enclosing_type (value)->length ();
1271 return gdb::make_array_view (value->contents.get (), length);
1272 }
1273
1274 gdb::array_view<const gdb_byte>
1275 value_contents_for_printing_const (const struct value *value)
1276 {
1277 gdb_assert (!value->lazy);
1278
1279 ULONGEST length = value_enclosing_type (value)->length ();
1280 return gdb::make_array_view (value->contents.get (), length);
1281 }
1282
1283 gdb::array_view<const gdb_byte>
1284 value_contents_all (struct value *value)
1285 {
1286 gdb::array_view<const gdb_byte> result = value_contents_for_printing (value);
1287 require_not_optimized_out (value);
1288 require_available (value);
1289 return result;
1290 }
1291
1292 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1293 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1294
1295 static void
1296 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1297 const std::vector<range> &src_range, int src_bit_offset,
1298 int bit_length)
1299 {
1300 for (const range &r : src_range)
1301 {
1302 ULONGEST h, l;
1303
1304 l = std::max (r.offset, (LONGEST) src_bit_offset);
1305 h = std::min (r.offset + r.length,
1306 (LONGEST) src_bit_offset + bit_length);
1307
1308 if (l < h)
1309 insert_into_bit_range_vector (dst_range,
1310 dst_bit_offset + (l - src_bit_offset),
1311 h - l);
1312 }
1313 }
1314
1315 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1316 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1317
/* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  Both the
   unavailable and the optimized-out range vectors are copied.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1330
1331 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1332 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1333 contents, starting at DST_OFFSET. If unavailable contents are
1334 being copied from SRC, the corresponding DST contents are marked
1335 unavailable accordingly. Neither DST nor SRC may be lazy
1336 values.
1337
1338 It is assumed the contents of DST in the [DST_OFFSET,
1339 DST_OFFSET+LENGTH) range are wholly available. */
1340
/* Copy LENGTH target addressable memory units of SRC value's (all)
   contents (value_contents_all) starting at SRC_OFFSET, into DST
   value's (all) contents, starting at DST_OFFSET.  Unavailable and
   optimized-out metadata is copied (ORed) along with the bytes.
   Neither DST nor SRC may be lazy values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

static void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
			 struct value *src, LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents
    = value_contents_all_raw (dst).slice (dst_offset * unit_size,
					  length * unit_size);
  gdb::array_view<const gdb_byte> src_contents
    = value_contents_all_raw (src).slice (src_offset * unit_size,
					  length * unit_size);
  copy (src_contents, dst_contents);

  /* Copy the meta-data, adjusted.  Offsets are in addressable units;
     convert to bit offsets for the range vectors.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1381
1382 /* A helper for value_from_component_bitsize that copies bits from SRC
1383 to DEST. */
1384
/* A helper for value_from_component_bitsize that copies BIT_LENGTH
   bits from SRC (starting at SRC_BIT_OFFSET) to DST (starting at
   DST_BIT_OFFSET), honoring the byte order of SRC's type, and copies
   the unavailable/optimized-out metadata along.  */

static void
value_contents_copy_raw_bitwise (struct value *dst, LONGEST dst_bit_offset,
				 struct value *src, LONGEST src_bit_offset,
				 LONGEST bit_length)
{
  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  /* NOTE: the byte availability check below deliberately rounds the
     bit range down to whole bytes.  */
  LONGEST dst_offset = dst_bit_offset / TARGET_CHAR_BIT;
  LONGEST length = bit_length / TARGET_CHAR_BIT;
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst, dst_bit_offset,
					     bit_length));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents = value_contents_all_raw (dst);
  gdb::array_view<const gdb_byte> src_contents = value_contents_all_raw (src);
  copy_bitwise (dst_contents.data (), dst_bit_offset,
		src_contents.data (), src_bit_offset,
		bit_length,
		type_byte_order (value_type (src)) == BFD_ENDIAN_BIG);

  /* Copy the meta-data.  */
  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1418
1419 /* Copy LENGTH bytes of SRC value's (all) contents
1420 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1421 (all) contents, starting at DST_OFFSET. If unavailable contents
1422 are being copied from SRC, the corresponding DST contents are
1423 marked unavailable accordingly. DST must not be lazy. If SRC is
1424 lazy, it will be fetched now.
1425
1426 It is assumed the contents of DST in the [DST_OFFSET,
1427 DST_OFFSET+LENGTH) range are wholly available. */
1428
1429 void
1430 value_contents_copy (struct value *dst, LONGEST dst_offset,
1431 struct value *src, LONGEST src_offset, LONGEST length)
1432 {
1433 if (src->lazy)
1434 value_fetch_lazy (src);
1435
1436 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1437 }
1438
/* Return non-zero if VALUE's contents have not been fetched yet.  */
int
value_lazy (const struct value *value)
{
  return value->lazy;
}
1444
/* Set VALUE's lazy flag to VAL.  */
void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}
1450
/* Return non-zero if VALUE lives on the inferior's stack.  */
int
value_stack (const struct value *value)
{
  return value->stack;
}
1456
/* Set VALUE's stack flag to VAL.  */
void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}
1462
1463 gdb::array_view<const gdb_byte>
1464 value_contents (struct value *value)
1465 {
1466 gdb::array_view<const gdb_byte> result = value_contents_writeable (value);
1467 require_not_optimized_out (value);
1468 require_available (value);
1469 return result;
1470 }
1471
1472 gdb::array_view<gdb_byte>
1473 value_contents_writeable (struct value *value)
1474 {
1475 if (value->lazy)
1476 value_fetch_lazy (value);
1477 return value_contents_raw (value);
1478 }
1479
/* Return non-zero if any part of VALUE has been optimized out.  May
   fetch a lazy VALUE as a side effect when the answer cannot be
   determined otherwise.  */
int
value_optimized_out (struct value *value)
{
  if (value->lazy)
    {
      /* See if we can compute the result without fetching the
	 value.  */
      if (VALUE_LVAL (value) == lval_memory)
	return false;
      else if (VALUE_LVAL (value) == lval_computed)
	{
	  const struct lval_funcs *funcs = value->location.computed.funcs;

	  if (funcs->is_optimized_out != nullptr)
	    return funcs->is_optimized_out (value);
	}

      /* Fall back to fetching.  */
      try
	{
	  value_fetch_lazy (value);
	}
      catch (const gdb_exception_error &ex)
	{
	  switch (ex.error)
	    {
	    case MEMORY_ERROR:
	    case OPTIMIZED_OUT_ERROR:
	    case NOT_AVAILABLE_ERROR:
	      /* These can normally happen when we try to access an
		 optimized out or unavailable register, either in a
		 physical register or spilled to memory.  */
	      break;
	    default:
	      throw;
	    }
	}
    }

  /* After any fetch, the range vector is authoritative.  */
  return !value->optimized_out.empty ();
}
1521
1522 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1523 the following LENGTH bytes. */
1524
1525 void
1526 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1527 {
1528 mark_value_bits_optimized_out (value,
1529 offset * TARGET_CHAR_BIT,
1530 length * TARGET_CHAR_BIT);
1531 }
1532
1533 /* See value.h. */
1534
/* See value.h.  Record [OFFSET, OFFSET+LENGTH) bits of VALUE as
   optimized out.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}
1541
1542 int
1543 value_bits_synthetic_pointer (const struct value *value,
1544 LONGEST offset, LONGEST length)
1545 {
1546 if (value->lval != lval_computed
1547 || !value->location.computed.funcs->check_synthetic_pointer)
1548 return 0;
1549 return value->location.computed.funcs->check_synthetic_pointer (value,
1550 offset,
1551 length);
1552 }
1553
/* Return the offset of VALUE's declared type within its enclosing
   object's contents.  */
LONGEST
value_embedded_offset (const struct value *value)
{
  return value->embedded_offset;
}
1559
/* Set VALUE's embedded offset to VAL.  */
void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->embedded_offset = val;
}
1565
/* Return VALUE's pointed-to offset (used for pointers to full
   objects).  */
LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->pointed_to_offset;
}
1571
/* Set VALUE's pointed-to offset to VAL.  */
void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->pointed_to_offset = val;
}
1577
/* Return the lval_funcs of V, which must be a computed lvalue.  */
const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}
1585
/* Return the closure of V, which must be a computed lvalue.  */
void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}
1593
/* Return a mutable pointer to VALUE's lval kind.  Deprecated: used by
   the VALUE_LVAL macro to allow direct assignment.  */
enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}
1599
/* Return VALUE's lval kind (const-friendly accessor).  */
enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1605
1606 CORE_ADDR
1607 value_address (const struct value *value)
1608 {
1609 if (value->lval != lval_memory)
1610 return 0;
1611 if (value->parent != NULL)
1612 return value_address (value->parent.get ()) + value->offset;
1613 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1614 {
1615 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1616 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1617 }
1618
1619 return value->location.address + value->offset;
1620 }
1621
1622 CORE_ADDR
1623 value_raw_address (const struct value *value)
1624 {
1625 if (value->lval != lval_memory)
1626 return 0;
1627 return value->location.address;
1628 }
1629
/* Set the raw memory address of VALUE (which must be lval_memory) to
   ADDR.  */
void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval == lval_memory);
  value->location.address = addr;
}
1636
/* Return a mutable pointer to VALUE's internalvar location slot.
   Deprecated direct-access hook.  */
struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}
1642
/* Return a mutable pointer to the next-frame id of VALUE, which must
   be an lval_register value.  Deprecated direct-access hook.  */
struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.next_frame_id;
}
1649
/* Return a mutable pointer to the register number of VALUE, which
   must be an lval_register value.  Deprecated direct-access hook.  */
int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->lval == lval_register);
  return &value->location.reg.regnum;
}
1656
/* Return non-zero if VALUE may be modified (e.g. history values are
   made non-modifiable).  */
int
deprecated_value_modifiable (const struct value *value)
{
  return value->modifiable;
}
1662 \f
1663 /* Return a mark in the value chain. All values allocated after the
1664 mark is obtained (except for those released) are subject to being freed
1665 if a subsequent value_free_to_mark is passed the mark. */
1666 struct value *
1667 value_mark (void)
1668 {
1669 if (all_values.empty ())
1670 return nullptr;
1671 return all_values.back ().get ();
1672 }
1673
1674 /* See value.h. */
1675
/* See value.h.  Acquire one reference to VAL; pair with
   value_decref.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1681
1682 /* Release a reference to VAL, which was acquired with value_incref.
1683 This function is also called to deallocate values from the value
1684 chain. */
1685
1686 void
1687 value_decref (struct value *val)
1688 {
1689 if (val != nullptr)
1690 {
1691 gdb_assert (val->reference_count > 0);
1692 val->reference_count--;
1693 if (val->reference_count == 0)
1694 delete val;
1695 }
1696 }
1697
1698 /* Free all values allocated since MARK was obtained by value_mark
1699 (except for those released). */
1700 void
1701 value_free_to_mark (const struct value *mark)
1702 {
1703 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1704 if (iter == all_values.end ())
1705 all_values.clear ();
1706 else
1707 all_values.erase (iter + 1, all_values.end ());
1708 }
1709
1710 /* Remove VAL from the chain all_values
1711 so it will not be freed automatically. */
1712
/* Remove VAL from the chain all_values so it will not be freed
   automatically.  Returns an owned reference to VAL; if VAL was not
   on the chain, a fresh reference is created.  */

value_ref_ptr
release_value (struct value *val)
{
  if (val == nullptr)
    return value_ref_ptr ();

  /* Search from the back, where recently-allocated values live.  */
  std::vector<value_ref_ptr>::reverse_iterator iter;
  for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
    {
      if (*iter == val)
	{
	  /* Copy the reference out before erasing; iter.base () - 1
	     converts the reverse iterator to the forward iterator
	     addressing the same element.  */
	  value_ref_ptr result = *iter;
	  all_values.erase (iter.base () - 1);
	  return result;
	}
    }

  /* We must always return an owned reference.  Normally this happens
     because we transfer the reference from the value chain, but in
     this case the value was not on the chain.  */
  return value_ref_ptr::new_reference (val);
}
1735
1736 /* See value.h. */
1737
/* See value.h.  Release all values allocated after MARK from the
   chain, returning them most-recent-first.  If MARK is not on the
   chain, the entire chain is released.  */

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      /* Move everything after MARK into RESULT, then trim the
	 chain.  */
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  /* Callers expect newest-first order.  */
  std::reverse (result.begin (), result.end ());
  return result;
}
1754
1755 /* Return a copy of the value ARG.
1756 It contains the same contents, for same memory address,
1757 but it's a different block of storage. */
1758
/* Return a copy of the value ARG.  It contains the same contents,
   for the same memory address, but it's a different block of storage.
   All flags and metadata (laziness, offsets, availability ranges) are
   duplicated; computed lvalues additionally get their closure
   copied via the copy_closure callback.  */

struct value *
value_copy (const value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* A lazy source yields a lazy copy — no contents buffer yet.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = arg->lval;
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  val->stack = arg->stack;
  val->is_zero = arg->is_zero;
  val->initialized = arg->initialized;
  val->unavailable = arg->unavailable;
  val->optimized_out = arg->optimized_out;

  /* Only copy bytes when the source actually has some: a non-lazy,
     not-entirely-optimized-out value.  */
  if (!value_lazy (val) && !value_entirely_optimized_out (val))
    {
      gdb_assert (arg->contents != nullptr);
      ULONGEST length = value_enclosing_type (arg)->length ();
      const auto &arg_view
	= gdb::make_array_view (arg->contents.get (), length);
      copy (arg_view, value_contents_all_raw (val));
    }

  val->parent = arg->parent;
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Give the lval implementation a chance to deep-copy its
	 closure.  */
      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1804
1805 /* Return a "const" and/or "volatile" qualified version of the value V.
1806 If CNST is true, then the returned value will be qualified with
1807 "const".
1808 if VOLTL is true, then the returned value will be qualified with
1809 "volatile". */
1810
1811 struct value *
1812 make_cv_value (int cnst, int voltl, struct value *v)
1813 {
1814 struct type *val_type = value_type (v);
1815 struct type *enclosing_type = value_enclosing_type (v);
1816 struct value *cv_val = value_copy (v);
1817
1818 deprecated_set_value_type (cv_val,
1819 make_cv_type (cnst, voltl, val_type, NULL));
1820 set_value_enclosing_type (cv_val,
1821 make_cv_type (cnst, voltl, enclosing_type, NULL));
1822
1823 return cv_val;
1824 }
1825
1826 /* Return a version of ARG that is non-lvalue. */
1827
1828 struct value *
1829 value_non_lval (struct value *arg)
1830 {
1831 if (VALUE_LVAL (arg) != not_lval)
1832 {
1833 struct type *enc_type = value_enclosing_type (arg);
1834 struct value *val = allocate_value (enc_type);
1835
1836 copy (value_contents_all (arg), value_contents_all_raw (val));
1837 val->type = arg->type;
1838 set_value_embedded_offset (val, value_embedded_offset (arg));
1839 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1840 return val;
1841 }
1842 return arg;
1843 }
1844
1845 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1846
1847 void
1848 value_force_lval (struct value *v, CORE_ADDR addr)
1849 {
1850 gdb_assert (VALUE_LVAL (v) == not_lval);
1851
1852 write_memory (addr, value_contents_raw (v).data (), value_type (v)->length ());
1853 v->lval = lval_memory;
1854 v->location.address = addr;
1855 }
1856
/* Make COMPONENT's location the same as WHOLE's, adjusting for
   components of internal variables and for types with a constant
   dynamic data location.  */
void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->lval != lval_xcallable);

  /* A piece of an internalvar gets its own lval kind so writes are
     routed back into the variable.  */
  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      /* Deep-copy the computed closure if the implementation wants
	 that.  */
      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }

  /* If the WHOLE value has a dynamically resolved location property then
     update the address of the COMPONENT.  */
  type = value_type (whole);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));

  /* Similarly, if the COMPONENT value has a dynamically resolved location
     property then update its address.  */
  type = value_type (component);
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    {
      /* If the COMPONENT has a dynamic location, and is an
	 lval_internalvar_component, then we change it to a lval_memory.

	 Usually a component of an internalvar is created non-lazy, and has
	 its content immediately copied from the parent internalvar.
	 However, for components with a dynamic location, the content of
	 the component is not contained within the parent, but is instead
	 accessed indirectly.  Further, the component will be created as a
	 lazy value.

	 By changing the type of the component to lval_memory we ensure
	 that value_fetch_lazy can successfully load the component.

	 This solution isn't ideal, but a real fix would require values to
	 carry around both the parent value contents, and the contents of
	 any dynamic fields within the parent.  This is a substantial
	 change to how values work in GDB.  */
      if (VALUE_LVAL (component) == lval_internalvar_component)
	{
	  gdb_assert (value_lazy (component));
	  VALUE_LVAL (component) = lval_memory;
	}
      else
	gdb_assert (VALUE_LVAL (component) == lval_memory);
      set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
    }
}
1919
1920 /* Access to the value history. */
1921
1922 /* Record a new value in the value history.
1923 Returns the absolute history index of the entry. */
1924
/* Record a new value in the value history.  The value is fetched,
   made non-modifiable, and released from the temporary value chain.
   Returns the absolute (1-based) history index of the entry.  */

int
record_latest_value (struct value *val)
{
  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  value_history.push_back (release_value (val));

  /* History indices are 1-based, so the new size is the new index.  */
  return value_history.size ();
}
1943
1944 /* Return a copy of the value in the history with sequence number NUM. */
1945
1946 struct value *
1947 access_value_history (int num)
1948 {
1949 int absnum = num;
1950
1951 if (absnum <= 0)
1952 absnum += value_history.size ();
1953
1954 if (absnum <= 0)
1955 {
1956 if (num == 0)
1957 error (_("The history is empty."));
1958 else if (num == 1)
1959 error (_("There is only one value in the history."));
1960 else
1961 error (_("History does not go back to $$%d."), -num);
1962 }
1963 if (absnum > value_history.size ())
1964 error (_("History has not yet reached $%d."), absnum);
1965
1966 absnum--;
1967
1968 return value_copy (value_history[absnum].get ());
1969 }
1970
1971 /* See value.h. */
1972
/* See value.h.  Return the number of entries in the value
   history.  */

ULONGEST
value_history_count ()
{
  return value_history.size ();
}
1978
/* Implement the "show values" command: print ten history entries
   around a given index.  The static NUM keeps the scroll position so
   that "show values +" (or hitting return) continues where the last
   invocation stopped.  */
static void
show_values (const char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history.size () - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history.size (); i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      gdb_printf (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      gdb_printf (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    set_repeat_arguments ("+");
}
2022 \f
/* The kinds of content an internal (convenience) variable can hold;
   selects the active member of union internalvar_data.  */
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
2045
/* Per-kind storage for an internal variable.  Which member is active
   is determined by the owning internalvar's KIND field.  */
union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
2082
2083 /* Internal variables. These are variables within the debugger
2084 that hold values assigned by debugger commands.
2085 The user refers to them with a '$' prefix
2086 that does not appear in the variable names stored internally. */
2087
struct internalvar
{
  /* Next variable in the singly-linked list headed by 'internalvars'.  */
  struct internalvar *next;
  /* The variable's name, without the leading '$'.  Heap-allocated.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};
2101
/* Head of the singly-linked list of all defined internal variables.  */
static struct internalvar *internalvars;
2103
2104 /* If the variable does not already exist create it and give it the
2105 value given. If no value is given then the default is zero. */
/* Implement "init-if-undefined": if the internal variable on the left
   of the assignment does not already exist (is void), evaluate the
   assignment; otherwise do nothing.  */
static void
init_if_undefined_command (const char* args, int from_tty)
{
  struct internalvar *intvar = nullptr;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->first_opcode () != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.  The LHS must be
     an internalvar operation for the command to make sense.  */
  expr::assign_operation *assign
    = dynamic_cast<expr::assign_operation *> (expr->op.get ());
  if (assign != nullptr)
    {
      expr::operation *lhs = assign->get_lhs ();
      expr::internalvar_operation *ivarop
	= dynamic_cast<expr::internalvar_operation *> (lhs);
      if (ivarop != nullptr)
	intvar = ivarop->get_internalvar ();
    }

  if (intvar == nullptr)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    evaluate_expression (expr.get ());
}
2141
2142
2143 /* Look up an internal variable with name NAME. NAME should not
2144 normally include a dollar sign.
2145
2146 If the specified internal variable does not exist,
2147 the return value is NULL. */
2148
2149 struct internalvar *
2150 lookup_only_internalvar (const char *name)
2151 {
2152 struct internalvar *var;
2153
2154 for (var = internalvars; var; var = var->next)
2155 if (strcmp (var->name, name) == 0)
2156 return var;
2157
2158 return NULL;
2159 }
2160
2161 /* Complete NAME by comparing it to the names of internal
2162 variables. */
2163
2164 void
2165 complete_internalvar (completion_tracker &tracker, const char *name)
2166 {
2167 struct internalvar *var;
2168 int len;
2169
2170 len = strlen (name);
2171
2172 for (var = internalvars; var; var = var->next)
2173 if (strncmp (var->name, name, len) == 0)
2174 tracker.add_completion (make_unique_xstrdup (var->name));
2175 }
2176
2177 /* Create an internal variable with name NAME and with a void value.
2178 NAME should not normally include a dollar sign. */
2179
2180 struct internalvar *
2181 create_internalvar (const char *name)
2182 {
2183 struct internalvar *var = XNEW (struct internalvar);
2184
2185 var->name = xstrdup (name);
2186 var->kind = INTERNALVAR_VOID;
2187 var->next = internalvars;
2188 internalvars = var;
2189 return var;
2190 }
2191
/* Create an internal variable with name NAME and register FUN as the
   function that value_of_internalvar uses to create a value whenever
   this variable is referenced.  NAME should not normally include a
   dollar sign.  DATA is passed uninterpreted to FUN when it is
   called.  CLEANUP, if not NULL, is called when the internal variable
   is destroyed.  It is passed DATA as its only argument.  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  /* Start from a plain void variable, then switch it to the
     make-value kind that defers evaluation to FUNCS.  */
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2211
2212 /* See documentation in value.h. */
2213
2214 int
2215 compile_internalvar_to_ax (struct internalvar *var,
2216 struct agent_expr *expr,
2217 struct axs_value *value)
2218 {
2219 if (var->kind != INTERNALVAR_MAKE_VALUE
2220 || var->u.make_value.functions->compile_to_ax == NULL)
2221 return 0;
2222
2223 var->u.make_value.functions->compile_to_ax (var, expr, value,
2224 var->u.make_value.data);
2225 return 1;
2226 }
2227
2228 /* Look up an internal variable with name NAME. NAME should not
2229 normally include a dollar sign.
2230
2231 If the specified internal variable does not exist,
2232 one is created, with a void value. */
2233
2234 struct internalvar *
2235 lookup_internalvar (const char *name)
2236 {
2237 struct internalvar *var;
2238
2239 var = lookup_only_internalvar (name);
2240 if (var)
2241 return var;
2242
2243 return create_internalvar (name);
2244 }
2245
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      /* Query the target for the current value; if it is unknown,
	 fall back to a void value.  */
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Materialize a value according to the variable's kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* A NULL type means "use the default int for GDBARCH".  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand out a copy, fetched eagerly, so callers never see a lazy
	 value backed by state that may go away.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (_("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2334
2335 int
2336 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2337 {
2338 if (var->kind == INTERNALVAR_INTEGER)
2339 {
2340 *result = var->u.integer.val;
2341 return 1;
2342 }
2343
2344 if (var->kind == INTERNALVAR_VALUE)
2345 {
2346 struct type *type = check_typedef (value_type (var->u.value));
2347
2348 if (type->code () == TYPE_CODE_INT)
2349 {
2350 *result = value_as_long (var->u.value);
2351 return 1;
2352 }
2353 }
2354
2355 return 0;
2356 }
2357
2358 static int
2359 get_internalvar_function (struct internalvar *var,
2360 struct internal_function **result)
2361 {
2362 switch (var->kind)
2363 {
2364 case INTERNALVAR_FUNCTION:
2365 *result = var->u.fn.function;
2366 return 1;
2367
2368 default:
2369 return 0;
2370 }
2371 }
2372
/* Store NEWVAL into a component of internal variable VAR: the bytes
   starting OFFSET into VAR's contents, or, if BITSIZE is non-zero,
   the bitfield of BITSIZE bits at bit position BITPOS.  Only value
   variables can have components written to.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value).data ();
      arch = get_value_arch (var->u.value);
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      if (bitsize)
	/* Bitfield write: pack NEWVAL as an integer into the field.  */
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	/* Whole-bytes write; OFFSET is in addressable units.  */
	memcpy (addr + offset * unit_size, value_contents (newval).data (),
		value_type (newval)->length ());
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (_("set_internalvar_component"));
    }
}
2402
/* Overwrite the contents of internal variable VAR with (a copy of)
   VAL.  Canonical convenience functions cannot be overwritten.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (check_typedef (value_type (val))->code ())
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      struct value *copy = value_copy (val);
      copy->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (copy))
	value_fetch_lazy (copy);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      new_data.value = release_value (copy).release ();

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2463
2464 void
2465 set_internalvar_integer (struct internalvar *var, LONGEST l)
2466 {
2467 /* Clean up old contents. */
2468 clear_internalvar (var);
2469
2470 var->kind = INTERNALVAR_INTEGER;
2471 var->u.integer.type = NULL;
2472 var->u.integer.val = l;
2473 }
2474
2475 void
2476 set_internalvar_string (struct internalvar *var, const char *string)
2477 {
2478 /* Clean up old contents. */
2479 clear_internalvar (var);
2480
2481 var->kind = INTERNALVAR_STRING;
2482 var->u.string = xstrdup (string);
2483 }
2484
2485 static void
2486 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2487 {
2488 /* Clean up old contents. */
2489 clear_internalvar (var);
2490
2491 var->kind = INTERNALVAR_FUNCTION;
2492 var->u.fn.function = f;
2493 var->u.fn.canonical = 1;
2494 /* Variables installed here are always the canonical version. */
2495 }
2496
2497 void
2498 clear_internalvar (struct internalvar *var)
2499 {
2500 /* Clean up old contents. */
2501 switch (var->kind)
2502 {
2503 case INTERNALVAR_VALUE:
2504 value_decref (var->u.value);
2505 break;
2506
2507 case INTERNALVAR_STRING:
2508 xfree (var->u.string);
2509 break;
2510
2511 default:
2512 break;
2513 }
2514
2515 /* Reset to void kind. */
2516 var->kind = INTERNALVAR_VOID;
2517 }
2518
/* Return the name of internal variable VAR, without the leading '$'.
   The string remains owned by VAR.  */
const char *
internalvar_name (const struct internalvar *var)
{
  return var->name;
}
2524
2525 static struct internal_function *
2526 create_internal_function (const char *name,
2527 internal_function_fn handler, void *cookie)
2528 {
2529 struct internal_function *ifn = XNEW (struct internal_function);
2530
2531 ifn->name = xstrdup (name);
2532 ifn->handler = handler;
2533 ifn->cookie = cookie;
2534 return ifn;
2535 }
2536
/* Return the name of the internal function that VAL refers to.  VAL
   must be an lval_internalvar whose variable holds an internal
   function; this is asserted, not checked.  */
const char *
value_internal_function_name (struct value *val)
{
  struct internal_function *ifn;
  int result;

  gdb_assert (VALUE_LVAL (val) == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
  gdb_assert (result);

  return ifn->name;
}
2549
/* Invoke the internal (convenience) function referred to by FUNC with
   the ARGC arguments in ARGV, and return its result.  FUNC must be an
   lval_internalvar holding an internal function; this is asserted.
   GDBARCH and LANGUAGE provide evaluation context for the handler.  */
struct value *
call_internal_function (struct gdbarch *gdbarch,
			const struct language_defn *language,
			struct value *func, int argc, struct value **argv)
{
  struct internal_function *ifn;
  int result;

  gdb_assert (VALUE_LVAL (func) == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
  gdb_assert (result);

  return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
}
2564
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */
static void
function_command (const char *command, int from_tty)
{
  /* Do nothing.  */
}
2574
/* Helper function that does the work for add_internal_function.
   Creates the internal function NAME (handler HANDLER, user data
   COOKIE), binds it to the convenience variable $NAME, and registers
   a placeholder "function NAME" sub-command documented by DOC.
   Returns the new command so callers can adjust ownership flags.  */

static struct cmd_list_element *
do_add_internal_function (const char *name, const char *doc,
			  internal_function_fn handler, void *cookie)
{
  struct internal_function *ifn;
  struct internalvar *var = lookup_internalvar (name);

  ifn = create_internal_function (name, handler, cookie);
  set_internalvar_function (var, ifn);

  return add_cmd (name, no_class, function_command, doc, &functionlist);
}
2589
/* See value.h.  Variant for statically-allocated NAME and DOC; the
   created command does not take ownership of either string.  */

void
add_internal_function (const char *name, const char *doc,
		       internal_function_fn handler, void *cookie)
{
  do_add_internal_function (name, doc, handler, cookie);
}
2598
/* See value.h.  Variant that transfers ownership of heap-allocated
   NAME and DOC to the created command.  The release() calls happen
   only after add_cmd has succeeded, and the *_allocated flags tell
   the command machinery to free the strings when the command is
   destroyed.  */

void
add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
		       gdb::unique_xmalloc_ptr<char> &&doc,
		       internal_function_fn handler, void *cookie)
{
  struct cmd_list_element *cmd
    = do_add_internal_function (name.get (), doc.get (), handler, cookie);
  doc.release ();
  cmd->doc_allocated = 1;
  name.release ();
  cmd->name_allocated = 1;
}
2613
2614 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2615 prevent cycles / duplicates. */
2616
2617 void
2618 preserve_one_value (struct value *value, struct objfile *objfile,
2619 htab_t copied_types)
2620 {
2621 if (value->type->objfile_owner () == objfile)
2622 value->type = copy_type_recursive (value->type, copied_types);
2623
2624 if (value->enclosing_type->objfile_owner () == objfile)
2625 value->enclosing_type = copy_type_recursive (value->enclosing_type,
2626 copied_types);
2627 }
2628
2629 /* Likewise for internal variable VAR. */
2630
2631 static void
2632 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2633 htab_t copied_types)
2634 {
2635 switch (var->kind)
2636 {
2637 case INTERNALVAR_INTEGER:
2638 if (var->u.integer.type
2639 && var->u.integer.type->objfile_owner () == objfile)
2640 var->u.integer.type
2641 = copy_type_recursive (var->u.integer.type, copied_types);
2642 break;
2643
2644 case INTERNALVAR_VALUE:
2645 preserve_one_value (var->u.value, objfile, copied_types);
2646 break;
2647 }
2648 }
2649
/* Make sure that all types and values referenced by VAROBJ are updated before
   OBJFILE is discarded.  COPIED_TYPES is used to prevent cycles and
   duplicates.  */

static void
preserve_one_varobj (struct varobj *varobj, struct objfile *objfile,
		     htab_t copied_types)
{
  /* Copy the varobj's own type out of the objfile if needed...  */
  if (varobj->type->is_objfile_owned ()
      && varobj->type->objfile_owner () == objfile)
    {
      varobj->type
	= copy_type_recursive (varobj->type, copied_types);
    }

  /* ...and likewise the types referenced by its cached value.  */
  if (varobj->value != nullptr)
    preserve_one_value (varobj->value.get (), objfile, copied_types);
}
2668
/* Update the internal variables and value history when OBJFILE is
   discarded; we must copy the types out of the objfile.  New global types
   will be created for every convenience variable which currently points to
   this objfile's types, and the convenience variables will be adjusted to
   use the new global types.  */

void
preserve_values (struct objfile *objfile)
{
  struct internalvar *var;

  /* Create the hash table.  We allocate on the objfile's obstack, since
     it is soon to be deleted.  */
  htab_up copied_types = create_copied_types_hash ();

  /* Preserve the value history...  */
  for (const value_ref_ptr &item : value_history)
    preserve_one_value (item.get (), objfile, copied_types.get ());

  /* ...all convenience variables...  */
  for (var = internalvars; var; var = var->next)
    preserve_one_internalvar (var, objfile, copied_types.get ());

  /* For the remaining varobj, check that none has type owned by OBJFILE.  */
  all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj)
    {
      preserve_one_varobj (varobj, objfile,
			   copied_types.get ());
    });

  /* ...and any values held by extension languages (e.g. Python).  */
  preserve_ext_lang_values (objfile, copied_types.get ());
}
2699
/* Print every convenience variable and its current value, one per
   line; apparently the implementation of "show convenience"
   (registration is not visible in this chunk -- confirm).  If no
   variables exist, print a short how-to instead.  */
static void
show_convenience (const char *ignore, int from_tty)
{
  struct gdbarch *gdbarch = get_current_arch ();
  struct internalvar *var;
  /* Set once we have printed at least one variable, so the help text
     below is only shown for an empty list.  */
  int varseen = 0;
  struct value_print_options opts;

  get_user_print_options (&opts);
  for (var = internalvars; var; var = var->next)
    {

      if (!varseen)
	{
	  varseen = 1;
	}
      gdb_printf (("$%s = "), var->name);

      /* Evaluating the variable may fail (e.g. a make-value hook may
	 throw); report the error inline rather than aborting the
	 whole listing.  */
      try
	{
	  struct value *val;

	  val = value_of_internalvar (gdbarch, var);
	  value_print (val, gdb_stdout, &opts);
	}
      catch (const gdb_exception_error &ex)
	{
	  fprintf_styled (gdb_stdout, metadata_style.style (),
			  _("<error: %s>"), ex.what ());
	}

      gdb_printf (("\n"));
    }
  if (!varseen)
    {
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist).  */
      gdb_printf (_("No debugger convenience variables now defined.\n"
		    "Convenience variables have "
		    "names starting with \"$\";\n"
		    "use \"set\" as in \"set "
		    "$foo = 5\" to define them.\n"));
    }
}
2746 \f
2747
/* See value.h.  Wrap WORKER in a new non-modifiable lval_xcallable
   value; ownership of the worker transfers to the value.  */

struct value *
value_from_xmethod (xmethod_worker_up &&worker)
{
  struct value *v;

  v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
  v->lval = lval_xcallable;
  /* The value takes over ownership of the released worker.  */
  v->location.xm_worker = worker.release ();
  v->modifiable = 0;

  return v;
}
2762
/* Return the type of the result of TYPE_CODE_XMETHOD value METHOD.
   ARGV[0] is the object the method is invoked on; the remaining
   elements are the method arguments.  */

struct type *
result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
{
  gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
	      && method->lval == lval_xcallable && !argv.empty ());

  return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
}
2773
/* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD.
   ARGV[0] is the object the method is invoked on; the remaining
   elements are the method arguments.  */

struct value *
call_xmethod (struct value *method, gdb::array_view<value *> argv)
{
  gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
	      && method->lval == lval_xcallable && !argv.empty ());

  return method->location.xm_worker->invoke (argv[0], argv.slice (1));
}
2784 \f
/* Extract a value as a C number (either long or double).
   Knows how to convert fixed values to double, or
   floating values to long.
   Does not deallocate the value.  */

LONGEST
value_as_long (struct value *val)
{
  /* This coerces arrays and functions, which is necessary (e.g.
     in disassemble_command).  It also dereferences references, which
     I suspect is the most logical thing to do.  */
  val = coerce_array (val);
  return unpack_long (value_type (val), value_contents (val).data ());
}
2799
/* Extract a value as a C pointer.  Does not deallocate the value.
   Note that val's type may not actually be a pointer; value_as_long
   handles all the cases.  */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = value_type (val)->arch ();

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* Deliberately disabled alternative -- see the rationale below.  */
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (value_type (val)->code () == TYPE_CODE_FUNC
      || value_type (val)->code () == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  if (!value_type (val)->is_pointer_or_reference ()
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val).data ());

  return unpack_long (value_type (val), value_contents (val).data ());
#endif
}
2905 \f
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  /* For fixed-point types, unpack the underlying base integer type;
     the scaling is handled in the FIXED_POINT case below.  */
  if (is_fixed_point_type (type))
    type = type->fixed_point_type_base_type ();

  enum bfd_endian byte_order = type_byte_order (type);
  enum type_code code = type->code ();
  int len = type->length ();
  int nosign = type->is_unsigned ();

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Peel the typedef and retry with the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      {
	LONGEST result;

	if (type->bit_size_differs_p ())
	  {
	    /* The value occupies fewer bits than its storage; extract
	       just the significant bit range.  */
	    unsigned bit_off = type->bit_offset ();
	    unsigned bit_size = type->bit_size ();
	    if (bit_size == 0)
	      {
		/* unpack_bits_as_long doesn't handle this case the
		   way we'd like, so handle it here.  */
		result = 0;
	      }
	    else
	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
	  }
	else
	  {
	    if (nosign)
	      result = extract_unsigned_integer (valaddr, len, byte_order);
	    else
	      result = extract_signed_integer (valaddr, len, byte_order);
	  }
	/* Range types may store values with a bias applied.  */
	if (code == TYPE_CODE_RANGE)
	  result += type->bounds ()->bias;
	return result;
      }

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return target_float_to_longest (valaddr, type);

    case TYPE_CODE_FIXED_POINT:
      {
	/* Read the raw integer as an exact rational scaled by the
	   type's factor, then truncate toward zero.  */
	gdb_mpq vq;
	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
			     byte_order, nosign,
			     type->fixed_point_scaling_factor ());

	gdb_mpz vz;
	mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
	return vz.as_integer<LONGEST> ();
      }

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
}
2997
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
  return unpack_long (type, valaddr);
}
3018
3019 bool
3020 is_floating_value (struct value *val)
3021 {
3022 struct type *type = check_typedef (value_type (val));
3023
3024 if (is_floating_type (type))
3025 {
3026 if (!target_float_is_valid (value_contents (val).data (), type))
3027 error (_("Invalid floating value found in program."));
3028 return true;
3029 }
3030
3031 return false;
3032 }
3033
3034 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (type->field (fieldno).loc_kind ())
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info gave us the member's address directly.  */
      retval = value_at_lazy (type->field (fieldno).type (),
			      type->field (fieldno).loc_physaddr ());
      break;
    case FIELD_LOC_KIND_PHYSNAME:
      {
	/* Only the mangled name is recorded; look the symbol up.  */
	const char *phys_name = type->field (fieldno).loc_physname ();
	/* type->field (fieldno).name (); */
	struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

	if (sym.symbol == NULL)
	  {
	    /* With some compilers, e.g. HP aCC, static data members are
	       reported as non-debuggable symbols.  */
	    struct bound_minimal_symbol msym
	      = lookup_minimal_symbol (phys_name, NULL, NULL);
	    struct type *field_type = type->field (fieldno).type ();

	    if (!msym.minsym)
	      /* No symbol at all: report the field as optimized out.  */
	      retval = allocate_optimized_out_value (field_type);
	    else
	      retval = value_at_lazy (field_type, msym.value_address ());
	  }
	else
	  retval = value_of_variable (sym.symbol, sym.block);
	break;
      }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
3078
/* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
   You have to be careful here, since the size of the data area for the value
   is set by the length of the enclosing type.  So if NEW_ENCL_TYPE is bigger
   than the old enclosing type, you have to allocate more space for the
   data.  */

void
set_value_enclosing_type (struct value *val, struct type *new_encl_type)
{
  if (new_encl_type->length () > value_enclosing_type (val)->length ())
    {
      /* Grow the contents buffer to the new type's length; existing
	 bytes are preserved by xrealloc.  */
      check_type_length_before_alloc (new_encl_type);
      val->contents
	.reset ((gdb_byte *) xrealloc (val->contents.release (),
				       new_encl_type->length ()));
    }

  val->enclosing_type = new_encl_type;
}
3098
3099 /* Given a value ARG1 (offset by OFFSET bytes)
3100 of a struct or union type ARG_TYPE,
3101 extract and return the value of one of its (non-static) fields.
3102 FIELDNO says which field. */
3103
struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = arg_type->field (fieldno).type ();

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  /* Four cases below: packed bitfield, base-class subobject, dynamic
     data member, and plain data member.  */
  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
      LONGEST container_bitsize = type->length () * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      /* Prefer container-aligned access when the field fits in one
	 LONGEST-sized container; otherwise fall back to byte
	 alignment.  */
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && type->length () <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      /* Only fetch now if the parent already has its contents; a lazy
	 parent's bitfield stays lazy.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1).data (),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = arg_type->field (fieldno).loc_bitpos () / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   value_enclosing_type (arg1)->length ());
	}
      /* The new value keeps the whole enclosing object; only the
	 visible type and embedded offset change.  */
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (arg_type->field (fieldno).loc_bitpos ()
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  set_value_component_location (v, arg1);
  return v;
}
3223
3224 /* Given a value ARG1 of a struct or union type,
3225 extract and return the value of one of its (non-static) fields.
3226 FIELDNO says which field. */
3227
3228 struct value *
3229 value_field (struct value *arg1, int fieldno)
3230 {
3231 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3232 }
3233
3234 /* Return a non-virtual function as a value.
3235 F is the list of member functions which contains the desired method.
3236 J is an index into F which provides the desired method.
3237
3238 We only use the symbol for its address, so be happy with either a
3239 full symbol or a minimal symbol. */
3240
struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Prefer a full symbol for the method's physical (mangled) name;
     fall back to a minimal symbol if debug info is absent.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym == nullptr)
    {
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  VALUE_LVAL (v) = lval_memory;
  if (sym)
    {
      /* Full symbol: the entry PC of the symbol's block is the
	 function's address.  */
      set_value_address (v, sym->value_block ()->entry_pc ());
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = objfile->arch ();

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, msym.value_address (),
	    current_inferior ()->top_target ()));
    }

  if (arg1p)
    {
      /* Re-cast the object pointer when the method is found on a
	 different (e.g. base) type than the object's.  */
      if (type != value_type (*arg1p))
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset; */
    }

  return v;
}
3291
3292 \f
3293
3294 /* See value.h. */
3295
LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (field_type);
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      /* BITSIZE of 0 means "not a bitfield": read the whole type.  */
      bytes_read = field_type->length ();
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  /* Shift the field's least significant bit down to bit 0; the bit
     numbering within the read bytes depends on byte order.  */
  if (byte_order == BFD_ENDIAN_BIG)
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!field_type->is_unsigned ())
	{
	  /* valmask ^ (valmask >> 1) isolates the field's sign bit.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3349
3350 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3351 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3352 ORIGINAL_VALUE, which must not be NULL. See
3353 unpack_value_bits_as_long for more details. */
3354
3355 int
3356 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3357 LONGEST embedded_offset, int fieldno,
3358 const struct value *val, LONGEST *result)
3359 {
3360 int bitpos = type->field (fieldno).loc_bitpos ();
3361 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3362 struct type *field_type = type->field (fieldno).type ();
3363 int bit_offset;
3364
3365 gdb_assert (val != NULL);
3366
3367 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3368 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3369 || !value_bits_available (val, bit_offset, bitsize))
3370 return 0;
3371
3372 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3373 bitpos, bitsize);
3374 return 1;
3375 }
3376
3377 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3378 object at VALADDR. See unpack_bits_as_long for more details. */
3379
3380 LONGEST
3381 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3382 {
3383 int bitpos = type->field (fieldno).loc_bitpos ();
3384 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3385 struct type *field_type = type->field (fieldno).type ();
3386
3387 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3388 }
3389
3390 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3391 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3392 the contents in DEST_VAL, zero or sign extending if the type of
3393 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3394 VAL. If the VAL's contents required to extract the bitfield from
3395 are unavailable/optimized out, DEST_VAL is correspondingly
3396 marked unavailable/optimized out. */
3397
void
unpack_value_bitfield (struct value *dest_val,
		       LONGEST bitpos, LONGEST bitsize,
		       const gdb_byte *valaddr, LONGEST embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = value_type (dest_val);

  byte_order = type_byte_order (field_type);

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See allocate_optimized_out_value.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (value_contents_raw (dest_val).data (),
			    field_type->length (), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  /* On big-endian targets the field occupies the low-order end of
     DEST_VAL's storage, i.e. the highest bit offsets.  */
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
3436
3437 /* Return a new value with type TYPE, which is FIELDNO field of the
3438 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3439 of VAL. If the VAL's contents required to extract the bitfield
3440 from are unavailable/optimized out, the new value is
3441 correspondingly marked unavailable/optimized out. */
3442
3443 struct value *
3444 value_field_bitfield (struct type *type, int fieldno,
3445 const gdb_byte *valaddr,
3446 LONGEST embedded_offset, const struct value *val)
3447 {
3448 int bitpos = type->field (fieldno).loc_bitpos ();
3449 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3450 struct value *res_val = allocate_value (type->field (fieldno).type ());
3451
3452 unpack_value_bitfield (res_val, bitpos, bitsize,
3453 valaddr, embedded_offset, val);
3454
3455 return res_val;
3456 }
3457
3458 /* Modify the value of a bitfield. ADDR points to a block of memory in
3459 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3460 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3461 indicate which bits (in target bit order) comprise the bitfield.
3462 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3463 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3464
void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  /* Mask with exactly BITSIZE low-order bits set (contract above
     guarantees 0 < BITSIZE <= bits-in-LONGEST, so the shift is
     well defined).  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then OR in the (truncated) new
     value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3509 \f
3510 /* Pack NUM into BUF using a target format of TYPE. */
3511
void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = type_byte_order (type);
  LONGEST len;

  type = check_typedef (type);
  len = type->length ();

  switch (type->code ())
    {
    case TYPE_CODE_RANGE:
      /* Range types store values biased; remove the bias before
	 packing, then handle like an ordinary integer.  */
      num -= type->bounds ()->bias;
      /* Fall through.  */
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_MEMBERPTR:
      /* Some types use fewer bits than their storage occupies (and
	 possibly at a nonzero bit offset); mask and shift to match.  */
      if (type->bit_size_differs_p ())
	{
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      /* Pointers/references go through the address-packing path so
	 architecture-specific pointer representations are honored.  */
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     type->code ());
    }
}
3558
3559
3560 /* Pack NUM into BUF using a target format of TYPE. */
3561
/* Unsigned counterpart of pack_long: same dispatch on type code, but
   stores via the unsigned integer/float entry points and applies no
   range bias.  */
static void
pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
{
  LONGEST len;
  enum bfd_endian byte_order;

  type = check_typedef (type);
  len = type->length ();
  byte_order = type_byte_order (type);

  switch (type->code ())
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      /* Honor types whose value occupies fewer bits than their
	 storage (mask to the value bits, shift to the bit offset).  */
      if (type->bit_size_differs_p ())
	{
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_unsigned_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_ulongest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered "
	       "for unsigned integer constant."),
	     type->code ());
    }
}
3608
3609
3610 /* Create a value of type TYPE that is zero, and return it. */
3611
3612 struct value *
3613 value_zero (struct type *type, enum lval_type lv)
3614 {
3615 struct value *val = allocate_value_lazy (type);
3616
3617 VALUE_LVAL (val) = (lv == lval_computed ? not_lval : lv);
3618 val->is_zero = true;
3619 return val;
3620 }
3621
3622 /* Convert C numbers into newly allocated values. */
3623
3624 struct value *
3625 value_from_longest (struct type *type, LONGEST num)
3626 {
3627 struct value *val = allocate_value (type);
3628
3629 pack_long (value_contents_raw (val).data (), type, num);
3630 return val;
3631 }
3632
3633
3634 /* Convert C unsigned numbers into newly allocated values. */
3635
3636 struct value *
3637 value_from_ulongest (struct type *type, ULONGEST num)
3638 {
3639 struct value *val = allocate_value (type);
3640
3641 pack_unsigned_long (value_contents_raw (val).data (), type, num);
3642
3643 return val;
3644 }
3645
3646
3647 /* Create a value representing a pointer of type TYPE to the address
3648 ADDR. */
3649
3650 struct value *
3651 value_from_pointer (struct type *type, CORE_ADDR addr)
3652 {
3653 struct value *val = allocate_value (type);
3654
3655 store_typed_address (value_contents_raw (val).data (),
3656 check_typedef (type), addr);
3657 return val;
3658 }
3659
3660 /* Create and return a value object of TYPE containing the value D. The
3661 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3662 it is converted to target format. */
3663
3664 struct value *
3665 value_from_host_double (struct type *type, double d)
3666 {
3667 struct value *value = allocate_value (type);
3668 gdb_assert (type->code () == TYPE_CODE_FLT);
3669 target_float_from_host_double (value_contents_raw (value).data (),
3670 value_type (value), d);
3671 return value;
3672 }
3673
3674 /* Create a value of type TYPE whose contents come from VALADDR, if it
3675 is non-null, and whose memory address (in the inferior) is
3676 ADDRESS. The type of the created value may differ from the passed
3677 type TYPE. Make sure to retrieve values new type after this call.
3678 Note that TYPE is not passed through resolve_dynamic_type; this is
3679 a special API intended for use only by Ada. */
3680
3681 struct value *
3682 value_from_contents_and_address_unresolved (struct type *type,
3683 const gdb_byte *valaddr,
3684 CORE_ADDR address)
3685 {
3686 struct value *v;
3687
3688 if (valaddr == NULL)
3689 v = allocate_value_lazy (type);
3690 else
3691 v = value_from_contents (type, valaddr);
3692 VALUE_LVAL (v) = lval_memory;
3693 set_value_address (v, address);
3694 return v;
3695 }
3696
3697 /* Create a value of type TYPE whose contents come from VALADDR, if it
3698 is non-null, and whose memory address (in the inferior) is
3699 ADDRESS. The type of the created value may differ from the passed
3700 type TYPE. Make sure to retrieve values new type after this call. */
3701
struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address)
{
  /* Resolve any dynamic properties of TYPE using the supplied
     contents (if any) and ADDRESS; the value is created with the
     resolved type.  */
  gdb::array_view<const gdb_byte> view;
  if (valaddr != nullptr)
    view = gdb::make_array_view (valaddr, type->length ());
  struct type *resolved_type = resolve_dynamic_type (type, view, address);
  struct type *resolved_type_no_typedef = check_typedef (resolved_type);
  struct value *v;

  if (valaddr == NULL)
    v = allocate_value_lazy (resolved_type);
  else
    v = value_from_contents (resolved_type, valaddr);
  /* A resolved constant data location overrides the caller-supplied
     address.  */
  if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
      && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
    address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
  VALUE_LVAL (v) = lval_memory;
  set_value_address (v, address);
  return v;
}
3725
3726 /* Create a value of type TYPE holding the contents CONTENTS.
3727 The new value is `not_lval'. */
3728
3729 struct value *
3730 value_from_contents (struct type *type, const gdb_byte *contents)
3731 {
3732 struct value *result;
3733
3734 result = allocate_value (type);
3735 memcpy (value_contents_raw (result).data (), contents, type->length ());
3736 return result;
3737 }
3738
3739 /* Extract a value from the history file. Input will be of the form
3740 $digits or $$digits. See block comment above 'write_dollar_variable'
3741 for details. */
3742
struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  /* LEN counts the '$' or '$$' prefix; anything not starting with
     '$' is not a history reference at all.  */
  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  */
  if (h[1] == '$')
    {
      /* "$$N" counts backwards from the end of the history, hence the
	 negated index.  */
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  /* NOTE(review): this assumes *ENDP initially points at H —
	     confirm against callers.  */
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3801
3802 /* Get the component value (offset by OFFSET bytes) of a struct or
3803 union WHOLE. Component's type is TYPE. */
3804
3805 struct value *
3806 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3807 {
3808 struct value *v;
3809
3810 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3811 v = allocate_value_lazy (type);
3812 else
3813 {
3814 v = allocate_value (type);
3815 value_contents_copy (v, value_embedded_offset (v),
3816 whole, value_embedded_offset (whole) + offset,
3817 type_length_units (type));
3818 }
3819 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3820 set_value_component_location (v, whole);
3821
3822 return v;
3823 }
3824
3825 /* See value.h. */
3826
struct value *
value_from_component_bitsize (struct value *whole, struct type *type,
			      LONGEST bit_offset, LONGEST bit_length)
{
  gdb_assert (!value_lazy (whole));

  /* Preserve lvalue-ness if possible.  This is needed to avoid
     array-printing failures (including crashes) when printing Ada
     arrays in programs compiled with -fgnat-encodings=all.  */
  if ((bit_offset % TARGET_CHAR_BIT) == 0
      && (bit_length % TARGET_CHAR_BIT) == 0
      && bit_length == TARGET_CHAR_BIT * type->length ())
    return value_from_component (whole, type, bit_offset / TARGET_CHAR_BIT);

  struct value *v = allocate_value (type);

  /* On big-endian scalar targets the significant bits sit at the
     high end of the destination, so shift the destination offset.  */
  LONGEST dst_offset = TARGET_CHAR_BIT * value_embedded_offset (v);
  if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG)
    dst_offset += TARGET_CHAR_BIT * type->length () - bit_length;

  value_contents_copy_raw_bitwise (v, dst_offset,
				   whole,
				   TARGET_CHAR_BIT
				   * value_embedded_offset (whole)
				   + bit_offset,
				   bit_length);
  return v;
}
3855
3856 struct value *
3857 coerce_ref_if_computed (const struct value *arg)
3858 {
3859 const struct lval_funcs *funcs;
3860
3861 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3862 return NULL;
3863
3864 if (value_lval_const (arg) != lval_computed)
3865 return NULL;
3866
3867 funcs = value_computed_funcs (arg);
3868 if (funcs->coerce_ref == NULL)
3869 return NULL;
3870
3871 return funcs->coerce_ref (arg);
3872 }
3873
3874 /* Look at value.h for description. */
3875
struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      struct value *original_value,
			      CORE_ADDR original_value_address)
{
  gdb_assert (original_type->is_pointer_or_reference ());

  /* Resolve any dynamic properties of the pointed-to type at the
     target address (no contents available here, hence the empty
     view).  */
  struct type *original_target_type = original_type->target_type ();
  gdb::array_view<const gdb_byte> view;
  struct type *resolved_original_target_type
    = resolve_dynamic_type (original_target_type, view,
			    original_value_address);

  /* Re-adjust type.  */
  deprecated_set_value_type (value, resolved_original_target_type);

  /* Add embedding info.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3900
/* Dereference ARG if it is a reference; otherwise return it
   unchanged.  The result's type is readjusted for dynamic types and
   possible derived-class objects.  */

struct value *
coerce_ref (struct value *arg)
{
  struct type *value_type_arg_tmp = check_typedef (value_type (arg));
  struct value *retval;
  struct type *enc_type;

  /* Computed lvalues may supply their own coercion hook.  */
  retval = coerce_ref_if_computed (arg);
  if (retval)
    return retval;

  if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
    return arg;

  /* Use the enclosing type's target so any embedding information is
     preserved across the dereference.  */
  enc_type = check_typedef (value_enclosing_type (arg));
  enc_type = enc_type->target_type ();

  CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg).data ());
  retval = value_at_lazy (enc_type, addr);
  enc_type = value_type (retval);
  return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
				       arg, addr);
}
3924
3925 struct value *
3926 coerce_array (struct value *arg)
3927 {
3928 struct type *type;
3929
3930 arg = coerce_ref (arg);
3931 type = check_typedef (value_type (arg));
3932
3933 switch (type->code ())
3934 {
3935 case TYPE_CODE_ARRAY:
3936 if (!type->is_vector () && current_language->c_style_arrays_p ())
3937 arg = value_coerce_array (arg);
3938 break;
3939 case TYPE_CODE_FUNC:
3940 arg = value_coerce_function (arg);
3941 break;
3942 }
3943 return arg;
3944 }
3945 \f
3946
3947 /* Return the return value convention that will be used for the
3948 specified type. */
3949
3950 enum return_value_convention
3951 struct_return_convention (struct gdbarch *gdbarch,
3952 struct value *function, struct type *value_type)
3953 {
3954 enum type_code code = value_type->code ();
3955
3956 if (code == TYPE_CODE_ERROR)
3957 error (_("Function return type unknown."));
3958
3959 /* Probe the architecture for the return-value convention. */
3960 return gdbarch_return_value (gdbarch, function, value_type,
3961 NULL, NULL, NULL);
3962 }
3963
3964 /* Return true if the function returning the specified type is using
3965 the convention of returning structures in memory (passing in the
3966 address as a hidden first parameter). */
3967
3968 int
3969 using_struct_return (struct gdbarch *gdbarch,
3970 struct value *function, struct type *value_type)
3971 {
3972 if (value_type->code () == TYPE_CODE_VOID)
3973 /* A void return value is never in memory. See also corresponding
3974 code in "print_return_value". */
3975 return 0;
3976
3977 return (struct_return_convention (gdbarch, function, value_type)
3978 != RETURN_VALUE_REGISTER_CONVENTION);
3979 }
3980
3981 /* Set the initialized field in a value struct. */
3982
/* Record in VAL whether the value has been initialized; STATUS is
   nonzero for "initialized".  */
void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3988
3989 /* Return the initialized field in a value struct. */
3990
/* Return nonzero if VAL has been marked initialized (see
   set_value_initialized).  */
int
value_initialized (const struct value *val)
{
  return val->initialized;
}
3996
3997 /* Helper for value_fetch_lazy when the value is a bitfield. */
3998
/* Helper for value_fetch_lazy when the value is a bitfield: fetch the
   parent value if needed, then unpack the bits out of the parent's
   contents.  */
static void
value_fetch_lazy_bitfield (struct value *val)
{
  gdb_assert (value_bitsize (val) != 0);

  /* To read a lazy bitfield, read the entire enclosing value.  This
     prevents reading the same block of (possibly volatile) memory once
     per bitfield.  It would be even better to read only the containing
     word, but we have no way to record that just specific bits of a
     value have been fetched.  */
  struct value *parent = value_parent (val);

  if (value_lazy (parent))
    value_fetch_lazy (parent);

  /* VAL's offset locates the bitfield's byte within the parent.  */
  unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
			 value_contents_for_printing (parent).data (),
			 value_offset (val), parent);
}
4018
4019 /* Helper for value_fetch_lazy when the value is in memory. */
4020
4021 static void
4022 value_fetch_lazy_memory (struct value *val)
4023 {
4024 gdb_assert (VALUE_LVAL (val) == lval_memory);
4025
4026 CORE_ADDR addr = value_address (val);
4027 struct type *type = check_typedef (value_enclosing_type (val));
4028
4029 if (type->length ())
4030 read_value_memory (val, 0, value_stack (val),
4031 addr, value_contents_all_raw (val).data (),
4032 type_length_units (type));
4033 }
4034
4035 /* Helper for value_fetch_lazy when the value is in a register. */
4036
/* Helper for value_fetch_lazy when the value is in a register:
   repeatedly unwind through frames until a non-lazy (or non-register)
   value for the register is found, then copy its contents and
   availability metadata into VAL.  */
static void
value_fetch_lazy_register (struct value *val)
{
  frame_info_ptr next_frame;
  int regnum;
  struct type *type = check_typedef (value_type (val));
  struct value *new_val = val, *mark = value_mark ();

  /* Offsets are not supported here; lazy register values must
     refer to the entire register.  */
  gdb_assert (value_offset (val) == 0);

  /* Each iteration unwinds one frame level; the loop terminates when
     the register's value is no longer a lazy lval_register.  */
  while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
    {
      struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);

      next_frame = frame_find_by_id (next_frame_id);
      regnum = VALUE_REGNUM (new_val);

      gdb_assert (next_frame != NULL);

      /* Convertible register routines are used for multi-register
	 values and for interpretation in different types
	 (e.g. float or int from a double register).  Lazy
	 register values should have the register's natural type,
	 so they do not apply.  */
      gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
					       regnum, type));

      /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
	 Since a "->next" operation was performed when setting
	 this field, we do not need to perform a "next" operation
	 again when unwinding the register.  That's why
	 frame_unwind_register_value() is called here instead of
	 get_frame_register_value().  */
      new_val = frame_unwind_register_value (next_frame, regnum);

      /* If we get another lazy lval_register value, it means the
	 register is found by reading it from NEXT_FRAME's next frame.
	 frame_unwind_register_value should never return a value with
	 the frame id pointing to NEXT_FRAME.  If it does, it means we
	 either have two consecutive frames with the same frame id
	 in the frame chain, or some code is trying to unwind
	 behind get_prev_frame's back (e.g., a frame unwind
	 sniffer trying to unwind), bypassing its validations.  In
	 any case, it should always be an internal error to end up
	 in this situation.  */
      if (VALUE_LVAL (new_val) == lval_register
	  && value_lazy (new_val)
	  && VALUE_NEXT_FRAME_ID (new_val) == next_frame_id)
	internal_error (_("infinite loop while fetching a register"));
    }

  /* If it's still lazy (for instance, a saved register on the
     stack), fetch it.  */
  if (value_lazy (new_val))
    value_fetch_lazy (new_val);

  /* Copy the contents and the unavailability/optimized-out
     meta-data from NEW_VAL to VAL.  */
  set_value_lazy (val, 0);
  value_contents_copy (val, value_embedded_offset (val),
		       new_val, value_embedded_offset (new_val),
		       type_length_units (type));

  /* Under "set debug frame", dump where the value was found and its
     contents.  */
  if (frame_debug)
    {
      struct gdbarch *gdbarch;
      frame_info_ptr frame;
      frame = frame_find_by_id (VALUE_NEXT_FRAME_ID (val));
      frame = get_prev_frame_always (frame);
      regnum = VALUE_REGNUM (val);
      gdbarch = get_frame_arch (frame);

      string_file debug_file;
      gdb_printf (&debug_file,
		  "(frame=%d, regnum=%d(%s), ...) ",
		  frame_relative_level (frame), regnum,
		  user_reg_map_regnum_to_name (gdbarch, regnum));

      gdb_printf (&debug_file, "->");
      if (value_optimized_out (new_val))
	{
	  gdb_printf (&debug_file, " ");
	  val_print_optimized_out (new_val, &debug_file);
	}
      else
	{
	  int i;
	  gdb::array_view<const gdb_byte> buf = value_contents (new_val);

	  if (VALUE_LVAL (new_val) == lval_register)
	    gdb_printf (&debug_file, " register=%d",
			VALUE_REGNUM (new_val));
	  else if (VALUE_LVAL (new_val) == lval_memory)
	    gdb_printf (&debug_file, " address=%s",
			paddress (gdbarch,
				  value_address (new_val)));
	  else
	    gdb_printf (&debug_file, " computed");

	  gdb_printf (&debug_file, " bytes=");
	  gdb_printf (&debug_file, "[");
	  for (i = 0; i < register_size (gdbarch, regnum); i++)
	    gdb_printf (&debug_file, "%02x", buf[i]);
	  gdb_printf (&debug_file, "]");
	}

      frame_debug_printf ("%s", debug_file.c_str ());
    }

  /* Dispose of the intermediate values.  This prevents
     watchpoints from trying to watch the saved frame pointer.  */
  value_free_to_mark (mark);
}
4152
4153 /* Load the actual content of a lazy value. Fetch the data from the
4154 user's process and clear the lazy flag to indicate that the data in
4155 the buffer is valid.
4156
4157 If the value is zero-length, we avoid calling read_memory, which
4158 would abort. We mark the value as fetched anyway -- all 0 bytes of
4159 it. */
4160
4161 void
4162 value_fetch_lazy (struct value *val)
4163 {
4164 gdb_assert (value_lazy (val));
4165 allocate_value_contents (val);
4166 /* A value is either lazy, or fully fetched. The
4167 availability/validity is only established as we try to fetch a
4168 value. */
4169 gdb_assert (val->optimized_out.empty ());
4170 gdb_assert (val->unavailable.empty ());
4171 if (val->is_zero)
4172 {
4173 /* Nothing. */
4174 }
4175 else if (value_bitsize (val))
4176 value_fetch_lazy_bitfield (val);
4177 else if (VALUE_LVAL (val) == lval_memory)
4178 value_fetch_lazy_memory (val);
4179 else if (VALUE_LVAL (val) == lval_register)
4180 value_fetch_lazy_register (val);
4181 else if (VALUE_LVAL (val) == lval_computed
4182 && value_computed_funcs (val)->read != NULL)
4183 value_computed_funcs (val)->read (val);
4184 else
4185 internal_error (_("Unexpected lazy value type."));
4186
4187 set_value_lazy (val, 0);
4188 }
4189
4190 /* Implementation of the convenience function $_isvoid. */
4191
4192 static struct value *
4193 isvoid_internal_fn (struct gdbarch *gdbarch,
4194 const struct language_defn *language,
4195 void *cookie, int argc, struct value **argv)
4196 {
4197 int ret;
4198
4199 if (argc != 1)
4200 error (_("You must provide one argument for $_isvoid."));
4201
4202 ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
4203
4204 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4205 }
4206
4207 /* Implementation of the convenience function $_creal. Extracts the
4208 real part from a complex number. */
4209
4210 static struct value *
4211 creal_internal_fn (struct gdbarch *gdbarch,
4212 const struct language_defn *language,
4213 void *cookie, int argc, struct value **argv)
4214 {
4215 if (argc != 1)
4216 error (_("You must provide one argument for $_creal."));
4217
4218 value *cval = argv[0];
4219 type *ctype = check_typedef (value_type (cval));
4220 if (ctype->code () != TYPE_CODE_COMPLEX)
4221 error (_("expected a complex number"));
4222 return value_real_part (cval);
4223 }
4224
4225 /* Implementation of the convenience function $_cimag. Extracts the
4226 imaginary part from a complex number. */
4227
4228 static struct value *
4229 cimag_internal_fn (struct gdbarch *gdbarch,
4230 const struct language_defn *language,
4231 void *cookie, int argc,
4232 struct value **argv)
4233 {
4234 if (argc != 1)
4235 error (_("You must provide one argument for $_cimag."));
4236
4237 value *cval = argv[0];
4238 type *ctype = check_typedef (value_type (cval));
4239 if (ctype->code () != TYPE_CODE_COMPLEX)
4240 error (_("expected a complex number"));
4241 return value_imaginary_part (cval);
4242 }
4243
4244 #if GDB_SELF_TEST
4245 namespace selftests
4246 {
4247
4248 /* Test the ranges_contain function. */
4249
4250 static void
4251 test_ranges_contain ()
4252 {
4253 std::vector<range> ranges;
4254 range r;
4255
4256 /* [10, 14] */
4257 r.offset = 10;
4258 r.length = 5;
4259 ranges.push_back (r);
4260
4261 /* [20, 24] */
4262 r.offset = 20;
4263 r.length = 5;
4264 ranges.push_back (r);
4265
4266 /* [2, 6] */
4267 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4268 /* [9, 13] */
4269 SELF_CHECK (ranges_contain (ranges, 9, 5));
4270 /* [10, 11] */
4271 SELF_CHECK (ranges_contain (ranges, 10, 2));
4272 /* [10, 14] */
4273 SELF_CHECK (ranges_contain (ranges, 10, 5));
4274 /* [13, 18] */
4275 SELF_CHECK (ranges_contain (ranges, 13, 6));
4276 /* [14, 18] */
4277 SELF_CHECK (ranges_contain (ranges, 14, 5));
4278 /* [15, 18] */
4279 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4280 /* [16, 19] */
4281 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4282 /* [16, 21] */
4283 SELF_CHECK (ranges_contain (ranges, 16, 6));
4284 /* [21, 21] */
4285 SELF_CHECK (ranges_contain (ranges, 21, 1));
4286 /* [21, 25] */
4287 SELF_CHECK (ranges_contain (ranges, 21, 5));
4288 /* [26, 28] */
4289 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4290 }
4291
4292 /* Check that RANGES contains the same ranges as EXPECTED. */
4293
4294 static bool
4295 check_ranges_vector (gdb::array_view<const range> ranges,
4296 gdb::array_view<const range> expected)
4297 {
4298 return ranges == expected;
4299 }
4300
4301 /* Test the insert_into_bit_range_vector function. */
4302
4303 static void
4304 test_insert_into_bit_range_vector ()
4305 {
4306 std::vector<range> ranges;
4307
4308 /* [10, 14] */
4309 {
4310 insert_into_bit_range_vector (&ranges, 10, 5);
4311 static const range expected[] = {
4312 {10, 5}
4313 };
4314 SELF_CHECK (check_ranges_vector (ranges, expected));
4315 }
4316
4317 /* [10, 14] */
4318 {
4319 insert_into_bit_range_vector (&ranges, 11, 4);
4320 static const range expected = {10, 5};
4321 SELF_CHECK (check_ranges_vector (ranges, expected));
4322 }
4323
4324 /* [10, 14] [20, 24] */
4325 {
4326 insert_into_bit_range_vector (&ranges, 20, 5);
4327 static const range expected[] = {
4328 {10, 5},
4329 {20, 5},
4330 };
4331 SELF_CHECK (check_ranges_vector (ranges, expected));
4332 }
4333
4334 /* [10, 14] [17, 24] */
4335 {
4336 insert_into_bit_range_vector (&ranges, 17, 5);
4337 static const range expected[] = {
4338 {10, 5},
4339 {17, 8},
4340 };
4341 SELF_CHECK (check_ranges_vector (ranges, expected));
4342 }
4343
4344 /* [2, 8] [10, 14] [17, 24] */
4345 {
4346 insert_into_bit_range_vector (&ranges, 2, 7);
4347 static const range expected[] = {
4348 {2, 7},
4349 {10, 5},
4350 {17, 8},
4351 };
4352 SELF_CHECK (check_ranges_vector (ranges, expected));
4353 }
4354
4355 /* [2, 14] [17, 24] */
4356 {
4357 insert_into_bit_range_vector (&ranges, 9, 1);
4358 static const range expected[] = {
4359 {2, 13},
4360 {17, 8},
4361 };
4362 SELF_CHECK (check_ranges_vector (ranges, expected));
4363 }
4364
4365 /* [2, 14] [17, 24] */
4366 {
4367 insert_into_bit_range_vector (&ranges, 9, 1);
4368 static const range expected[] = {
4369 {2, 13},
4370 {17, 8},
4371 };
4372 SELF_CHECK (check_ranges_vector (ranges, expected));
4373 }
4374
4375 /* [2, 33] */
4376 {
4377 insert_into_bit_range_vector (&ranges, 4, 30);
4378 static const range expected = {2, 32};
4379 SELF_CHECK (check_ranges_vector (ranges, expected));
4380 }
4381 }
4382
4383 static void
4384 test_value_copy ()
4385 {
4386 type *type = builtin_type (current_inferior ()->gdbarch)->builtin_int;
4387
4388 /* Verify that we can copy an entirely optimized out value, that may not have
4389 its contents allocated. */
4390 value_ref_ptr val = release_value (allocate_optimized_out_value (type));
4391 value_ref_ptr copy = release_value (value_copy (val.get ()));
4392
4393 SELF_CHECK (value_entirely_optimized_out (val.get ()));
4394 SELF_CHECK (value_entirely_optimized_out (copy.get ()));
4395 }
4396
4397 } /* namespace selftests */
4398 #endif /* GDB_SELF_TEST */
4399
4400 void _initialize_values ();
4401 void
4402 _initialize_values ()
4403 {
4404 cmd_list_element *show_convenience_cmd
4405 = add_cmd ("convenience", no_class, show_convenience, _("\
4406 Debugger convenience (\"$foo\") variables and functions.\n\
4407 Convenience variables are created when you assign them values;\n\
4408 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4409 \n\
4410 A few convenience variables are given values automatically:\n\
4411 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4412 \"$__\" holds the contents of the last address examined with \"x\"."
4413 #ifdef HAVE_PYTHON
4414 "\n\n\
4415 Convenience functions are defined via the Python API."
4416 #endif
4417 ), &showlist);
4418 add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4419
4420 add_cmd ("values", no_set_class, show_values, _("\
4421 Elements of value history around item number IDX (or last ten)."),
4422 &showlist);
4423
4424 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4425 Initialize a convenience variable if necessary.\n\
4426 init-if-undefined VARIABLE = EXPRESSION\n\
4427 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4428 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4429 VARIABLE is already initialized."));
4430
4431 add_prefix_cmd ("function", no_class, function_command, _("\
4432 Placeholder command for showing help on convenience functions."),
4433 &functionlist, 0, &cmdlist);
4434
4435 add_internal_function ("_isvoid", _("\
4436 Check whether an expression is void.\n\
4437 Usage: $_isvoid (expression)\n\
4438 Return 1 if the expression is void, zero otherwise."),
4439 isvoid_internal_fn, NULL);
4440
4441 add_internal_function ("_creal", _("\
4442 Extract the real part of a complex number.\n\
4443 Usage: $_creal (expression)\n\
4444 Return the real part of a complex number, the type depends on the\n\
4445 type of a complex number."),
4446 creal_internal_fn, NULL);
4447
4448 add_internal_function ("_cimag", _("\
4449 Extract the imaginary part of a complex number.\n\
4450 Usage: $_cimag (expression)\n\
4451 Return the imaginary part of a complex number, the type depends on the\n\
4452 type of a complex number."),
4453 cimag_internal_fn, NULL);
4454
4455 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4456 class_support, &max_value_size, _("\
4457 Set maximum sized value gdb will load from the inferior."), _("\
4458 Show maximum sized value gdb will load from the inferior."), _("\
4459 Use this to control the maximum size, in bytes, of a value that gdb\n\
4460 will load from the inferior. Setting this value to 'unlimited'\n\
4461 disables checking.\n\
4462 Setting this does not invalidate already allocated values, it only\n\
4463 prevents future values, larger than this size, from being allocated."),
4464 set_max_value_size,
4465 show_max_value_size,
4466 &setlist, &showlist);
4467 set_show_commands vsize_limit
4468 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4469 &max_value_size, _("\
4470 Set the maximum number of bytes allowed in a variable-size object."), _("\
4471 Show the maximum number of bytes allowed in a variable-size object."), _("\
4472 Attempts to access an object whose size is not a compile-time constant\n\
4473 and exceeds this limit will cause an error."),
4474 NULL, NULL, &setlist, &showlist);
4475 deprecate_cmd (vsize_limit.set, "set max-value-size");
4476
4477 #if GDB_SELF_TEST
4478 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4479 selftests::register_test ("insert_into_bit_range_vector",
4480 selftests::test_insert_into_bit_range_vector);
4481 selftests::register_test ("value_copy", selftests::test_value_copy);
4482 #endif
4483 }
4484
4485 /* See value.h. */
4486
void
finalize_values ()
{
  /* Empty the global all_values list, dropping the references it
     holds so the values it kept alive can be destroyed.  */
  all_values.clear ();
}