6f176b0fad975edf2dd1154b0a66ec68c837b04b
[binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2023 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include <iterator>
44 #include <utility>
45 #include <vector>
46 #include "completer.h"
47 #include "gdbsupport/selftest.h"
48 #include "gdbsupport/array-view.h"
49 #include "cli/cli-style.h"
50 #include "expop.h"
51 #include "inferior.h"
52 #include "varobj.h"
53
54 /* Definition of a user function. */
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Invoked when the function is called.  */
  internal_function_fn handler;

  /* User data for the handler; passed back to it opaquely.  */
  void *cookie;
};
68
69 /* Returns true if the ranges defined by [offset1, offset1+len1) and
70 [offset2, offset2+len2) overlap. */
71
72 static int
73 ranges_overlap (LONGEST offset1, ULONGEST len1,
74 LONGEST offset2, ULONGEST len2)
75 {
76 LONGEST h, l;
77
78 l = std::max (offset1, offset2);
79 h = std::min (offset1 + len1, offset2 + len2);
80 return (l < h);
81 }
82
83 /* Returns true if RANGES contains any range that overlaps [OFFSET,
84 OFFSET+LENGTH). */
85
86 static int
87 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
88 ULONGEST length)
89 {
90 range what;
91
92 what.offset = offset;
93 what.length = length;
94
95 /* We keep ranges sorted by offset and coalesce overlapping and
96 contiguous ranges, so to check if a range list contains a given
97 range, we can do a binary search for the position the given range
98 would be inserted if we only considered the starting OFFSET of
99 ranges. We call that position I. Since we also have LENGTH to
100 care for (this is a range afterall), we need to check if the
101 _previous_ range overlaps the I range. E.g.,
102
103 R
104 |---|
105 |---| |---| |------| ... |--|
106 0 1 2 N
107
108 I=1
109
110 In the case above, the binary search would return `I=1', meaning,
111 this OFFSET should be inserted at position 1, and the current
112 position 1 should be pushed further (and before 2). But, `0'
113 overlaps with R.
114
115 Then we need to check if the I range overlaps the I range itself.
116 E.g.,
117
118 R
119 |---|
120 |---| |---| |-------| ... |--|
121 0 1 2 N
122
123 I=1
124 */
125
126
127 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
128
129 if (i > ranges.begin ())
130 {
131 const struct range &bef = *(i - 1);
132
133 if (ranges_overlap (bef.offset, bef.length, offset, length))
134 return 1;
135 }
136
137 if (i < ranges.end ())
138 {
139 const struct range &r = *i;
140
141 if (ranges_overlap (r.offset, r.length, offset, length))
142 return 1;
143 }
144
145 return 0;
146 }
147
/* NOTE(review): command list that appears to collect "function"-related
   commands; confirm where it is populated (registration code is not
   visible in this chunk).  */
static struct cmd_list_element *functionlist;
149
/* Destructor.  Computed lvalues may own a closure, which the
   lval_funcs get a chance to release; xcallable values own their
   worker object.  Other location kinds hold no extra resources
   here.  */
value::~value ()
{
  if (VALUE_LVAL (this) == lval_computed)
    {
      const struct lval_funcs *funcs = m_location.computed.funcs;

      /* Not every computed value provides a free_closure callback.  */
      if (funcs->free_closure)
	funcs->free_closure (this);
    }
  else if (VALUE_LVAL (this) == lval_xcallable)
    delete m_location.xm_worker;
}
162
163 /* See value.h. */
164
/* See value.h.  Return the architecture associated with VALUE's
   type.  */

struct gdbarch *
get_value_arch (const struct value *value)
{
  return value->type ()->arch ();
}
170
/* Return non-zero if the LENGTH bits starting at bit OFFSET within
   VALUE are available (i.e., not marked unavailable, and within the
   recorded bounds for history values).  VALUE must not be lazy.  */

int
value_bits_available (const struct value *value,
		      LONGEST offset, ULONGEST length)
{
  gdb_assert (!value->m_lazy);

  /* Don't pretend we have anything available there in the history beyond
     the boundaries of the value recorded.  It's not like inferior memory
     where there is actual stuff underneath.  */
  ULONGEST val_len = TARGET_CHAR_BIT * value_enclosing_type (value)->length ();
  return !((value->m_in_history
	    && (offset < 0 || offset + length > val_len))
	   || ranges_contain (value->m_unavailable, offset, length));
}
185
/* Byte-granularity wrapper around value_bits_available.  Checks first
   that scaling OFFSET and LENGTH to bits cannot overflow.  */

int
value_bytes_available (const struct value *value,
		       LONGEST offset, ULONGEST length)
{
  /* SIGN is the highest bit position a value may occupy and still be
     representable after multiplying by TARGET_CHAR_BIT; MASK covers
     the usable magnitude bits.  */
  ULONGEST sign = (1ULL << (sizeof (ULONGEST) * 8 - 1)) / TARGET_CHAR_BIT;
  ULONGEST mask = (sign << 1) - 1;

  /* Reject OFFSET or LENGTH values that would wrap when converted to
     bits, as well as combinations whose sum would overflow.  */
  if (offset != ((offset & mask) ^ sign) - sign
      || length != ((length & mask) ^ sign) - sign
      || (length > 0 && (~offset & (offset + length - 1) & sign) != 0))
    error (_("Integer overflow in data location calculation"));

  return value_bits_available (value,
			       offset * TARGET_CHAR_BIT,
			       length * TARGET_CHAR_BIT);
}
202
/* Return non-zero if any bit in the BIT_LENGTH bits starting at
   BIT_OFFSET within VALUE has been marked optimized out.  VALUE must
   not be lazy.  */

int
value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
{
  gdb_assert (!value->m_lazy);

  return ranges_contain (value->m_optimized_out, bit_offset, bit_length);
}
210
211 int
212 value_entirely_available (struct value *value)
213 {
214 /* We can only tell whether the whole value is available when we try
215 to read it. */
216 if (value->m_lazy)
217 value_fetch_lazy (value);
218
219 if (value->m_unavailable.empty ())
220 return 1;
221 return 0;
222 }
223
224 /* Returns true if VALUE is entirely covered by RANGES. If the value
225 is lazy, it'll be read now. Note that RANGE is a pointer to
226 pointer because reading the value might change *RANGE. */
227
228 static int
229 value_entirely_covered_by_range_vector (struct value *value,
230 const std::vector<range> &ranges)
231 {
232 /* We can only tell whether the whole value is optimized out /
233 unavailable when we try to read it. */
234 if (value->m_lazy)
235 value_fetch_lazy (value);
236
237 if (ranges.size () == 1)
238 {
239 const struct range &t = ranges[0];
240
241 if (t.offset == 0
242 && t.length == (TARGET_CHAR_BIT
243 * value_enclosing_type (value)->length ()))
244 return 1;
245 }
246
247 return 0;
248 }
249
/* Return non-zero if every bit of VALUE is unavailable.  Fetches
   VALUE if lazy.  */

int
value_entirely_unavailable (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->m_unavailable);
}
255
/* Return non-zero if every bit of VALUE is optimized out.  Fetches
   VALUE if lazy.  */

int
value_entirely_optimized_out (struct value *value)
{
  return value_entirely_covered_by_range_vector (value, value->m_optimized_out);
}
261
262 /* Insert into the vector pointed to by VECTORP the bit range starting of
263 OFFSET bits, and extending for the next LENGTH bits. */
264
265 static void
266 insert_into_bit_range_vector (std::vector<range> *vectorp,
267 LONGEST offset, ULONGEST length)
268 {
269 range newr;
270
271 /* Insert the range sorted. If there's overlap or the new range
272 would be contiguous with an existing range, merge. */
273
274 newr.offset = offset;
275 newr.length = length;
276
277 /* Do a binary search for the position the given range would be
278 inserted if we only considered the starting OFFSET of ranges.
279 Call that position I. Since we also have LENGTH to care for
280 (this is a range afterall), we need to check if the _previous_
281 range overlaps the I range. E.g., calling R the new range:
282
283 #1 - overlaps with previous
284
285 R
286 |-...-|
287 |---| |---| |------| ... |--|
288 0 1 2 N
289
290 I=1
291
292 In the case #1 above, the binary search would return `I=1',
293 meaning, this OFFSET should be inserted at position 1, and the
294 current position 1 should be pushed further (and become 2). But,
295 note that `0' overlaps with R, so we want to merge them.
296
297 A similar consideration needs to be taken if the new range would
298 be contiguous with the previous range:
299
300 #2 - contiguous with previous
301
302 R
303 |-...-|
304 |--| |---| |------| ... |--|
305 0 1 2 N
306
307 I=1
308
309 If there's no overlap with the previous range, as in:
310
311 #3 - not overlapping and not contiguous
312
313 R
314 |-...-|
315 |--| |---| |------| ... |--|
316 0 1 2 N
317
318 I=1
319
320 or if I is 0:
321
322 #4 - R is the range with lowest offset
323
324 R
325 |-...-|
326 |--| |---| |------| ... |--|
327 0 1 2 N
328
329 I=0
330
331 ... we just push the new range to I.
332
333 All the 4 cases above need to consider that the new range may
334 also overlap several of the ranges that follow, or that R may be
335 contiguous with the following range, and merge. E.g.,
336
337 #5 - overlapping following ranges
338
339 R
340 |------------------------|
341 |--| |---| |------| ... |--|
342 0 1 2 N
343
344 I=0
345
346 or:
347
348 R
349 |-------|
350 |--| |---| |------| ... |--|
351 0 1 2 N
352
353 I=1
354
355 */
356
357 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
358 if (i > vectorp->begin ())
359 {
360 struct range &bef = *(i - 1);
361
362 if (ranges_overlap (bef.offset, bef.length, offset, length))
363 {
364 /* #1 */
365 LONGEST l = std::min (bef.offset, offset);
366 LONGEST h = std::max (bef.offset + bef.length, offset + length);
367
368 bef.offset = l;
369 bef.length = h - l;
370 i--;
371 }
372 else if (offset == bef.offset + bef.length)
373 {
374 /* #2 */
375 bef.length += length;
376 i--;
377 }
378 else
379 {
380 /* #3 */
381 i = vectorp->insert (i, newr);
382 }
383 }
384 else
385 {
386 /* #4 */
387 i = vectorp->insert (i, newr);
388 }
389
390 /* Check whether the ranges following the one we've just added or
391 touched can be folded in (#5 above). */
392 if (i != vectorp->end () && i + 1 < vectorp->end ())
393 {
394 int removed = 0;
395 auto next = i + 1;
396
397 /* Get the range we just touched. */
398 struct range &t = *i;
399 removed = 0;
400
401 i = next;
402 for (; i < vectorp->end (); i++)
403 {
404 struct range &r = *i;
405 if (r.offset <= t.offset + t.length)
406 {
407 LONGEST l, h;
408
409 l = std::min (t.offset, r.offset);
410 h = std::max (t.offset + t.length, r.offset + r.length);
411
412 t.offset = l;
413 t.length = h - l;
414
415 removed++;
416 }
417 else
418 {
419 /* If we couldn't merge this one, we won't be able to
420 merge following ones either, since the ranges are
421 always sorted by OFFSET. */
422 break;
423 }
424 }
425
426 if (removed != 0)
427 vectorp->erase (next, next + removed);
428 }
429 }
430
/* Mark the LENGTH bits starting at bit OFFSET within VALUE as
   unavailable.  */

void
mark_value_bits_unavailable (struct value *value,
			     LONGEST offset, ULONGEST length)
{
  insert_into_bit_range_vector (&value->m_unavailable, offset, length);
}
437
/* Byte-granularity wrapper around mark_value_bits_unavailable.  */

void
mark_value_bytes_unavailable (struct value *value,
			      LONGEST offset, ULONGEST length)
{
  mark_value_bits_unavailable (value,
			       offset * TARGET_CHAR_BIT,
			       length * TARGET_CHAR_BIT);
}
446
447 /* Find the first range in RANGES that overlaps the range defined by
448 OFFSET and LENGTH, starting at element POS in the RANGES vector,
449 Returns the index into RANGES where such overlapping range was
450 found, or -1 if none was found. */
451
452 static int
453 find_first_range_overlap (const std::vector<range> *ranges, int pos,
454 LONGEST offset, LONGEST length)
455 {
456 int i;
457
458 for (i = pos; i < ranges->size (); i++)
459 {
460 const range &r = (*ranges)[i];
461 if (ranges_overlap (r.offset, r.length, offset, length))
462 return i;
463 }
464
465 return -1;
466 }
467
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  /* Both regions must share the same sub-byte alignment; otherwise
     the byte-at-a-time scheme below cannot work.  */
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* Fewer bits remain than fit before the byte boundary;
	     shrink the mask accordingly.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* The trailing partial byte matched; drop it from the length so
	 the whole-byte comparison below covers the rest.  */
      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
568
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
582
583 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
584 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
585 ranges starting at OFFSET2 bits. Return true if the ranges match
586 and fill in *L and *H with the overlapping window relative to
587 (both) OFFSET1 or OFFSET2. */
588
589 static int
590 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
591 struct ranges_and_idx *rp2,
592 LONGEST offset1, LONGEST offset2,
593 ULONGEST length, ULONGEST *l, ULONGEST *h)
594 {
595 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
596 offset1, length);
597 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
598 offset2, length);
599
600 if (rp1->idx == -1 && rp2->idx == -1)
601 {
602 *l = length;
603 *h = length;
604 return 1;
605 }
606 else if (rp1->idx == -1 || rp2->idx == -1)
607 return 0;
608 else
609 {
610 const range *r1, *r2;
611 ULONGEST l1, h1;
612 ULONGEST l2, h2;
613
614 r1 = &(*rp1->ranges)[rp1->idx];
615 r2 = &(*rp2->ranges)[rp2->idx];
616
617 /* Get the unavailable windows intersected by the incoming
618 ranges. The first and last ranges that overlap the argument
619 range may be wider than said incoming arguments ranges. */
620 l1 = std::max (offset1, r1->offset);
621 h1 = std::min (offset1 + length, r1->offset + r1->length);
622
623 l2 = std::max (offset2, r2->offset);
624 h2 = std::min (offset2 + length, offset2 + r2->length);
625
626 /* Make them relative to the respective start offsets, so we can
627 compare them for equality. */
628 l1 -= offset1;
629 h1 -= offset1;
630
631 l2 -= offset2;
632 h2 -= offset2;
633
634 /* Different ranges, no match. */
635 if (l1 != l2 || h1 != h2)
636 return 0;
637
638 *h = h1;
639 *l = l1;
640 return 1;
641 }
642 }
643
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static bool
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->m_lazy && !val2->m_lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= val1->m_enclosing_type->length () * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= val2->m_enclosing_type->length () * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &val1->m_unavailable;
  rp2[0].ranges = &val2->m_unavailable;
  rp1[1].ranges = &val1->m_optimized_out;
  rp2[1].ranges = &val2->m_optimized_out;

  /* Walk the compared window, alternating between runs of valid
     contents (compared bit-exactly) and runs of invalid contents
     (which must line up on both sides).  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return false;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->m_contents.get (), offset1,
				   val2->m_contents.get (), offset2, l) != 0)
	return false;

      /* Skip past the window just handled and continue with the
	 remainder.  */
      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}
712
/* See value.h.  Byte-granularity wrapper around
   value_contents_bits_eq.  */

bool
value_contents_eq (const struct value *val1, LONGEST offset1,
		   const struct value *val2, LONGEST offset2,
		   LONGEST length)
{
  return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
				 val2, offset2 * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}
722
723 /* See value.h. */
724
725 bool
726 value_contents_eq (const struct value *val1, const struct value *val2)
727 {
728 ULONGEST len1 = check_typedef (value_enclosing_type (val1))->length ();
729 ULONGEST len2 = check_typedef (value_enclosing_type (val2))->length ();
730 if (len1 != len2)
731 return false;
732 return value_contents_eq (val1, 0, val2, 0, len1);
733 }
734
/* The value-history records all the values printed by print commands
   during this session.  Each entry holds a reference, keeping the
   value alive.  */

static std::vector<value_ref_ptr> value_history;
739
740 \f
/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static std::vector<value_ref_ptr> all_values;
746
747 /* Allocate a lazy value for type TYPE. Its actual content is
748 "lazily" allocated too: the content field of the return value is
749 NULL; it will be allocated when it is fetched from the target. */
750
751 struct value *
752 allocate_value_lazy (struct type *type)
753 {
754 struct value *val;
755
756 /* Call check_typedef on our type to make sure that, if TYPE
757 is a TYPE_CODE_TYPEDEF, its length is set to the length
758 of the target type instead of zero. However, we do not
759 replace the typedef type by the target type, because we want
760 to keep the typedef in order to be able to set the VAL's type
761 description correctly. */
762 check_typedef (type);
763
764 val = new struct value (type);
765
766 /* Values start out on the all_values chain. */
767 all_values.emplace_back (val);
768
769 return val;
770 }
771
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  A value of -1 means "unlimited"
   (see show_max_value_size).  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
789
/* Implement the "set max-value-size" command.  Rejects (and clamps
   back to the minimum) values below MIN_VALUE_FOR_MAX_VALUE_SIZE.  */

static void
set_max_value_size (const char *args, int from_tty,
		    struct cmd_list_element *c)
{
  /* The setting machinery has already stored the new value; -1 means
     unlimited.  */
  gdb_assert (max_value_size == -1 || max_value_size >= 0);

  if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
    {
      /* Restore a usable value before reporting the error, so the
	 setting is never left below the minimum.  */
      max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
      error (_("max-value-size set too low, increasing to %d bytes"),
	     max_value_size);
    }
}
805
806 /* Implement the "show max-value-size" command. */
807
808 static void
809 show_max_value_size (struct ui_file *file, int from_tty,
810 struct cmd_list_element *c, const char *value)
811 {
812 if (max_value_size == -1)
813 gdb_printf (file, _("Maximum value size is unlimited.\n"));
814 else
815 gdb_printf (file, _("Maximum value size is %d bytes.\n"),
816 max_value_size);
817 }
818
819 /* Called before we attempt to allocate or reallocate a buffer for the
820 contents of a value. TYPE is the type of the value for which we are
821 allocating the buffer. If the buffer is too large (based on the user
822 controllable setting) then throw an error. If this function returns
823 then we should attempt to allocate the buffer. */
824
825 static void
826 check_type_length_before_alloc (const struct type *type)
827 {
828 ULONGEST length = type->length ();
829
830 if (max_value_size > -1 && length > max_value_size)
831 {
832 if (type->name () != NULL)
833 error (_("value of type `%s' requires %s bytes, which is more "
834 "than max-value-size"), type->name (), pulongest (length));
835 else
836 error (_("value requires %s bytes, which is more than "
837 "max-value-size"), pulongest (length));
838 }
839 }
840
/* When this has a value, it is used to limit the number of array elements
   of an array that are loaded into memory when an array value is made
   non-lazy.  Managed by scoped_array_length_limiting below.  */
static gdb::optional<int> array_length_limiting_element_count;
845
/* See value.h.  Save the previous limit and install ELEMENTS as the
   new element-count limit for the lifetime of this object.  */
scoped_array_length_limiting::scoped_array_length_limiting (int elements)
{
  m_old_value = array_length_limiting_element_count;
  array_length_limiting_element_count.emplace (elements);
}
852
/* See value.h.  Restore the element-count limit that was in force
   when this object was constructed.  */
scoped_array_length_limiting::~scoped_array_length_limiting ()
{
  array_length_limiting_element_count = m_old_value;
}
858
859 /* Find the inner element type for ARRAY_TYPE. */
860
861 static struct type *
862 find_array_element_type (struct type *array_type)
863 {
864 array_type = check_typedef (array_type);
865 gdb_assert (array_type->code () == TYPE_CODE_ARRAY);
866
867 if (current_language->la_language == language_fortran)
868 while (array_type->code () == TYPE_CODE_ARRAY)
869 {
870 array_type = array_type->target_type ();
871 array_type = check_typedef (array_type);
872 }
873 else
874 {
875 array_type = array_type->target_type ();
876 array_type = check_typedef (array_type);
877 }
878
879 return array_type;
880 }
881
/* Return the limited length of ARRAY_TYPE, which must be of
   TYPE_CODE_ARRAY.  This function can only be called when the global
   ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.

   The limited length of an array is the smallest of either (1) the total
   size of the array type, or (2) the array target type multiplied by the
   array_length_limiting_element_count.  */

static ULONGEST
calculate_limited_array_length (struct type *array_type)
{
  gdb_assert (array_length_limiting_element_count.has_value ());

  array_type = check_typedef (array_type);
  gdb_assert (array_type->code () == TYPE_CODE_ARRAY);

  /* Byte size implied by the element-count limit...  */
  struct type *elm_type = find_array_element_type (array_type);
  ULONGEST len = (elm_type->length ()
		  * (*array_length_limiting_element_count));
  /* ...capped at the array's actual size.  */
  len = std::min (len, array_type->length ());

  return len;
}
905
/* Try to limit ourselves to only fetching the limited number of
   elements.  However, if this limited number of elements still
   puts us over max_value_size, then we still refuse it and
   return failure here, which will ultimately throw an error.  */

static bool
set_limited_array_length (struct value *val)
{
  /* Any previously recorded limit (0 means none yet).  */
  ULONGEST limit = val->m_limited_length;
  ULONGEST len = val->type ()->length ();

  /* If an element-count limit is in effect, compute the byte length
     it implies for this array type.  */
  if (array_length_limiting_element_count.has_value ())
    len = calculate_limited_array_length (val->type ());

  if (limit != 0 && len > limit)
    len = limit;
  if (len > max_value_size)
    return false;

  /* NOTE(review): this records MAX_VALUE_SIZE rather than LEN as the
     limited length (i.e. the largest allocation the setting allows)
     -- confirm this is intentional against allocate_value_contents,
     which allocates m_limited_length bytes.  */
  val->m_limited_length = max_value_size;
  return true;
}
928
/* Allocate the contents of VAL if it has not been allocated yet.
   If CHECK_SIZE is true, then apply the usual max-value-size checks.  */

static void
allocate_value_contents (struct value *val, bool check_size)
{
  if (!val->m_contents)
    {
      struct type *enclosing_type = value_enclosing_type (val);
      ULONGEST len = enclosing_type->length ();

      if (check_size)
	{
	  /* If we are allocating the contents of an array, which
	     is greater in size than max_value_size, and there is
	     an element limit in effect, then we can possibly try
	     to load only a sub-set of the array contents into
	     GDB's memory.  */
	  if (val->type () == enclosing_type
	      && val->type ()->code () == TYPE_CODE_ARRAY
	      && len > max_value_size
	      && set_limited_array_length (val))
	    len = val->m_limited_length;
	  else
	    check_type_length_before_alloc (enclosing_type);
	}

      /* Zero-filled so unfetched portions read as zeroes.  */
      val->m_contents.reset ((gdb_byte *) xzalloc (len));
    }
}
959
/* Allocate a value and its contents for type TYPE.  If CHECK_SIZE is true,
   then apply the usual max-value-size checks.  */

static struct value *
allocate_value (struct type *type, bool check_size)
{
  struct value *val = allocate_value_lazy (type);

  /* Eagerly allocate the backing buffer and mark the value
     non-lazy.  */
  allocate_value_contents (val, check_size);
  val->m_lazy = 0;
  return val;
}
972
/* Allocate a value and its contents for type TYPE.  The
   max-value-size check is always applied.  */

struct value *
allocate_value (struct type *type)
{
  return allocate_value (type, true);
}
980
981 /* Allocate a value that has the correct length
982 for COUNT repetitions of type TYPE. */
983
984 struct value *
985 allocate_repeat_value (struct type *type, int count)
986 {
987 /* Despite the fact that we are really creating an array of TYPE here, we
988 use the string lower bound as the array lower bound. This seems to
989 work fine for now. */
990 int low_bound = current_language->string_lower_bound ();
991 /* FIXME-type-allocation: need a way to free this type when we are
992 done with it. */
993 struct type *array_type
994 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
995
996 return allocate_value (array_type);
997 }
998
999 struct value *
1000 allocate_computed_value (struct type *type,
1001 const struct lval_funcs *funcs,
1002 void *closure)
1003 {
1004 struct value *v = allocate_value_lazy (type);
1005
1006 VALUE_LVAL (v) = lval_computed;
1007 v->m_location.computed.funcs = funcs;
1008 v->m_location.computed.closure = closure;
1009
1010 return v;
1011 }
1012
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *retval = allocate_value_lazy (type);

  /* Mark the entire extent optimized out, and clear laziness since
     there is nothing to fetch.  */
  mark_value_bytes_optimized_out (retval, 0, type->length ());
  set_value_lazy (retval, 0);
  return retval;
}
1024
/* Accessor methods.  */

/* Forcibly overwrite VALUE's type.  */
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  value->m_type = type;
}

/* Return VALUE's offset.  */
LONGEST
value_offset (const struct value *value)
{
  return value->m_offset;
}
/* Set VALUE's offset.  */
void
set_value_offset (struct value *value, LONGEST offset)
{
  value->m_offset = offset;
}

/* Return VALUE's bit position.  */
LONGEST
value_bitpos (const struct value *value)
{
  return value->m_bitpos;
}
/* Set VALUE's bit position.  */
void
set_value_bitpos (struct value *value, LONGEST bit)
{
  value->m_bitpos = bit;
}

/* Return VALUE's bit size.  */
LONGEST
value_bitsize (const struct value *value)
{
  return value->m_bitsize;
}
/* Set VALUE's bit size.  */
void
set_value_bitsize (struct value *value, LONGEST bit)
{
  value->m_bitsize = bit;
}

/* Return VALUE's parent value, or NULL if it has none.  */
struct value *
value_parent (const struct value *value)
{
  return value->m_parent.get ();
}

/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  /* Takes a new reference on PARENT, keeping it alive for as long as
     VALUE references it.  */
  value->m_parent = value_ref_ptr::new_reference (parent);
}
1079
/* Return a writable view of VALUE's contents, covering VALUE's own
   type, starting at its embedded offset within the enclosing type.
   Allocates the contents buffer if needed.  */

gdb::array_view<gdb_byte>
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  /* The embedded offset is counted in addressable memory units, which
     may be wider than one byte.  */
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  allocate_value_contents (value, true);

  ULONGEST length = value->type ()->length ();
  return gdb::make_array_view
    (value->m_contents.get () + value->m_embedded_offset * unit_size, length);
}
1092
/* Return a writable view of VALUE's entire contents buffer, covering
   the full enclosing type.  Allocates the buffer if needed.  */

gdb::array_view<gdb_byte>
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value, true);

  ULONGEST length = value_enclosing_type (value)->length ();
  return gdb::make_array_view (value->m_contents.get (), length);
}
1101
/* Return VALUE's enclosing type.  */

struct type *
value_enclosing_type (const struct value *value)
{
  return value->m_enclosing_type;
}
1107
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  /* Default: no more specific type was discovered.  */
  if (real_type_found)
    *real_type_found = 0;
  result = value->type ();
  /* Dynamic type resolution is only attempted when the user has
     "set print object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if (result->is_pointer_or_reference ()
	  && (check_typedef (result->target_type ())->code ()
	      == TYPE_CODE_STRUCT)
	  && !value_optimized_out (value))
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* For other types, the enclosing type is the most specific
	     type we can report.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1151
/* Throw the standard "optimized out" error.  Never returns.  */
void
error_value_optimized_out (void)
{
  throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
}
1157
/* Throw an error if any part of VALUE is marked optimized out.  A
   register lvalue gets a more specific message, since for registers
   "optimized out" normally means the register was not saved in the
   selected frame.  */
static void
require_not_optimized_out (const struct value *value)
{
  if (!value->m_optimized_out.empty ())
    {
      if (value->m_lval == lval_register)
	throw_error (OPTIMIZED_OUT_ERROR,
		     _("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}
1170
1171 static void
1172 require_available (const struct value *value)
1173 {
1174 if (!value->m_unavailable.empty ())
1175 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1176 }
1177
/* Return a read-only view of VALUE's full contents (enclosing type),
   fetching them first if VALUE is still lazy.  Unlike
   value_contents_all, this does not error out for optimized-out or
   unavailable contents.  */
gdb::array_view<const gdb_byte>
value_contents_for_printing (struct value *value)
{
  if (value->m_lazy)
    value_fetch_lazy (value);

  ULONGEST length = value_enclosing_type (value)->length ();
  return gdb::make_array_view (value->m_contents.get (), length);
}
1187
/* Const variant of value_contents_for_printing.  Since a const VALUE
   cannot be fetched, the caller must ensure it is already non-lazy.  */
gdb::array_view<const gdb_byte>
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->m_lazy);

  ULONGEST length = value_enclosing_type (value)->length ();
  return gdb::make_array_view (value->m_contents.get (), length);
}
1196
1197 gdb::array_view<const gdb_byte>
1198 value_contents_all (struct value *value)
1199 {
1200 gdb::array_view<const gdb_byte> result = value_contents_for_printing (value);
1201 require_not_optimized_out (value);
1202 require_available (value);
1203 return result;
1204 }
1205
/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
		      const std::vector<range> &src_range, int src_bit_offset,
		      unsigned int bit_length)
{
  for (const range &r : src_range)
    {
      LONGEST h, l;

      /* Clamp R to the window being copied: L/H are the low/high bit
	 bounds of the intersection.  */
      l = std::max (r.offset, (LONGEST) src_bit_offset);
      h = std::min ((LONGEST) (r.offset + r.length),
		    (LONGEST) src_bit_offset + bit_length);

      /* An empty intersection (l >= h) contributes nothing.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1228
/* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  Both the
   "unavailable" and the "optimized out" range vectors are carried
   over.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->m_unavailable, dst_bit_offset,
			src->m_unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->m_optimized_out, dst_bit_offset,
			src->m_optimized_out, src_bit_offset,
			bit_length);
}
1244
/* Copy LENGTH target addressable memory units of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
   contents, starting at DST_OFFSET.  If unavailable contents are
   being copied from SRC, the corresponding DST contents are marked
   unavailable accordingly.  Neither DST nor SRC may be lazy
   values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

static void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
			 struct value *src, LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !src->m_lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents
    = value_contents_all_raw (dst).slice (dst_offset * unit_size,
					  length * unit_size);
  gdb::array_view<const gdb_byte> src_contents
    = value_contents_all_raw (src).slice (src_offset * unit_size,
					  length * unit_size);
  copy (src_contents, dst_contents);

  /* Copy the meta-data, adjusted.  Convert the unit-based offsets and
     length to bit quantities, since the range vectors track bits.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1295
/* A helper for value_from_component_bitsize that copies bits from SRC
   to DEST.  Like value_contents_copy_raw, but the source and
   destination positions and the length are expressed in bits, not in
   addressable units.  */

static void
value_contents_copy_raw_bitwise (struct value *dst, LONGEST dst_bit_offset,
				 struct value *src, LONGEST src_bit_offset,
				 LONGEST bit_length)
{
  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !src->m_lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  LONGEST dst_offset = dst_bit_offset / TARGET_CHAR_BIT;
  LONGEST length = bit_length / TARGET_CHAR_BIT;
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst, dst_bit_offset,
					     bit_length));

  /* Copy the data.  Bit ordering within a byte depends on the type's
     byte order, hence the BFD_ENDIAN_BIG flag.  */
  gdb::array_view<gdb_byte> dst_contents = value_contents_all_raw (dst);
  gdb::array_view<const gdb_byte> src_contents = value_contents_all_raw (src);
  copy_bitwise (dst_contents.data (), dst_bit_offset,
		src_contents.data (), src_bit_offset,
		bit_length,
		type_byte_order (src->type ()) == BFD_ENDIAN_BIG);

  /* Copy the meta-data.  */
  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1332
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, LONGEST dst_offset,
		     struct value *src, LONGEST src_offset, LONGEST length)
{
  /* Un-lazy SRC so the raw copy below has real bytes to work with.  */
  if (src->m_lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1352
/* Return non-zero if VALUE's contents have not been fetched yet.  */
int
value_lazy (const struct value *value)
{
  return value->m_lazy;
}

/* Set VALUE's laziness flag.  */
void
set_value_lazy (struct value *value, int val)
{
  value->m_lazy = val;
}

/* Return non-zero if VALUE is known to live on the program stack.  */
int
value_stack (const struct value *value)
{
  return value->m_stack;
}

/* Set VALUE's on-stack flag.  */
void
set_value_stack (struct value *value, int val)
{
  value->m_stack = val;
}
1376
/* Return a read-only view of VALUE's contents (value's own type, at
   its embedded offset), fetching them if lazy, and erroring out if
   any part is optimized out or unavailable.  */
gdb::array_view<const gdb_byte>
value_contents (struct value *value)
{
  gdb::array_view<const gdb_byte> result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1385
/* Return a writable view of VALUE's contents, fetching them first if
   VALUE is still lazy.  */
gdb::array_view<gdb_byte>
value_contents_writeable (struct value *value)
{
  if (value->m_lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1393
/* Return non-zero if any part of VALUE is optimized out.  May fetch a
   lazy VALUE as a side effect.  */
int
value_optimized_out (struct value *value)
{
  if (value->m_lazy)
    {
      /* See if we can compute the result without fetching the
	 value.  */
      if (VALUE_LVAL (value) == lval_memory)
	return false;
      else if (VALUE_LVAL (value) == lval_computed)
	{
	  const struct lval_funcs *funcs = value->m_location.computed.funcs;

	  /* is_optimized_out is an optional callback.  */
	  if (funcs->is_optimized_out != nullptr)
	    return funcs->is_optimized_out (value);
	}

      /* Fall back to fetching.  */
      try
	{
	  value_fetch_lazy (value);
	}
      catch (const gdb_exception_error &ex)
	{
	  switch (ex.error)
	    {
	    case MEMORY_ERROR:
	    case OPTIMIZED_OUT_ERROR:
	    case NOT_AVAILABLE_ERROR:
	      /* These can normally happen when we try to access an
		 optimized out or unavailable register, either in a
		 physical register or spilled to memory.  */
	      break;
	    default:
	      throw;
	    }
	}
    }

  /* value_fetch_lazy records optimized-out ranges here.  */
  return !value->m_optimized_out.empty ();
}
1435
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  Byte quantities are converted to bits,
   since the underlying range vector tracks bits.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}
1446
/* See value.h.  Record [OFFSET, OFFSET+LENGTH) bits of VALUE as
   optimized out.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->m_optimized_out, offset, length);
}
1455
1456 int
1457 value_bits_synthetic_pointer (const struct value *value,
1458 LONGEST offset, LONGEST length)
1459 {
1460 if (value->m_lval != lval_computed
1461 || !value->m_location.computed.funcs->check_synthetic_pointer)
1462 return 0;
1463 return value->m_location.computed.funcs->check_synthetic_pointer (value,
1464 offset,
1465 length);
1466 }
1467
/* Return VALUE's embedded offset: where the value's own type starts
   within the enclosing-type contents buffer.  */
LONGEST
value_embedded_offset (const struct value *value)
{
  return value->m_embedded_offset;
}

/* Set VALUE's embedded offset.  */
void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->m_embedded_offset = val;
}

/* Return VALUE's pointed-to offset (see value.h for its role in
   pointer adjustment).  */
LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->m_pointed_to_offset;
}

/* Set VALUE's pointed-to offset.  */
void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->m_pointed_to_offset = val;
}
1491
/* Return the lval_funcs of computed lvalue V.  V must be an
   lval_computed value.  */
const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->m_location.computed.funcs;
}

/* Return the opaque closure of computed lvalue V.  V must be an
   lval_computed value.  */
void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->m_lval == lval_computed);

  return v->m_location.computed.closure;
}
1507
/* Return a mutable pointer to VALUE's lval kind; used by the
   VALUE_LVAL macro.  Deprecated: callers should not poke at the lval
   directly.  */
enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->m_lval;
}

/* Read-only accessor for VALUE's lval kind.  */
enum lval_type
value_lval_const (const struct value *value)
{
  return value->m_lval;
}
1519
/* Return the target address of VALUE, or 0 if it is not in memory.
   A component value derives its address from its parent; a type with
   a constant data-location property overrides the stored location.  */
CORE_ADDR
value_address (const struct value *value)
{
  if (value->m_lval != lval_memory)
    return 0;
  /* A child value's address is relative to its parent's.  */
  if (value->m_parent != NULL)
    return value_address (value->m_parent.get ()) + value->m_offset;
  if (NULL != TYPE_DATA_LOCATION (value->type ()))
    {
      /* Dynamic data locations must have been resolved to a constant
	 by this point.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value->type ()));
      return TYPE_DATA_LOCATION_ADDR (value->type ());
    }

  return value->m_location.address + value->m_offset;
}
1535
1536 CORE_ADDR
1537 value_raw_address (const struct value *value)
1538 {
1539 if (value->m_lval != lval_memory)
1540 return 0;
1541 return value->m_location.address;
1542 }
1543
/* Set the memory address of VALUE, which must be an lval_memory
   value.  */
void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->m_lval == lval_memory);
  value->m_location.address = addr;
}
1550
/* Mutable access to VALUE's internalvar location; deprecated macro
   back-end.  */
struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->m_location.internalvar;
}

/* Mutable access to the next-frame id of register lvalue VALUE;
   deprecated macro back-end.  */
struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->m_lval == lval_register);
  return &value->m_location.reg.next_frame_id;
}

/* Mutable access to the register number of register lvalue VALUE;
   deprecated macro back-end.  */
int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->m_lval == lval_register);
  return &value->m_location.reg.regnum;
}

/* Return non-zero if VALUE may be modified (values recorded in the
   history are made non-modifiable).  */
int
deprecated_value_modifiable (const struct value *value)
{
  return value->m_modifiable;
}
1576 \f
1577 /* Return a mark in the value chain. All values allocated after the
1578 mark is obtained (except for those released) are subject to being freed
1579 if a subsequent value_free_to_mark is passed the mark. */
1580 struct value *
1581 value_mark (void)
1582 {
1583 if (all_values.empty ())
1584 return nullptr;
1585 return all_values.back ().get ();
1586 }
1587
/* See value.h.  Take an additional strong reference to VAL.  */

void
value_incref (struct value *val)
{
  val->m_reference_count++;
}
1595
1596 /* Release a reference to VAL, which was acquired with value_incref.
1597 This function is also called to deallocate values from the value
1598 chain. */
1599
1600 void
1601 value_decref (struct value *val)
1602 {
1603 if (val != nullptr)
1604 {
1605 gdb_assert (val->m_reference_count > 0);
1606 val->m_reference_count--;
1607 if (val->m_reference_count == 0)
1608 delete val;
1609 }
1610 }
1611
/* Free all values allocated since MARK was obtained by value_mark
   (except for those released).  If MARK is not found on the chain
   (e.g. a null mark from an empty chain), the entire chain is
   freed.  */
void
value_free_to_mark (const struct value *mark)
{
  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    all_values.clear ();
  else
    /* Keep MARK itself; drop everything allocated after it.  */
    all_values.erase (iter + 1, all_values.end ());
}
1623
/* Remove VAL from the chain all_values
   so it will not be freed automatically.  Returns an owned reference
   to VAL in all cases.  */

value_ref_ptr
release_value (struct value *val)
{
  if (val == nullptr)
    return value_ref_ptr ();

  /* Search backwards, since recently allocated values are most
     likely the ones being released.  */
  std::vector<value_ref_ptr>::reverse_iterator iter;
  for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
    {
      if (*iter == val)
	{
	  value_ref_ptr result = *iter;
	  /* 'iter.base () - 1' is the forward iterator for the
	     element a reverse iterator designates.  */
	  all_values.erase (iter.base () - 1);
	  return result;
	}
    }

  /* We must always return an owned reference.  Normally this happens
     because we transfer the reference from the value chain, but in
     this case the value was not on the chain.  */
  return value_ref_ptr::new_reference (val);
}
1649
/* See value.h.  Release all values on the chain after MARK, returning
   them (oldest first) as owned references.  If MARK is not found, the
   whole chain is released.  */

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  /* The swap branch yields the chain in allocation order already; the
     reverse puts both branches' results into the same (oldest-first)
     order as the chain.  */
  std::reverse (result.begin (), result.end ());
  return result;
}
1668
/* Return a copy of the value ARG.  It contains the same contents,
   for the same memory address, but it's a different block of storage.  */

struct value *
value_copy (const value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Start from a lazy value so no contents buffer is allocated until
     we know ARG actually has contents to duplicate.  */
  val = allocate_value_lazy (encl_type);
  val->m_type = arg->m_type;
  VALUE_LVAL (val) = arg->m_lval;
  val->m_location = arg->m_location;
  val->m_offset = arg->m_offset;
  val->m_bitpos = arg->m_bitpos;
  val->m_bitsize = arg->m_bitsize;
  val->m_lazy = arg->m_lazy;
  val->m_embedded_offset = value_embedded_offset (arg);
  val->m_pointed_to_offset = arg->m_pointed_to_offset;
  val->m_modifiable = arg->m_modifiable;
  val->m_stack = arg->m_stack;
  val->m_is_zero = arg->m_is_zero;
  val->m_in_history = arg->m_in_history;
  val->m_initialized = arg->m_initialized;
  val->m_unavailable = arg->m_unavailable;
  val->m_optimized_out = arg->m_optimized_out;
  val->m_parent = arg->m_parent;
  val->m_limited_length = arg->m_limited_length;

  /* Only copy bytes when ARG has some: a lazy value has none yet, and
     a wholly optimized-out or unavailable value has no usable
     contents buffer.  */
  if (!value_lazy (val)
      && !(value_entirely_optimized_out (val)
	   || value_entirely_unavailable (val)))
    {
      /* A non-zero m_limited_length means only a prefix of the
	 enclosing type's bytes was fetched.  */
      ULONGEST length = val->m_limited_length;
      if (length == 0)
	length = value_enclosing_type (val)->length ();

      gdb_assert (arg->m_contents != nullptr);
      const auto &arg_view
	= gdb::make_array_view (arg->m_contents.get (), length);

      allocate_value_contents (val, false);
      gdb::array_view<gdb_byte> val_contents
	= value_contents_all_raw (val).slice (0, length);

      copy (arg_view, val_contents);
    }

  /* Computed lvalues may carry an opaque closure that has to be
     duplicated through the copy_closure callback.  */
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->m_location.computed.funcs;

      if (funcs->copy_closure)
	val->m_location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1726
1727 /* Return a "const" and/or "volatile" qualified version of the value V.
1728 If CNST is true, then the returned value will be qualified with
1729 "const".
1730 if VOLTL is true, then the returned value will be qualified with
1731 "volatile". */
1732
1733 struct value *
1734 make_cv_value (int cnst, int voltl, struct value *v)
1735 {
1736 struct type *val_type = v->type ();
1737 struct type *m_enclosing_type = value_enclosing_type (v);
1738 struct value *cv_val = value_copy (v);
1739
1740 deprecated_set_value_type (cv_val,
1741 make_cv_type (cnst, voltl, val_type, NULL));
1742 set_value_enclosing_type (cv_val,
1743 make_cv_type (cnst, voltl, m_enclosing_type, NULL));
1744
1745 return cv_val;
1746 }
1747
/* Return a version of ARG that is non-lvalue.  If ARG is already
   not_lval it is returned unchanged; otherwise a detached copy of its
   contents is made.  */

struct value *
value_non_lval (struct value *arg)
{
  if (VALUE_LVAL (arg) != not_lval)
    {
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      /* Duplicate the bytes and the offsets describing how ARG's own
	 type sits inside the enclosing type.  */
      copy (value_contents_all (arg), value_contents_all_raw (val));
      val->m_type = arg->m_type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  return arg;
}
1766
/* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.
   V must currently be a non-lvalue.  */

void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  /* Materialize the value in target memory, then re-describe V as a
     memory lvalue at that address.  */
  write_memory (addr, value_contents_raw (v).data (), v->type ()->length ());
  v->m_lval = lval_memory;
  v->m_location.address = addr;
}
1778
/* Make COMPONENT's location the same as WHOLE's, adjusting for
   component-of-internalvar and dynamic (data-location) types.  */
void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->m_lval != lval_xcallable);

  /* Components of an internalvar get their own lval kind so writes
     can be routed back to the variable.  */
  if (whole->m_lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->m_lval;

  component->m_location = whole->m_location;
  if (whole->m_lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->m_location.computed.funcs;

      /* The closure was copied by the struct assignment above; give
	 the lval implementation a chance to deep-copy it.  */
      if (funcs->copy_closure)
	component->m_location.computed.closure = funcs->copy_closure (whole);
    }

  /* If the WHOLE value has a dynamically resolved location property then
     update the address of the COMPONENT.  */
  type = whole->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));

  /* Similarly, if the COMPONENT value has a dynamically resolved location
     property then update its address.  */
  type = component->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    {
      /* If the COMPONENT has a dynamic location, and is an
	 lval_internalvar_component, then we change it to a lval_memory.

	 Usually a component of an internalvar is created non-lazy, and has
	 its content immediately copied from the parent internalvar.
	 However, for components with a dynamic location, the content of
	 the component is not contained within the parent, but is instead
	 accessed indirectly.  Further, the component will be created as a
	 lazy value.

	 By changing the type of the component to lval_memory we ensure
	 that value_fetch_lazy can successfully load the component.

	 This solution isn't ideal, but a real fix would require values to
	 carry around both the parent value contents, and the contents of
	 any dynamic fields within the parent.  This is a substantial
	 change to how values work in GDB.  */
      if (VALUE_LVAL (component) == lval_internalvar_component)
	{
	  gdb_assert (value_lazy (component));
	  VALUE_LVAL (component) = lval_memory;
	}
      else
	gdb_assert (VALUE_LVAL (component) == lval_memory);
      set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
    }
}
1841
1842 /* Access to the value history. */
1843
/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  struct type *enclosing_type = value_enclosing_type (val);
  struct type *type = val->type ();

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    {
      /* We know that this is a _huge_ array, any attempt to fetch this
	 is going to cause GDB to throw an error.  However, to allow
	 the array to still be displayed we fetch its contents up to
	 `max_value_size' and mark anything beyond "unavailable" in
	 the history.  */
      if (type->code () == TYPE_CODE_ARRAY
	  && type->length () > max_value_size
	  && array_length_limiting_element_count.has_value ()
	  && enclosing_type == type
	  && calculate_limited_array_length (type) <= max_value_size)
	val->m_limited_length = max_value_size;

      value_fetch_lazy (val);
    }

  /* Anything past the fetched prefix is recorded as unavailable.  */
  ULONGEST limit = val->m_limited_length;
  if (limit != 0)
    mark_value_bytes_unavailable (val, limit,
				  enclosing_type->length () - limit);

  /* Mark the value as recorded in the history for the availability check.  */
  val->m_in_history = true;

  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->m_modifiable = 0;

  value_history.push_back (release_value (val));

  /* History indices are 1-based, so the new size is the new entry's
     number.  */
  return value_history.size ();
}
1891
/* Return a copy of the value in the history with sequence number NUM.
   NUM may be zero or negative, meaning relative to the end of the
   history ($$, $$1, ...).  */

struct value *
access_value_history (int num)
{
  int absnum = num;

  /* Convert a relative (<= 0) index to an absolute one.  */
  if (absnum <= 0)
    absnum += value_history.size ();

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history.size ())
    error (_("History has not yet reached $%d."), absnum);

  /* History numbers are 1-based; the vector is 0-based.  */
  absnum--;

  return value_copy (value_history[absnum].get ());
}
1918
/* See value.h.  Number of values currently recorded in the
   history.  */

ULONGEST
value_history_count ()
{
  return value_history.size ();
}
1926
/* Implement the "show values" command: print ten history entries
   around NUM_EXP, or continue from the last position when NUM_EXP is
   "+" or the command is repeated.  */
static void
show_values (const char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Static so that consecutive "show values +" invocations continue
     where the previous one stopped.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history.size () - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history.size (); i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      gdb_printf (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      gdb_printf (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    set_repeat_arguments ("+");
}
1970 \f
/* The different kinds of content an internal variable can hold;
   selects the active member of union internalvar_data.  */
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};

/* Storage for an internal variable's content; which member is valid
   is determined by the owning internalvar's KIND.  */
union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};

/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  /* Next variable on the singly-linked list headed by
     'internalvars'.  */
  struct internalvar *next;
  /* Name without the '$' prefix; owned (xstrdup'd) by this struct.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};
2049
2050 static struct internalvar *internalvars;
2051
/* If the variable does not already exist create it and give it the
   value given.  If no value is given then the default is zero.
   Implements the "init-if-undefined" command.  */
static void
init_if_undefined_command (const char* args, int from_tty)
{
  struct internalvar *intvar = nullptr;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->first_opcode () != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.  The left-hand
     side must be an internal-variable reference.  */
  expr::assign_operation *assign
    = dynamic_cast<expr::assign_operation *> (expr->op.get ());
  if (assign != nullptr)
    {
      expr::operation *lhs = assign->get_lhs ();
      expr::internalvar_operation *ivarop
	= dynamic_cast<expr::internalvar_operation *> (lhs);
      if (ivarop != nullptr)
	intvar = ivarop->get_internalvar ();
    }

  if (intvar == nullptr)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    evaluate_expression (expr.get ());
}
2089
2090
2091 /* Look up an internal variable with name NAME. NAME should not
2092 normally include a dollar sign.
2093
2094 If the specified internal variable does not exist,
2095 the return value is NULL. */
2096
2097 struct internalvar *
2098 lookup_only_internalvar (const char *name)
2099 {
2100 struct internalvar *var;
2101
2102 for (var = internalvars; var; var = var->next)
2103 if (strcmp (var->name, name) == 0)
2104 return var;
2105
2106 return NULL;
2107 }
2108
2109 /* Complete NAME by comparing it to the names of internal
2110 variables. */
2111
2112 void
2113 complete_internalvar (completion_tracker &tracker, const char *name)
2114 {
2115 struct internalvar *var;
2116 int len;
2117
2118 len = strlen (name);
2119
2120 for (var = internalvars; var; var = var->next)
2121 if (strncmp (var->name, name, len) == 0)
2122 tracker.add_completion (make_unique_xstrdup (var->name));
2123 }
2124
/* Create an internal variable with name NAME and with a void value.
   NAME should not normally include a dollar sign.  The new variable
   is pushed onto the front of the global 'internalvars' list.  */

struct internalvar *
create_internalvar (const char *name)
{
  struct internalvar *var = XNEW (struct internalvar);

  var->name = xstrdup (name);
  var->kind = INTERNALVAR_VOID;
  var->next = internalvars;
  internalvars = var;
  return var;
}
2139
/* Create an internal variable with name NAME and register FUN as the
   function that value_of_internalvar uses to create a value whenever
   this variable is referenced.  NAME should not normally include a
   dollar sign.  DATA is passed uninterpreted to FUN when it is
   called.  CLEANUP, if not NULL, is called when the internal variable
   is destroyed.  It is passed DATA as its only argument.  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  /* Upgrade the fresh VOID variable to a lazily-computed one.  */
  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2159
/* See documentation in value.h.  Returns 0 when VAR cannot be
   compiled to an agent expression (wrong kind, or no compile_to_ax
   callback), 1 on success.  */

int
compile_internalvar_to_ax (struct internalvar *var,
			   struct agent_expr *expr,
			   struct axs_value *value)
{
  if (var->kind != INTERNALVAR_MAKE_VALUE
      || var->u.make_value.functions->compile_to_ax == NULL)
    return 0;

  var->u.make_value.functions->compile_to_ax (var, expr, value,
					      var->u.make_value.data);
  return 1;
}
2175
2176 /* Look up an internal variable with name NAME. NAME should not
2177 normally include a dollar sign.
2178
2179 If the specified internal variable does not exist,
2180 one is created, with a void value. */
2181
2182 struct internalvar *
2183 lookup_internalvar (const char *name)
2184 {
2185 struct internalvar *var;
2186
2187 var = lookup_only_internalvar (name);
2188 if (var)
2189 return var;
2190
2191 return create_internalvar (name);
2192 }
2193
2194 /* Return current value of internal variable VAR. For variables that
2195 are not inherently typed, use a value type appropriate for GDBARCH. */
2196
struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      /* Ask the target for the current value; when it cannot supply
	 one, fall back to a void value.  */
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Otherwise materialize a value based on the variable's kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* Use the recorded type if one was stored, else GDBARCH's plain
	 "int".  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Hand back a copy, fetched now so later operations do not
	 depend on a target that may be gone.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (_("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->m_lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2282
2283 int
2284 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2285 {
2286 if (var->kind == INTERNALVAR_INTEGER)
2287 {
2288 *result = var->u.integer.val;
2289 return 1;
2290 }
2291
2292 if (var->kind == INTERNALVAR_VALUE)
2293 {
2294 struct type *type = check_typedef (var->u.value->type ());
2295
2296 if (type->code () == TYPE_CODE_INT)
2297 {
2298 *result = value_as_long (var->u.value);
2299 return 1;
2300 }
2301 }
2302
2303 return 0;
2304 }
2305
2306 static int
2307 get_internalvar_function (struct internalvar *var,
2308 struct internal_function **result)
2309 {
2310 switch (var->kind)
2311 {
2312 case INTERNALVAR_FUNCTION:
2313 *result = var->u.fn.function;
2314 return 1;
2315
2316 default:
2317 return 0;
2318 }
2319 }
2320
/* Write NEWVAL into a component of internal variable VAR, which must
   currently hold a value.  OFFSET is a byte offset into the value's
   contents; when BITSIZE is non-zero the component is a bitfield at
   BITPOS/BITSIZE and is written via modify_field, otherwise NEWVAL's
   raw bytes are copied in place.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value).data ();
      arch = get_value_arch (var->u.value);
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      /* NOTE(review): the bitfield branch uses OFFSET as a plain byte
	 offset while the memcpy branch scales it by the addressable
	 unit size — presumably callers pass them consistently; verify
	 against the call sites.  */
      if (bitsize)
	modify_field (var->u.value->type (), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, value_contents (newval).data (),
		newval->type ()->length ());
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (_("set_internalvar_component"));
    }
}
2350
/* Set internal variable VAR from VAL.  The new contents are prepared
   fully before the old contents are released, so an error part-way
   through leaves VAR unchanged.  Canonical convenience functions
   cannot be overwritten.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (check_typedef (val->type ())->code ())
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      struct value *copy = value_copy (val);
      copy->m_modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (copy))
	value_fetch_lazy (copy);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      new_data.value = release_value (copy).release ();

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      new_data.value->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION);

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2411
2412 void
2413 set_internalvar_integer (struct internalvar *var, LONGEST l)
2414 {
2415 /* Clean up old contents. */
2416 clear_internalvar (var);
2417
2418 var->kind = INTERNALVAR_INTEGER;
2419 var->u.integer.type = NULL;
2420 var->u.integer.val = l;
2421 }
2422
2423 void
2424 set_internalvar_string (struct internalvar *var, const char *string)
2425 {
2426 /* Clean up old contents. */
2427 clear_internalvar (var);
2428
2429 var->kind = INTERNALVAR_STRING;
2430 var->u.string = xstrdup (string);
2431 }
2432
2433 static void
2434 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2435 {
2436 /* Clean up old contents. */
2437 clear_internalvar (var);
2438
2439 var->kind = INTERNALVAR_FUNCTION;
2440 var->u.fn.function = f;
2441 var->u.fn.canonical = 1;
2442 /* Variables installed here are always the canonical version. */
2443 }
2444
2445 void
2446 clear_internalvar (struct internalvar *var)
2447 {
2448 /* Clean up old contents. */
2449 switch (var->kind)
2450 {
2451 case INTERNALVAR_VALUE:
2452 value_decref (var->u.value);
2453 break;
2454
2455 case INTERNALVAR_STRING:
2456 xfree (var->u.string);
2457 break;
2458
2459 default:
2460 break;
2461 }
2462
2463 /* Reset to void kind. */
2464 var->kind = INTERNALVAR_VOID;
2465 }
2466
/* Return the name of internal variable VAR, without the leading
   dollar sign.  The string is owned by VAR.  */

const char *
internalvar_name (const struct internalvar *var)
{
  return var->name;
}
2472
2473 static struct internal_function *
2474 create_internal_function (const char *name,
2475 internal_function_fn handler, void *cookie)
2476 {
2477 struct internal_function *ifn = XNEW (struct internal_function);
2478
2479 ifn->name = xstrdup (name);
2480 ifn->handler = handler;
2481 ifn->cookie = cookie;
2482 return ifn;
2483 }
2484
2485 const char *
2486 value_internal_function_name (struct value *val)
2487 {
2488 struct internal_function *ifn;
2489 int result;
2490
2491 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2492 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2493 gdb_assert (result);
2494
2495 return ifn->name;
2496 }
2497
2498 struct value *
2499 call_internal_function (struct gdbarch *gdbarch,
2500 const struct language_defn *language,
2501 struct value *func, int argc, struct value **argv)
2502 {
2503 struct internal_function *ifn;
2504 int result;
2505
2506 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2507 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2508 gdb_assert (result);
2509
2510 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2511 }
2512
2513 /* The 'function' command. This does nothing -- it is just a
2514 placeholder to let "help function NAME" work. This is also used as
2515 the implementation of the sub-command that is created when
2516 registering an internal function. */
static void
function_command (const char *command, int from_tty)
{
  /* Do nothing.  COMMAND and FROM_TTY are deliberately ignored.  */
}
2522
2523 /* Helper function that does the work for add_internal_function. */
2524
2525 static struct cmd_list_element *
2526 do_add_internal_function (const char *name, const char *doc,
2527 internal_function_fn handler, void *cookie)
2528 {
2529 struct internal_function *ifn;
2530 struct internalvar *var = lookup_internalvar (name);
2531
2532 ifn = create_internal_function (name, handler, cookie);
2533 set_internalvar_function (var, ifn);
2534
2535 return add_cmd (name, no_class, function_command, doc, &functionlist);
2536 }
2537
2538 /* See value.h. */
2539
void
add_internal_function (const char *name, const char *doc,
		       internal_function_fn handler, void *cookie)
{
  /* The returned command object is owned by the command list; nothing
     further to record for statically allocated NAME/DOC.  */
  do_add_internal_function (name, doc, handler, cookie);
}
2546
2547 /* See value.h. */
2548
2549 void
2550 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2551 gdb::unique_xmalloc_ptr<char> &&doc,
2552 internal_function_fn handler, void *cookie)
2553 {
2554 struct cmd_list_element *cmd
2555 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2556 doc.release ();
2557 cmd->doc_allocated = 1;
2558 name.release ();
2559 cmd->name_allocated = 1;
2560 }
2561
2562 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2563 prevent cycles / duplicates. */
2564
void
preserve_one_value (struct value *value, struct objfile *objfile,
		    htab_t copied_types)
{
  /* Replace any type owned by OBJFILE with a deep copy that will
     outlive the objfile; COPIED_TYPES memoizes copies so shared types
     are copied only once.  */
  if (value->m_type->objfile_owner () == objfile)
    value->m_type = copy_type_recursive (value->m_type, copied_types);

  if (value->m_enclosing_type->objfile_owner () == objfile)
    value->m_enclosing_type = copy_type_recursive (value->m_enclosing_type,
						   copied_types);
}
2576
2577 /* Likewise for internal variable VAR. */
2578
2579 static void
2580 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2581 htab_t copied_types)
2582 {
2583 switch (var->kind)
2584 {
2585 case INTERNALVAR_INTEGER:
2586 if (var->u.integer.type
2587 && var->u.integer.type->objfile_owner () == objfile)
2588 var->u.integer.type
2589 = copy_type_recursive (var->u.integer.type, copied_types);
2590 break;
2591
2592 case INTERNALVAR_VALUE:
2593 preserve_one_value (var->u.value, objfile, copied_types);
2594 break;
2595 }
2596 }
2597
2598 /* Make sure that all types and values referenced by VAROBJ are updated before
2599 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2600 duplicates. */
2601
2602 static void
2603 preserve_one_varobj (struct varobj *varobj, struct objfile *objfile,
2604 htab_t copied_types)
2605 {
2606 if (varobj->type->is_objfile_owned ()
2607 && varobj->type->objfile_owner () == objfile)
2608 {
2609 varobj->type
2610 = copy_type_recursive (varobj->type, copied_types);
2611 }
2612
2613 if (varobj->value != nullptr)
2614 preserve_one_value (varobj->value.get (), objfile, copied_types);
2615 }
2616
2617 /* Update the internal variables and value history when OBJFILE is
2618 discarded; we must copy the types out of the objfile. New global types
2619 will be created for every convenience variable which currently points to
2620 this objfile's types, and the convenience variables will be adjusted to
2621 use the new global types. */
2622
2623 void
2624 preserve_values (struct objfile *objfile)
2625 {
2626 struct internalvar *var;
2627
2628 /* Create the hash table. We allocate on the objfile's obstack, since
2629 it is soon to be deleted. */
2630 htab_up copied_types = create_copied_types_hash ();
2631
2632 for (const value_ref_ptr &item : value_history)
2633 preserve_one_value (item.get (), objfile, copied_types.get ());
2634
2635 for (var = internalvars; var; var = var->next)
2636 preserve_one_internalvar (var, objfile, copied_types.get ());
2637
2638 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2639 all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj)
2640 {
2641 preserve_one_varobj (varobj, objfile,
2642 copied_types.get ());
2643 });
2644
2645 preserve_ext_lang_values (objfile, copied_types.get ());
2646 }
2647
2648 static void
2649 show_convenience (const char *ignore, int from_tty)
2650 {
2651 struct gdbarch *gdbarch = get_current_arch ();
2652 struct internalvar *var;
2653 int varseen = 0;
2654 struct value_print_options opts;
2655
2656 get_user_print_options (&opts);
2657 for (var = internalvars; var; var = var->next)
2658 {
2659
2660 if (!varseen)
2661 {
2662 varseen = 1;
2663 }
2664 gdb_printf (("$%s = "), var->name);
2665
2666 try
2667 {
2668 struct value *val;
2669
2670 val = value_of_internalvar (gdbarch, var);
2671 value_print (val, gdb_stdout, &opts);
2672 }
2673 catch (const gdb_exception_error &ex)
2674 {
2675 fprintf_styled (gdb_stdout, metadata_style.style (),
2676 _("<error: %s>"), ex.what ());
2677 }
2678
2679 gdb_printf (("\n"));
2680 }
2681 if (!varseen)
2682 {
2683 /* This text does not mention convenience functions on purpose.
2684 The user can't create them except via Python, and if Python support
2685 is installed this message will never be printed ($_streq will
2686 exist). */
2687 gdb_printf (_("No debugger convenience variables now defined.\n"
2688 "Convenience variables have "
2689 "names starting with \"$\";\n"
2690 "use \"set\" as in \"set "
2691 "$foo = 5\" to define them.\n"));
2692 }
2693 }
2694 \f
2695
2696 /* See value.h. */
2697
2698 struct value *
2699 value_from_xmethod (xmethod_worker_up &&worker)
2700 {
2701 struct value *v;
2702
2703 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2704 v->m_lval = lval_xcallable;
2705 v->m_location.xm_worker = worker.release ();
2706 v->m_modifiable = 0;
2707
2708 return v;
2709 }
2710
2711 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2712
struct type *
result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
{
  /* ARGV[0] is the object the method is invoked on; the remaining
     elements are the method's arguments.  */
  gdb_assert (method->type ()->code () == TYPE_CODE_XMETHOD
	      && method->m_lval == lval_xcallable && !argv.empty ());

  return method->m_location.xm_worker->get_result_type (argv[0], argv.slice (1));
}
2721
2722 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2723
struct value *
call_xmethod (struct value *method, gdb::array_view<value *> argv)
{
  /* ARGV[0] is the object the method is invoked on; the remaining
     elements are the method's arguments.  */
  gdb_assert (method->type ()->code () == TYPE_CODE_XMETHOD
	      && method->m_lval == lval_xcallable && !argv.empty ());

  return method->m_location.xm_worker->invoke (argv[0], argv.slice (1));
}
2732 \f
2733 /* Extract a value as a C number (either long or double).
2734 Knows how to convert fixed values to double, or
2735 floating values to long.
2736 Does not deallocate the value. */
2737
2738 LONGEST
2739 value_as_long (struct value *val)
2740 {
2741 /* This coerces arrays and functions, which is necessary (e.g.
2742 in disassemble_command). It also dereferences references, which
2743 I suspect is the most logical thing to do. */
2744 val = coerce_array (val);
2745 return unpack_long (val->type (), value_contents (val).data ());
2746 }
2747
2748 /* Extract a value as a C pointer. Does not deallocate the value.
2749 Note that val's type may not actually be a pointer; value_as_long
2750 handles all the cases. */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = val->type ()->arch ();

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (val->type ()->code () == TYPE_CODE_FUNC
      || val->type ()->code () == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Let the architecture decide how a non-pointer integer maps to an
     address, when it provides a hook for that.  */
  if (!val->type ()->is_pointer_or_reference ()
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, val->type (),
				       value_contents (val).data ());

  return unpack_long (val->type (), value_contents (val).data ());
#endif
}
2853 \f
2854 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2855 as a long, or as a double, assuming the raw data is described
2856 by type TYPE. Knows how to convert different sizes of values
2857 and can convert between fixed and floating point. We don't assume
2858 any alignment for the raw data. Return value is in host byte order.
2859
2860 If you want functions and arrays to be coerced to pointers, and
2861 references to be dereferenced, call value_as_long() instead.
2862
2863 C++: It is assumed that the front-end has taken care of
2864 all matters concerning pointers to members. A pointer
2865 to member which reaches here is considered to be equivalent
2866 to an INT (or some size). After all, it is only an offset. */
2867
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  /* A fixed-point value is unpacked via its underlying base type.  */
  if (is_fixed_point_type (type))
    type = type->fixed_point_type_base_type ();

  enum bfd_endian byte_order = type_byte_order (type);
  enum type_code code = type->code ();
  int len = type->length ();
  int nosign = type->is_unsigned ();

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Strip the typedef and retry with the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      {
	LONGEST result;

	/* Types whose logical bit size differs from their storage
	   size are extracted bitwise.  */
	if (type->bit_size_differs_p ())
	  {
	    unsigned bit_off = type->bit_offset ();
	    unsigned bit_size = type->bit_size ();
	    if (bit_size == 0)
	      {
		/* unpack_bits_as_long doesn't handle this case the
		   way we'd like, so handle it here.  */
		result = 0;
	      }
	    else
	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
	  }
	else
	  {
	    if (nosign)
	      result = extract_unsigned_integer (valaddr, len, byte_order);
	    else
	      result = extract_signed_integer (valaddr, len, byte_order);
	  }
	/* A range type stores values relative to its bias.  */
	if (code == TYPE_CODE_RANGE)
	  result += type->bounds ()->bias;
	return result;
      }

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return target_float_to_longest (valaddr, type);

    case TYPE_CODE_FIXED_POINT:
      {
	gdb_mpq vq;
	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
			     byte_order, nosign,
			     type->fixed_point_scaling_factor ());

	/* Truncate the scaled rational toward zero to get an
	   integer.  */
	gdb_mpz vz;
	mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
	return vz.as_integer<LONGEST> ();
      }

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
}
2945
2946 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2947 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2948 We don't assume any alignment for the raw data. Return value is in
2949 host byte order.
2950
2951 If you want functions and arrays to be coerced to pointers, and
2952 references to be dereferenced, call value_as_address() instead.
2953
2954 C++: It is assumed that the front-end has taken care of
2955 all matters concerning pointers to members. A pointer
2956 to member which reaches here is considered to be equivalent
2957 to an INT (or some size). After all, it is only an offset. */
2958
CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
  /* This is simply a narrowing wrapper around unpack_long.  */
  return unpack_long (type, valaddr);
}
2966
2967 bool
2968 is_floating_value (struct value *val)
2969 {
2970 struct type *type = check_typedef (val->type ());
2971
2972 if (is_floating_type (type))
2973 {
2974 if (!target_float_is_valid (value_contents (val).data (), type))
2975 error (_("Invalid floating value found in program."));
2976 return true;
2977 }
2978
2979 return false;
2980 }
2981
2982 \f
2983 /* Get the value of the FIELDNO'th field (which must be static) of
2984 TYPE. */
2985
struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (type->field (fieldno).loc_kind ())
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The field's address is recorded directly in the type.  */
      retval = value_at_lazy (type->field (fieldno).type (),
			      type->field (fieldno).loc_physaddr ());
      break;
    case FIELD_LOC_KIND_PHYSNAME:
      {
	/* The field is recorded by its linkage name; look the symbol
	   up, falling back to minimal symbols for compilers that emit
	   static members as non-debuggable symbols.  */
	const char *phys_name = type->field (fieldno).loc_physname ();
	/* type->field (fieldno).name (); */
	struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

	if (sym.symbol == NULL)
	  {
	    /* With some compilers, e.g. HP aCC, static data members are
	       reported as non-debuggable symbols.  */
	    struct bound_minimal_symbol msym
	      = lookup_minimal_symbol (phys_name, NULL, NULL);
	    struct type *field_type = type->field (fieldno).type ();

	    if (!msym.minsym)
	      retval = allocate_optimized_out_value (field_type);
	    else
	      retval = value_at_lazy (field_type, msym.value_address ());
	  }
	else
	  retval = value_of_variable (sym.symbol, sym.block);
	break;
      }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
3026
3027 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3028 You have to be careful here, since the size of the data area for the value
3029 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3030 than the old enclosing type, you have to allocate more space for the
3031 data. */
3032
3033 void
3034 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3035 {
3036 if (new_encl_type->length () > value_enclosing_type (val)->length ())
3037 {
3038 check_type_length_before_alloc (new_encl_type);
3039 val->m_contents
3040 .reset ((gdb_byte *) xrealloc (val->m_contents.release (),
3041 new_encl_type->length ()));
3042 }
3043
3044 val->m_enclosing_type = new_encl_type;
3045 }
3046
3047 /* Given a value ARG1 (offset by OFFSET bytes)
3048 of a struct or union type ARG_TYPE,
3049 extract and return the value of one of its (non-static) fields.
3050 FIELDNO says which field. */
3051
struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  /* The result value; filled in by one of the four field categories
     below (bitfield, base subobject, dynamic member, plain data).  */
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = arg_type->field (fieldno).type ();

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
      LONGEST container_bitsize = type->length () * 8;

      v = allocate_value_lazy (type);
      v->m_bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->m_bitsize <= container_bitsize
	  && type->length () <= (int) sizeof (LONGEST))
	v->m_bitpos = bitpos % container_bitsize;
      else
	v->m_bitpos = bitpos % 8;
      v->m_offset = (value_embedded_offset (arg1)
		     + offset
		     + (bitpos - v->m_bitpos) / 8);
      set_value_parent (v, arg1);
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1).data (),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = arg_type->field (fieldno).loc_bitpos () / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   value_enclosing_type (arg1)->length ());
	}
      v->m_type = type;
      v->m_offset = value_offset (arg1);
      v->m_embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (arg_type->field (fieldno).loc_bitpos ()
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->m_offset = (value_offset (arg1) + offset
		     + value_embedded_offset (arg1));
    }
  set_value_component_location (v, arg1);
  return v;
}
3171
3172 /* Given a value ARG1 of a struct or union type,
3173 extract and return the value of one of its (non-static) fields.
3174 FIELDNO says which field. */
3175
3176 struct value *
3177 value_field (struct value *arg1, int fieldno)
3178 {
3179 return value_primitive_field (arg1, 0, fieldno, arg1->type ());
3180 }
3181
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Prefer a full symbol; fall back to a minimal symbol, and give up
     (return NULL) only if neither can be found.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym == nullptr)
    {
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  VALUE_LVAL (v) = lval_memory;
  if (sym)
    {
      /* Full symbol: the block's entry PC is the function's address.  */
      set_value_address (v, sym->value_block ()->entry_pc ());
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = objfile->arch ();

      set_value_address (v,
	gdbarch_convert_from_func_ptr_addr
	   (gdbarch, msym.value_address (),
	    current_inferior ()->top_target ()));
    }

  if (arg1p)
    {
      /* Re-view the object through TYPE if necessary so the `this'
	 pointer matches the class that declares the method.  */
      if (type != (*arg1p)->type ())
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 VALUE_OFFSET (*arg1p) += offset;  */
    }

  return v;
}
3239
3240 \f
3241
/* See value.h.  */

LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (field_type);
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      /* A BITSIZE of zero means "not a bitfield": read the whole
	 underlying type.  */
      bytes_read = field_type->length ();
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  /* LSBCOUNT is how many bits sit below the field's least significant
     bit within the bytes just read; it depends on byte order.  */
  if (byte_order == BFD_ENDIAN_BIG)
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!field_type->is_unsigned ())
	{
	  /* (valmask ^ (valmask >> 1)) isolates the field's own sign
	     bit; if set, fill all the bits above the field.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3297
3298 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3299 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3300 ORIGINAL_VALUE, which must not be NULL. See
3301 unpack_value_bits_as_long for more details. */
3302
3303 int
3304 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3305 LONGEST embedded_offset, int fieldno,
3306 const struct value *val, LONGEST *result)
3307 {
3308 int bitpos = type->field (fieldno).loc_bitpos ();
3309 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3310 struct type *field_type = type->field (fieldno).type ();
3311 int bit_offset;
3312
3313 gdb_assert (val != NULL);
3314
3315 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3316 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3317 || !value_bits_available (val, bit_offset, bitsize))
3318 return 0;
3319
3320 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3321 bitpos, bitsize);
3322 return 1;
3323 }
3324
3325 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3326 object at VALADDR. See unpack_bits_as_long for more details. */
3327
3328 LONGEST
3329 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3330 {
3331 int bitpos = type->field (fieldno).loc_bitpos ();
3332 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3333 struct type *field_type = type->field (fieldno).type ();
3334
3335 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3336 }
3337
/* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
   VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
   the contents in DEST_VAL, zero or sign extending if the type of
   DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
   VAL.  If the VAL's contents required to extract the bitfield from
   are unavailable/optimized out, DEST_VAL is correspondingly
   marked unavailable/optimized out.  */

void
unpack_value_bitfield (struct value *dest_val,
		       LONGEST bitpos, LONGEST bitsize,
		       const gdb_byte *valaddr, LONGEST embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = dest_val->type ();

  byte_order = type_byte_order (field_type);

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See allocate_optimized_out_value.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (value_contents_raw (dest_val).data (),
			    field_type->length (), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  /* On big-endian targets the unpacked field occupies the low-order
     end of DEST_VAL's buffer, so the ranges must land there too.  */
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
3384
3385 /* Return a new value with type TYPE, which is FIELDNO field of the
3386 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3387 of VAL. If the VAL's contents required to extract the bitfield
3388 from are unavailable/optimized out, the new value is
3389 correspondingly marked unavailable/optimized out. */
3390
3391 struct value *
3392 value_field_bitfield (struct type *type, int fieldno,
3393 const gdb_byte *valaddr,
3394 LONGEST embedded_offset, const struct value *val)
3395 {
3396 int bitpos = type->field (fieldno).loc_bitpos ();
3397 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3398 struct value *res_val = allocate_value (type->field (fieldno).type ());
3399
3400 unpack_value_bitfield (res_val, bitpos, bitsize,
3401 valaddr, embedded_offset, val);
3402
3403 return res_val;
3404 }
3405
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  /* MASK has the field's low BITSIZE bits set.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Read-modify-write: clear the field's bits, then OR in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3457 \f
/* Pack NUM into BUF using a target format of TYPE.  */

void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = type_byte_order (type);
  LONGEST len;

  type = check_typedef (type);
  len = type->length ();

  switch (type->code ())
    {
    case TYPE_CODE_RANGE:
      /* Range types can carry a bias; remove it before storing.  */
      num -= type->bounds ()->bias;
      /* Fall through.  */
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_MEMBERPTR:
      /* Types whose bit size differs from their byte size (e.g. a
	 field narrower than its storage unit) need the value masked
	 and shifted into position first.  */
      if (type->bit_size_differs_p ())
	{
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     type->code ());
    }
}
3506
3507
/* Pack NUM into BUF using a target format of TYPE.  Unsigned
   counterpart of pack_long; note that TYPE_CODE_RANGE is handled here
   without bias removal.  */

static void
pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
{
  LONGEST len;
  enum bfd_endian byte_order;

  type = check_typedef (type);
  len = type->length ();
  byte_order = type_byte_order (type);

  switch (type->code ())
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      /* Narrow-bit types: mask to the declared bit size and shift
	 into the storage position.  */
      if (type->bit_size_differs_p ())
	{
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_unsigned_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_ulongest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered "
	       "for unsigned integer constant."),
	     type->code ());
    }
}
3556
3557
3558 /* Create a value of type TYPE that is zero, and return it. */
3559
3560 struct value *
3561 value_zero (struct type *type, enum lval_type lv)
3562 {
3563 struct value *val = allocate_value_lazy (type);
3564
3565 VALUE_LVAL (val) = (lv == lval_computed ? not_lval : lv);
3566 val->m_is_zero = true;
3567 return val;
3568 }
3569
3570 /* Convert C numbers into newly allocated values. */
3571
3572 struct value *
3573 value_from_longest (struct type *type, LONGEST num)
3574 {
3575 struct value *val = allocate_value (type);
3576
3577 pack_long (value_contents_raw (val).data (), type, num);
3578 return val;
3579 }
3580
3581
3582 /* Convert C unsigned numbers into newly allocated values. */
3583
3584 struct value *
3585 value_from_ulongest (struct type *type, ULONGEST num)
3586 {
3587 struct value *val = allocate_value (type);
3588
3589 pack_unsigned_long (value_contents_raw (val).data (), type, num);
3590
3591 return val;
3592 }
3593
3594
3595 /* Create a value representing a pointer of type TYPE to the address
3596 ADDR. */
3597
3598 struct value *
3599 value_from_pointer (struct type *type, CORE_ADDR addr)
3600 {
3601 struct value *val = allocate_value (type);
3602
3603 store_typed_address (value_contents_raw (val).data (),
3604 check_typedef (type), addr);
3605 return val;
3606 }
3607
3608 /* Create and return a value object of TYPE containing the value D. The
3609 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3610 it is converted to target format. */
3611
3612 struct value *
3613 value_from_host_double (struct type *type, double d)
3614 {
3615 struct value *value = allocate_value (type);
3616 gdb_assert (type->code () == TYPE_CODE_FLT);
3617 target_float_from_host_double (value_contents_raw (value).data (),
3618 value->type (), d);
3619 return value;
3620 }
3621
3622 /* Create a value of type TYPE whose contents come from VALADDR, if it
3623 is non-null, and whose memory address (in the inferior) is
3624 ADDRESS. The type of the created value may differ from the passed
3625 type TYPE. Make sure to retrieve values new type after this call.
3626 Note that TYPE is not passed through resolve_dynamic_type; this is
3627 a special API intended for use only by Ada. */
3628
3629 struct value *
3630 value_from_contents_and_address_unresolved (struct type *type,
3631 const gdb_byte *valaddr,
3632 CORE_ADDR address)
3633 {
3634 struct value *v;
3635
3636 if (valaddr == NULL)
3637 v = allocate_value_lazy (type);
3638 else
3639 v = value_from_contents (type, valaddr);
3640 VALUE_LVAL (v) = lval_memory;
3641 set_value_address (v, address);
3642 return v;
3643 }
3644
/* Create a value of type TYPE whose contents come from VALADDR, if it
   is non-null, and whose memory address (in the inferior) is
   ADDRESS.  The type of the created value may differ from the passed
   type TYPE.  Make sure to retrieve values new type after this call.  */

struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address)
{
  /* An empty view tells resolve_dynamic_type to work from memory.  */
  gdb::array_view<const gdb_byte> view;
  if (valaddr != nullptr)
    view = gdb::make_array_view (valaddr, type->length ());
  struct type *resolved_type = resolve_dynamic_type (type, view, address);
  struct type *resolved_type_no_typedef = check_typedef (resolved_type);
  struct value *v;

  if (valaddr == NULL)
    v = allocate_value_lazy (resolved_type);
  else
    v = value_from_contents (resolved_type, valaddr);
  /* A resolved constant data location overrides the caller-supplied
     address.  */
  if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
      && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
    address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
  VALUE_LVAL (v) = lval_memory;
  set_value_address (v, address);
  return v;
}
3673
3674 /* Create a value of type TYPE holding the contents CONTENTS.
3675 The new value is `not_lval'. */
3676
3677 struct value *
3678 value_from_contents (struct type *type, const gdb_byte *contents)
3679 {
3680 struct value *result;
3681
3682 result = allocate_value (type);
3683 memcpy (value_contents_raw (result).data (), contents, type->length ());
3684 return result;
3685 }
3686
/* Extract a value from the history file.  Input will be of the form
   $digits or $$digits.  See block comment above 'write_dollar_variable'
   for details.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index, len;

  /* LEN counts the "$" prefix characters; a reference must start
     with at least one.  */
  if (h[0] == '$')
    len = 1;
  else
    return NULL;

  if (h[1] == '$')
    len = 2;

  /* Find length of numeral string.  */
  for (; isdigit (h[len]); len++)
    ;

  /* Make sure numeral string is not part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Now collect the index value.  $$N counts backwards from the end
     of the history, hence the negated index.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = -strtol (&h[2], &local_end, 10);
	  *endp = local_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *local_end;

	  index = strtol (&h[1], &local_end, 10);
	  *endp = local_end;
	}
    }

  return access_value_history (index);
}
3749
3750 /* Get the component value (offset by OFFSET bytes) of a struct or
3751 union WHOLE. Component's type is TYPE. */
3752
3753 struct value *
3754 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3755 {
3756 struct value *v;
3757
3758 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3759 v = allocate_value_lazy (type);
3760 else
3761 {
3762 v = allocate_value (type);
3763 value_contents_copy (v, value_embedded_offset (v),
3764 whole, value_embedded_offset (whole) + offset,
3765 type_length_units (type));
3766 }
3767 v->m_offset = value_offset (whole) + offset + value_embedded_offset (whole);
3768 set_value_component_location (v, whole);
3769
3770 return v;
3771 }
3772
/* See value.h.  */

struct value *
value_from_component_bitsize (struct value *whole, struct type *type,
			      LONGEST bit_offset, LONGEST bit_length)
{
  gdb_assert (!value_lazy (whole));

  /* Preserve lvalue-ness if possible.  This is needed to avoid
     array-printing failures (including crashes) when printing Ada
     arrays in programs compiled with -fgnat-encodings=all.  */
  if ((bit_offset % TARGET_CHAR_BIT) == 0
      && (bit_length % TARGET_CHAR_BIT) == 0
      && bit_length == TARGET_CHAR_BIT * type->length ())
    return value_from_component (whole, type, bit_offset / TARGET_CHAR_BIT);

  struct value *v = allocate_value (type);

  LONGEST dst_offset = TARGET_CHAR_BIT * value_embedded_offset (v);
  /* On big-endian scalars the significant bits sit at the high end of
     the destination, so shift the copy target accordingly.  */
  if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG)
    dst_offset += TARGET_CHAR_BIT * type->length () - bit_length;

  value_contents_copy_raw_bitwise (v, dst_offset,
				   whole,
				   TARGET_CHAR_BIT
				   * value_embedded_offset (whole)
				   + bit_offset,
				   bit_length);
  return v;
}
3803
3804 struct value *
3805 coerce_ref_if_computed (const struct value *arg)
3806 {
3807 const struct lval_funcs *funcs;
3808
3809 if (!TYPE_IS_REFERENCE (check_typedef (arg->type ())))
3810 return NULL;
3811
3812 if (value_lval_const (arg) != lval_computed)
3813 return NULL;
3814
3815 funcs = value_computed_funcs (arg);
3816 if (funcs->coerce_ref == NULL)
3817 return NULL;
3818
3819 return funcs->coerce_ref (arg);
3820 }
3821
/* Look at value.h for description.  */

struct value *
readjust_indirect_value_type (struct value *value, struct type *enc_type,
			      const struct type *original_type,
			      struct value *original_value,
			      CORE_ADDR original_value_address)
{
  gdb_assert (original_type->is_pointer_or_reference ());

  struct type *original_target_type = original_type->target_type ();
  /* VIEW is left empty so resolution reads from the target address.  */
  gdb::array_view<const gdb_byte> view;
  struct type *resolved_original_target_type
    = resolve_dynamic_type (original_target_type, view,
			    original_value_address);

  /* Re-adjust type.  */
  deprecated_set_value_type (value, resolved_original_target_type);

  /* Add embedding info.  */
  set_value_enclosing_type (value, enc_type);
  set_value_embedded_offset (value, value_pointed_to_offset (original_value));

  /* We may be pointing to an object of some derived type.  */
  return value_full_object (value, NULL, 0, 0, 0);
}
3848
/* If ARG is a reference, return the value it refers to (fetched
   lazily); otherwise return ARG unchanged.  */

struct value *
coerce_ref (struct value *arg)
{
  struct type *value_type_arg_tmp = check_typedef (arg->type ());
  struct value *retval;
  struct type *enc_type;

  /* Computed lvalues may supply their own dereference method.  */
  retval = coerce_ref_if_computed (arg);
  if (retval)
    return retval;

  if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
    return arg;

  /* The enclosing type's target is the type of the referenced
     object, possibly wider than the declared target type.  */
  enc_type = check_typedef (value_enclosing_type (arg));
  enc_type = enc_type->target_type ();

  CORE_ADDR addr = unpack_pointer (arg->type (), value_contents (arg).data ());
  retval = value_at_lazy (enc_type, addr);
  enc_type = retval->type ();
  return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
				       arg, addr);
}
3872
3873 struct value *
3874 coerce_array (struct value *arg)
3875 {
3876 struct type *type;
3877
3878 arg = coerce_ref (arg);
3879 type = check_typedef (arg->type ());
3880
3881 switch (type->code ())
3882 {
3883 case TYPE_CODE_ARRAY:
3884 if (!type->is_vector () && current_language->c_style_arrays_p ())
3885 arg = value_coerce_array (arg);
3886 break;
3887 case TYPE_CODE_FUNC:
3888 arg = value_coerce_function (arg);
3889 break;
3890 }
3891 return arg;
3892 }
3893 \f
3894
3895 /* Return the return value convention that will be used for the
3896 specified type. */
3897
3898 enum return_value_convention
3899 struct_return_convention (struct gdbarch *gdbarch,
3900 struct value *function, struct type *value_type)
3901 {
3902 enum type_code code = value_type->code ();
3903
3904 if (code == TYPE_CODE_ERROR)
3905 error (_("Function return type unknown."));
3906
3907 /* Probe the architecture for the return-value convention. */
3908 return gdbarch_return_value_as_value (gdbarch, function, value_type,
3909 NULL, NULL, NULL);
3910 }
3911
3912 /* Return true if the function returning the specified type is using
3913 the convention of returning structures in memory (passing in the
3914 address as a hidden first parameter). */
3915
3916 int
3917 using_struct_return (struct gdbarch *gdbarch,
3918 struct value *function, struct type *value_type)
3919 {
3920 if (value_type->code () == TYPE_CODE_VOID)
3921 /* A void return value is never in memory. See also corresponding
3922 code in "print_return_value". */
3923 return 0;
3924
3925 return (struct_return_convention (gdbarch, function, value_type)
3926 != RETURN_VALUE_REGISTER_CONVENTION);
3927 }
3928
/* Set the initialized field in a value struct.  STATUS is stored
   as-is into VAL's m_initialized flag.  */

void
set_value_initialized (struct value *val, int status)
{
  val->m_initialized = status;
}
3936
/* Return the initialized field in a value struct, as previously set
   by set_value_initialized.  */

int
value_initialized (const struct value *val)
{
  return val->m_initialized;
}
3944
/* Helper for value_fetch_lazy when the value is a bitfield.  */

static void
value_fetch_lazy_bitfield (struct value *val)
{
  gdb_assert (value_bitsize (val) != 0);

  /* To read a lazy bitfield, read the entire enclosing value.  This
     prevents reading the same block of (possibly volatile) memory once
     per bitfield.  It would be even better to read only the containing
     word, but we have no way to record that just specific bits of a
     value have been fetched.  */
  struct value *parent = value_parent (val);

  if (value_lazy (parent))
    value_fetch_lazy (parent);

  /* Extract VAL's bits out of the now-fetched parent's contents.  */
  unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
			 value_contents_for_printing (parent).data (),
			 value_offset (val), parent);
}
3966
/* Helper for value_fetch_lazy when the value is in memory.  */

static void
value_fetch_lazy_memory (struct value *val)
{
  gdb_assert (VALUE_LVAL (val) == lval_memory);

  CORE_ADDR addr = value_address (val);
  struct type *type = check_typedef (value_enclosing_type (val));

  /* Figure out how much we should copy from memory.  Usually, this is just
     the size of the type, but, for arrays, we might only be loading a
     small part of the array (this is only done for very large arrays).  */
  int len = 0;
  if (val->m_limited_length > 0)
    {
      gdb_assert (val->type ()->code () == TYPE_CODE_ARRAY);
      len = val->m_limited_length;
    }
  else if (type->length () > 0)
    len = type_length_units (type);

  gdb_assert (len >= 0);

  /* A zero-length value is marked fetched without touching memory;
     calling read_value_memory with LEN == 0 is avoided.  */
  if (len > 0)
    read_value_memory (val, 0, value_stack (val), addr,
		       value_contents_all_raw (val).data (), len);
}
3995
/* Helper for value_fetch_lazy when the value is in a register.  */

static void
value_fetch_lazy_register (struct value *val)
{
  frame_info_ptr next_frame;
  int regnum;
  struct type *type = check_typedef (val->type ());
  struct value *new_val = val, *mark = value_mark ();

  /* Offsets are not supported here; lazy register values must
     refer to the entire register.  */
  gdb_assert (value_offset (val) == 0);

  /* Follow the chain of lazy register values frame by frame until we
     reach a non-lazy value or a value not backed by a register.  */
  while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
    {
      struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);

      next_frame = frame_find_by_id (next_frame_id);
      regnum = VALUE_REGNUM (new_val);

      gdb_assert (next_frame != NULL);

      /* Convertible register routines are used for multi-register
	 values and for interpretation in different types
	 (e.g. float or int from a double register).  Lazy
	 register values should have the register's natural type,
	 so they do not apply.  */
      gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
					       regnum, type));

      /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
	 Since a "->next" operation was performed when setting
	 this field, we do not need to perform a "next" operation
	 again when unwinding the register.  That's why
	 frame_unwind_register_value() is called here instead of
	 get_frame_register_value().  */
      new_val = frame_unwind_register_value (next_frame, regnum);

      /* If we get another lazy lval_register value, it means the
	 register is found by reading it from NEXT_FRAME's next frame.
	 frame_unwind_register_value should never return a value with
	 the frame id pointing to NEXT_FRAME.  If it does, it means we
	 either have two consecutive frames with the same frame id
	 in the frame chain, or some code is trying to unwind
	 behind get_prev_frame's back (e.g., a frame unwind
	 sniffer trying to unwind), bypassing its validations.  In
	 any case, it should always be an internal error to end up
	 in this situation.  */
      if (VALUE_LVAL (new_val) == lval_register
	  && value_lazy (new_val)
	  && VALUE_NEXT_FRAME_ID (new_val) == next_frame_id)
	internal_error (_("infinite loop while fetching a register"));
    }

  /* If it's still lazy (for instance, a saved register on the
     stack), fetch it.  */
  if (value_lazy (new_val))
    value_fetch_lazy (new_val);

  /* Copy the contents and the unavailability/optimized-out
     meta-data from NEW_VAL to VAL.  */
  set_value_lazy (val, 0);
  value_contents_copy (val, value_embedded_offset (val),
		       new_val, value_embedded_offset (new_val),
		       type_length_units (type));

  /* Emit a trace of where the register's value came from when frame
     debugging is enabled.  */
  if (frame_debug)
    {
      struct gdbarch *gdbarch;
      frame_info_ptr frame;
      frame = frame_find_by_id (VALUE_NEXT_FRAME_ID (val));
      frame = get_prev_frame_always (frame);
      regnum = VALUE_REGNUM (val);
      gdbarch = get_frame_arch (frame);

      string_file debug_file;
      gdb_printf (&debug_file,
		  "(frame=%d, regnum=%d(%s), ...) ",
		  frame_relative_level (frame), regnum,
		  user_reg_map_regnum_to_name (gdbarch, regnum));

      gdb_printf (&debug_file, "->");
      if (value_optimized_out (new_val))
	{
	  gdb_printf (&debug_file, " ");
	  val_print_optimized_out (new_val, &debug_file);
	}
      else
	{
	  int i;
	  gdb::array_view<const gdb_byte> buf = value_contents (new_val);

	  if (VALUE_LVAL (new_val) == lval_register)
	    gdb_printf (&debug_file, " register=%d",
			VALUE_REGNUM (new_val));
	  else if (VALUE_LVAL (new_val) == lval_memory)
	    gdb_printf (&debug_file, " address=%s",
			paddress (gdbarch,
				  value_address (new_val)));
	  else
	    gdb_printf (&debug_file, " computed");

	  gdb_printf (&debug_file, " bytes=");
	  gdb_printf (&debug_file, "[");
	  for (i = 0; i < register_size (gdbarch, regnum); i++)
	    gdb_printf (&debug_file, "%02x", buf[i]);
	  gdb_printf (&debug_file, "]");
	}

      frame_debug_printf ("%s", debug_file.c_str ());
    }

  /* Dispose of the intermediate values.  This prevents
     watchpoints from trying to watch the saved frame pointer.  */
  value_free_to_mark (mark);
}
4113
/* Load the actual content of a lazy value.  Fetch the data from the
   user's process and clear the lazy flag to indicate that the data in
   the buffer is valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.  */

void
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val, true);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (val->m_optimized_out.empty ());
  gdb_assert (val->m_unavailable.empty ());
  /* Dispatch on the value's location kind; bitfields are handled
     first since they fetch through their parent value.  */
  if (val->m_is_zero)
    {
      /* Nothing.  */
    }
  else if (value_bitsize (val))
    value_fetch_lazy_bitfield (val);
  else if (VALUE_LVAL (val) == lval_memory)
    value_fetch_lazy_memory (val);
  else if (VALUE_LVAL (val) == lval_register)
    value_fetch_lazy_register (val);
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (_("Unexpected lazy value type."));

  set_value_lazy (val, 0);
}
4150
4151 /* Implementation of the convenience function $_isvoid. */
4152
4153 static struct value *
4154 isvoid_internal_fn (struct gdbarch *gdbarch,
4155 const struct language_defn *language,
4156 void *cookie, int argc, struct value **argv)
4157 {
4158 int ret;
4159
4160 if (argc != 1)
4161 error (_("You must provide one argument for $_isvoid."));
4162
4163 ret = argv[0]->type ()->code () == TYPE_CODE_VOID;
4164
4165 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4166 }
4167
4168 /* Implementation of the convenience function $_creal. Extracts the
4169 real part from a complex number. */
4170
4171 static struct value *
4172 creal_internal_fn (struct gdbarch *gdbarch,
4173 const struct language_defn *language,
4174 void *cookie, int argc, struct value **argv)
4175 {
4176 if (argc != 1)
4177 error (_("You must provide one argument for $_creal."));
4178
4179 value *cval = argv[0];
4180 type *ctype = check_typedef (cval->type ());
4181 if (ctype->code () != TYPE_CODE_COMPLEX)
4182 error (_("expected a complex number"));
4183 return value_real_part (cval);
4184 }
4185
4186 /* Implementation of the convenience function $_cimag. Extracts the
4187 imaginary part from a complex number. */
4188
4189 static struct value *
4190 cimag_internal_fn (struct gdbarch *gdbarch,
4191 const struct language_defn *language,
4192 void *cookie, int argc,
4193 struct value **argv)
4194 {
4195 if (argc != 1)
4196 error (_("You must provide one argument for $_cimag."));
4197
4198 value *cval = argv[0];
4199 type *ctype = check_typedef (cval->type ());
4200 if (ctype->code () != TYPE_CODE_COMPLEX)
4201 error (_("expected a complex number"));
4202 return value_imaginary_part (cval);
4203 }
4204
4205 #if GDB_SELF_TEST
4206 namespace selftests
4207 {
4208
4209 /* Test the ranges_contain function. */
4210
4211 static void
4212 test_ranges_contain ()
4213 {
4214 std::vector<range> ranges;
4215 range r;
4216
4217 /* [10, 14] */
4218 r.offset = 10;
4219 r.length = 5;
4220 ranges.push_back (r);
4221
4222 /* [20, 24] */
4223 r.offset = 20;
4224 r.length = 5;
4225 ranges.push_back (r);
4226
4227 /* [2, 6] */
4228 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4229 /* [9, 13] */
4230 SELF_CHECK (ranges_contain (ranges, 9, 5));
4231 /* [10, 11] */
4232 SELF_CHECK (ranges_contain (ranges, 10, 2));
4233 /* [10, 14] */
4234 SELF_CHECK (ranges_contain (ranges, 10, 5));
4235 /* [13, 18] */
4236 SELF_CHECK (ranges_contain (ranges, 13, 6));
4237 /* [14, 18] */
4238 SELF_CHECK (ranges_contain (ranges, 14, 5));
4239 /* [15, 18] */
4240 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4241 /* [16, 19] */
4242 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4243 /* [16, 21] */
4244 SELF_CHECK (ranges_contain (ranges, 16, 6));
4245 /* [21, 21] */
4246 SELF_CHECK (ranges_contain (ranges, 21, 1));
4247 /* [21, 25] */
4248 SELF_CHECK (ranges_contain (ranges, 21, 5));
4249 /* [26, 28] */
4250 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4251 }
4252
4253 /* Check that RANGES contains the same ranges as EXPECTED. */
4254
4255 static bool
4256 check_ranges_vector (gdb::array_view<const range> ranges,
4257 gdb::array_view<const range> expected)
4258 {
4259 return ranges == expected;
4260 }
4261
4262 /* Test the insert_into_bit_range_vector function. */
4263
4264 static void
4265 test_insert_into_bit_range_vector ()
4266 {
4267 std::vector<range> ranges;
4268
4269 /* [10, 14] */
4270 {
4271 insert_into_bit_range_vector (&ranges, 10, 5);
4272 static const range expected[] = {
4273 {10, 5}
4274 };
4275 SELF_CHECK (check_ranges_vector (ranges, expected));
4276 }
4277
4278 /* [10, 14] */
4279 {
4280 insert_into_bit_range_vector (&ranges, 11, 4);
4281 static const range expected = {10, 5};
4282 SELF_CHECK (check_ranges_vector (ranges, expected));
4283 }
4284
4285 /* [10, 14] [20, 24] */
4286 {
4287 insert_into_bit_range_vector (&ranges, 20, 5);
4288 static const range expected[] = {
4289 {10, 5},
4290 {20, 5},
4291 };
4292 SELF_CHECK (check_ranges_vector (ranges, expected));
4293 }
4294
4295 /* [10, 14] [17, 24] */
4296 {
4297 insert_into_bit_range_vector (&ranges, 17, 5);
4298 static const range expected[] = {
4299 {10, 5},
4300 {17, 8},
4301 };
4302 SELF_CHECK (check_ranges_vector (ranges, expected));
4303 }
4304
4305 /* [2, 8] [10, 14] [17, 24] */
4306 {
4307 insert_into_bit_range_vector (&ranges, 2, 7);
4308 static const range expected[] = {
4309 {2, 7},
4310 {10, 5},
4311 {17, 8},
4312 };
4313 SELF_CHECK (check_ranges_vector (ranges, expected));
4314 }
4315
4316 /* [2, 14] [17, 24] */
4317 {
4318 insert_into_bit_range_vector (&ranges, 9, 1);
4319 static const range expected[] = {
4320 {2, 13},
4321 {17, 8},
4322 };
4323 SELF_CHECK (check_ranges_vector (ranges, expected));
4324 }
4325
4326 /* [2, 14] [17, 24] */
4327 {
4328 insert_into_bit_range_vector (&ranges, 9, 1);
4329 static const range expected[] = {
4330 {2, 13},
4331 {17, 8},
4332 };
4333 SELF_CHECK (check_ranges_vector (ranges, expected));
4334 }
4335
4336 /* [2, 33] */
4337 {
4338 insert_into_bit_range_vector (&ranges, 4, 30);
4339 static const range expected = {2, 32};
4340 SELF_CHECK (check_ranges_vector (ranges, expected));
4341 }
4342 }
4343
4344 static void
4345 test_value_copy ()
4346 {
4347 type *type = builtin_type (current_inferior ()->gdbarch)->builtin_int;
4348
4349 /* Verify that we can copy an entirely optimized out value, that may not have
4350 its contents allocated. */
4351 value_ref_ptr val = release_value (allocate_optimized_out_value (type));
4352 value_ref_ptr copy = release_value (value_copy (val.get ()));
4353
4354 SELF_CHECK (value_entirely_optimized_out (val.get ()));
4355 SELF_CHECK (value_entirely_optimized_out (copy.get ()));
4356 }
4357
4358 } /* namespace selftests */
4359 #endif /* GDB_SELF_TEST */
4360
4361 void _initialize_values ();
4362 void
4363 _initialize_values ()
4364 {
4365 cmd_list_element *show_convenience_cmd
4366 = add_cmd ("convenience", no_class, show_convenience, _("\
4367 Debugger convenience (\"$foo\") variables and functions.\n\
4368 Convenience variables are created when you assign them values;\n\
4369 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4370 \n\
4371 A few convenience variables are given values automatically:\n\
4372 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4373 \"$__\" holds the contents of the last address examined with \"x\"."
4374 #ifdef HAVE_PYTHON
4375 "\n\n\
4376 Convenience functions are defined via the Python API."
4377 #endif
4378 ), &showlist);
4379 add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4380
4381 add_cmd ("values", no_set_class, show_values, _("\
4382 Elements of value history around item number IDX (or last ten)."),
4383 &showlist);
4384
4385 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4386 Initialize a convenience variable if necessary.\n\
4387 init-if-undefined VARIABLE = EXPRESSION\n\
4388 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4389 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4390 VARIABLE is already initialized."));
4391
4392 add_prefix_cmd ("function", no_class, function_command, _("\
4393 Placeholder command for showing help on convenience functions."),
4394 &functionlist, 0, &cmdlist);
4395
4396 add_internal_function ("_isvoid", _("\
4397 Check whether an expression is void.\n\
4398 Usage: $_isvoid (expression)\n\
4399 Return 1 if the expression is void, zero otherwise."),
4400 isvoid_internal_fn, NULL);
4401
4402 add_internal_function ("_creal", _("\
4403 Extract the real part of a complex number.\n\
4404 Usage: $_creal (expression)\n\
4405 Return the real part of a complex number, the type depends on the\n\
4406 type of a complex number."),
4407 creal_internal_fn, NULL);
4408
4409 add_internal_function ("_cimag", _("\
4410 Extract the imaginary part of a complex number.\n\
4411 Usage: $_cimag (expression)\n\
4412 Return the imaginary part of a complex number, the type depends on the\n\
4413 type of a complex number."),
4414 cimag_internal_fn, NULL);
4415
4416 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4417 class_support, &max_value_size, _("\
4418 Set maximum sized value gdb will load from the inferior."), _("\
4419 Show maximum sized value gdb will load from the inferior."), _("\
4420 Use this to control the maximum size, in bytes, of a value that gdb\n\
4421 will load from the inferior. Setting this value to 'unlimited'\n\
4422 disables checking.\n\
4423 Setting this does not invalidate already allocated values, it only\n\
4424 prevents future values, larger than this size, from being allocated."),
4425 set_max_value_size,
4426 show_max_value_size,
4427 &setlist, &showlist);
4428 set_show_commands vsize_limit
4429 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4430 &max_value_size, _("\
4431 Set the maximum number of bytes allowed in a variable-size object."), _("\
4432 Show the maximum number of bytes allowed in a variable-size object."), _("\
4433 Attempts to access an object whose size is not a compile-time constant\n\
4434 and exceeds this limit will cause an error."),
4435 NULL, NULL, &setlist, &showlist);
4436 deprecate_cmd (vsize_limit.set, "set max-value-size");
4437
4438 #if GDB_SELF_TEST
4439 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4440 selftests::register_test ("insert_into_bit_range_vector",
4441 selftests::test_insert_into_bit_range_vector);
4442 selftests::register_test ("value_copy", selftests::test_value_copy);
4443 #endif
4444 }
4445
/* See value.h.  */

void
finalize_values ()
{
  /* Release all remaining value-history references at shutdown, so
     the values (and whatever they hold alive) are destroyed before
     global destructors run.  */
  all_values.clear ();
}