Turn deprecated_value_modifiable into method
[binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2023 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include <iterator>
44 #include <utility>
45 #include <vector>
46 #include "completer.h"
47 #include "gdbsupport/selftest.h"
48 #include "gdbsupport/array-view.h"
49 #include "cli/cli-style.h"
50 #include "expop.h"
51 #include "inferior.h"
52 #include "varobj.h"
53
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.
     NOTE(review): looks heap-allocated and owned by this struct --
     confirm at the allocation site.  */
  char *name;

  /* The handler.  Invoked when the convenience function is called.  */
  internal_function_fn handler;

  /* User data for the handler.  Opaque to this file; passed back to
     HANDLER verbatim.  */
  void *cookie;
};
68
69 /* Returns true if the ranges defined by [offset1, offset1+len1) and
70 [offset2, offset2+len2) overlap. */
71
72 static int
73 ranges_overlap (LONGEST offset1, ULONGEST len1,
74 LONGEST offset2, ULONGEST len2)
75 {
76 LONGEST h, l;
77
78 l = std::max (offset1, offset2);
79 h = std::min (offset1 + len1, offset2 + len2);
80 return (l < h);
81 }
82
/* Returns true if RANGES contains any range that overlaps [OFFSET,
   OFFSET+LENGTH).  */

static int
ranges_contain (const std::vector<range> &ranges, LONGEST offset,
		ULONGEST length)
{
  range what;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

	 R
	 |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and before 2).  But, `0'
     overlaps with R.

     Then we need to check if the I range overlaps the I range itself.
     E.g.,

	      R
	      |---|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1
  */


  /* Relies on range's operator< comparing offsets only -- see the
     range declaration (value.h); confirm if that changes.  */
  auto i = std::lower_bound (ranges.begin (), ranges.end (), what);

  if (i > ranges.begin ())
    {
      const struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
	return 1;
    }

  if (i < ranges.end ())
    {
      const struct range &r = *i;

      if (ranges_overlap (r.offset, r.length, offset, length))
	return 1;
    }

  return 0;
}
147
148 static struct cmd_list_element *functionlist;
149
/* Destroy a value, releasing any lval-specific resources it owns.  */

value::~value ()
{
  if (VALUE_LVAL (this) == lval_computed)
    {
      /* Computed lvals may carry a provider-owned closure; give the
	 provider a chance to free it.  */
      const struct lval_funcs *funcs = m_location.computed.funcs;

      if (funcs->free_closure)
	funcs->free_closure (this);
    }
  else if (VALUE_LVAL (this) == lval_xcallable)
    /* The xmethod worker is owned by this value.  */
    delete m_location.xm_worker;
}
162
163 /* See value.h. */
164
165 struct gdbarch *
166 value::arch () const
167 {
168 return type ()->arch ();
169 }
170
/* Return non-zero if the bit range [OFFSET, OFFSET+LENGTH) of VALUE
   is available (neither unavailable nor outside a history value's
   recorded extent).  VALUE must not be lazy.  */

int
value_bits_available (const struct value *value,
		      LONGEST offset, ULONGEST length)
{
  gdb_assert (!value->m_lazy);

  /* Don't pretend we have anything available there in the history beyond
     the boundaries of the value recorded.  It's not like inferior memory
     where there is actual stuff underneath.  */
  ULONGEST val_len = TARGET_CHAR_BIT * value_enclosing_type (value)->length ();
  return !((value->m_in_history
	    && (offset < 0 || offset + length > val_len))
	   || ranges_contain (value->m_unavailable, offset, length));
}
185
/* Byte-oriented wrapper around value_bits_available: check that
   [OFFSET, OFFSET+LENGTH) bytes of VALUE are available.  */

int
value_bytes_available (const struct value *value,
		       LONGEST offset, ULONGEST length)
{
  /* SIGN is the highest bit that survives the implicit multiplication
     by TARGET_CHAR_BIT below; MASK covers every surviving bit.  The
     checks verify that OFFSET and LENGTH are unchanged by truncating
     to that width and sign-extending (i.e. scaling them to bits
     cannot overflow a LONGEST), and that OFFSET + LENGTH - 1 does not
     wrap past the sign bit.  */
  ULONGEST sign = (1ULL << (sizeof (ULONGEST) * 8 - 1)) / TARGET_CHAR_BIT;
  ULONGEST mask = (sign << 1) - 1;

  if (offset != ((offset & mask) ^ sign) - sign
      || length != ((length & mask) ^ sign) - sign
      || (length > 0 && (~offset & (offset + length - 1) & sign) != 0))
    error (_("Integer overflow in data location calculation"));

  return value_bits_available (value,
			       offset * TARGET_CHAR_BIT,
			       length * TARGET_CHAR_BIT);
}
202
203 int
204 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
205 {
206 gdb_assert (!value->m_lazy);
207
208 return ranges_contain (value->m_optimized_out, bit_offset, bit_length);
209 }
210
211 int
212 value_entirely_available (struct value *value)
213 {
214 /* We can only tell whether the whole value is available when we try
215 to read it. */
216 if (value->m_lazy)
217 value_fetch_lazy (value);
218
219 if (value->m_unavailable.empty ())
220 return 1;
221 return 0;
222 }
223
224 /* Returns true if VALUE is entirely covered by RANGES. If the value
225 is lazy, it'll be read now. Note that RANGE is a pointer to
226 pointer because reading the value might change *RANGE. */
227
228 static int
229 value_entirely_covered_by_range_vector (struct value *value,
230 const std::vector<range> &ranges)
231 {
232 /* We can only tell whether the whole value is optimized out /
233 unavailable when we try to read it. */
234 if (value->m_lazy)
235 value_fetch_lazy (value);
236
237 if (ranges.size () == 1)
238 {
239 const struct range &t = ranges[0];
240
241 if (t.offset == 0
242 && t.length == (TARGET_CHAR_BIT
243 * value_enclosing_type (value)->length ()))
244 return 1;
245 }
246
247 return 0;
248 }
249
250 int
251 value_entirely_unavailable (struct value *value)
252 {
253 return value_entirely_covered_by_range_vector (value, value->m_unavailable);
254 }
255
256 int
257 value_entirely_optimized_out (struct value *value)
258 {
259 return value_entirely_covered_by_range_vector (value, value->m_optimized_out);
260 }
261
/* Insert into the vector pointed to by VECTORP the bit range starting
   at OFFSET bits, and extending for the next LENGTH bits.  */

static void
insert_into_bit_range_vector (std::vector<range> *vectorp,
			      LONGEST offset, ULONGEST length)
{
  range newr;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

	   R
	   |-...-|
	 |---|     |---|  |------| ... |--|
	 0         1      2            N

	 I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

	    R
	    |-...-|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

	 I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

	       R
	       |-...-|
	  |--|         |---|  |------| ... |--|
	  0            1      2            N

	 I=1

     or if I is 0:

       #4 - R is the range with lowest offset

	  R
	  |-...-|
	         |--|       |---|  |------| ... |--|
	         0          1      2            N

	 I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

	  R
	  |------------------------|
	         |--|       |---|  |------| ... |--|
	         0          1      2            N

	 I=0

       or:

	    R
	    |-------|
	 |--|       |---|  |------| ... |--|
	 0          1      2            N

	 I=1

  */

  auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
  if (i > vectorp->begin ())
    {
      struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
	{
	  /* #1 -- grow the previous range to cover the union.  */
	  LONGEST l = std::min (bef.offset, offset);
	  LONGEST h = std::max (bef.offset + bef.length, offset + length);

	  bef.offset = l;
	  bef.length = h - l;
	  i--;
	}
      else if (offset == bef.offset + bef.length)
	{
	  /* #2 -- extend the previous range in place.  */
	  bef.length += length;
	  i--;
	}
      else
	{
	  /* #3 */
	  i = vectorp->insert (i, newr);
	}
    }
  else
    {
      /* #4 */
      i = vectorp->insert (i, newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i != vectorp->end () && i + 1 < vectorp->end ())
    {
      int removed = 0;
      auto next = i + 1;

      /* Get the range we just touched.  */
      struct range &t = *i;
      removed = 0;  /* NOTE(review): redundant re-initialization,
		       kept from the original.  */

      i = next;
      for (; i < vectorp->end (); i++)
	{
	  struct range &r = *i;
	  if (r.offset <= t.offset + t.length)
	    {
	      /* R overlaps or abuts T; fold it into T and count it
		 for removal below.  */
	      LONGEST l, h;

	      l = std::min (t.offset, r.offset);
	      h = std::max (t.offset + t.length, r.offset + r.length);

	      t.offset = l;
	      t.length = h - l;

	      removed++;
	    }
	  else
	    {
	      /* If we couldn't merge this one, we won't be able to
		 merge following ones either, since the ranges are
		 always sorted by OFFSET.  */
	      break;
	    }
	}

      if (removed != 0)
	vectorp->erase (next, next + removed);
    }
}
430
431 void
432 mark_value_bits_unavailable (struct value *value,
433 LONGEST offset, ULONGEST length)
434 {
435 insert_into_bit_range_vector (&value->m_unavailable, offset, length);
436 }
437
438 void
439 mark_value_bytes_unavailable (struct value *value,
440 LONGEST offset, ULONGEST length)
441 {
442 mark_value_bits_unavailable (value,
443 offset * TARGET_CHAR_BIT,
444 length * TARGET_CHAR_BIT);
445 }
446
447 /* Find the first range in RANGES that overlaps the range defined by
448 OFFSET and LENGTH, starting at element POS in the RANGES vector,
449 Returns the index into RANGES where such overlapping range was
450 found, or -1 if none was found. */
451
452 static int
453 find_first_range_overlap (const std::vector<range> *ranges, int pos,
454 LONGEST offset, LONGEST length)
455 {
456 int i;
457
458 for (i = pos; i < ranges->size (); i++)
459 {
460 const range &r = (*ranges)[i];
461 if (ranges_overlap (r.offset, r.length, offset, length))
462 return i;
463 }
464
465 return -1;
466 }
467
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      /* MASK selects the low-order BITS of the byte -- the tail of the
	 first partial byte.  */
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The whole comparison fits inside this one byte; narrow
	     MASK so trailing bits beyond LENGTH_BITS are ignored.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  MASK selects the high-order BITS of the final
	 (partial) byte.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
568
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  Not owned; points at a value's unavailable or
     optimized-out vector.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
582
583 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
584 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
585 ranges starting at OFFSET2 bits. Return true if the ranges match
586 and fill in *L and *H with the overlapping window relative to
587 (both) OFFSET1 or OFFSET2. */
588
589 static int
590 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
591 struct ranges_and_idx *rp2,
592 LONGEST offset1, LONGEST offset2,
593 ULONGEST length, ULONGEST *l, ULONGEST *h)
594 {
595 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
596 offset1, length);
597 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
598 offset2, length);
599
600 if (rp1->idx == -1 && rp2->idx == -1)
601 {
602 *l = length;
603 *h = length;
604 return 1;
605 }
606 else if (rp1->idx == -1 || rp2->idx == -1)
607 return 0;
608 else
609 {
610 const range *r1, *r2;
611 ULONGEST l1, h1;
612 ULONGEST l2, h2;
613
614 r1 = &(*rp1->ranges)[rp1->idx];
615 r2 = &(*rp2->ranges)[rp2->idx];
616
617 /* Get the unavailable windows intersected by the incoming
618 ranges. The first and last ranges that overlap the argument
619 range may be wider than said incoming arguments ranges. */
620 l1 = std::max (offset1, r1->offset);
621 h1 = std::min (offset1 + length, r1->offset + r1->length);
622
623 l2 = std::max (offset2, r2->offset);
624 h2 = std::min (offset2 + length, offset2 + r2->length);
625
626 /* Make them relative to the respective start offsets, so we can
627 compare them for equality. */
628 l1 -= offset1;
629 h1 -= offset1;
630
631 l2 -= offset2;
632 h2 -= offset2;
633
634 /* Different ranges, no match. */
635 if (l1 != l2 || h1 != h2)
636 return 0;
637
638 *h = h1;
639 *l = l1;
640 return 1;
641 }
642 }
643
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static bool
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->m_lazy && !val2->m_lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= val1->m_enclosing_type->length () * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= val2->m_enclosing_type->length () * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &val1->m_unavailable;
  rp2[0].ranges = &val2->m_unavailable;
  rp1[1].ranges = &val1->m_optimized_out;
  rp2[1].ranges = &val2->m_optimized_out;

  while (length > 0)
    {
      /* L is the count of comparable bits before the first hole; H is
	 the point past the hole, so both values can be advanced in
	 lockstep.  */
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return false;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->m_contents.get (), offset1,
				   val2->m_contents.get (), offset2, l) != 0)
	return false;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}
712
713 bool
714 value_contents_eq (const struct value *val1, LONGEST offset1,
715 const struct value *val2, LONGEST offset2,
716 LONGEST length)
717 {
718 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
719 val2, offset2 * TARGET_CHAR_BIT,
720 length * TARGET_CHAR_BIT);
721 }
722
723 /* See value.h. */
724
725 bool
726 value_contents_eq (const struct value *val1, const struct value *val2)
727 {
728 ULONGEST len1 = check_typedef (value_enclosing_type (val1))->length ();
729 ULONGEST len2 = check_typedef (value_enclosing_type (val2))->length ();
730 if (len1 != len2)
731 return false;
732 return value_contents_eq (val1, 0, val2, 0, len1);
733 }
734
735 /* The value-history records all the values printed by print commands
736 during this session. */
737
738 static std::vector<value_ref_ptr> value_history;
739
740 \f
741 /* List of all value objects currently allocated
742 (except for those released by calls to release_value)
743 This is so they can be freed after each command. */
744
745 static std::vector<value_ref_ptr> all_values;
746
747 /* Allocate a lazy value for type TYPE. Its actual content is
748 "lazily" allocated too: the content field of the return value is
749 NULL; it will be allocated when it is fetched from the target. */
750
751 struct value *
752 allocate_value_lazy (struct type *type)
753 {
754 struct value *val;
755
756 /* Call check_typedef on our type to make sure that, if TYPE
757 is a TYPE_CODE_TYPEDEF, its length is set to the length
758 of the target type instead of zero. However, we do not
759 replace the typedef type by the target type, because we want
760 to keep the typedef in order to be able to set the VAL's type
761 description correctly. */
762 check_typedef (type);
763
764 val = new struct value (type);
765
766 /* Values start out on the all_values chain. */
767 all_values.emplace_back (val);
768
769 return val;
770 }
771
772 /* The maximum size, in bytes, that GDB will try to allocate for a value.
773 The initial value of 64k was not selected for any specific reason, it is
774 just a reasonable starting point. */
775
776 static int max_value_size = 65536; /* 64k bytes */
777
778 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
779 LONGEST, otherwise GDB will not be able to parse integer values from the
780 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
781 be unable to parse "set max-value-size 2".
782
783 As we want a consistent GDB experience across hosts with different sizes
784 of LONGEST, this arbitrary minimum value was selected, so long as this
785 is bigger than LONGEST on all GDB supported hosts we're fine. */
786
787 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
788 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
789
790 /* Implement the "set max-value-size" command. */
791
792 static void
793 set_max_value_size (const char *args, int from_tty,
794 struct cmd_list_element *c)
795 {
796 gdb_assert (max_value_size == -1 || max_value_size >= 0);
797
798 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
799 {
800 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
801 error (_("max-value-size set too low, increasing to %d bytes"),
802 max_value_size);
803 }
804 }
805
806 /* Implement the "show max-value-size" command. */
807
808 static void
809 show_max_value_size (struct ui_file *file, int from_tty,
810 struct cmd_list_element *c, const char *value)
811 {
812 if (max_value_size == -1)
813 gdb_printf (file, _("Maximum value size is unlimited.\n"));
814 else
815 gdb_printf (file, _("Maximum value size is %d bytes.\n"),
816 max_value_size);
817 }
818
819 /* Called before we attempt to allocate or reallocate a buffer for the
820 contents of a value. TYPE is the type of the value for which we are
821 allocating the buffer. If the buffer is too large (based on the user
822 controllable setting) then throw an error. If this function returns
823 then we should attempt to allocate the buffer. */
824
825 static void
826 check_type_length_before_alloc (const struct type *type)
827 {
828 ULONGEST length = type->length ();
829
830 if (max_value_size > -1 && length > max_value_size)
831 {
832 if (type->name () != NULL)
833 error (_("value of type `%s' requires %s bytes, which is more "
834 "than max-value-size"), type->name (), pulongest (length));
835 else
836 error (_("value requires %s bytes, which is more than "
837 "max-value-size"), pulongest (length));
838 }
839 }
840
841 /* When this has a value, it is used to limit the number of array elements
842 of an array that are loaded into memory when an array value is made
843 non-lazy. */
844 static gdb::optional<int> array_length_limiting_element_count;
845
/* See value.h.  */
scoped_array_length_limiting::scoped_array_length_limiting (int elements)
{
  /* Save the previous limit so the destructor can restore it, then
     install ELEMENTS as the new global limit.  */
  m_old_value = array_length_limiting_element_count;
  array_length_limiting_element_count.emplace (elements);
}
852
/* See value.h.  */
scoped_array_length_limiting::~scoped_array_length_limiting ()
{
  /* Restore whatever limit (possibly none) was in effect before.  */
  array_length_limiting_element_count = m_old_value;
}
858
859 /* Find the inner element type for ARRAY_TYPE. */
860
861 static struct type *
862 find_array_element_type (struct type *array_type)
863 {
864 array_type = check_typedef (array_type);
865 gdb_assert (array_type->code () == TYPE_CODE_ARRAY);
866
867 if (current_language->la_language == language_fortran)
868 while (array_type->code () == TYPE_CODE_ARRAY)
869 {
870 array_type = array_type->target_type ();
871 array_type = check_typedef (array_type);
872 }
873 else
874 {
875 array_type = array_type->target_type ();
876 array_type = check_typedef (array_type);
877 }
878
879 return array_type;
880 }
881
882 /* Return the limited length of ARRAY_TYPE, which must be of
883 TYPE_CODE_ARRAY. This function can only be called when the global
884 ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.
885
886 The limited length of an array is the smallest of either (1) the total
887 size of the array type, or (2) the array target type multiplies by the
888 array_length_limiting_element_count. */
889
890 static ULONGEST
891 calculate_limited_array_length (struct type *array_type)
892 {
893 gdb_assert (array_length_limiting_element_count.has_value ());
894
895 array_type = check_typedef (array_type);
896 gdb_assert (array_type->code () == TYPE_CODE_ARRAY);
897
898 struct type *elm_type = find_array_element_type (array_type);
899 ULONGEST len = (elm_type->length ()
900 * (*array_length_limiting_element_count));
901 len = std::min (len, array_type->length ());
902
903 return len;
904 }
905
/* Try to limit ourselves to only fetching the limited number of
   elements.  However, if this limited number of elements still
   puts us over max_value_size, then we still refuse it and
   return failure here, which will ultimately throw an error.  */

static bool
set_limited_array_length (struct value *val)
{
  ULONGEST limit = val->m_limited_length;
  ULONGEST len = val->type ()->length ();

  if (array_length_limiting_element_count.has_value ())
    len = calculate_limited_array_length (val->type ());

  if (limit != 0 && len > limit)
    len = limit;
  if (len > max_value_size)
    return false;

  /* NOTE(review): this stores MAX_VALUE_SIZE rather than LEN --
     presumably intentional (fetch up to the global cap); confirm
     against the callers in allocate_value_contents.  */
  val->m_limited_length = max_value_size;
  return true;
}
928
/* Allocate the contents of VAL if it has not been allocated yet.
   If CHECK_SIZE is true, then apply the usual max-value-size checks.  */

static void
allocate_value_contents (struct value *val, bool check_size)
{
  if (!val->m_contents)
    {
      struct type *enclosing_type = value_enclosing_type (val);
      ULONGEST len = enclosing_type->length ();

      if (check_size)
	{
	  /* If we are allocating the contents of an array, which
	     is greater in size than max_value_size, and there is
	     an element limit in effect, then we can possibly try
	     to load only a sub-set of the array contents into
	     GDB's memory.  */
	  if (val->type () == enclosing_type
	      && val->type ()->code () == TYPE_CODE_ARRAY
	      && len > max_value_size
	      && set_limited_array_length (val))
	    len = val->m_limited_length;
	  else
	    /* Not limitable: throws if LEN exceeds max-value-size.  */
	    check_type_length_before_alloc (enclosing_type);
	}

      /* Zero-filled, so unfetched bytes read as zero.  */
      val->m_contents.reset ((gdb_byte *) xzalloc (len));
    }
}
959
960 /* Allocate a value and its contents for type TYPE. If CHECK_SIZE is true,
961 then apply the usual max-value-size checks. */
962
963 static struct value *
964 allocate_value (struct type *type, bool check_size)
965 {
966 struct value *val = allocate_value_lazy (type);
967
968 allocate_value_contents (val, check_size);
969 val->m_lazy = 0;
970 return val;
971 }
972
973 /* Allocate a value and its contents for type TYPE. */
974
975 struct value *
976 allocate_value (struct type *type)
977 {
978 return allocate_value (type, true);
979 }
980
981 /* Allocate a value that has the correct length
982 for COUNT repetitions of type TYPE. */
983
984 struct value *
985 allocate_repeat_value (struct type *type, int count)
986 {
987 /* Despite the fact that we are really creating an array of TYPE here, we
988 use the string lower bound as the array lower bound. This seems to
989 work fine for now. */
990 int low_bound = current_language->string_lower_bound ();
991 /* FIXME-type-allocation: need a way to free this type when we are
992 done with it. */
993 struct type *array_type
994 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
995
996 return allocate_value (array_type);
997 }
998
999 struct value *
1000 allocate_computed_value (struct type *type,
1001 const struct lval_funcs *funcs,
1002 void *closure)
1003 {
1004 struct value *v = allocate_value_lazy (type);
1005
1006 VALUE_LVAL (v) = lval_computed;
1007 v->m_location.computed.funcs = funcs;
1008 v->m_location.computed.closure = closure;
1009
1010 return v;
1011 }
1012
1013 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1014
1015 struct value *
1016 allocate_optimized_out_value (struct type *type)
1017 {
1018 struct value *retval = allocate_value_lazy (type);
1019
1020 mark_value_bytes_optimized_out (retval, 0, type->length ());
1021 set_value_lazy (retval, 0);
1022 return retval;
1023 }
1024
1025 /* Accessor methods. */
1026
1027 gdb::array_view<gdb_byte>
1028 value_contents_raw (struct value *value)
1029 {
1030 struct gdbarch *arch = value->arch ();
1031 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1032
1033 allocate_value_contents (value, true);
1034
1035 ULONGEST length = value->type ()->length ();
1036 return gdb::make_array_view
1037 (value->m_contents.get () + value->m_embedded_offset * unit_size, length);
1038 }
1039
1040 gdb::array_view<gdb_byte>
1041 value_contents_all_raw (struct value *value)
1042 {
1043 allocate_value_contents (value, true);
1044
1045 ULONGEST length = value_enclosing_type (value)->length ();
1046 return gdb::make_array_view (value->m_contents.get (), length);
1047 }
1048
/* Return VALUE's enclosing type -- the type of the whole object
   VALUE is embedded in, which may be larger than VALUE's own type.  */

struct type *
value_enclosing_type (const struct value *value)
{
  return value->m_enclosing_type;
}
1054
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default to the value's declared (static) type.  */
  result = value->type ();
  /* Dynamic type detection is only attempted when "set print object"
     is on.  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if (result->is_pointer_or_reference ()
	  && (check_typedef (result->target_type ())->code ()
	      == TYPE_CODE_STRUCT)
	  && !value_optimized_out (value))
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* Non-pointer case: the enclosing type is the best "actual"
	     type we can report.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1098
/* Throw the standard "optimized out" error.  */

void
error_value_optimized_out (void)
{
  throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
}
1104
1105 static void
1106 require_not_optimized_out (const struct value *value)
1107 {
1108 if (!value->m_optimized_out.empty ())
1109 {
1110 if (value->m_lval == lval_register)
1111 throw_error (OPTIMIZED_OUT_ERROR,
1112 _("register has not been saved in frame"));
1113 else
1114 error_value_optimized_out ();
1115 }
1116 }
1117
1118 static void
1119 require_available (const struct value *value)
1120 {
1121 if (!value->m_unavailable.empty ())
1122 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1123 }
1124
1125 gdb::array_view<const gdb_byte>
1126 value_contents_for_printing (struct value *value)
1127 {
1128 if (value->m_lazy)
1129 value_fetch_lazy (value);
1130
1131 ULONGEST length = value_enclosing_type (value)->length ();
1132 return gdb::make_array_view (value->m_contents.get (), length);
1133 }
1134
1135 gdb::array_view<const gdb_byte>
1136 value_contents_for_printing_const (const struct value *value)
1137 {
1138 gdb_assert (!value->m_lazy);
1139
1140 ULONGEST length = value_enclosing_type (value)->length ();
1141 return gdb::make_array_view (value->m_contents.get (), length);
1142 }
1143
1144 gdb::array_view<const gdb_byte>
1145 value_contents_all (struct value *value)
1146 {
1147 gdb::array_view<const gdb_byte> result = value_contents_for_printing (value);
1148 require_not_optimized_out (value);
1149 require_available (value);
1150 return result;
1151 }
1152
1153 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1154 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1155
1156 static void
1157 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1158 const std::vector<range> &src_range, int src_bit_offset,
1159 unsigned int bit_length)
1160 {
1161 for (const range &r : src_range)
1162 {
1163 LONGEST h, l;
1164
1165 l = std::max (r.offset, (LONGEST) src_bit_offset);
1166 h = std::min ((LONGEST) (r.offset + r.length),
1167 (LONGEST) src_bit_offset + bit_length);
1168
1169 if (l < h)
1170 insert_into_bit_range_vector (dst_range,
1171 dst_bit_offset + (l - src_bit_offset),
1172 h - l);
1173 }
1174 }
1175
1176 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1177 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1178
static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  /* Both kinds of metadata ranges are copied the same way.  */
  ranges_copy_adjusted (&dst->m_unavailable, dst_bit_offset,
			src->m_unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->m_optimized_out, dst_bit_offset,
			src->m_optimized_out, src_bit_offset,
			bit_length);
}
1191
1192 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1193 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1194 contents, starting at DST_OFFSET. If unavailable contents are
1195 being copied from SRC, the corresponding DST contents are marked
1196 unavailable accordingly. Neither DST nor SRC may be lazy
1197 values.
1198
1199 It is assumed the contents of DST in the [DST_OFFSET,
1200 DST_OFFSET+LENGTH) range are wholly available. */
1201
static void
value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
			 struct value *src, LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = src->arch ();
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !src->m_lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  Offsets and lengths are in addressable memory
     units, hence the scaling by UNIT_SIZE.  */
  gdb::array_view<gdb_byte> dst_contents
    = value_contents_all_raw (dst).slice (dst_offset * unit_size,
					  length * unit_size);
  gdb::array_view<const gdb_byte> src_contents
    = value_contents_all_raw (src).slice (src_offset * unit_size,
					  length * unit_size);
  copy (src_contents, dst_contents);

  /* Copy the meta-data, adjusted.  The unavailable/optimized-out
     ranges are tracked in bits, so convert the byte quantities.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1242
1243 /* A helper for value_from_component_bitsize that copies bits from SRC
1244 to DEST. */
1245
static void
value_contents_copy_raw_bitwise (struct value *dst, LONGEST dst_bit_offset,
				 struct value *src, LONGEST src_bit_offset,
				 LONGEST bit_length)
{
  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !src->m_lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  /* Note the availability check below is done at byte granularity on
     the (truncated) containing byte range.  */
  LONGEST dst_offset = dst_bit_offset / TARGET_CHAR_BIT;
  LONGEST length = bit_length / TARGET_CHAR_BIT;
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst, dst_bit_offset,
					     bit_length));

  /* Copy the data.  copy_bitwise handles arbitrary bit offsets; the
     bit numbering depends on the source type's byte order.  */
  gdb::array_view<gdb_byte> dst_contents = value_contents_all_raw (dst);
  gdb::array_view<const gdb_byte> src_contents = value_contents_all_raw (src);
  copy_bitwise (dst_contents.data (), dst_bit_offset,
		src_contents.data (), src_bit_offset,
		bit_length,
		type_byte_order (src->type ()) == BFD_ENDIAN_BIG);

  /* Copy the meta-data.  */
  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1279
1280 /* Copy LENGTH bytes of SRC value's (all) contents
1281 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1282 (all) contents, starting at DST_OFFSET. If unavailable contents
1283 are being copied from SRC, the corresponding DST contents are
1284 marked unavailable accordingly. DST must not be lazy. If SRC is
1285 lazy, it will be fetched now.
1286
1287 It is assumed the contents of DST in the [DST_OFFSET,
1288 DST_OFFSET+LENGTH) range are wholly available. */
1289
void
value_contents_copy (struct value *dst, LONGEST dst_offset,
		     struct value *src, LONGEST src_offset, LONGEST length)
{
  /* The raw copier requires a non-lazy source; fetch on demand.  */
  if (src->m_lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1299
/* Return nonzero if VALUE's contents have not been fetched yet.  */

int
value_lazy (const struct value *value)
{
  return value->m_lazy;
}
1305
/* Set VALUE's lazy flag to VAL.  */

void
set_value_lazy (struct value *value, int val)
{
  value->m_lazy = val;
}
1311
/* Return VALUE's stack flag.  */

int
value_stack (const struct value *value)
{
  return value->m_stack;
}
1317
/* Set VALUE's stack flag to VAL.  */

void
set_value_stack (struct value *value, int val)
{
  value->m_stack = val;
}
1323
1324 gdb::array_view<const gdb_byte>
1325 value_contents (struct value *value)
1326 {
1327 gdb::array_view<const gdb_byte> result = value_contents_writeable (value);
1328 require_not_optimized_out (value);
1329 require_available (value);
1330 return result;
1331 }
1332
/* Return a writeable view of VALUE's contents, fetching them first if
   the value is still lazy.  */

gdb::array_view<gdb_byte>
value_contents_writeable (struct value *value)
{
  if (value->m_lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}
1340
/* Return nonzero if any part of VALUE is optimized out.  May fetch a
   lazy VALUE as a side effect.  */

int
value_optimized_out (struct value *value)
{
  if (value->m_lazy)
    {
      /* See if we can compute the result without fetching the
	 value.  */
      if (VALUE_LVAL (value) == lval_memory)
	return false;
      else if (VALUE_LVAL (value) == lval_computed)
	{
	  const struct lval_funcs *funcs = value->m_location.computed.funcs;

	  if (funcs->is_optimized_out != nullptr)
	    return funcs->is_optimized_out (value);
	}

      /* Fall back to fetching.  Errors that merely mean "can't read
	 it" are swallowed so the optimized-out ranges can answer;
	 anything else propagates.  */
      try
	{
	  value_fetch_lazy (value);
	}
      catch (const gdb_exception_error &ex)
	{
	  switch (ex.error)
	    {
	    case MEMORY_ERROR:
	    case OPTIMIZED_OUT_ERROR:
	    case NOT_AVAILABLE_ERROR:
	      /* These can normally happen when we try to access an
		 optimized out or unavailable register, either in a
		 physical register or spilled to memory.  */
	      break;
	    default:
	      throw;
	    }
	}
    }

  return !value->m_optimized_out.empty ();
}
1382
1383 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1384 the following LENGTH bytes. */
1385
1386 void
1387 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1388 {
1389 mark_value_bits_optimized_out (value,
1390 offset * TARGET_CHAR_BIT,
1391 length * TARGET_CHAR_BIT);
1392 }
1393
1394 /* See value.h. */
1395
/* Record [OFFSET, OFFSET+LENGTH) bits of VALUE as optimized out.  */

void
mark_value_bits_optimized_out (struct value *value,
			       LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&value->m_optimized_out, offset, length);
}
1402
1403 int
1404 value_bits_synthetic_pointer (const struct value *value,
1405 LONGEST offset, LONGEST length)
1406 {
1407 if (value->m_lval != lval_computed
1408 || !value->m_location.computed.funcs->check_synthetic_pointer)
1409 return 0;
1410 return value->m_location.computed.funcs->check_synthetic_pointer (value,
1411 offset,
1412 length);
1413 }
1414
/* Return VALUE's embedded offset (offset of the value's type within
   the enclosing type's contents).  */

LONGEST
value_embedded_offset (const struct value *value)
{
  return value->m_embedded_offset;
}
1420
/* Set VALUE's embedded offset to VAL.  */

void
set_value_embedded_offset (struct value *value, LONGEST val)
{
  value->m_embedded_offset = val;
}
1426
/* Return VALUE's pointed-to offset.  */

LONGEST
value_pointed_to_offset (const struct value *value)
{
  return value->m_pointed_to_offset;
}
1432
/* Set VALUE's pointed-to offset to VAL.  */

void
set_value_pointed_to_offset (struct value *value, LONGEST val)
{
  value->m_pointed_to_offset = val;
}
1438
/* Return the lval_funcs of computed lvalue V.  V must be a computed
   lvalue.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->m_location.computed.funcs;
}
1446
/* Return the opaque closure of computed lvalue V.  V must be a
   computed lvalue.  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->m_lval == lval_computed);

  return v->m_location.computed.closure;
}
1454
/* Return a mutable pointer to VALUE's lval kind; used by the
   VALUE_LVAL macro.  Deprecated: direct mutation bypasses any
   invariants tied to the location union.  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->m_lval;
}
1460
/* Const accessor for VALUE's lval kind.  */

enum lval_type
value_lval_const (const struct value *value)
{
  return value->m_lval;
}
1466
1467 CORE_ADDR
1468 value_address (const struct value *value)
1469 {
1470 if (value->m_lval != lval_memory)
1471 return 0;
1472 if (value->m_parent != NULL)
1473 return value_address (value->m_parent.get ()) + value->m_offset;
1474 if (NULL != TYPE_DATA_LOCATION (value->type ()))
1475 {
1476 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value->type ()));
1477 return TYPE_DATA_LOCATION_ADDR (value->type ());
1478 }
1479
1480 return value->m_location.address + value->m_offset;
1481 }
1482
/* Return VALUE's raw address, ignoring the offset and any parent;
   0 for non-memory lvalues.  */

CORE_ADDR
value_raw_address (const struct value *value)
{
  if (value->m_lval != lval_memory)
    return 0;
  return value->m_location.address;
}
1490
/* Set VALUE's address to ADDR.  VALUE must be a memory lvalue.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->m_lval == lval_memory);
  value->m_location.address = addr;
}
1497
/* Return a mutable pointer to VALUE's internalvar location slot.
   Deprecated accessor; does not check that VALUE is actually an
   internalvar lvalue.  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->m_location.internalvar;
}
1503
/* Return a mutable pointer to the next-frame id of register lvalue
   VALUE.  Deprecated accessor.  */

struct frame_id *
deprecated_value_next_frame_id_hack (struct value *value)
{
  gdb_assert (value->m_lval == lval_register);
  return &value->m_location.reg.next_frame_id;
}
1510
/* Return a mutable pointer to the register number of register lvalue
   VALUE.  Deprecated accessor.  */

int *
deprecated_value_regnum_hack (struct value *value)
{
  gdb_assert (value->m_lval == lval_register);
  return &value->m_location.reg.regnum;
}
1517
1518 \f
1519 /* Return a mark in the value chain. All values allocated after the
1520 mark is obtained (except for those released) are subject to being freed
1521 if a subsequent value_free_to_mark is passed the mark. */
1522 struct value *
1523 value_mark (void)
1524 {
1525 if (all_values.empty ())
1526 return nullptr;
1527 return all_values.back ().get ();
1528 }
1529
1530 /* See value.h. */
1531
/* Acquire one more reference to VAL; paired with value_decref.  */

void
value_incref (struct value *val)
{
  val->m_reference_count++;
}
1537
1538 /* Release a reference to VAL, which was acquired with value_incref.
1539 This function is also called to deallocate values from the value
1540 chain. */
1541
1542 void
1543 value_decref (struct value *val)
1544 {
1545 if (val != nullptr)
1546 {
1547 gdb_assert (val->m_reference_count > 0);
1548 val->m_reference_count--;
1549 if (val->m_reference_count == 0)
1550 delete val;
1551 }
1552 }
1553
1554 /* Free all values allocated since MARK was obtained by value_mark
1555 (except for those released). */
1556 void
1557 value_free_to_mark (const struct value *mark)
1558 {
1559 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1560 if (iter == all_values.end ())
1561 all_values.clear ();
1562 else
1563 all_values.erase (iter + 1, all_values.end ());
1564 }
1565
1566 /* Remove VAL from the chain all_values
1567 so it will not be freed automatically. */
1568
value_ref_ptr
release_value (struct value *val)
{
  if (val == nullptr)
    return value_ref_ptr ();

  /* Search backwards: recently allocated values are at the end of
     the chain, so this is usually quick.  */
  std::vector<value_ref_ptr>::reverse_iterator iter;
  for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
    {
      if (*iter == val)
	{
	  value_ref_ptr result = *iter;
	  /* iter.base () - 1 converts the reverse iterator to the
	     forward iterator addressing the same element.  */
	  all_values.erase (iter.base () - 1);
	  return result;
	}
    }

  /* We must always return an owned reference.  Normally this happens
     because we transfer the reference from the value chain, but in
     this case the value was not on the chain.  */
  return value_ref_ptr::new_reference (val);
}
1591
/* See value.h.  Release all values on the chain after MARK and return
   them, most recent first.  If MARK is not found (or null), the whole
   chain is released.  */

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  /* The chain is oldest-first; callers get newest-first.  */
  std::reverse (result.begin (), result.end ());
  return result;
}
1610
/* Return a copy of the value ARG.  It contains the same contents,
   for the same memory address, but it's a different block of storage.  */

struct value *
value_copy (const value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Allocate lazily so contents are only duplicated below when ARG
     actually has fetched contents.  */
  val = allocate_value_lazy (encl_type);
  val->m_type = arg->m_type;
  VALUE_LVAL (val) = arg->m_lval;
  val->m_location = arg->m_location;
  val->m_offset = arg->m_offset;
  val->m_bitpos = arg->m_bitpos;
  val->m_bitsize = arg->m_bitsize;
  val->m_lazy = arg->m_lazy;
  val->m_embedded_offset = value_embedded_offset (arg);
  val->m_pointed_to_offset = arg->m_pointed_to_offset;
  val->m_modifiable = arg->m_modifiable;
  val->m_stack = arg->m_stack;
  val->m_is_zero = arg->m_is_zero;
  val->m_in_history = arg->m_in_history;
  val->m_initialized = arg->m_initialized;
  val->m_unavailable = arg->m_unavailable;
  val->m_optimized_out = arg->m_optimized_out;
  val->m_parent = arg->m_parent;
  val->m_limited_length = arg->m_limited_length;

  /* Copy the contents buffer, but only if there is anything real to
     copy: a lazy value has no contents yet, and an entirely
     optimized-out or unavailable value has no meaningful bytes.  */
  if (!value_lazy (val)
      && !(value_entirely_optimized_out (val)
	   || value_entirely_unavailable (val)))
    {
      /* A nonzero limited length means only a prefix of the contents
	 was ever fetched (see record_latest_value).  */
      ULONGEST length = val->m_limited_length;
      if (length == 0)
	length = value_enclosing_type (val)->length ();

      gdb_assert (arg->m_contents != nullptr);
      const auto &arg_view
	= gdb::make_array_view (arg->m_contents.get (), length);

      allocate_value_contents (val, false);
      gdb::array_view<gdb_byte> val_contents
	= value_contents_all_raw (val).slice (0, length);

      copy (arg_view, val_contents);
    }

  /* Computed lvalues may carry a closure that must be copied via the
     location's own hook.  */
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->m_location.computed.funcs;

      if (funcs->copy_closure)
	val->m_location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1668
1669 /* Return a "const" and/or "volatile" qualified version of the value V.
1670 If CNST is true, then the returned value will be qualified with
1671 "const".
1672 if VOLTL is true, then the returned value will be qualified with
1673 "volatile". */
1674
1675 struct value *
1676 make_cv_value (int cnst, int voltl, struct value *v)
1677 {
1678 struct type *val_type = v->type ();
1679 struct type *m_enclosing_type = value_enclosing_type (v);
1680 struct value *cv_val = value_copy (v);
1681
1682 cv_val->deprecated_set_type (make_cv_type (cnst, voltl, val_type, NULL));
1683 set_value_enclosing_type (cv_val,
1684 make_cv_type (cnst, voltl, m_enclosing_type, NULL));
1685
1686 return cv_val;
1687 }
1688
1689 /* Return a version of ARG that is non-lvalue. */
1690
1691 struct value *
1692 value_non_lval (struct value *arg)
1693 {
1694 if (VALUE_LVAL (arg) != not_lval)
1695 {
1696 struct type *enc_type = value_enclosing_type (arg);
1697 struct value *val = allocate_value (enc_type);
1698
1699 copy (value_contents_all (arg), value_contents_all_raw (val));
1700 val->m_type = arg->m_type;
1701 set_value_embedded_offset (val, value_embedded_offset (arg));
1702 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1703 return val;
1704 }
1705 return arg;
1706 }
1707
/* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.
   V must currently be a non-lvalue.  */

void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  gdb_assert (VALUE_LVAL (v) == not_lval);

  write_memory (addr, value_contents_raw (v).data (), v->type ()->length ());
  v->m_lval = lval_memory;
  v->m_location.address = addr;
}
1719
/* Make COMPONENT's location the same as WHOLE's, so that operations
   on the component reach through to the containing value.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->m_lval != lval_xcallable);

  /* Components of internal variables get their own lval kind so
     assignments to them write back into the variable.  */
  if (whole->m_lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->m_lval;

  component->m_location = whole->m_location;
  if (whole->m_lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->m_location.computed.funcs;

      if (funcs->copy_closure)
	component->m_location.computed.closure = funcs->copy_closure (whole);
    }

  /* If the WHOLE value has a dynamically resolved location property then
     update the address of the COMPONENT.  */
  type = whole->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));

  /* Similarly, if the COMPONENT value has a dynamically resolved location
     property then update its address.  */
  type = component->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
    {
      /* If the COMPONENT has a dynamic location, and is an
	 lval_internalvar_component, then we change it to a lval_memory.

	 Usually a component of an internalvar is created non-lazy, and has
	 its content immediately copied from the parent internalvar.
	 However, for components with a dynamic location, the content of
	 the component is not contained within the parent, but is instead
	 accessed indirectly.  Further, the component will be created as a
	 lazy value.

	 By changing the type of the component to lval_memory we ensure
	 that value_fetch_lazy can successfully load the component.

	 This solution isn't ideal, but a real fix would require values to
	 carry around both the parent value contents, and the contents of
	 any dynamic fields within the parent.  This is a substantial
	 change to how values work in GDB.  */
      if (VALUE_LVAL (component) == lval_internalvar_component)
	{
	  gdb_assert (value_lazy (component));
	  VALUE_LVAL (component) = lval_memory;
	}
      else
	gdb_assert (VALUE_LVAL (component) == lval_memory);
      set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
    }
}
1782
1783 /* Access to the value history. */
1784
/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  struct type *enclosing_type = value_enclosing_type (val);
  struct type *type = val->type ();

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    {
      /* We know that this is a _huge_ array, any attempt to fetch this
	 is going to cause GDB to throw an error.  However, to allow
	 the array to still be displayed we fetch its contents up to
	 `max_value_size' and mark anything beyond "unavailable" in
	 the history.  */
      if (type->code () == TYPE_CODE_ARRAY
	  && type->length () > max_value_size
	  && array_length_limiting_element_count.has_value ()
	  && enclosing_type == type
	  && calculate_limited_array_length (type) <= max_value_size)
	val->m_limited_length = max_value_size;

      value_fetch_lazy (val);
    }

  /* Bytes past the fetched prefix (if any) are marked unavailable so
     printing them is honest about what was recorded.  */
  ULONGEST limit = val->m_limited_length;
  if (limit != 0)
    mark_value_bytes_unavailable (val, limit,
				  enclosing_type->length () - limit);

  /* Mark the value as recorded in the history for the availability check.  */
  val->m_in_history = true;

  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->m_modifiable = 0;

  value_history.push_back (release_value (val));

  /* History indices are 1-based, so the new element's index is the
     new size.  */
  return value_history.size ();
}
1832
1833 /* Return a copy of the value in the history with sequence number NUM. */
1834
1835 struct value *
1836 access_value_history (int num)
1837 {
1838 int absnum = num;
1839
1840 if (absnum <= 0)
1841 absnum += value_history.size ();
1842
1843 if (absnum <= 0)
1844 {
1845 if (num == 0)
1846 error (_("The history is empty."));
1847 else if (num == 1)
1848 error (_("There is only one value in the history."));
1849 else
1850 error (_("History does not go back to $$%d."), -num);
1851 }
1852 if (absnum > value_history.size ())
1853 error (_("History has not yet reached $%d."), absnum);
1854
1855 absnum--;
1856
1857 return value_copy (value_history[absnum].get ());
1858 }
1859
/* See value.h.  Return the number of entries in the value history.  */

ULONGEST
value_history_count ()
{
  return value_history.size ();
}
1867
/* Implement the "show values" command: print ten history values
   around NUM_EXP, or continue from the previous position when
   NUM_EXP is "+" or absent on repeat.  */

static void
show_values (const char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Static so that "show values +" continues where the previous
     invocation stopped.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history.size () - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history.size (); i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      gdb_printf (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      gdb_printf (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    set_repeat_arguments ("+");
}
\f
/* The different kinds of content an internal (convenience) variable
   can hold; discriminates union internalvar_data below.  */
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
1934
/* Storage for an internal variable's content; which member is active
   is determined by the enclosing internalvar's KIND field.  */
union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
1971
1972 /* Internal variables. These are variables within the debugger
1973 that hold values assigned by debugger commands.
1974 The user refers to them with a '$' prefix
1975 that does not appear in the variable names stored internally. */
1976
struct internalvar
{
  /* Next variable on the singly-linked list headed by INTERNALVARS.  */
  struct internalvar *next;
  /* Heap-allocated name, without the leading '$'.  */
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Head of the linked list of all internal variables.  */
static struct internalvar *internalvars;
1992
/* If the variable does not already exist create it and give it the
   value given.  If no value is given then the default is zero.
   Implements the "init-if-undefined" command; ARGS must be an
   assignment expression whose left-hand side is a GDB convenience
   variable.  */
static void
init_if_undefined_command (const char* args, int from_tty)
{
  struct internalvar *intvar = nullptr;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->first_opcode () != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.  */
  expr::assign_operation *assign
    = dynamic_cast<expr::assign_operation *> (expr->op.get ());
  if (assign != nullptr)
    {
      expr::operation *lhs = assign->get_lhs ();
      expr::internalvar_operation *ivarop
	= dynamic_cast<expr::internalvar_operation *> (lhs);
      if (ivarop != nullptr)
	intvar = ivarop->get_internalvar ();
    }

  if (intvar == nullptr)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    evaluate_expression (expr.get ());
}
2030
2031
2032 /* Look up an internal variable with name NAME. NAME should not
2033 normally include a dollar sign.
2034
2035 If the specified internal variable does not exist,
2036 the return value is NULL. */
2037
2038 struct internalvar *
2039 lookup_only_internalvar (const char *name)
2040 {
2041 struct internalvar *var;
2042
2043 for (var = internalvars; var; var = var->next)
2044 if (strcmp (var->name, name) == 0)
2045 return var;
2046
2047 return NULL;
2048 }
2049
2050 /* Complete NAME by comparing it to the names of internal
2051 variables. */
2052
2053 void
2054 complete_internalvar (completion_tracker &tracker, const char *name)
2055 {
2056 struct internalvar *var;
2057 int len;
2058
2059 len = strlen (name);
2060
2061 for (var = internalvars; var; var = var->next)
2062 if (strncmp (var->name, name, len) == 0)
2063 tracker.add_completion (make_unique_xstrdup (var->name));
2064 }
2065
/* Create an internal variable with name NAME and with a void value.
   NAME should not normally include a dollar sign.  The new variable
   is pushed onto the front of the INTERNALVARS list.  */

struct internalvar *
create_internalvar (const char *name)
{
  struct internalvar *var = XNEW (struct internalvar);

  var->name = xstrdup (name);
  var->kind = INTERNALVAR_VOID;
  var->next = internalvars;
  internalvars = var;
  return var;
}
2080
2081 /* Create an internal variable with name NAME and register FUN as the
2082 function that value_of_internalvar uses to create a value whenever
2083 this variable is referenced. NAME should not normally include a
2084 dollar sign. DATA is passed uninterpreted to FUN when it is
2085 called. CLEANUP, if not NULL, is called when the internal variable
2086 is destroyed. It is passed DATA as its only argument. */
2087
struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  /* Turn the freshly created void variable into a lazily computed one.  */
  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2100
2101 /* See documentation in value.h. */
2102
2103 int
2104 compile_internalvar_to_ax (struct internalvar *var,
2105 struct agent_expr *expr,
2106 struct axs_value *value)
2107 {
2108 if (var->kind != INTERNALVAR_MAKE_VALUE
2109 || var->u.make_value.functions->compile_to_ax == NULL)
2110 return 0;
2111
2112 var->u.make_value.functions->compile_to_ax (var, expr, value,
2113 var->u.make_value.data);
2114 return 1;
2115 }
2116
2117 /* Look up an internal variable with name NAME. NAME should not
2118 normally include a dollar sign.
2119
2120 If the specified internal variable does not exist,
2121 one is created, with a void value. */
2122
2123 struct internalvar *
2124 lookup_internalvar (const char *name)
2125 {
2126 struct internalvar *var;
2127
2128 var = lookup_only_internalvar (name);
2129 if (var)
2130 return var;
2131
2132 return create_internalvar (name);
2133 }
2134
2135 /* Return current value of internal variable VAR. For variables that
2136 are not inherently typed, use a value type appropriate for GDBARCH. */
2137
struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      /* Fetch the current value from the target; if that fails, fall
	 back to a void value rather than reporting an error.  */
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Otherwise, build a value from the variable's current contents,
     choosing a representation based on its kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* An untyped integer is presented using GDBARCH's plain "int".  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Return a copy so callers cannot mutate the stored value
	 directly; force lazy values to be fetched now.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (_("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->m_lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2223
2224 int
2225 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2226 {
2227 if (var->kind == INTERNALVAR_INTEGER)
2228 {
2229 *result = var->u.integer.val;
2230 return 1;
2231 }
2232
2233 if (var->kind == INTERNALVAR_VALUE)
2234 {
2235 struct type *type = check_typedef (var->u.value->type ());
2236
2237 if (type->code () == TYPE_CODE_INT)
2238 {
2239 *result = value_as_long (var->u.value);
2240 return 1;
2241 }
2242 }
2243
2244 return 0;
2245 }
2246
2247 static int
2248 get_internalvar_function (struct internalvar *var,
2249 struct internal_function **result)
2250 {
2251 switch (var->kind)
2252 {
2253 case INTERNALVAR_FUNCTION:
2254 *result = var->u.fn.function;
2255 return 1;
2256
2257 default:
2258 return 0;
2259 }
2260 }
2261
/* Store NEWVAL into a component of internal variable VAR: at byte
   OFFSET, or, when BITSIZE is non-zero, into the bitfield described by
   BITPOS/BITSIZE.  Only INTERNALVAR_VALUE variables have components.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value).data ();
      arch = var->u.value->arch ();
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      if (bitsize)
	/* Bitfield write: splice the new bits into the buffer.  */
	modify_field (var->u.value->type (), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	/* Whole-component write; OFFSET is scaled by the target's
	   addressable-unit size here.  */
	memcpy (addr + offset * unit_size, value_contents (newval).data (),
		newval->type ()->length ());
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (_("set_internalvar_component"));
    }
}
2291
/* Set internal variable VAR to the value VAL, choosing the stored
   representation from VAL's type.  Canonical convenience functions
   cannot be overwritten.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (check_typedef (val->type ())->code ())
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      struct value *copy = value_copy (val);
      copy->m_modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (copy))
	value_fetch_lazy (copy);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      new_data.value = release_value (copy).release ();

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      new_data.value->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION);

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2352
2353 void
2354 set_internalvar_integer (struct internalvar *var, LONGEST l)
2355 {
2356 /* Clean up old contents. */
2357 clear_internalvar (var);
2358
2359 var->kind = INTERNALVAR_INTEGER;
2360 var->u.integer.type = NULL;
2361 var->u.integer.val = l;
2362 }
2363
2364 void
2365 set_internalvar_string (struct internalvar *var, const char *string)
2366 {
2367 /* Clean up old contents. */
2368 clear_internalvar (var);
2369
2370 var->kind = INTERNALVAR_STRING;
2371 var->u.string = xstrdup (string);
2372 }
2373
2374 static void
2375 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2376 {
2377 /* Clean up old contents. */
2378 clear_internalvar (var);
2379
2380 var->kind = INTERNALVAR_FUNCTION;
2381 var->u.fn.function = f;
2382 var->u.fn.canonical = 1;
2383 /* Variables installed here are always the canonical version. */
2384 }
2385
2386 void
2387 clear_internalvar (struct internalvar *var)
2388 {
2389 /* Clean up old contents. */
2390 switch (var->kind)
2391 {
2392 case INTERNALVAR_VALUE:
2393 value_decref (var->u.value);
2394 break;
2395
2396 case INTERNALVAR_STRING:
2397 xfree (var->u.string);
2398 break;
2399
2400 default:
2401 break;
2402 }
2403
2404 /* Reset to void kind. */
2405 var->kind = INTERNALVAR_VOID;
2406 }
2407
/* Return the name of internal variable VAR (without the leading '$').
   The string is owned by VAR; do not free it.  */

const char *
internalvar_name (const struct internalvar *var)
{
  return var->name;
}
2413
2414 static struct internal_function *
2415 create_internal_function (const char *name,
2416 internal_function_fn handler, void *cookie)
2417 {
2418 struct internal_function *ifn = XNEW (struct internal_function);
2419
2420 ifn->name = xstrdup (name);
2421 ifn->handler = handler;
2422 ifn->cookie = cookie;
2423 return ifn;
2424 }
2425
2426 const char *
2427 value_internal_function_name (struct value *val)
2428 {
2429 struct internal_function *ifn;
2430 int result;
2431
2432 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2433 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2434 gdb_assert (result);
2435
2436 return ifn->name;
2437 }
2438
2439 struct value *
2440 call_internal_function (struct gdbarch *gdbarch,
2441 const struct language_defn *language,
2442 struct value *func, int argc, struct value **argv)
2443 {
2444 struct internal_function *ifn;
2445 int result;
2446
2447 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2448 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2449 gdb_assert (result);
2450
2451 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2452 }
2453
2454 /* The 'function' command. This does nothing -- it is just a
2455 placeholder to let "help function NAME" work. This is also used as
2456 the implementation of the sub-command that is created when
2457 registering an internal function. */
static void
function_command (const char *command, int from_tty)
{
  /* Intentionally empty: this exists only so "help function NAME"
     has a command to attach to.  */
}
2463
2464 /* Helper function that does the work for add_internal_function. */
2465
2466 static struct cmd_list_element *
2467 do_add_internal_function (const char *name, const char *doc,
2468 internal_function_fn handler, void *cookie)
2469 {
2470 struct internal_function *ifn;
2471 struct internalvar *var = lookup_internalvar (name);
2472
2473 ifn = create_internal_function (name, handler, cookie);
2474 set_internalvar_function (var, ifn);
2475
2476 return add_cmd (name, no_class, function_command, doc, &functionlist);
2477 }
2478
2479 /* See value.h. */
2480
2481 void
2482 add_internal_function (const char *name, const char *doc,
2483 internal_function_fn handler, void *cookie)
2484 {
2485 do_add_internal_function (name, doc, handler, cookie);
2486 }
2487
2488 /* See value.h. */
2489
2490 void
2491 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2492 gdb::unique_xmalloc_ptr<char> &&doc,
2493 internal_function_fn handler, void *cookie)
2494 {
2495 struct cmd_list_element *cmd
2496 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2497 doc.release ();
2498 cmd->doc_allocated = 1;
2499 name.release ();
2500 cmd->name_allocated = 1;
2501 }
2502
2503 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2504 prevent cycles / duplicates. */
2505
2506 void
2507 preserve_one_value (struct value *value, struct objfile *objfile,
2508 htab_t copied_types)
2509 {
2510 if (value->m_type->objfile_owner () == objfile)
2511 value->m_type = copy_type_recursive (value->m_type, copied_types);
2512
2513 if (value->m_enclosing_type->objfile_owner () == objfile)
2514 value->m_enclosing_type = copy_type_recursive (value->m_enclosing_type,
2515 copied_types);
2516 }
2517
2518 /* Likewise for internal variable VAR. */
2519
2520 static void
2521 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2522 htab_t copied_types)
2523 {
2524 switch (var->kind)
2525 {
2526 case INTERNALVAR_INTEGER:
2527 if (var->u.integer.type
2528 && var->u.integer.type->objfile_owner () == objfile)
2529 var->u.integer.type
2530 = copy_type_recursive (var->u.integer.type, copied_types);
2531 break;
2532
2533 case INTERNALVAR_VALUE:
2534 preserve_one_value (var->u.value, objfile, copied_types);
2535 break;
2536 }
2537 }
2538
2539 /* Make sure that all types and values referenced by VAROBJ are updated before
2540 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2541 duplicates. */
2542
2543 static void
2544 preserve_one_varobj (struct varobj *varobj, struct objfile *objfile,
2545 htab_t copied_types)
2546 {
2547 if (varobj->type->is_objfile_owned ()
2548 && varobj->type->objfile_owner () == objfile)
2549 {
2550 varobj->type
2551 = copy_type_recursive (varobj->type, copied_types);
2552 }
2553
2554 if (varobj->value != nullptr)
2555 preserve_one_value (varobj->value.get (), objfile, copied_types);
2556 }
2557
2558 /* Update the internal variables and value history when OBJFILE is
2559 discarded; we must copy the types out of the objfile. New global types
2560 will be created for every convenience variable which currently points to
2561 this objfile's types, and the convenience variables will be adjusted to
2562 use the new global types. */
2563
2564 void
2565 preserve_values (struct objfile *objfile)
2566 {
2567 struct internalvar *var;
2568
2569 /* Create the hash table. We allocate on the objfile's obstack, since
2570 it is soon to be deleted. */
2571 htab_up copied_types = create_copied_types_hash ();
2572
2573 for (const value_ref_ptr &item : value_history)
2574 preserve_one_value (item.get (), objfile, copied_types.get ());
2575
2576 for (var = internalvars; var; var = var->next)
2577 preserve_one_internalvar (var, objfile, copied_types.get ());
2578
2579 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2580 all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj)
2581 {
2582 preserve_one_varobj (varobj, objfile,
2583 copied_types.get ());
2584 });
2585
2586 preserve_ext_lang_values (objfile, copied_types.get ());
2587 }
2588
2589 static void
2590 show_convenience (const char *ignore, int from_tty)
2591 {
2592 struct gdbarch *gdbarch = get_current_arch ();
2593 struct internalvar *var;
2594 int varseen = 0;
2595 struct value_print_options opts;
2596
2597 get_user_print_options (&opts);
2598 for (var = internalvars; var; var = var->next)
2599 {
2600
2601 if (!varseen)
2602 {
2603 varseen = 1;
2604 }
2605 gdb_printf (("$%s = "), var->name);
2606
2607 try
2608 {
2609 struct value *val;
2610
2611 val = value_of_internalvar (gdbarch, var);
2612 value_print (val, gdb_stdout, &opts);
2613 }
2614 catch (const gdb_exception_error &ex)
2615 {
2616 fprintf_styled (gdb_stdout, metadata_style.style (),
2617 _("<error: %s>"), ex.what ());
2618 }
2619
2620 gdb_printf (("\n"));
2621 }
2622 if (!varseen)
2623 {
2624 /* This text does not mention convenience functions on purpose.
2625 The user can't create them except via Python, and if Python support
2626 is installed this message will never be printed ($_streq will
2627 exist). */
2628 gdb_printf (_("No debugger convenience variables now defined.\n"
2629 "Convenience variables have "
2630 "names starting with \"$\";\n"
2631 "use \"set\" as in \"set "
2632 "$foo = 5\" to define them.\n"));
2633 }
2634 }
2635 \f
2636
2637 /* See value.h. */
2638
2639 struct value *
2640 value_from_xmethod (xmethod_worker_up &&worker)
2641 {
2642 struct value *v;
2643
2644 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2645 v->m_lval = lval_xcallable;
2646 v->m_location.xm_worker = worker.release ();
2647 v->m_modifiable = 0;
2648
2649 return v;
2650 }
2651
2652 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2653
2654 struct type *
2655 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2656 {
2657 gdb_assert (method->type ()->code () == TYPE_CODE_XMETHOD
2658 && method->m_lval == lval_xcallable && !argv.empty ());
2659
2660 return method->m_location.xm_worker->get_result_type (argv[0], argv.slice (1));
2661 }
2662
2663 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2664
2665 struct value *
2666 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2667 {
2668 gdb_assert (method->type ()->code () == TYPE_CODE_XMETHOD
2669 && method->m_lval == lval_xcallable && !argv.empty ());
2670
2671 return method->m_location.xm_worker->invoke (argv[0], argv.slice (1));
2672 }
2673 \f
2674 /* Extract a value as a C number (either long or double).
2675 Knows how to convert fixed values to double, or
2676 floating values to long.
2677 Does not deallocate the value. */
2678
2679 LONGEST
2680 value_as_long (struct value *val)
2681 {
2682 /* This coerces arrays and functions, which is necessary (e.g.
2683 in disassemble_command). It also dereferences references, which
2684 I suspect is the most logical thing to do. */
2685 val = coerce_array (val);
2686 return unpack_long (val->type (), value_contents (val).data ());
2687 }
2688
2689 /* Extract a value as a C pointer. Does not deallocate the value.
2690 Note that val's type may not actually be a pointer; value_as_long
2691 handles all the cases. */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = val->type ()->arch ();

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (val->type ()->code () == TYPE_CODE_FUNC
      || val->type ()->code () == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Non-pointer values go through the architecture's integer-to-address
     hook when one is provided.  */
  if (!val->type ()->is_pointer_or_reference ()
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, val->type (),
				       value_contents (val).data ());

  return unpack_long (val->type (), value_contents (val).data ());
#endif
}
2794 \f
2795 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2796 as a long, or as a double, assuming the raw data is described
2797 by type TYPE. Knows how to convert different sizes of values
2798 and can convert between fixed and floating point. We don't assume
2799 any alignment for the raw data. Return value is in host byte order.
2800
2801 If you want functions and arrays to be coerced to pointers, and
2802 references to be dereferenced, call value_as_long() instead.
2803
2804 C++: It is assumed that the front-end has taken care of
2805 all matters concerning pointers to members. A pointer
2806 to member which reaches here is considered to be equivalent
2807 to an INT (or some size). After all, it is only an offset. */
2808
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  /* A fixed-point value is unpacked via its underlying base type.  */
  if (is_fixed_point_type (type))
    type = type->fixed_point_type_base_type ();

  enum bfd_endian byte_order = type_byte_order (type);
  enum type_code code = type->code ();
  int len = type->length ();
  int nosign = type->is_unsigned ();

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      {
	LONGEST result;

	/* Types whose bit size differs from their byte length need
	   bit-level extraction.  */
	if (type->bit_size_differs_p ())
	  {
	    unsigned bit_off = type->bit_offset ();
	    unsigned bit_size = type->bit_size ();
	    if (bit_size == 0)
	      {
		/* unpack_bits_as_long doesn't handle this case the
		   way we'd like, so handle it here.  */
		result = 0;
	      }
	    else
	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
	  }
	else
	  {
	    if (nosign)
	      result = extract_unsigned_integer (valaddr, len, byte_order);
	    else
	      result = extract_signed_integer (valaddr, len, byte_order);
	  }
	/* Range types store values with their bias removed.  */
	if (code == TYPE_CODE_RANGE)
	  result += type->bounds ()->bias;
	return result;
      }

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return target_float_to_longest (valaddr, type);

    case TYPE_CODE_FIXED_POINT:
      {
	/* Read the scaled rational, then truncate toward zero.  */
	gdb_mpq vq;
	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
			     byte_order, nosign,
			     type->fixed_point_scaling_factor ());

	gdb_mpz vz;
	mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
	return vz.as_integer<LONGEST> ();
      }

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
}
2886
2887 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2888 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2889 We don't assume any alignment for the raw data. Return value is in
2890 host byte order.
2891
2892 If you want functions and arrays to be coerced to pointers, and
2893 references to be dereferenced, call value_as_address() instead.
2894
2895 C++: It is assumed that the front-end has taken care of
2896 all matters concerning pointers to members. A pointer
2897 to member which reaches here is considered to be equivalent
2898 to an INT (or some size). After all, it is only an offset. */
2899
2900 CORE_ADDR
2901 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2902 {
2903 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2904 whether we want this to be true eventually. */
2905 return unpack_long (type, valaddr);
2906 }
2907
2908 bool
2909 is_floating_value (struct value *val)
2910 {
2911 struct type *type = check_typedef (val->type ());
2912
2913 if (is_floating_type (type))
2914 {
2915 if (!target_float_is_valid (value_contents (val).data (), type))
2916 error (_("Invalid floating value found in program."));
2917 return true;
2918 }
2919
2920 return false;
2921 }
2922
2923 \f
2924 /* Get the value of the FIELDNO'th field (which must be static) of
2925 TYPE. */
2926
struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (type->field (fieldno).loc_kind ())
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info gave us the member's address directly.  */
      retval = value_at_lazy (type->field (fieldno).type (),
			      type->field (fieldno).loc_physaddr ());
      break;
    case FIELD_LOC_KIND_PHYSNAME:
      {
	/* Look the member up by its mangled name, falling back to the
	   minimal symbol table when no full symbol is available.  */
	const char *phys_name = type->field (fieldno).loc_physname ();
	/* type->field (fieldno).name (); */
	struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

	if (sym.symbol == NULL)
	  {
	    /* With some compilers, e.g. HP aCC, static data members are
	       reported as non-debuggable symbols.  */
	    struct bound_minimal_symbol msym
	      = lookup_minimal_symbol (phys_name, NULL, NULL);
	    struct type *field_type = type->field (fieldno).type ();

	    if (!msym.minsym)
	      /* Not found anywhere: report it as optimized out.  */
	      retval = allocate_optimized_out_value (field_type);
	    else
	      retval = value_at_lazy (field_type, msym.value_address ());
	  }
	else
	  retval = value_of_variable (sym.symbol, sym.block);
	break;
      }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
2967
2968 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2969 You have to be careful here, since the size of the data area for the value
2970 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2971 than the old enclosing type, you have to allocate more space for the
2972 data. */
2973
void
set_value_enclosing_type (struct value *val, struct type *new_encl_type)
{
  /* Grow the contents buffer if the new enclosing type needs more
     space; it is never shrunk.  */
  if (new_encl_type->length () > value_enclosing_type (val)->length ())
    {
      check_type_length_before_alloc (new_encl_type);
      val->m_contents
	.reset ((gdb_byte *) xrealloc (val->m_contents.release (),
				       new_encl_type->length ()));
    }

  val->m_enclosing_type = new_encl_type;
}
2987
2988 /* Given a value ARG1 (offset by OFFSET bytes)
2989 of a struct or union type ARG_TYPE,
2990 extract and return the value of one of its (non-static) fields.
2991 FIELDNO says which field. */
2992
struct value *
value_primitive_field (struct value *arg1, LONGEST offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = arg1->arch ();
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = arg_type->field (fieldno).type ();

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
      LONGEST container_bitsize = type->length () * 8;

      v = allocate_value_lazy (type);
      v->m_bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->m_bitsize <= container_bitsize
	  && type->length () <= (int) sizeof (LONGEST))
	v->m_bitpos = bitpos % container_bitsize;
      else
	v->m_bitpos = bitpos % 8;
      v->m_offset = (value_embedded_offset (arg1)
		     + offset
		     + (bitpos - v->m_bitpos) / 8);
      v->set_parent (arg1);
      /* If the parent's contents are already loaded, fetch the
	 bitfield from them now.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1).data (),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = arg_type->field (fieldno).loc_bitpos () / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   value_enclosing_type (arg1)->length ());
	}
      v->m_type = type;
      v->m_offset = arg1->offset ();
      v->m_embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = allocate_value_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (arg_type->field (fieldno).loc_bitpos ()
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->m_offset = (arg1->offset () + offset
		     + value_embedded_offset (arg1));
    }
  /* The new value shares ARG1's location (memory, register, ...).  */
  set_value_component_location (v, arg1);
  return v;
}
3112
3113 /* Given a value ARG1 of a struct or union type,
3114 extract and return the value of one of its (non-static) fields.
3115 FIELDNO says which field. */
3116
3117 struct value *
3118 value_field (struct value *arg1, int fieldno)
3119 {
3120 return value_primitive_field (arg1, 0, fieldno, arg1->type ());
3121 }
3122
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Look the method up by its physical (mangled) name.  Fall back to
     the minimal symbol table when no full symbol is available.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym == nullptr)
    {
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = allocate_value (ftype);
  VALUE_LVAL (v) = lval_memory;
  if (sym)
    {
      /* Full symbol: the entry PC of its block is the code address.  */
      set_value_address (v, sym->value_block ()->entry_pc ());
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = objfile->arch ();

      set_value_address (v,
			 gdbarch_convert_from_func_ptr_addr
			    (gdbarch, msym.value_address (),
			     current_inferior ()->top_target ()));
    }

  if (arg1p)
    {
      /* If the object is not already of the method's declaring type,
	 re-view it as that type so `this' has the expected type.  */
      if (type != (*arg1p)->type ())
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 (*arg1p)->offset () += offset;  */
    }

  return v;
}
3180
3181 \f
3182
3183 /* See value.h. */
3184
3185 LONGEST
3186 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3187 LONGEST bitpos, LONGEST bitsize)
3188 {
3189 enum bfd_endian byte_order = type_byte_order (field_type);
3190 ULONGEST val;
3191 ULONGEST valmask;
3192 int lsbcount;
3193 LONGEST bytes_read;
3194 LONGEST read_offset;
3195
3196 /* Read the minimum number of bytes required; there may not be
3197 enough bytes to read an entire ULONGEST. */
3198 field_type = check_typedef (field_type);
3199 if (bitsize)
3200 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3201 else
3202 {
3203 bytes_read = field_type->length ();
3204 bitsize = 8 * bytes_read;
3205 }
3206
3207 read_offset = bitpos / 8;
3208
3209 val = extract_unsigned_integer (valaddr + read_offset,
3210 bytes_read, byte_order);
3211
3212 /* Extract bits. See comment above. */
3213
3214 if (byte_order == BFD_ENDIAN_BIG)
3215 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3216 else
3217 lsbcount = (bitpos % 8);
3218 val >>= lsbcount;
3219
3220 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3221 If the field is signed, and is negative, then sign extend. */
3222
3223 if (bitsize < 8 * (int) sizeof (val))
3224 {
3225 valmask = (((ULONGEST) 1) << bitsize) - 1;
3226 val &= valmask;
3227 if (!field_type->is_unsigned ())
3228 {
3229 if (val & (valmask ^ (valmask >> 1)))
3230 {
3231 val |= ~valmask;
3232 }
3233 }
3234 }
3235
3236 return val;
3237 }
3238
3239 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3240 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3241 ORIGINAL_VALUE, which must not be NULL. See
3242 unpack_value_bits_as_long for more details. */
3243
3244 int
3245 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3246 LONGEST embedded_offset, int fieldno,
3247 const struct value *val, LONGEST *result)
3248 {
3249 int bitpos = type->field (fieldno).loc_bitpos ();
3250 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3251 struct type *field_type = type->field (fieldno).type ();
3252 int bit_offset;
3253
3254 gdb_assert (val != NULL);
3255
3256 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3257 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3258 || !value_bits_available (val, bit_offset, bitsize))
3259 return 0;
3260
3261 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3262 bitpos, bitsize);
3263 return 1;
3264 }
3265
3266 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3267 object at VALADDR. See unpack_bits_as_long for more details. */
3268
3269 LONGEST
3270 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3271 {
3272 int bitpos = type->field (fieldno).loc_bitpos ();
3273 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3274 struct type *field_type = type->field (fieldno).type ();
3275
3276 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3277 }
3278
/* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
   VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
   the contents in DEST_VAL, zero or sign extending if the type of
   DEST_VAL is wider than BITSIZE.  VALADDR points to the contents of
   VAL.  If the VAL's contents required to extract the bitfield from
   are unavailable/optimized out, DEST_VAL is correspondingly
   marked unavailable/optimized out.  */

void
unpack_value_bitfield (struct value *dest_val,
		       LONGEST bitpos, LONGEST bitsize,
		       const gdb_byte *valaddr, LONGEST embedded_offset,
		       const struct value *val)
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = dest_val->type ();

  byte_order = type_byte_order (field_type);

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See allocate_optimized_out_value.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (value_contents_raw (dest_val).data (),
			    field_type->length (), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  On big-endian targets the extracted field ends up in the
     most-significant end of DEST_VAL's buffer, hence the non-zero
     destination bit offset there.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  value_ranges_copy_adjusted (dest_val, dst_bit_offset,
			      val, src_bit_offset, bitsize);
}
3325
3326 /* Return a new value with type TYPE, which is FIELDNO field of the
3327 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3328 of VAL. If the VAL's contents required to extract the bitfield
3329 from are unavailable/optimized out, the new value is
3330 correspondingly marked unavailable/optimized out. */
3331
3332 struct value *
3333 value_field_bitfield (struct type *type, int fieldno,
3334 const gdb_byte *valaddr,
3335 LONGEST embedded_offset, const struct value *val)
3336 {
3337 int bitpos = type->field (fieldno).loc_bitpos ();
3338 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3339 struct value *res_val = allocate_value (type->field (fieldno).type ());
3340
3341 unpack_value_bitfield (res_val, bitpos, bitsize,
3342 valaddr, embedded_offset, val);
3343
3344 return res_val;
3345 }
3346
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  /* Mask with exactly BITSIZE low bits set.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS: advance ADDR to the byte containing the field
     so that BITPOS is a within-byte offset.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then merge in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3398 \f
/* Pack NUM into BUF using a target format of TYPE.  Errors out for
   type codes that cannot represent an integer constant.  */

void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = type_byte_order (type);
  LONGEST len;

  type = check_typedef (type);
  len = type->length ();

  switch (type->code ())
    {
    case TYPE_CODE_RANGE:
      /* Range types store values with the bias removed; undo it
	 before storing, then handle as an ordinary integer.  */
      num -= type->bounds ()->bias;
      /* Fall through.  */
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_MEMBERPTR:
      /* Some targets declare types whose bit size differs from their
	 storage size; mask and reposition the payload bits first.  */
      if (type->bit_size_differs_p ())
	{
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     type->code ());
    }
}
3447
3448
3449 /* Pack NUM into BUF using a target format of TYPE. */
3450
3451 static void
3452 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3453 {
3454 LONGEST len;
3455 enum bfd_endian byte_order;
3456
3457 type = check_typedef (type);
3458 len = type->length ();
3459 byte_order = type_byte_order (type);
3460
3461 switch (type->code ())
3462 {
3463 case TYPE_CODE_INT:
3464 case TYPE_CODE_CHAR:
3465 case TYPE_CODE_ENUM:
3466 case TYPE_CODE_FLAGS:
3467 case TYPE_CODE_BOOL:
3468 case TYPE_CODE_RANGE:
3469 case TYPE_CODE_MEMBERPTR:
3470 if (type->bit_size_differs_p ())
3471 {
3472 unsigned bit_off = type->bit_offset ();
3473 unsigned bit_size = type->bit_size ();
3474 num &= ((ULONGEST) 1 << bit_size) - 1;
3475 num <<= bit_off;
3476 }
3477 store_unsigned_integer (buf, len, byte_order, num);
3478 break;
3479
3480 case TYPE_CODE_REF:
3481 case TYPE_CODE_RVALUE_REF:
3482 case TYPE_CODE_PTR:
3483 store_typed_address (buf, type, (CORE_ADDR) num);
3484 break;
3485
3486 case TYPE_CODE_FLT:
3487 case TYPE_CODE_DECFLOAT:
3488 target_float_from_ulongest (buf, type, num);
3489 break;
3490
3491 default:
3492 error (_("Unexpected type (%d) encountered "
3493 "for unsigned integer constant."),
3494 type->code ());
3495 }
3496 }
3497
3498
3499 /* Create a value of type TYPE that is zero, and return it. */
3500
3501 struct value *
3502 value_zero (struct type *type, enum lval_type lv)
3503 {
3504 struct value *val = allocate_value_lazy (type);
3505
3506 VALUE_LVAL (val) = (lv == lval_computed ? not_lval : lv);
3507 val->m_is_zero = true;
3508 return val;
3509 }
3510
3511 /* Convert C numbers into newly allocated values. */
3512
3513 struct value *
3514 value_from_longest (struct type *type, LONGEST num)
3515 {
3516 struct value *val = allocate_value (type);
3517
3518 pack_long (value_contents_raw (val).data (), type, num);
3519 return val;
3520 }
3521
3522
3523 /* Convert C unsigned numbers into newly allocated values. */
3524
3525 struct value *
3526 value_from_ulongest (struct type *type, ULONGEST num)
3527 {
3528 struct value *val = allocate_value (type);
3529
3530 pack_unsigned_long (value_contents_raw (val).data (), type, num);
3531
3532 return val;
3533 }
3534
3535
3536 /* Create a value representing a pointer of type TYPE to the address
3537 ADDR. */
3538
3539 struct value *
3540 value_from_pointer (struct type *type, CORE_ADDR addr)
3541 {
3542 struct value *val = allocate_value (type);
3543
3544 store_typed_address (value_contents_raw (val).data (),
3545 check_typedef (type), addr);
3546 return val;
3547 }
3548
3549 /* Create and return a value object of TYPE containing the value D. The
3550 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3551 it is converted to target format. */
3552
3553 struct value *
3554 value_from_host_double (struct type *type, double d)
3555 {
3556 struct value *value = allocate_value (type);
3557 gdb_assert (type->code () == TYPE_CODE_FLT);
3558 target_float_from_host_double (value_contents_raw (value).data (),
3559 value->type (), d);
3560 return value;
3561 }
3562
3563 /* Create a value of type TYPE whose contents come from VALADDR, if it
3564 is non-null, and whose memory address (in the inferior) is
3565 ADDRESS. The type of the created value may differ from the passed
3566 type TYPE. Make sure to retrieve values new type after this call.
3567 Note that TYPE is not passed through resolve_dynamic_type; this is
3568 a special API intended for use only by Ada. */
3569
3570 struct value *
3571 value_from_contents_and_address_unresolved (struct type *type,
3572 const gdb_byte *valaddr,
3573 CORE_ADDR address)
3574 {
3575 struct value *v;
3576
3577 if (valaddr == NULL)
3578 v = allocate_value_lazy (type);
3579 else
3580 v = value_from_contents (type, valaddr);
3581 VALUE_LVAL (v) = lval_memory;
3582 set_value_address (v, address);
3583 return v;
3584 }
3585
3586 /* Create a value of type TYPE whose contents come from VALADDR, if it
3587 is non-null, and whose memory address (in the inferior) is
3588 ADDRESS. The type of the created value may differ from the passed
3589 type TYPE. Make sure to retrieve values new type after this call. */
3590
3591 struct value *
3592 value_from_contents_and_address (struct type *type,
3593 const gdb_byte *valaddr,
3594 CORE_ADDR address)
3595 {
3596 gdb::array_view<const gdb_byte> view;
3597 if (valaddr != nullptr)
3598 view = gdb::make_array_view (valaddr, type->length ());
3599 struct type *resolved_type = resolve_dynamic_type (type, view, address);
3600 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3601 struct value *v;
3602
3603 if (valaddr == NULL)
3604 v = allocate_value_lazy (resolved_type);
3605 else
3606 v = value_from_contents (resolved_type, valaddr);
3607 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3608 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3609 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3610 VALUE_LVAL (v) = lval_memory;
3611 set_value_address (v, address);
3612 return v;
3613 }
3614
3615 /* Create a value of type TYPE holding the contents CONTENTS.
3616 The new value is `not_lval'. */
3617
3618 struct value *
3619 value_from_contents (struct type *type, const gdb_byte *contents)
3620 {
3621 struct value *result;
3622
3623 result = allocate_value (type);
3624 memcpy (value_contents_raw (result).data (), contents, type->length ());
3625 return result;
3626 }
3627
3628 /* Extract a value from the history file. Input will be of the form
3629 $digits or $$digits. See block comment above 'write_dollar_variable'
3630 for details. */
3631
3632 struct value *
3633 value_from_history_ref (const char *h, const char **endp)
3634 {
3635 int index, len;
3636
3637 if (h[0] == '$')
3638 len = 1;
3639 else
3640 return NULL;
3641
3642 if (h[1] == '$')
3643 len = 2;
3644
3645 /* Find length of numeral string. */
3646 for (; isdigit (h[len]); len++)
3647 ;
3648
3649 /* Make sure numeral string is not part of an identifier. */
3650 if (h[len] == '_' || isalpha (h[len]))
3651 return NULL;
3652
3653 /* Now collect the index value. */
3654 if (h[1] == '$')
3655 {
3656 if (len == 2)
3657 {
3658 /* For some bizarre reason, "$$" is equivalent to "$$1",
3659 rather than to "$$0" as it ought to be! */
3660 index = -1;
3661 *endp += len;
3662 }
3663 else
3664 {
3665 char *local_end;
3666
3667 index = -strtol (&h[2], &local_end, 10);
3668 *endp = local_end;
3669 }
3670 }
3671 else
3672 {
3673 if (len == 1)
3674 {
3675 /* "$" is equivalent to "$0". */
3676 index = 0;
3677 *endp += len;
3678 }
3679 else
3680 {
3681 char *local_end;
3682
3683 index = strtol (&h[1], &local_end, 10);
3684 *endp = local_end;
3685 }
3686 }
3687
3688 return access_value_history (index);
3689 }
3690
3691 /* Get the component value (offset by OFFSET bytes) of a struct or
3692 union WHOLE. Component's type is TYPE. */
3693
3694 struct value *
3695 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3696 {
3697 struct value *v;
3698
3699 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3700 v = allocate_value_lazy (type);
3701 else
3702 {
3703 v = allocate_value (type);
3704 value_contents_copy (v, value_embedded_offset (v),
3705 whole, value_embedded_offset (whole) + offset,
3706 type_length_units (type));
3707 }
3708 v->m_offset = whole->offset () + offset + value_embedded_offset (whole);
3709 set_value_component_location (v, whole);
3710
3711 return v;
3712 }
3713
/* See value.h.  */

struct value *
value_from_component_bitsize (struct value *whole, struct type *type,
			      LONGEST bit_offset, LONGEST bit_length)
{
  gdb_assert (!value_lazy (whole));

  /* Preserve lvalue-ness if possible.  This is needed to avoid
     array-printing failures (including crashes) when printing Ada
     arrays in programs compiled with -fgnat-encodings=all.  */
  if ((bit_offset % TARGET_CHAR_BIT) == 0
      && (bit_length % TARGET_CHAR_BIT) == 0
      && bit_length == TARGET_CHAR_BIT * type->length ())
    return value_from_component (whole, type, bit_offset / TARGET_CHAR_BIT);

  struct value *v = allocate_value (type);

  /* For big-endian scalars the payload sits at the most-significant
     end of the destination buffer, so shift the destination bit
     offset accordingly.  */
  LONGEST dst_offset = TARGET_CHAR_BIT * value_embedded_offset (v);
  if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG)
    dst_offset += TARGET_CHAR_BIT * type->length () - bit_length;

  value_contents_copy_raw_bitwise (v, dst_offset,
				   whole,
				   TARGET_CHAR_BIT
				   * value_embedded_offset (whole)
				   + bit_offset,
				   bit_length);
  return v;
}
3744
3745 struct value *
3746 coerce_ref_if_computed (const struct value *arg)
3747 {
3748 const struct lval_funcs *funcs;
3749
3750 if (!TYPE_IS_REFERENCE (check_typedef (arg->type ())))
3751 return NULL;
3752
3753 if (value_lval_const (arg) != lval_computed)
3754 return NULL;
3755
3756 funcs = value_computed_funcs (arg);
3757 if (funcs->coerce_ref == NULL)
3758 return NULL;
3759
3760 return funcs->coerce_ref (arg);
3761 }
3762
3763 /* Look at value.h for description. */
3764
3765 struct value *
3766 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3767 const struct type *original_type,
3768 struct value *original_value,
3769 CORE_ADDR original_value_address)
3770 {
3771 gdb_assert (original_type->is_pointer_or_reference ());
3772
3773 struct type *original_target_type = original_type->target_type ();
3774 gdb::array_view<const gdb_byte> view;
3775 struct type *resolved_original_target_type
3776 = resolve_dynamic_type (original_target_type, view,
3777 original_value_address);
3778
3779 /* Re-adjust type. */
3780 value->deprecated_set_type (resolved_original_target_type);
3781
3782 /* Add embedding info. */
3783 set_value_enclosing_type (value, enc_type);
3784 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3785
3786 /* We may be pointing to an object of some derived type. */
3787 return value_full_object (value, NULL, 0, 0, 0);
3788 }
3789
3790 struct value *
3791 coerce_ref (struct value *arg)
3792 {
3793 struct type *value_type_arg_tmp = check_typedef (arg->type ());
3794 struct value *retval;
3795 struct type *enc_type;
3796
3797 retval = coerce_ref_if_computed (arg);
3798 if (retval)
3799 return retval;
3800
3801 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3802 return arg;
3803
3804 enc_type = check_typedef (value_enclosing_type (arg));
3805 enc_type = enc_type->target_type ();
3806
3807 CORE_ADDR addr = unpack_pointer (arg->type (), value_contents (arg).data ());
3808 retval = value_at_lazy (enc_type, addr);
3809 enc_type = retval->type ();
3810 return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3811 arg, addr);
3812 }
3813
3814 struct value *
3815 coerce_array (struct value *arg)
3816 {
3817 struct type *type;
3818
3819 arg = coerce_ref (arg);
3820 type = check_typedef (arg->type ());
3821
3822 switch (type->code ())
3823 {
3824 case TYPE_CODE_ARRAY:
3825 if (!type->is_vector () && current_language->c_style_arrays_p ())
3826 arg = value_coerce_array (arg);
3827 break;
3828 case TYPE_CODE_FUNC:
3829 arg = value_coerce_function (arg);
3830 break;
3831 }
3832 return arg;
3833 }
3834 \f
3835
3836 /* Return the return value convention that will be used for the
3837 specified type. */
3838
3839 enum return_value_convention
3840 struct_return_convention (struct gdbarch *gdbarch,
3841 struct value *function, struct type *value_type)
3842 {
3843 enum type_code code = value_type->code ();
3844
3845 if (code == TYPE_CODE_ERROR)
3846 error (_("Function return type unknown."));
3847
3848 /* Probe the architecture for the return-value convention. */
3849 return gdbarch_return_value_as_value (gdbarch, function, value_type,
3850 NULL, NULL, NULL);
3851 }
3852
3853 /* Return true if the function returning the specified type is using
3854 the convention of returning structures in memory (passing in the
3855 address as a hidden first parameter). */
3856
3857 int
3858 using_struct_return (struct gdbarch *gdbarch,
3859 struct value *function, struct type *value_type)
3860 {
3861 if (value_type->code () == TYPE_CODE_VOID)
3862 /* A void return value is never in memory. See also corresponding
3863 code in "print_return_value". */
3864 return 0;
3865
3866 return (struct_return_convention (gdbarch, function, value_type)
3867 != RETURN_VALUE_REGISTER_CONVENTION);
3868 }
3869
/* Set the initialized field in a value struct.  NOTE(review): callers
   appear to use this to track whether a variable has been initialized
   at the current PC — confirm against the DWARF uninitialized-variable
   support before relying on that.  */

void
set_value_initialized (struct value *val, int status)
{
  val->m_initialized = status;
}
3877
/* Return the initialized field in a value struct; see
   set_value_initialized.  */

int
value_initialized (const struct value *val)
{
  return val->m_initialized;
}
3885
/* Helper for value_fetch_lazy when the value is a bitfield: fetch the
   parent value if needed, then unpack this field's bits out of it.  */

static void
value_fetch_lazy_bitfield (struct value *val)
{
  gdb_assert (val->bitsize () != 0);

  /* To read a lazy bitfield, read the entire enclosing value.  This
     prevents reading the same block of (possibly volatile) memory once
     per bitfield.  It would be even better to read only the containing
     word, but we have no way to record that just specific bits of a
     value have been fetched.  */
  struct value *parent = val->parent ();

  if (value_lazy (parent))
    value_fetch_lazy (parent);

  /* Extract this field's bits from the (now fetched) parent,
     propagating any unavailable/optimized-out markings.  */
  unpack_value_bitfield (val, val->bitpos (), val->bitsize (),
			 value_contents_for_printing (parent).data (),
			 val->offset (), parent);
}
3907
/* Helper for value_fetch_lazy when the value is in memory: read the
   value's bytes from the inferior into its buffer.  */

static void
value_fetch_lazy_memory (struct value *val)
{
  gdb_assert (VALUE_LVAL (val) == lval_memory);

  CORE_ADDR addr = value_address (val);
  struct type *type = check_typedef (value_enclosing_type (val));

  /* Figure out how much we should copy from memory.  Usually, this is just
     the size of the type, but, for arrays, we might only be loading a
     small part of the array (this is only done for very large arrays).  */
  int len = 0;
  if (val->m_limited_length > 0)
    {
      gdb_assert (val->type ()->code () == TYPE_CODE_ARRAY);
      len = val->m_limited_length;
    }
  else if (type->length () > 0)
    len = type_length_units (type);

  gdb_assert (len >= 0);

  /* Zero-length values read nothing; they are simply marked fetched
     by the caller.  */
  if (len > 0)
    read_value_memory (val, 0, value_stack (val), addr,
		       value_contents_all_raw (val).data (), len);
}
3936
/* Helper for value_fetch_lazy when the value is in a register: unwind
   the register through the frame chain until a non-lazy (or
   non-register) value is found, then copy its contents and
   availability metadata into VAL.  */

static void
value_fetch_lazy_register (struct value *val)
{
  frame_info_ptr next_frame;
  int regnum;
  struct type *type = check_typedef (val->type ());
  struct value *new_val = val, *mark = value_mark ();

  /* Offsets are not supported here; lazy register values must
     refer to the entire register.  */
  gdb_assert (val->offset () == 0);

  /* Follow the chain of lazy register values: each iteration unwinds
     one frame until the register's actual location is found.  */
  while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
    {
      struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);

      next_frame = frame_find_by_id (next_frame_id);
      regnum = VALUE_REGNUM (new_val);

      gdb_assert (next_frame != NULL);

      /* Convertible register routines are used for multi-register
	 values and for interpretation in different types
	 (e.g. float or int from a double register).  Lazy
	 register values should have the register's natural type,
	 so they do not apply.  */
      gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
					       regnum, type));

      /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
	 Since a "->next" operation was performed when setting
	 this field, we do not need to perform a "next" operation
	 again when unwinding the register.  That's why
	 frame_unwind_register_value() is called here instead of
	 get_frame_register_value().  */
      new_val = frame_unwind_register_value (next_frame, regnum);

      /* If we get another lazy lval_register value, it means the
	 register is found by reading it from NEXT_FRAME's next frame.
	 frame_unwind_register_value should never return a value with
	 the frame id pointing to NEXT_FRAME.  If it does, it means we
	 either have two consecutive frames with the same frame id
	 in the frame chain, or some code is trying to unwind
	 behind get_prev_frame's back (e.g., a frame unwind
	 sniffer trying to unwind), bypassing its validations.  In
	 any case, it should always be an internal error to end up
	 in this situation.  */
      if (VALUE_LVAL (new_val) == lval_register
	  && value_lazy (new_val)
	  && VALUE_NEXT_FRAME_ID (new_val) == next_frame_id)
	internal_error (_("infinite loop while fetching a register"));
    }

  /* If it's still lazy (for instance, a saved register on the
     stack), fetch it.  */
  if (value_lazy (new_val))
    value_fetch_lazy (new_val);

  /* Copy the contents and the unavailability/optimized-out
     meta-data from NEW_VAL to VAL.  */
  set_value_lazy (val, 0);
  value_contents_copy (val, value_embedded_offset (val),
		       new_val, value_embedded_offset (new_val),
		       type_length_units (type));

  /* Under "set debug frame", describe where the register value was
     ultimately found and what bytes it holds.  */
  if (frame_debug)
    {
      struct gdbarch *gdbarch;
      frame_info_ptr frame;
      frame = frame_find_by_id (VALUE_NEXT_FRAME_ID (val));
      frame = get_prev_frame_always (frame);
      regnum = VALUE_REGNUM (val);
      gdbarch = get_frame_arch (frame);

      string_file debug_file;
      gdb_printf (&debug_file,
		  "(frame=%d, regnum=%d(%s), ...) ",
		  frame_relative_level (frame), regnum,
		  user_reg_map_regnum_to_name (gdbarch, regnum));

      gdb_printf (&debug_file, "->");
      if (value_optimized_out (new_val))
	{
	  gdb_printf (&debug_file, " ");
	  val_print_optimized_out (new_val, &debug_file);
	}
      else
	{
	  int i;
	  gdb::array_view<const gdb_byte> buf = value_contents (new_val);

	  if (VALUE_LVAL (new_val) == lval_register)
	    gdb_printf (&debug_file, " register=%d",
			VALUE_REGNUM (new_val));
	  else if (VALUE_LVAL (new_val) == lval_memory)
	    gdb_printf (&debug_file, " address=%s",
			paddress (gdbarch,
				  value_address (new_val)));
	  else
	    gdb_printf (&debug_file, " computed");

	  gdb_printf (&debug_file, " bytes=");
	  gdb_printf (&debug_file, "[");
	  for (i = 0; i < register_size (gdbarch, regnum); i++)
	    gdb_printf (&debug_file, "%02x", buf[i]);
	  gdb_printf (&debug_file, "]");
	}

      frame_debug_printf ("%s", debug_file.c_str ());
    }

  /* Dispose of the intermediate values.  This prevents
     watchpoints from trying to watch the saved frame pointer.  */
  value_free_to_mark (mark);
}
4054
/* Load the actual content of a lazy value.  Fetch the data from the
   user's process and clear the lazy flag to indicate that the data in
   the buffer is valid.

   If the value is zero-length, we avoid calling read_memory, which
   would abort.  We mark the value as fetched anyway -- all 0 bytes of
   it.  */

void
value_fetch_lazy (struct value *val)
{
  gdb_assert (value_lazy (val));
  allocate_value_contents (val, true);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (val->m_optimized_out.empty ());
  gdb_assert (val->m_unavailable.empty ());
  /* Dispatch on where the value lives.  Note the order: the
     zero-value and bitfield checks must precede the lval checks.  */
  if (val->m_is_zero)
    {
      /* Nothing.  */
    }
  else if (val->bitsize ())
    value_fetch_lazy_bitfield (val);
  else if (VALUE_LVAL (val) == lval_memory)
    value_fetch_lazy_memory (val);
  else if (VALUE_LVAL (val) == lval_register)
    value_fetch_lazy_register (val);
  else if (VALUE_LVAL (val) == lval_computed
	   && value_computed_funcs (val)->read != NULL)
    value_computed_funcs (val)->read (val);
  else
    internal_error (_("Unexpected lazy value type."));

  set_value_lazy (val, 0);
}
4091
4092 /* Implementation of the convenience function $_isvoid. */
4093
4094 static struct value *
4095 isvoid_internal_fn (struct gdbarch *gdbarch,
4096 const struct language_defn *language,
4097 void *cookie, int argc, struct value **argv)
4098 {
4099 int ret;
4100
4101 if (argc != 1)
4102 error (_("You must provide one argument for $_isvoid."));
4103
4104 ret = argv[0]->type ()->code () == TYPE_CODE_VOID;
4105
4106 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4107 }
4108
4109 /* Implementation of the convenience function $_creal. Extracts the
4110 real part from a complex number. */
4111
4112 static struct value *
4113 creal_internal_fn (struct gdbarch *gdbarch,
4114 const struct language_defn *language,
4115 void *cookie, int argc, struct value **argv)
4116 {
4117 if (argc != 1)
4118 error (_("You must provide one argument for $_creal."));
4119
4120 value *cval = argv[0];
4121 type *ctype = check_typedef (cval->type ());
4122 if (ctype->code () != TYPE_CODE_COMPLEX)
4123 error (_("expected a complex number"));
4124 return value_real_part (cval);
4125 }
4126
4127 /* Implementation of the convenience function $_cimag. Extracts the
4128 imaginary part from a complex number. */
4129
4130 static struct value *
4131 cimag_internal_fn (struct gdbarch *gdbarch,
4132 const struct language_defn *language,
4133 void *cookie, int argc,
4134 struct value **argv)
4135 {
4136 if (argc != 1)
4137 error (_("You must provide one argument for $_cimag."));
4138
4139 value *cval = argv[0];
4140 type *ctype = check_typedef (cval->type ());
4141 if (ctype->code () != TYPE_CODE_COMPLEX)
4142 error (_("expected a complex number"));
4143 return value_imaginary_part (cval);
4144 }
4145
#if GDB_SELF_TEST
namespace selftests
{

/* Test the ranges_contain function against a fixed pair of ranges.  */

static void
test_ranges_contain ()
{
  /* Two disjoint ranges: [10, 14] and [20, 24].  */
  std::vector<range> ranges = { { 10, 5 }, { 20, 5 } };

  SELF_CHECK (!ranges_contain (ranges, 2, 5));	/* [2, 6]: before both.  */
  SELF_CHECK (ranges_contain (ranges, 9, 5));	/* [9, 13]: overlaps first.  */
  SELF_CHECK (ranges_contain (ranges, 10, 2));	/* [10, 11]: inside first.  */
  SELF_CHECK (ranges_contain (ranges, 10, 5));	/* [10, 14]: exactly first.  */
  SELF_CHECK (ranges_contain (ranges, 13, 6));	/* [13, 18]: tail of first.  */
  SELF_CHECK (ranges_contain (ranges, 14, 5));	/* [14, 18]: last bit only.  */
  SELF_CHECK (!ranges_contain (ranges, 15, 4));	/* [15, 18]: in the gap.  */
  SELF_CHECK (!ranges_contain (ranges, 16, 4));	/* [16, 19]: in the gap.  */
  SELF_CHECK (ranges_contain (ranges, 16, 6));	/* [16, 21]: reaches second.  */
  SELF_CHECK (ranges_contain (ranges, 21, 1));	/* [21, 21]: inside second.  */
  SELF_CHECK (ranges_contain (ranges, 21, 5));	/* [21, 25]: overlaps second.  */
  SELF_CHECK (!ranges_contain (ranges, 26, 3));	/* [26, 28]: after both.  */
}

/* Return true iff RANGES holds exactly the same ranges as EXPECTED,
   in the same order.  */

static bool
check_ranges_vector (gdb::array_view<const range> ranges,
		     gdb::array_view<const range> expected)
{
  if (ranges.size () != expected.size ())
    return false;

  return std::equal (ranges.begin (), ranges.end (), expected.begin ());
}

/* Test the insert_into_bit_range_vector function.  */

static void
test_insert_into_bit_range_vector ()
{
  std::vector<range> ranges;

  /* Insert [OFFSET, OFFSET + LENGTH) into RANGES, then verify that
     RANGES now matches EXPECTED.  */
  auto insert_and_check = [&ranges] (int offset, int length,
				     gdb::array_view<const range> expected)
    {
      insert_into_bit_range_vector (&ranges, offset, length);
      SELF_CHECK (check_ranges_vector (ranges, expected));
    };

  /* First insertion: [10, 14].  */
  {
    static const range expected[] = { { 10, 5 } };
    insert_and_check (10, 5, expected);
  }

  /* Inserting a sub-range leaves [10, 14] unchanged.  */
  {
    static const range expected[] = { { 10, 5 } };
    insert_and_check (11, 4, expected);
  }

  /* A disjoint range is appended: [10, 14] [20, 24].  */
  {
    static const range expected[] = { { 10, 5 }, { 20, 5 } };
    insert_and_check (20, 5, expected);
  }

  /* [17, 21] merges into the second range: [10, 14] [17, 24].  */
  {
    static const range expected[] = { { 10, 5 }, { 17, 8 } };
    insert_and_check (17, 5, expected);
  }

  /* A disjoint range before the others: [2, 8] [10, 14] [17, 24].  */
  {
    static const range expected[] = { { 2, 7 }, { 10, 5 }, { 17, 8 } };
    insert_and_check (2, 7, expected);
  }

  /* [9, 9] glues the first two together: [2, 14] [17, 24].  */
  {
    static const range expected[] = { { 2, 13 }, { 17, 8 } };
    insert_and_check (9, 1, expected);
  }

  /* Re-inserting [9, 9] changes nothing.  */
  {
    static const range expected[] = { { 2, 13 }, { 17, 8 } };
    insert_and_check (9, 1, expected);
  }

  /* [4, 33] overlaps everything, collapsing to [2, 33].  */
  {
    static const range expected[] = { { 2, 32 } };
    insert_and_check (4, 30, expected);
  }
}

/* Test copying an entirely optimized out value.  */

static void
test_value_copy ()
{
  type *int_type = builtin_type (current_inferior ()->gdbarch)->builtin_int;

  /* An entirely optimized out value may not have its contents
     allocated; value_copy must cope with that.  */
  value_ref_ptr original
    = release_value (allocate_optimized_out_value (int_type));
  value_ref_ptr duplicate = release_value (value_copy (original.get ()));

  SELF_CHECK (value_entirely_optimized_out (original.get ()));
  SELF_CHECK (value_entirely_optimized_out (duplicate.get ()));
}

} /* namespace selftests */
#endif /* GDB_SELF_TEST */
4301
4302 void _initialize_values ();
4303 void
4304 _initialize_values ()
4305 {
4306 cmd_list_element *show_convenience_cmd
4307 = add_cmd ("convenience", no_class, show_convenience, _("\
4308 Debugger convenience (\"$foo\") variables and functions.\n\
4309 Convenience variables are created when you assign them values;\n\
4310 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4311 \n\
4312 A few convenience variables are given values automatically:\n\
4313 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4314 \"$__\" holds the contents of the last address examined with \"x\"."
4315 #ifdef HAVE_PYTHON
4316 "\n\n\
4317 Convenience functions are defined via the Python API."
4318 #endif
4319 ), &showlist);
4320 add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4321
4322 add_cmd ("values", no_set_class, show_values, _("\
4323 Elements of value history around item number IDX (or last ten)."),
4324 &showlist);
4325
4326 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4327 Initialize a convenience variable if necessary.\n\
4328 init-if-undefined VARIABLE = EXPRESSION\n\
4329 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4330 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4331 VARIABLE is already initialized."));
4332
4333 add_prefix_cmd ("function", no_class, function_command, _("\
4334 Placeholder command for showing help on convenience functions."),
4335 &functionlist, 0, &cmdlist);
4336
4337 add_internal_function ("_isvoid", _("\
4338 Check whether an expression is void.\n\
4339 Usage: $_isvoid (expression)\n\
4340 Return 1 if the expression is void, zero otherwise."),
4341 isvoid_internal_fn, NULL);
4342
4343 add_internal_function ("_creal", _("\
4344 Extract the real part of a complex number.\n\
4345 Usage: $_creal (expression)\n\
4346 Return the real part of a complex number, the type depends on the\n\
4347 type of a complex number."),
4348 creal_internal_fn, NULL);
4349
4350 add_internal_function ("_cimag", _("\
4351 Extract the imaginary part of a complex number.\n\
4352 Usage: $_cimag (expression)\n\
4353 Return the imaginary part of a complex number, the type depends on the\n\
4354 type of a complex number."),
4355 cimag_internal_fn, NULL);
4356
4357 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4358 class_support, &max_value_size, _("\
4359 Set maximum sized value gdb will load from the inferior."), _("\
4360 Show maximum sized value gdb will load from the inferior."), _("\
4361 Use this to control the maximum size, in bytes, of a value that gdb\n\
4362 will load from the inferior. Setting this value to 'unlimited'\n\
4363 disables checking.\n\
4364 Setting this does not invalidate already allocated values, it only\n\
4365 prevents future values, larger than this size, from being allocated."),
4366 set_max_value_size,
4367 show_max_value_size,
4368 &setlist, &showlist);
4369 set_show_commands vsize_limit
4370 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4371 &max_value_size, _("\
4372 Set the maximum number of bytes allowed in a variable-size object."), _("\
4373 Show the maximum number of bytes allowed in a variable-size object."), _("\
4374 Attempts to access an object whose size is not a compile-time constant\n\
4375 and exceeds this limit will cause an error."),
4376 NULL, NULL, &setlist, &showlist);
4377 deprecate_cmd (vsize_limit.set, "set max-value-size");
4378
4379 #if GDB_SELF_TEST
4380 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4381 selftests::register_test ("insert_into_bit_range_vector",
4382 selftests::test_insert_into_bit_range_vector);
4383 selftests::register_test ("value_copy", selftests::test_value_copy);
4384 #endif
4385 }
4386
/* See value.h.  */

void
finalize_values ()
{
  /* Drop every reference held in the file-global ALL_VALUES list
     (declared earlier in this file; presumably the value history) so
     the values are destroyed before final shutdown.  */
  all_values.clear ();
}