[gdb] Don't use gdb_stdlog for inferior-events
[binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "gdbsupport/selftest.h"
45 #include "gdbsupport/array-view.h"
46 #include "cli/cli-style.h"
47 #include "expop.h"
48 #include "inferior.h"
49
50 /* Definition of a user function. */
51 struct internal_function
52 {
53 /* The name of the function. It is a bit odd to have this in the
54 function itself -- the user might use a differently-named
55 convenience variable to hold the function. */
56 char *name;
57
58 /* The handler. */
59 internal_function_fn handler;
60
61 /* User data for the handler. */
62 void *cookie;
63 };
64
65 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
66
67 struct range
68 {
69 /* Lowest offset in the range. */
70 LONGEST offset;
71
72 /* Length of the range. */
73 LONGEST length;
74
75 /* Returns true if THIS is strictly less than OTHER, useful for
76 searching. We keep ranges sorted by offset and coalesce
77 overlapping and contiguous ranges, so this just compares the
78 starting offset. */
79
80 bool operator< (const range &other) const
81 {
82 return offset < other.offset;
83 }
84
85 /* Returns true if THIS is equal to OTHER. */
86 bool operator== (const range &other) const
87 {
88 return offset == other.offset && length == other.length;
89 }
90 };
91
92 /* Returns true if the ranges defined by [offset1, offset1+len1) and
93 [offset2, offset2+len2) overlap. */
94
95 static int
96 ranges_overlap (LONGEST offset1, LONGEST len1,
97 LONGEST offset2, LONGEST len2)
98 {
99 ULONGEST h, l;
100
101 l = std::max (offset1, offset2);
102 h = std::min (offset1 + len1, offset2 + len2);
103 return (l < h);
104 }
105
106 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 OFFSET+LENGTH). */
108
109 static int
110 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
111 LONGEST length)
112 {
113 range what;
114
115 what.offset = offset;
116 what.length = length;
117
118 /* We keep ranges sorted by offset and coalesce overlapping and
119 contiguous ranges, so to check if a range list contains a given
120 range, we can do a binary search for the position the given range
121 would be inserted if we only considered the starting OFFSET of
122 ranges. We call that position I. Since we also have LENGTH to
123 care for (this is a range afterall), we need to check if the
124 _previous_ range overlaps the I range. E.g.,
125
126 R
127 |---|
128 |---| |---| |------| ... |--|
129 0 1 2 N
130
131 I=1
132
133 In the case above, the binary search would return `I=1', meaning,
134 this OFFSET should be inserted at position 1, and the current
135 position 1 should be pushed further (and before 2). But, `0'
136 overlaps with R.
137
138 Then we need to check if the I range overlaps the I range itself.
139 E.g.,
140
141 R
142 |---|
143 |---| |---| |-------| ... |--|
144 0 1 2 N
145
146 I=1
147 */
148
149
150 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
151
152 if (i > ranges.begin ())
153 {
154 const struct range &bef = *(i - 1);
155
156 if (ranges_overlap (bef.offset, bef.length, offset, length))
157 return 1;
158 }
159
160 if (i < ranges.end ())
161 {
162 const struct range &r = *i;
163
164 if (ranges_overlap (r.offset, r.length, offset, length))
165 return 1;
166 }
167
168 return 0;
169 }
170
171 static struct cmd_list_element *functionlist;
172
173 /* Note that the fields in this structure are arranged to save a bit
174 of memory. */
175
176 struct value
177 {
178 explicit value (struct type *type_)
179 : modifiable (1),
180 lazy (1),
181 initialized (1),
182 stack (0),
183 is_zero (false),
184 type (type_),
185 enclosing_type (type_)
186 {
187 }
188
189 ~value ()
190 {
191 if (VALUE_LVAL (this) == lval_computed)
192 {
193 const struct lval_funcs *funcs = location.computed.funcs;
194
195 if (funcs->free_closure)
196 funcs->free_closure (this);
197 }
198 else if (VALUE_LVAL (this) == lval_xcallable)
199 delete location.xm_worker;
200 }
201
202 DISABLE_COPY_AND_ASSIGN (value);
203
204 /* Type of value; either not an lval, or one of the various
205 different possible kinds of lval. */
206 enum lval_type lval = not_lval;
207
208 /* Is it modifiable? Only relevant if lval != not_lval. */
209 unsigned int modifiable : 1;
210
211 /* If zero, contents of this value are in the contents field. If
212 nonzero, contents are in inferior. If the lval field is lval_memory,
213 the contents are in inferior memory at location.address plus offset.
214 The lval field may also be lval_register.
215
216 WARNING: This field is used by the code which handles watchpoints
217 (see breakpoint.c) to decide whether a particular value can be
218 watched by hardware watchpoints. If the lazy flag is set for
219 some member of a value chain, it is assumed that this member of
220 the chain doesn't need to be watched as part of watching the
221 value itself. This is how GDB avoids watching the entire struct
222 or array when the user wants to watch a single struct member or
223 array element. If you ever change the way lazy flag is set and
224 reset, be sure to consider this use as well! */
225 unsigned int lazy : 1;
226
227 /* If value is a variable, is it initialized or not. */
228 unsigned int initialized : 1;
229
230 /* If value is from the stack. If this is set, read_stack will be
231 used instead of read_memory to enable extra caching. */
232 unsigned int stack : 1;
233
234 /* True if this is a zero value, created by 'value_zero'; false
235 otherwise. */
236 bool is_zero : 1;
237
238 /* Location of value (if lval). */
239 union
240 {
241 /* If lval == lval_memory, this is the address in the inferior */
242 CORE_ADDR address;
243
244 /*If lval == lval_register, the value is from a register. */
245 struct
246 {
247 /* Register number. */
248 int regnum;
249 /* Frame ID of "next" frame to which a register value is relative.
250 If the register value is found relative to frame F, then the
251 frame id of F->next will be stored in next_frame_id. */
252 struct frame_id next_frame_id;
253 } reg;
254
255 /* Pointer to internal variable. */
256 struct internalvar *internalvar;
257
258 /* Pointer to xmethod worker. */
259 struct xmethod_worker *xm_worker;
260
261 /* If lval == lval_computed, this is a set of function pointers
262 to use to access and describe the value, and a closure pointer
263 for them to use. */
264 struct
265 {
266 /* Functions to call. */
267 const struct lval_funcs *funcs;
268
269 /* Closure for those functions to use. */
270 void *closure;
271 } computed;
272 } location {};
273
274 /* Describes offset of a value within lval of a structure in target
275 addressable memory units. Note also the member embedded_offset
276 below. */
277 LONGEST offset = 0;
278
279 /* Only used for bitfields; number of bits contained in them. */
280 LONGEST bitsize = 0;
281
282 /* Only used for bitfields; position of start of field. For
283 little-endian targets, it is the position of the LSB. For
284 big-endian targets, it is the position of the MSB. */
285 LONGEST bitpos = 0;
286
287 /* The number of references to this value. When a value is created,
288 the value chain holds a reference, so REFERENCE_COUNT is 1. If
289 release_value is called, this value is removed from the chain but
290 the caller of release_value now has a reference to this value.
291 The caller must arrange for a call to value_free later. */
292 int reference_count = 1;
293
294 /* Only used for bitfields; the containing value. This allows a
295 single read from the target when displaying multiple
296 bitfields. */
297 value_ref_ptr parent;
298
299 /* Type of the value. */
300 struct type *type;
301
302 /* If a value represents a C++ object, then the `type' field gives
303 the object's compile-time type. If the object actually belongs
304 to some class derived from `type', perhaps with other base
305 classes and additional members, then `type' is just a subobject
306 of the real thing, and the full object is probably larger than
307 `type' would suggest.
308
309 If `type' is a dynamic class (i.e. one with a vtable), then GDB
310 can actually determine the object's run-time type by looking at
311 the run-time type information in the vtable. When this
312 information is available, we may elect to read in the entire
313 object, for several reasons:
314
315 - When printing the value, the user would probably rather see the
316 full object, not just the limited portion apparent from the
317 compile-time type.
318
319 - If `type' has virtual base classes, then even printing `type'
320 alone may require reaching outside the `type' portion of the
321 object to wherever the virtual base class has been stored.
322
323 When we store the entire object, `enclosing_type' is the run-time
324 type -- the complete object -- and `embedded_offset' is the
325 offset of `type' within that larger type, in target addressable memory
326 units. The value_contents() macro takes `embedded_offset' into account,
327 so most GDB code continues to see the `type' portion of the value, just
328 as the inferior would.
329
330 If `type' is a pointer to an object, then `enclosing_type' is a
331 pointer to the object's run-time type, and `pointed_to_offset' is
332 the offset in target addressable memory units from the full object
333 to the pointed-to object -- that is, the value `embedded_offset' would
334 have if we followed the pointer and fetched the complete object.
335 (I don't really see the point. Why not just determine the
336 run-time type when you indirect, and avoid the special case? The
337 contents don't matter until you indirect anyway.)
338
339 If we're not doing anything fancy, `enclosing_type' is equal to
340 `type', and `embedded_offset' is zero, so everything works
341 normally. */
342 struct type *enclosing_type;
343 LONGEST embedded_offset = 0;
344 LONGEST pointed_to_offset = 0;
345
346 /* Actual contents of the value. Target byte-order. NULL or not
347 valid if lazy is nonzero. */
348 gdb::unique_xmalloc_ptr<gdb_byte> contents;
349
350 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
351 rather than available, since the common and default case is for a
352 value to be available. This is filled in at value read time.
353 The unavailable ranges are tracked in bits. Note that a contents
354 bit that has been optimized out doesn't really exist in the
355 program, so it can't be marked unavailable either. */
356 std::vector<range> unavailable;
357
358 /* Likewise, but for optimized out contents (a chunk of the value of
359 a variable that does not actually exist in the program). If LVAL
360 is lval_register, this is a register ($pc, $sp, etc., never a
361 program variable) that has not been saved in the frame. Not
362 saved registers and optimized-out program variables values are
363 treated pretty much the same, except not-saved registers have a
364 different string representation and related error strings. */
365 std::vector<range> optimized_out;
366 };
367
368 /* See value.h. */
369
370 struct gdbarch *
371 get_value_arch (const struct value *value)
372 {
373 return value_type (value)->arch ();
374 }
375
376 int
377 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
378 {
379 gdb_assert (!value->lazy);
380
381 return !ranges_contain (value->unavailable, offset, length);
382 }
383
384 int
385 value_bytes_available (const struct value *value,
386 LONGEST offset, LONGEST length)
387 {
388 return value_bits_available (value,
389 offset * TARGET_CHAR_BIT,
390 length * TARGET_CHAR_BIT);
391 }
392
393 int
394 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
395 {
396 gdb_assert (!value->lazy);
397
398 return ranges_contain (value->optimized_out, bit_offset, bit_length);
399 }
400
401 int
402 value_entirely_available (struct value *value)
403 {
404 /* We can only tell whether the whole value is available when we try
405 to read it. */
406 if (value->lazy)
407 value_fetch_lazy (value);
408
409 if (value->unavailable.empty ())
410 return 1;
411 return 0;
412 }
413
414 /* Returns true if VALUE is entirely covered by RANGES. If the value
415 is lazy, it'll be read now. Note that RANGE is a pointer to
416 pointer because reading the value might change *RANGE. */
417
418 static int
419 value_entirely_covered_by_range_vector (struct value *value,
420 const std::vector<range> &ranges)
421 {
422 /* We can only tell whether the whole value is optimized out /
423 unavailable when we try to read it. */
424 if (value->lazy)
425 value_fetch_lazy (value);
426
427 if (ranges.size () == 1)
428 {
429 const struct range &t = ranges[0];
430
431 if (t.offset == 0
432 && t.length == (TARGET_CHAR_BIT
433 * TYPE_LENGTH (value_enclosing_type (value))))
434 return 1;
435 }
436
437 return 0;
438 }
439
440 int
441 value_entirely_unavailable (struct value *value)
442 {
443 return value_entirely_covered_by_range_vector (value, value->unavailable);
444 }
445
446 int
447 value_entirely_optimized_out (struct value *value)
448 {
449 return value_entirely_covered_by_range_vector (value, value->optimized_out);
450 }
451
452 /* Insert into the vector pointed to by VECTORP the bit range starting of
453 OFFSET bits, and extending for the next LENGTH bits. */
454
455 static void
456 insert_into_bit_range_vector (std::vector<range> *vectorp,
457 LONGEST offset, LONGEST length)
458 {
459 range newr;
460
461 /* Insert the range sorted. If there's overlap or the new range
462 would be contiguous with an existing range, merge. */
463
464 newr.offset = offset;
465 newr.length = length;
466
467 /* Do a binary search for the position the given range would be
468 inserted if we only considered the starting OFFSET of ranges.
469 Call that position I. Since we also have LENGTH to care for
470 (this is a range afterall), we need to check if the _previous_
471 range overlaps the I range. E.g., calling R the new range:
472
473 #1 - overlaps with previous
474
475 R
476 |-...-|
477 |---| |---| |------| ... |--|
478 0 1 2 N
479
480 I=1
481
482 In the case #1 above, the binary search would return `I=1',
483 meaning, this OFFSET should be inserted at position 1, and the
484 current position 1 should be pushed further (and become 2). But,
485 note that `0' overlaps with R, so we want to merge them.
486
487 A similar consideration needs to be taken if the new range would
488 be contiguous with the previous range:
489
490 #2 - contiguous with previous
491
492 R
493 |-...-|
494 |--| |---| |------| ... |--|
495 0 1 2 N
496
497 I=1
498
499 If there's no overlap with the previous range, as in:
500
501 #3 - not overlapping and not contiguous
502
503 R
504 |-...-|
505 |--| |---| |------| ... |--|
506 0 1 2 N
507
508 I=1
509
510 or if I is 0:
511
512 #4 - R is the range with lowest offset
513
514 R
515 |-...-|
516 |--| |---| |------| ... |--|
517 0 1 2 N
518
519 I=0
520
521 ... we just push the new range to I.
522
523 All the 4 cases above need to consider that the new range may
524 also overlap several of the ranges that follow, or that R may be
525 contiguous with the following range, and merge. E.g.,
526
527 #5 - overlapping following ranges
528
529 R
530 |------------------------|
531 |--| |---| |------| ... |--|
532 0 1 2 N
533
534 I=0
535
536 or:
537
538 R
539 |-------|
540 |--| |---| |------| ... |--|
541 0 1 2 N
542
543 I=1
544
545 */
546
547 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
548 if (i > vectorp->begin ())
549 {
550 struct range &bef = *(i - 1);
551
552 if (ranges_overlap (bef.offset, bef.length, offset, length))
553 {
554 /* #1 */
555 ULONGEST l = std::min (bef.offset, offset);
556 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
557
558 bef.offset = l;
559 bef.length = h - l;
560 i--;
561 }
562 else if (offset == bef.offset + bef.length)
563 {
564 /* #2 */
565 bef.length += length;
566 i--;
567 }
568 else
569 {
570 /* #3 */
571 i = vectorp->insert (i, newr);
572 }
573 }
574 else
575 {
576 /* #4 */
577 i = vectorp->insert (i, newr);
578 }
579
580 /* Check whether the ranges following the one we've just added or
581 touched can be folded in (#5 above). */
582 if (i != vectorp->end () && i + 1 < vectorp->end ())
583 {
584 int removed = 0;
585 auto next = i + 1;
586
587 /* Get the range we just touched. */
588 struct range &t = *i;
589 removed = 0;
590
591 i = next;
592 for (; i < vectorp->end (); i++)
593 {
594 struct range &r = *i;
595 if (r.offset <= t.offset + t.length)
596 {
597 ULONGEST l, h;
598
599 l = std::min (t.offset, r.offset);
600 h = std::max (t.offset + t.length, r.offset + r.length);
601
602 t.offset = l;
603 t.length = h - l;
604
605 removed++;
606 }
607 else
608 {
609 /* If we couldn't merge this one, we won't be able to
610 merge following ones either, since the ranges are
611 always sorted by OFFSET. */
612 break;
613 }
614 }
615
616 if (removed != 0)
617 vectorp->erase (next, next + removed);
618 }
619 }
620
621 void
622 mark_value_bits_unavailable (struct value *value,
623 LONGEST offset, LONGEST length)
624 {
625 insert_into_bit_range_vector (&value->unavailable, offset, length);
626 }
627
628 void
629 mark_value_bytes_unavailable (struct value *value,
630 LONGEST offset, LONGEST length)
631 {
632 mark_value_bits_unavailable (value,
633 offset * TARGET_CHAR_BIT,
634 length * TARGET_CHAR_BIT);
635 }
636
637 /* Find the first range in RANGES that overlaps the range defined by
638 OFFSET and LENGTH, starting at element POS in the RANGES vector,
639 Returns the index into RANGES where such overlapping range was
640 found, or -1 if none was found. */
641
642 static int
643 find_first_range_overlap (const std::vector<range> *ranges, int pos,
644 LONGEST offset, LONGEST length)
645 {
646 int i;
647
648 for (i = pos; i < ranges->size (); i++)
649 {
650 const range &r = (*ranges)[i];
651 if (ranges_overlap (r.offset, r.length, offset, length))
652 return i;
653 }
654
655 return -1;
656 }
657
658 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
659 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
660 return non-zero.
661
662 It must always be the case that:
663 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
664
665 It is assumed that memory can be accessed from:
666 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
667 to:
668 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
669 / TARGET_CHAR_BIT) */
670 static int
671 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
672 const gdb_byte *ptr2, size_t offset2_bits,
673 size_t length_bits)
674 {
675 gdb_assert (offset1_bits % TARGET_CHAR_BIT
676 == offset2_bits % TARGET_CHAR_BIT);
677
678 if (offset1_bits % TARGET_CHAR_BIT != 0)
679 {
680 size_t bits;
681 gdb_byte mask, b1, b2;
682
683 /* The offset from the base pointers PTR1 and PTR2 is not a complete
684 number of bytes. A number of bits up to either the next exact
685 byte boundary, or LENGTH_BITS (which ever is sooner) will be
686 compared. */
687 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
688 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
689 mask = (1 << bits) - 1;
690
691 if (length_bits < bits)
692 {
693 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
694 bits = length_bits;
695 }
696
697 /* Now load the two bytes and mask off the bits we care about. */
698 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
699 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
700
701 if (b1 != b2)
702 return 1;
703
704 /* Now update the length and offsets to take account of the bits
705 we've just compared. */
706 length_bits -= bits;
707 offset1_bits += bits;
708 offset2_bits += bits;
709 }
710
711 if (length_bits % TARGET_CHAR_BIT != 0)
712 {
713 size_t bits;
714 size_t o1, o2;
715 gdb_byte mask, b1, b2;
716
717 /* The length is not an exact number of bytes. After the previous
718 IF.. block then the offsets are byte aligned, or the
719 length is zero (in which case this code is not reached). Compare
720 a number of bits at the end of the region, starting from an exact
721 byte boundary. */
722 bits = length_bits % TARGET_CHAR_BIT;
723 o1 = offset1_bits + length_bits - bits;
724 o2 = offset2_bits + length_bits - bits;
725
726 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
727 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
728
729 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
730 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
731
732 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
733 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
734
735 if (b1 != b2)
736 return 1;
737
738 length_bits -= bits;
739 }
740
741 if (length_bits > 0)
742 {
743 /* We've now taken care of any stray "bits" at the start, or end of
744 the region to compare, the remainder can be covered with a simple
745 memcmp. */
746 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
747 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
748 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
749
750 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
751 ptr2 + offset2_bits / TARGET_CHAR_BIT,
752 length_bits / TARGET_CHAR_BIT);
753 }
754
755 /* Length is zero, regions match. */
756 return 0;
757 }
758
759 /* Helper struct for find_first_range_overlap_and_match and
760 value_contents_bits_eq. Keep track of which slot of a given ranges
761 vector have we last looked at. */
762
763 struct ranges_and_idx
764 {
765 /* The ranges. */
766 const std::vector<range> *ranges;
767
768 /* The range we've last found in RANGES. Given ranges are sorted,
769 we can start the next lookup here. */
770 int idx;
771 };
772
773 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
774 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
775 ranges starting at OFFSET2 bits. Return true if the ranges match
776 and fill in *L and *H with the overlapping window relative to
777 (both) OFFSET1 or OFFSET2. */
778
779 static int
780 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
781 struct ranges_and_idx *rp2,
782 LONGEST offset1, LONGEST offset2,
783 LONGEST length, ULONGEST *l, ULONGEST *h)
784 {
785 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
786 offset1, length);
787 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
788 offset2, length);
789
790 if (rp1->idx == -1 && rp2->idx == -1)
791 {
792 *l = length;
793 *h = length;
794 return 1;
795 }
796 else if (rp1->idx == -1 || rp2->idx == -1)
797 return 0;
798 else
799 {
800 const range *r1, *r2;
801 ULONGEST l1, h1;
802 ULONGEST l2, h2;
803
804 r1 = &(*rp1->ranges)[rp1->idx];
805 r2 = &(*rp2->ranges)[rp2->idx];
806
807 /* Get the unavailable windows intersected by the incoming
808 ranges. The first and last ranges that overlap the argument
809 range may be wider than said incoming arguments ranges. */
810 l1 = std::max (offset1, r1->offset);
811 h1 = std::min (offset1 + length, r1->offset + r1->length);
812
813 l2 = std::max (offset2, r2->offset);
814 h2 = std::min (offset2 + length, offset2 + r2->length);
815
816 /* Make them relative to the respective start offsets, so we can
817 compare them for equality. */
818 l1 -= offset1;
819 h1 -= offset1;
820
821 l2 -= offset2;
822 h2 -= offset2;
823
824 /* Different ranges, no match. */
825 if (l1 != l2 || h1 != h2)
826 return 0;
827
828 *h = h1;
829 *l = l1;
830 return 1;
831 }
832 }
833
834 /* Helper function for value_contents_eq. The only difference is that
835 this function is bit rather than byte based.
836
837 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
838 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
839 Return true if the available bits match. */
840
841 static bool
842 value_contents_bits_eq (const struct value *val1, int offset1,
843 const struct value *val2, int offset2,
844 int length)
845 {
846 /* Each array element corresponds to a ranges source (unavailable,
847 optimized out). '1' is for VAL1, '2' for VAL2. */
848 struct ranges_and_idx rp1[2], rp2[2];
849
850 /* See function description in value.h. */
851 gdb_assert (!val1->lazy && !val2->lazy);
852
853 /* We shouldn't be trying to compare past the end of the values. */
854 gdb_assert (offset1 + length
855 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
856 gdb_assert (offset2 + length
857 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
858
859 memset (&rp1, 0, sizeof (rp1));
860 memset (&rp2, 0, sizeof (rp2));
861 rp1[0].ranges = &val1->unavailable;
862 rp2[0].ranges = &val2->unavailable;
863 rp1[1].ranges = &val1->optimized_out;
864 rp2[1].ranges = &val2->optimized_out;
865
866 while (length > 0)
867 {
868 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
869 int i;
870
871 for (i = 0; i < 2; i++)
872 {
873 ULONGEST l_tmp, h_tmp;
874
875 /* The contents only match equal if the invalid/unavailable
876 contents ranges match as well. */
877 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
878 offset1, offset2, length,
879 &l_tmp, &h_tmp))
880 return false;
881
882 /* We're interested in the lowest/first range found. */
883 if (i == 0 || l_tmp < l)
884 {
885 l = l_tmp;
886 h = h_tmp;
887 }
888 }
889
890 /* Compare the available/valid contents. */
891 if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
892 val2->contents.get (), offset2, l) != 0)
893 return false;
894
895 length -= h;
896 offset1 += h;
897 offset2 += h;
898 }
899
900 return true;
901 }
902
903 bool
904 value_contents_eq (const struct value *val1, LONGEST offset1,
905 const struct value *val2, LONGEST offset2,
906 LONGEST length)
907 {
908 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
909 val2, offset2 * TARGET_CHAR_BIT,
910 length * TARGET_CHAR_BIT);
911 }
912
913
914 /* The value-history records all the values printed by print commands
915 during this session. */
916
917 static std::vector<value_ref_ptr> value_history;
918
919 \f
920 /* List of all value objects currently allocated
921 (except for those released by calls to release_value)
922 This is so they can be freed after each command. */
923
924 static std::vector<value_ref_ptr> all_values;
925
926 /* Allocate a lazy value for type TYPE. Its actual content is
927 "lazily" allocated too: the content field of the return value is
928 NULL; it will be allocated when it is fetched from the target. */
929
930 struct value *
931 allocate_value_lazy (struct type *type)
932 {
933 struct value *val;
934
935 /* Call check_typedef on our type to make sure that, if TYPE
936 is a TYPE_CODE_TYPEDEF, its length is set to the length
937 of the target type instead of zero. However, we do not
938 replace the typedef type by the target type, because we want
939 to keep the typedef in order to be able to set the VAL's type
940 description correctly. */
941 check_typedef (type);
942
943 val = new struct value (type);
944
945 /* Values start out on the all_values chain. */
946 all_values.emplace_back (val);
947
948 return val;
949 }
950
951 /* The maximum size, in bytes, that GDB will try to allocate for a value.
952 The initial value of 64k was not selected for any specific reason, it is
953 just a reasonable starting point. */
954
955 static int max_value_size = 65536; /* 64k bytes */
956
957 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
958 LONGEST, otherwise GDB will not be able to parse integer values from the
959 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
960 be unable to parse "set max-value-size 2".
961
962 As we want a consistent GDB experience across hosts with different sizes
963 of LONGEST, this arbitrary minimum value was selected, so long as this
964 is bigger than LONGEST on all GDB supported hosts we're fine. */
965
966 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
967 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
968
969 /* Implement the "set max-value-size" command. */
970
971 static void
972 set_max_value_size (const char *args, int from_tty,
973 struct cmd_list_element *c)
974 {
975 gdb_assert (max_value_size == -1 || max_value_size >= 0);
976
977 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
978 {
979 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
980 error (_("max-value-size set too low, increasing to %d bytes"),
981 max_value_size);
982 }
983 }
984
985 /* Implement the "show max-value-size" command. */
986
987 static void
988 show_max_value_size (struct ui_file *file, int from_tty,
989 struct cmd_list_element *c, const char *value)
990 {
991 if (max_value_size == -1)
992 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
993 else
994 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
995 max_value_size);
996 }
997
998 /* Called before we attempt to allocate or reallocate a buffer for the
999 contents of a value. TYPE is the type of the value for which we are
1000 allocating the buffer. If the buffer is too large (based on the user
1001 controllable setting) then throw an error. If this function returns
1002 then we should attempt to allocate the buffer. */
1003
1004 static void
1005 check_type_length_before_alloc (const struct type *type)
1006 {
1007 ULONGEST length = TYPE_LENGTH (type);
1008
1009 if (max_value_size > -1 && length > max_value_size)
1010 {
1011 if (type->name () != NULL)
1012 error (_("value of type `%s' requires %s bytes, which is more "
1013 "than max-value-size"), type->name (), pulongest (length));
1014 else
1015 error (_("value requires %s bytes, which is more than "
1016 "max-value-size"), pulongest (length));
1017 }
1018 }
1019
1020 /* Allocate the contents of VAL if it has not been allocated yet. */
1021
1022 static void
1023 allocate_value_contents (struct value *val)
1024 {
1025 if (!val->contents)
1026 {
1027 check_type_length_before_alloc (val->enclosing_type);
1028 val->contents.reset
1029 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1030 }
1031 }
1032
1033 /* Allocate a value and its contents for type TYPE. */
1034
1035 struct value *
1036 allocate_value (struct type *type)
1037 {
1038 struct value *val = allocate_value_lazy (type);
1039
1040 allocate_value_contents (val);
1041 val->lazy = 0;
1042 return val;
1043 }
1044
1045 /* Allocate a value that has the correct length
1046 for COUNT repetitions of type TYPE. */
1047
1048 struct value *
1049 allocate_repeat_value (struct type *type, int count)
1050 {
1051 /* Despite the fact that we are really creating an array of TYPE here, we
1052 use the string lower bound as the array lower bound. This seems to
1053 work fine for now. */
1054 int low_bound = current_language->string_lower_bound ();
1055 /* FIXME-type-allocation: need a way to free this type when we are
1056 done with it. */
1057 struct type *array_type
1058 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1059
1060 return allocate_value (array_type);
1061 }
1062
1063 struct value *
1064 allocate_computed_value (struct type *type,
1065 const struct lval_funcs *funcs,
1066 void *closure)
1067 {
1068 struct value *v = allocate_value_lazy (type);
1069
1070 VALUE_LVAL (v) = lval_computed;
1071 v->location.computed.funcs = funcs;
1072 v->location.computed.closure = closure;
1073
1074 return v;
1075 }
1076
1077 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1078
1079 struct value *
1080 allocate_optimized_out_value (struct type *type)
1081 {
1082 struct value *retval = allocate_value_lazy (type);
1083
1084 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1085 set_value_lazy (retval, 0);
1086 return retval;
1087 }
1088
1089 /* Accessor methods. */
1090
1091 struct type *
1092 value_type (const struct value *value)
1093 {
1094 return value->type;
1095 }
1096 void
1097 deprecated_set_value_type (struct value *value, struct type *type)
1098 {
1099 value->type = type;
1100 }
1101
1102 LONGEST
1103 value_offset (const struct value *value)
1104 {
1105 return value->offset;
1106 }
1107 void
1108 set_value_offset (struct value *value, LONGEST offset)
1109 {
1110 value->offset = offset;
1111 }
1112
1113 LONGEST
1114 value_bitpos (const struct value *value)
1115 {
1116 return value->bitpos;
1117 }
1118 void
1119 set_value_bitpos (struct value *value, LONGEST bit)
1120 {
1121 value->bitpos = bit;
1122 }
1123
1124 LONGEST
1125 value_bitsize (const struct value *value)
1126 {
1127 return value->bitsize;
1128 }
1129 void
1130 set_value_bitsize (struct value *value, LONGEST bit)
1131 {
1132 value->bitsize = bit;
1133 }
1134
1135 struct value *
1136 value_parent (const struct value *value)
1137 {
1138 return value->parent.get ();
1139 }
1140
1141 /* See value.h. */
1142
1143 void
1144 set_value_parent (struct value *value, struct value *parent)
1145 {
1146 value->parent = value_ref_ptr::new_reference (parent);
1147 }
1148
1149 gdb::array_view<gdb_byte>
1150 value_contents_raw (struct value *value)
1151 {
1152 struct gdbarch *arch = get_value_arch (value);
1153 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1154
1155 allocate_value_contents (value);
1156
1157 ULONGEST length = TYPE_LENGTH (value_type (value));
1158 return gdb::make_array_view
1159 (value->contents.get () + value->embedded_offset * unit_size, length);
1160 }
1161
1162 gdb::array_view<gdb_byte>
1163 value_contents_all_raw (struct value *value)
1164 {
1165 allocate_value_contents (value);
1166
1167 ULONGEST length = TYPE_LENGTH (value_type (value));
1168 return gdb::make_array_view (value->contents.get (), length);
1169 }
1170
1171 struct type *
1172 value_enclosing_type (const struct value *value)
1173 {
1174 return value->enclosing_type;
1175 }
1176
1177 /* Look at value.h for description. */
1178
1179 struct type *
1180 value_actual_type (struct value *value, int resolve_simple_types,
1181 int *real_type_found)
1182 {
1183 struct value_print_options opts;
1184 struct type *result;
1185
1186 get_user_print_options (&opts);
1187
1188 if (real_type_found)
1189 *real_type_found = 0;
1190 result = value_type (value);
1191 if (opts.objectprint)
1192 {
1193 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1194 fetch its rtti type. */
1195 if (result->is_pointer_or_reference ()
1196 && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
1197 == TYPE_CODE_STRUCT)
1198 && !value_optimized_out (value))
1199 {
1200 struct type *real_type;
1201
1202 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1203 if (real_type)
1204 {
1205 if (real_type_found)
1206 *real_type_found = 1;
1207 result = real_type;
1208 }
1209 }
1210 else if (resolve_simple_types)
1211 {
1212 if (real_type_found)
1213 *real_type_found = 1;
1214 result = value_enclosing_type (value);
1215 }
1216 }
1217
1218 return result;
1219 }
1220
1221 void
1222 error_value_optimized_out (void)
1223 {
1224 error (_("value has been optimized out"));
1225 }
1226
1227 static void
1228 require_not_optimized_out (const struct value *value)
1229 {
1230 if (!value->optimized_out.empty ())
1231 {
1232 if (value->lval == lval_register)
1233 error (_("register has not been saved in frame"));
1234 else
1235 error_value_optimized_out ();
1236 }
1237 }
1238
1239 static void
1240 require_available (const struct value *value)
1241 {
1242 if (!value->unavailable.empty ())
1243 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1244 }
1245
1246 gdb::array_view<const gdb_byte>
1247 value_contents_for_printing (struct value *value)
1248 {
1249 if (value->lazy)
1250 value_fetch_lazy (value);
1251
1252 ULONGEST length = TYPE_LENGTH (value_type (value));
1253 return gdb::make_array_view (value->contents.get (), length);
1254 }
1255
1256 gdb::array_view<const gdb_byte>
1257 value_contents_for_printing_const (const struct value *value)
1258 {
1259 gdb_assert (!value->lazy);
1260
1261 ULONGEST length = TYPE_LENGTH (value_type (value));
1262 return gdb::make_array_view (value->contents.get (), length);
1263 }
1264
1265 gdb::array_view<const gdb_byte>
1266 value_contents_all (struct value *value)
1267 {
1268 gdb::array_view<const gdb_byte> result = value_contents_for_printing (value);
1269 require_not_optimized_out (value);
1270 require_available (value);
1271 return result;
1272 }
1273
1274 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1275 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1276
1277 static void
1278 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1279 const std::vector<range> &src_range, int src_bit_offset,
1280 int bit_length)
1281 {
1282 for (const range &r : src_range)
1283 {
1284 ULONGEST h, l;
1285
1286 l = std::max (r.offset, (LONGEST) src_bit_offset);
1287 h = std::min (r.offset + r.length,
1288 (LONGEST) src_bit_offset + bit_length);
1289
1290 if (l < h)
1291 insert_into_bit_range_vector (dst_range,
1292 dst_bit_offset + (l - src_bit_offset),
1293 h - l);
1294 }
1295 }
1296
1297 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1298 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1299
1300 static void
1301 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1302 const struct value *src, int src_bit_offset,
1303 int bit_length)
1304 {
1305 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1306 src->unavailable, src_bit_offset,
1307 bit_length);
1308 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1309 src->optimized_out, src_bit_offset,
1310 bit_length);
1311 }
1312
1313 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1314 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1315 contents, starting at DST_OFFSET. If unavailable contents are
1316 being copied from SRC, the corresponding DST contents are marked
1317 unavailable accordingly. Neither DST nor SRC may be lazy
1318 values.
1319
1320 It is assumed the contents of DST in the [DST_OFFSET,
1321 DST_OFFSET+LENGTH) range are wholly available. */
1322
1323 static void
1324 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1325 struct value *src, LONGEST src_offset, LONGEST length)
1326 {
1327 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1328 struct gdbarch *arch = get_value_arch (src);
1329 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1330
1331 /* A lazy DST would make that this copy operation useless, since as
1332 soon as DST's contents were un-lazied (by a later value_contents
1333 call, say), the contents would be overwritten. A lazy SRC would
1334 mean we'd be copying garbage. */
1335 gdb_assert (!dst->lazy && !src->lazy);
1336
1337 /* The overwritten DST range gets unavailability ORed in, not
1338 replaced. Make sure to remember to implement replacing if it
1339 turns out actually necessary. */
1340 gdb_assert (value_bytes_available (dst, dst_offset, length));
1341 gdb_assert (!value_bits_any_optimized_out (dst,
1342 TARGET_CHAR_BIT * dst_offset,
1343 TARGET_CHAR_BIT * length));
1344
1345 /* Copy the data. */
1346 memcpy (value_contents_all_raw (dst).data () + dst_offset * unit_size,
1347 value_contents_all_raw (src).data () + src_offset * unit_size,
1348 length * unit_size);
1349
1350 /* Copy the meta-data, adjusted. */
1351 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1352 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1353 bit_length = length * unit_size * HOST_CHAR_BIT;
1354
1355 value_ranges_copy_adjusted (dst, dst_bit_offset,
1356 src, src_bit_offset,
1357 bit_length);
1358 }
1359
1360 /* Copy LENGTH bytes of SRC value's (all) contents
1361 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1362 (all) contents, starting at DST_OFFSET. If unavailable contents
1363 are being copied from SRC, the corresponding DST contents are
1364 marked unavailable accordingly. DST must not be lazy. If SRC is
1365 lazy, it will be fetched now.
1366
1367 It is assumed the contents of DST in the [DST_OFFSET,
1368 DST_OFFSET+LENGTH) range are wholly available. */
1369
1370 void
1371 value_contents_copy (struct value *dst, LONGEST dst_offset,
1372 struct value *src, LONGEST src_offset, LONGEST length)
1373 {
1374 if (src->lazy)
1375 value_fetch_lazy (src);
1376
1377 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1378 }
1379
1380 int
1381 value_lazy (const struct value *value)
1382 {
1383 return value->lazy;
1384 }
1385
1386 void
1387 set_value_lazy (struct value *value, int val)
1388 {
1389 value->lazy = val;
1390 }
1391
1392 int
1393 value_stack (const struct value *value)
1394 {
1395 return value->stack;
1396 }
1397
1398 void
1399 set_value_stack (struct value *value, int val)
1400 {
1401 value->stack = val;
1402 }
1403
1404 gdb::array_view<const gdb_byte>
1405 value_contents (struct value *value)
1406 {
1407 gdb::array_view<const gdb_byte> result = value_contents_writeable (value);
1408 require_not_optimized_out (value);
1409 require_available (value);
1410 return result;
1411 }
1412
1413 gdb::array_view<gdb_byte>
1414 value_contents_writeable (struct value *value)
1415 {
1416 if (value->lazy)
1417 value_fetch_lazy (value);
1418 return value_contents_raw (value);
1419 }
1420
1421 int
1422 value_optimized_out (struct value *value)
1423 {
1424 if (value->lazy)
1425 {
1426 /* See if we can compute the result without fetching the
1427 value. */
1428 if (VALUE_LVAL (value) == lval_memory)
1429 return false;
1430 else if (VALUE_LVAL (value) == lval_computed)
1431 {
1432 const struct lval_funcs *funcs = value->location.computed.funcs;
1433
1434 if (funcs->is_optimized_out != nullptr)
1435 return funcs->is_optimized_out (value);
1436 }
1437
1438 /* Fall back to fetching. */
1439 try
1440 {
1441 value_fetch_lazy (value);
1442 }
1443 catch (const gdb_exception_error &ex)
1444 {
1445 switch (ex.error)
1446 {
1447 case MEMORY_ERROR:
1448 case OPTIMIZED_OUT_ERROR:
1449 case NOT_AVAILABLE_ERROR:
1450 /* These can normally happen when we try to access an
1451 optimized out or unavailable register, either in a
1452 physical register or spilled to memory. */
1453 break;
1454 default:
1455 throw;
1456 }
1457 }
1458 }
1459
1460 return !value->optimized_out.empty ();
1461 }
1462
1463 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1464 the following LENGTH bytes. */
1465
1466 void
1467 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1468 {
1469 mark_value_bits_optimized_out (value,
1470 offset * TARGET_CHAR_BIT,
1471 length * TARGET_CHAR_BIT);
1472 }
1473
1474 /* See value.h. */
1475
1476 void
1477 mark_value_bits_optimized_out (struct value *value,
1478 LONGEST offset, LONGEST length)
1479 {
1480 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1481 }
1482
1483 int
1484 value_bits_synthetic_pointer (const struct value *value,
1485 LONGEST offset, LONGEST length)
1486 {
1487 if (value->lval != lval_computed
1488 || !value->location.computed.funcs->check_synthetic_pointer)
1489 return 0;
1490 return value->location.computed.funcs->check_synthetic_pointer (value,
1491 offset,
1492 length);
1493 }
1494
1495 LONGEST
1496 value_embedded_offset (const struct value *value)
1497 {
1498 return value->embedded_offset;
1499 }
1500
1501 void
1502 set_value_embedded_offset (struct value *value, LONGEST val)
1503 {
1504 value->embedded_offset = val;
1505 }
1506
1507 LONGEST
1508 value_pointed_to_offset (const struct value *value)
1509 {
1510 return value->pointed_to_offset;
1511 }
1512
1513 void
1514 set_value_pointed_to_offset (struct value *value, LONGEST val)
1515 {
1516 value->pointed_to_offset = val;
1517 }
1518
1519 const struct lval_funcs *
1520 value_computed_funcs (const struct value *v)
1521 {
1522 gdb_assert (value_lval_const (v) == lval_computed);
1523
1524 return v->location.computed.funcs;
1525 }
1526
1527 void *
1528 value_computed_closure (const struct value *v)
1529 {
1530 gdb_assert (v->lval == lval_computed);
1531
1532 return v->location.computed.closure;
1533 }
1534
1535 enum lval_type *
1536 deprecated_value_lval_hack (struct value *value)
1537 {
1538 return &value->lval;
1539 }
1540
1541 enum lval_type
1542 value_lval_const (const struct value *value)
1543 {
1544 return value->lval;
1545 }
1546
1547 CORE_ADDR
1548 value_address (const struct value *value)
1549 {
1550 if (value->lval != lval_memory)
1551 return 0;
1552 if (value->parent != NULL)
1553 return value_address (value->parent.get ()) + value->offset;
1554 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1555 {
1556 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1557 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1558 }
1559
1560 return value->location.address + value->offset;
1561 }
1562
1563 CORE_ADDR
1564 value_raw_address (const struct value *value)
1565 {
1566 if (value->lval != lval_memory)
1567 return 0;
1568 return value->location.address;
1569 }
1570
1571 void
1572 set_value_address (struct value *value, CORE_ADDR addr)
1573 {
1574 gdb_assert (value->lval == lval_memory);
1575 value->location.address = addr;
1576 }
1577
1578 struct internalvar **
1579 deprecated_value_internalvar_hack (struct value *value)
1580 {
1581 return &value->location.internalvar;
1582 }
1583
1584 struct frame_id *
1585 deprecated_value_next_frame_id_hack (struct value *value)
1586 {
1587 gdb_assert (value->lval == lval_register);
1588 return &value->location.reg.next_frame_id;
1589 }
1590
1591 int *
1592 deprecated_value_regnum_hack (struct value *value)
1593 {
1594 gdb_assert (value->lval == lval_register);
1595 return &value->location.reg.regnum;
1596 }
1597
1598 int
1599 deprecated_value_modifiable (const struct value *value)
1600 {
1601 return value->modifiable;
1602 }
1603 \f
1604 /* Return a mark in the value chain. All values allocated after the
1605 mark is obtained (except for those released) are subject to being freed
1606 if a subsequent value_free_to_mark is passed the mark. */
1607 struct value *
1608 value_mark (void)
1609 {
1610 if (all_values.empty ())
1611 return nullptr;
1612 return all_values.back ().get ();
1613 }
1614
1615 /* See value.h. */
1616
1617 void
1618 value_incref (struct value *val)
1619 {
1620 val->reference_count++;
1621 }
1622
1623 /* Release a reference to VAL, which was acquired with value_incref.
1624 This function is also called to deallocate values from the value
1625 chain. */
1626
1627 void
1628 value_decref (struct value *val)
1629 {
1630 if (val != nullptr)
1631 {
1632 gdb_assert (val->reference_count > 0);
1633 val->reference_count--;
1634 if (val->reference_count == 0)
1635 delete val;
1636 }
1637 }
1638
1639 /* Free all values allocated since MARK was obtained by value_mark
1640 (except for those released). */
1641 void
1642 value_free_to_mark (const struct value *mark)
1643 {
1644 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1645 if (iter == all_values.end ())
1646 all_values.clear ();
1647 else
1648 all_values.erase (iter + 1, all_values.end ());
1649 }
1650
1651 /* Remove VAL from the chain all_values
1652 so it will not be freed automatically. */
1653
1654 value_ref_ptr
1655 release_value (struct value *val)
1656 {
1657 if (val == nullptr)
1658 return value_ref_ptr ();
1659
1660 std::vector<value_ref_ptr>::reverse_iterator iter;
1661 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1662 {
1663 if (*iter == val)
1664 {
1665 value_ref_ptr result = *iter;
1666 all_values.erase (iter.base () - 1);
1667 return result;
1668 }
1669 }
1670
1671 /* We must always return an owned reference. Normally this happens
1672 because we transfer the reference from the value chain, but in
1673 this case the value was not on the chain. */
1674 return value_ref_ptr::new_reference (val);
1675 }
1676
1677 /* See value.h. */
1678
1679 std::vector<value_ref_ptr>
1680 value_release_to_mark (const struct value *mark)
1681 {
1682 std::vector<value_ref_ptr> result;
1683
1684 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1685 if (iter == all_values.end ())
1686 std::swap (result, all_values);
1687 else
1688 {
1689 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1690 all_values.erase (iter + 1, all_values.end ());
1691 }
1692 std::reverse (result.begin (), result.end ());
1693 return result;
1694 }
1695
1696 /* Return a copy of the value ARG.
1697 It contains the same contents, for same memory address,
1698 but it's a different block of storage. */
1699
1700 struct value *
1701 value_copy (struct value *arg)
1702 {
1703 struct type *encl_type = value_enclosing_type (arg);
1704 struct value *val;
1705
1706 if (value_lazy (arg))
1707 val = allocate_value_lazy (encl_type);
1708 else
1709 val = allocate_value (encl_type);
1710 val->type = arg->type;
1711 VALUE_LVAL (val) = VALUE_LVAL (arg);
1712 val->location = arg->location;
1713 val->offset = arg->offset;
1714 val->bitpos = arg->bitpos;
1715 val->bitsize = arg->bitsize;
1716 val->lazy = arg->lazy;
1717 val->embedded_offset = value_embedded_offset (arg);
1718 val->pointed_to_offset = arg->pointed_to_offset;
1719 val->modifiable = arg->modifiable;
1720 val->stack = arg->stack;
1721 val->is_zero = arg->is_zero;
1722 val->initialized = arg->initialized;
1723 if (!value_lazy (val))
1724 {
1725 memcpy (value_contents_all_raw (val).data (),
1726 value_contents_all_raw (arg).data (),
1727 TYPE_LENGTH (value_enclosing_type (arg)));
1728
1729 }
1730 val->unavailable = arg->unavailable;
1731 val->optimized_out = arg->optimized_out;
1732 val->parent = arg->parent;
1733 if (VALUE_LVAL (val) == lval_computed)
1734 {
1735 const struct lval_funcs *funcs = val->location.computed.funcs;
1736
1737 if (funcs->copy_closure)
1738 val->location.computed.closure = funcs->copy_closure (val);
1739 }
1740 return val;
1741 }
1742
1743 /* Return a "const" and/or "volatile" qualified version of the value V.
1744 If CNST is true, then the returned value will be qualified with
1745 "const".
1746 if VOLTL is true, then the returned value will be qualified with
1747 "volatile". */
1748
1749 struct value *
1750 make_cv_value (int cnst, int voltl, struct value *v)
1751 {
1752 struct type *val_type = value_type (v);
1753 struct type *enclosing_type = value_enclosing_type (v);
1754 struct value *cv_val = value_copy (v);
1755
1756 deprecated_set_value_type (cv_val,
1757 make_cv_type (cnst, voltl, val_type, NULL));
1758 set_value_enclosing_type (cv_val,
1759 make_cv_type (cnst, voltl, enclosing_type, NULL));
1760
1761 return cv_val;
1762 }
1763
1764 /* Return a version of ARG that is non-lvalue. */
1765
1766 struct value *
1767 value_non_lval (struct value *arg)
1768 {
1769 if (VALUE_LVAL (arg) != not_lval)
1770 {
1771 struct type *enc_type = value_enclosing_type (arg);
1772 struct value *val = allocate_value (enc_type);
1773
1774 memcpy (value_contents_all_raw (val).data (),
1775 value_contents_all (arg).data (),
1776 TYPE_LENGTH (enc_type));
1777 val->type = arg->type;
1778 set_value_embedded_offset (val, value_embedded_offset (arg));
1779 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1780 return val;
1781 }
1782 return arg;
1783 }
1784
1785 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1786
1787 void
1788 value_force_lval (struct value *v, CORE_ADDR addr)
1789 {
1790 gdb_assert (VALUE_LVAL (v) == not_lval);
1791
1792 write_memory (addr, value_contents_raw (v).data (), TYPE_LENGTH (value_type (v)));
1793 v->lval = lval_memory;
1794 v->location.address = addr;
1795 }
1796
1797 void
1798 set_value_component_location (struct value *component,
1799 const struct value *whole)
1800 {
1801 struct type *type;
1802
1803 gdb_assert (whole->lval != lval_xcallable);
1804
1805 if (whole->lval == lval_internalvar)
1806 VALUE_LVAL (component) = lval_internalvar_component;
1807 else
1808 VALUE_LVAL (component) = whole->lval;
1809
1810 component->location = whole->location;
1811 if (whole->lval == lval_computed)
1812 {
1813 const struct lval_funcs *funcs = whole->location.computed.funcs;
1814
1815 if (funcs->copy_closure)
1816 component->location.computed.closure = funcs->copy_closure (whole);
1817 }
1818
1819 /* If the WHOLE value has a dynamically resolved location property then
1820 update the address of the COMPONENT. */
1821 type = value_type (whole);
1822 if (NULL != TYPE_DATA_LOCATION (type)
1823 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1824 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1825
1826 /* Similarly, if the COMPONENT value has a dynamically resolved location
1827 property then update its address. */
1828 type = value_type (component);
1829 if (NULL != TYPE_DATA_LOCATION (type)
1830 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1831 {
1832 /* If the COMPONENT has a dynamic location, and is an
1833 lval_internalvar_component, then we change it to a lval_memory.
1834
1835 Usually a component of an internalvar is created non-lazy, and has
1836 its content immediately copied from the parent internalvar.
1837 However, for components with a dynamic location, the content of
1838 the component is not contained within the parent, but is instead
1839 accessed indirectly. Further, the component will be created as a
1840 lazy value.
1841
1842 By changing the type of the component to lval_memory we ensure
1843 that value_fetch_lazy can successfully load the component.
1844
1845 This solution isn't ideal, but a real fix would require values to
1846 carry around both the parent value contents, and the contents of
1847 any dynamic fields within the parent. This is a substantial
1848 change to how values work in GDB. */
1849 if (VALUE_LVAL (component) == lval_internalvar_component)
1850 {
1851 gdb_assert (value_lazy (component));
1852 VALUE_LVAL (component) = lval_memory;
1853 }
1854 else
1855 gdb_assert (VALUE_LVAL (component) == lval_memory);
1856 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1857 }
1858 }
1859
1860 /* Access to the value history. */
1861
1862 /* Record a new value in the value history.
1863 Returns the absolute history index of the entry. */
1864
1865 int
1866 record_latest_value (struct value *val)
1867 {
1868 /* We don't want this value to have anything to do with the inferior anymore.
1869 In particular, "set $1 = 50" should not affect the variable from which
1870 the value was taken, and fast watchpoints should be able to assume that
1871 a value on the value history never changes. */
1872 if (value_lazy (val))
1873 value_fetch_lazy (val);
1874 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1875 from. This is a bit dubious, because then *&$1 does not just return $1
1876 but the current contents of that location. c'est la vie... */
1877 val->modifiable = 0;
1878
1879 value_history.push_back (release_value (val));
1880
1881 return value_history.size ();
1882 }
1883
1884 /* Return a copy of the value in the history with sequence number NUM. */
1885
1886 struct value *
1887 access_value_history (int num)
1888 {
1889 int absnum = num;
1890
1891 if (absnum <= 0)
1892 absnum += value_history.size ();
1893
1894 if (absnum <= 0)
1895 {
1896 if (num == 0)
1897 error (_("The history is empty."));
1898 else if (num == 1)
1899 error (_("There is only one value in the history."));
1900 else
1901 error (_("History does not go back to $$%d."), -num);
1902 }
1903 if (absnum > value_history.size ())
1904 error (_("History has not yet reached $%d."), absnum);
1905
1906 absnum--;
1907
1908 return value_copy (value_history[absnum].get ());
1909 }
1910
1911 static void
1912 show_values (const char *num_exp, int from_tty)
1913 {
1914 int i;
1915 struct value *val;
1916 static int num = 1;
1917
1918 if (num_exp)
1919 {
1920 /* "show values +" should print from the stored position.
1921 "show values <exp>" should print around value number <exp>. */
1922 if (num_exp[0] != '+' || num_exp[1] != '\0')
1923 num = parse_and_eval_long (num_exp) - 5;
1924 }
1925 else
1926 {
1927 /* "show values" means print the last 10 values. */
1928 num = value_history.size () - 9;
1929 }
1930
1931 if (num <= 0)
1932 num = 1;
1933
1934 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1935 {
1936 struct value_print_options opts;
1937
1938 val = access_value_history (i);
1939 printf_filtered (("$%d = "), i);
1940 get_user_print_options (&opts);
1941 value_print (val, gdb_stdout, &opts);
1942 printf_filtered (("\n"));
1943 }
1944
1945 /* The next "show values +" should start after what we just printed. */
1946 num += 10;
1947
1948 /* Hitting just return after this command should do the same thing as
1949 "show values +". If num_exp is null, this is unnecessary, since
1950 "show values +" is not useful after "show values". */
1951 if (from_tty && num_exp)
1952 set_repeat_arguments ("+");
1953 }
1954 \f
1955 enum internalvar_kind
1956 {
1957 /* The internal variable is empty. */
1958 INTERNALVAR_VOID,
1959
1960 /* The value of the internal variable is provided directly as
1961 a GDB value object. */
1962 INTERNALVAR_VALUE,
1963
1964 /* A fresh value is computed via a call-back routine on every
1965 access to the internal variable. */
1966 INTERNALVAR_MAKE_VALUE,
1967
1968 /* The internal variable holds a GDB internal convenience function. */
1969 INTERNALVAR_FUNCTION,
1970
1971 /* The variable holds an integer value. */
1972 INTERNALVAR_INTEGER,
1973
1974 /* The variable holds a GDB-provided string. */
1975 INTERNALVAR_STRING,
1976 };
1977
1978 union internalvar_data
1979 {
1980 /* A value object used with INTERNALVAR_VALUE. */
1981 struct value *value;
1982
1983 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1984 struct
1985 {
1986 /* The functions to call. */
1987 const struct internalvar_funcs *functions;
1988
1989 /* The function's user-data. */
1990 void *data;
1991 } make_value;
1992
1993 /* The internal function used with INTERNALVAR_FUNCTION. */
1994 struct
1995 {
1996 struct internal_function *function;
1997 /* True if this is the canonical name for the function. */
1998 int canonical;
1999 } fn;
2000
2001 /* An integer value used with INTERNALVAR_INTEGER. */
2002 struct
2003 {
2004 /* If type is non-NULL, it will be used as the type to generate
2005 a value for this internal variable. If type is NULL, a default
2006 integer type for the architecture is used. */
2007 struct type *type;
2008 LONGEST val;
2009 } integer;
2010
2011 /* A string value used with INTERNALVAR_STRING. */
2012 char *string;
2013 };
2014
2015 /* Internal variables. These are variables within the debugger
2016 that hold values assigned by debugger commands.
2017 The user refers to them with a '$' prefix
2018 that does not appear in the variable names stored internally. */
2019
2020 struct internalvar
2021 {
2022 struct internalvar *next;
2023 char *name;
2024
2025 /* We support various different kinds of content of an internal variable.
2026 enum internalvar_kind specifies the kind, and union internalvar_data
2027 provides the data associated with this particular kind. */
2028
2029 enum internalvar_kind kind;
2030
2031 union internalvar_data u;
2032 };
2033
2034 static struct internalvar *internalvars;
2035
2036 /* If the variable does not already exist create it and give it the
2037 value given. If no value is given then the default is zero. */
2038 static void
2039 init_if_undefined_command (const char* args, int from_tty)
2040 {
2041 struct internalvar *intvar = nullptr;
2042
2043 /* Parse the expression - this is taken from set_command(). */
2044 expression_up expr = parse_expression (args);
2045
2046 /* Validate the expression.
2047 Was the expression an assignment?
2048 Or even an expression at all? */
2049 if (expr->first_opcode () != BINOP_ASSIGN)
2050 error (_("Init-if-undefined requires an assignment expression."));
2051
2052 /* Extract the variable from the parsed expression. */
2053 expr::assign_operation *assign
2054 = dynamic_cast<expr::assign_operation *> (expr->op.get ());
2055 if (assign != nullptr)
2056 {
2057 expr::operation *lhs = assign->get_lhs ();
2058 expr::internalvar_operation *ivarop
2059 = dynamic_cast<expr::internalvar_operation *> (lhs);
2060 if (ivarop != nullptr)
2061 intvar = ivarop->get_internalvar ();
2062 }
2063
2064 if (intvar == nullptr)
2065 error (_("The first parameter to init-if-undefined "
2066 "should be a GDB variable."));
2067
2068 /* Only evaluate the expression if the lvalue is void.
2069 This may still fail if the expression is invalid. */
2070 if (intvar->kind == INTERNALVAR_VOID)
2071 evaluate_expression (expr.get ());
2072 }
2073
2074
2075 /* Look up an internal variable with name NAME. NAME should not
2076 normally include a dollar sign.
2077
2078 If the specified internal variable does not exist,
2079 the return value is NULL. */
2080
2081 struct internalvar *
2082 lookup_only_internalvar (const char *name)
2083 {
2084 struct internalvar *var;
2085
2086 for (var = internalvars; var; var = var->next)
2087 if (strcmp (var->name, name) == 0)
2088 return var;
2089
2090 return NULL;
2091 }
2092
2093 /* Complete NAME by comparing it to the names of internal
2094 variables. */
2095
2096 void
2097 complete_internalvar (completion_tracker &tracker, const char *name)
2098 {
2099 struct internalvar *var;
2100 int len;
2101
2102 len = strlen (name);
2103
2104 for (var = internalvars; var; var = var->next)
2105 if (strncmp (var->name, name, len) == 0)
2106 tracker.add_completion (make_unique_xstrdup (var->name));
2107 }
2108
2109 /* Create an internal variable with name NAME and with a void value.
2110 NAME should not normally include a dollar sign. */
2111
2112 struct internalvar *
2113 create_internalvar (const char *name)
2114 {
2115 struct internalvar *var = XNEW (struct internalvar);
2116
2117 var->name = xstrdup (name);
2118 var->kind = INTERNALVAR_VOID;
2119 var->next = internalvars;
2120 internalvars = var;
2121 return var;
2122 }
2123
2124 /* Create an internal variable with name NAME and register FUN as the
2125 function that value_of_internalvar uses to create a value whenever
2126 this variable is referenced. NAME should not normally include a
2127 dollar sign. DATA is passed uninterpreted to FUN when it is
2128 called. CLEANUP, if not NULL, is called when the internal variable
2129 is destroyed. It is passed DATA as its only argument. */
2130
2131 struct internalvar *
2132 create_internalvar_type_lazy (const char *name,
2133 const struct internalvar_funcs *funcs,
2134 void *data)
2135 {
2136 struct internalvar *var = create_internalvar (name);
2137
2138 var->kind = INTERNALVAR_MAKE_VALUE;
2139 var->u.make_value.functions = funcs;
2140 var->u.make_value.data = data;
2141 return var;
2142 }
2143
2144 /* See documentation in value.h. */
2145
2146 int
2147 compile_internalvar_to_ax (struct internalvar *var,
2148 struct agent_expr *expr,
2149 struct axs_value *value)
2150 {
2151 if (var->kind != INTERNALVAR_MAKE_VALUE
2152 || var->u.make_value.functions->compile_to_ax == NULL)
2153 return 0;
2154
2155 var->u.make_value.functions->compile_to_ax (var, expr, value,
2156 var->u.make_value.data);
2157 return 1;
2158 }
2159
2160 /* Look up an internal variable with name NAME. NAME should not
2161 normally include a dollar sign.
2162
2163 If the specified internal variable does not exist,
2164 one is created, with a void value. */
2165
2166 struct internalvar *
2167 lookup_internalvar (const char *name)
2168 {
2169 struct internalvar *var;
2170
2171 var = lookup_only_internalvar (name);
2172 if (var)
2173 return var;
2174
2175 return create_internalvar (name);
2176 }
2177
2178 /* Return current value of internal variable VAR. For variables that
2179 are not inherently typed, use a value type appropriate for GDBARCH. */
2180
2181 struct value *
2182 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2183 {
2184 struct value *val;
2185 struct trace_state_variable *tsv;
2186
2187 /* If there is a trace state variable of the same name, assume that
2188 is what we really want to see. */
2189 tsv = find_trace_state_variable (var->name);
2190 if (tsv)
2191 {
2192 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2193 &(tsv->value));
2194 if (tsv->value_known)
2195 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2196 tsv->value);
2197 else
2198 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2199 return val;
2200 }
2201
2202 switch (var->kind)
2203 {
2204 case INTERNALVAR_VOID:
2205 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2206 break;
2207
2208 case INTERNALVAR_FUNCTION:
2209 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2210 break;
2211
2212 case INTERNALVAR_INTEGER:
2213 if (!var->u.integer.type)
2214 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2215 var->u.integer.val);
2216 else
2217 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2218 break;
2219
2220 case INTERNALVAR_STRING:
2221 val = value_cstring (var->u.string, strlen (var->u.string),
2222 builtin_type (gdbarch)->builtin_char);
2223 break;
2224
2225 case INTERNALVAR_VALUE:
2226 val = value_copy (var->u.value);
2227 if (value_lazy (val))
2228 value_fetch_lazy (val);
2229 break;
2230
2231 case INTERNALVAR_MAKE_VALUE:
2232 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2233 var->u.make_value.data);
2234 break;
2235
2236 default:
2237 internal_error (__FILE__, __LINE__, _("bad kind"));
2238 }
2239
2240 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2241 on this value go back to affect the original internal variable.
2242
2243 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2244 no underlying modifiable state in the internal variable.
2245
2246 Likewise, if the variable's value is a computed lvalue, we want
2247 references to it to produce another computed lvalue, where
2248 references and assignments actually operate through the
2249 computed value's functions.
2250
2251 This means that internal variables with computed values
2252 behave a little differently from other internal variables:
2253 assignments to them don't just replace the previous value
2254 altogether. At the moment, this seems like the behavior we
2255 want. */
2256
2257 if (var->kind != INTERNALVAR_MAKE_VALUE
2258 && val->lval != lval_computed)
2259 {
2260 VALUE_LVAL (val) = lval_internalvar;
2261 VALUE_INTERNALVAR (val) = var;
2262 }
2263
2264 return val;
2265 }
2266
2267 int
2268 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2269 {
2270 if (var->kind == INTERNALVAR_INTEGER)
2271 {
2272 *result = var->u.integer.val;
2273 return 1;
2274 }
2275
2276 if (var->kind == INTERNALVAR_VALUE)
2277 {
2278 struct type *type = check_typedef (value_type (var->u.value));
2279
2280 if (type->code () == TYPE_CODE_INT)
2281 {
2282 *result = value_as_long (var->u.value);
2283 return 1;
2284 }
2285 }
2286
2287 return 0;
2288 }
2289
2290 static int
2291 get_internalvar_function (struct internalvar *var,
2292 struct internal_function **result)
2293 {
2294 switch (var->kind)
2295 {
2296 case INTERNALVAR_FUNCTION:
2297 *result = var->u.fn.function;
2298 return 1;
2299
2300 default:
2301 return 0;
2302 }
2303 }
2304
2305 void
2306 set_internalvar_component (struct internalvar *var,
2307 LONGEST offset, LONGEST bitpos,
2308 LONGEST bitsize, struct value *newval)
2309 {
2310 gdb_byte *addr;
2311 struct gdbarch *arch;
2312 int unit_size;
2313
2314 switch (var->kind)
2315 {
2316 case INTERNALVAR_VALUE:
2317 addr = value_contents_writeable (var->u.value).data ();
2318 arch = get_value_arch (var->u.value);
2319 unit_size = gdbarch_addressable_memory_unit_size (arch);
2320
2321 if (bitsize)
2322 modify_field (value_type (var->u.value), addr + offset,
2323 value_as_long (newval), bitpos, bitsize);
2324 else
2325 memcpy (addr + offset * unit_size, value_contents (newval).data (),
2326 TYPE_LENGTH (value_type (newval)));
2327 break;
2328
2329 default:
2330 /* We can never get a component of any other kind. */
2331 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2332 }
2333 }
2334
2335 void
2336 set_internalvar (struct internalvar *var, struct value *val)
2337 {
2338 enum internalvar_kind new_kind;
2339 union internalvar_data new_data = { 0 };
2340
2341 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2342 error (_("Cannot overwrite convenience function %s"), var->name);
2343
2344 /* Prepare new contents. */
2345 switch (check_typedef (value_type (val))->code ())
2346 {
2347 case TYPE_CODE_VOID:
2348 new_kind = INTERNALVAR_VOID;
2349 break;
2350
2351 case TYPE_CODE_INTERNAL_FUNCTION:
2352 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2353 new_kind = INTERNALVAR_FUNCTION;
2354 get_internalvar_function (VALUE_INTERNALVAR (val),
2355 &new_data.fn.function);
2356 /* Copies created here are never canonical. */
2357 break;
2358
2359 default:
2360 new_kind = INTERNALVAR_VALUE;
2361 struct value *copy = value_copy (val);
2362 copy->modifiable = 1;
2363
2364 /* Force the value to be fetched from the target now, to avoid problems
2365 later when this internalvar is referenced and the target is gone or
2366 has changed. */
2367 if (value_lazy (copy))
2368 value_fetch_lazy (copy);
2369
2370 /* Release the value from the value chain to prevent it from being
2371 deleted by free_all_values. From here on this function should not
2372 call error () until new_data is installed into the var->u to avoid
2373 leaking memory. */
2374 new_data.value = release_value (copy).release ();
2375
2376 /* Internal variables which are created from values with a dynamic
2377 location don't need the location property of the origin anymore.
2378 The resolved dynamic location is used prior then any other address
2379 when accessing the value.
2380 If we keep it, we would still refer to the origin value.
2381 Remove the location property in case it exist. */
2382 value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
2383
2384 break;
2385 }
2386
2387 /* Clean up old contents. */
2388 clear_internalvar (var);
2389
2390 /* Switch over. */
2391 var->kind = new_kind;
2392 var->u = new_data;
2393 /* End code which must not call error(). */
2394 }
2395
2396 void
2397 set_internalvar_integer (struct internalvar *var, LONGEST l)
2398 {
2399 /* Clean up old contents. */
2400 clear_internalvar (var);
2401
2402 var->kind = INTERNALVAR_INTEGER;
2403 var->u.integer.type = NULL;
2404 var->u.integer.val = l;
2405 }
2406
2407 void
2408 set_internalvar_string (struct internalvar *var, const char *string)
2409 {
2410 /* Clean up old contents. */
2411 clear_internalvar (var);
2412
2413 var->kind = INTERNALVAR_STRING;
2414 var->u.string = xstrdup (string);
2415 }
2416
2417 static void
2418 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2419 {
2420 /* Clean up old contents. */
2421 clear_internalvar (var);
2422
2423 var->kind = INTERNALVAR_FUNCTION;
2424 var->u.fn.function = f;
2425 var->u.fn.canonical = 1;
2426 /* Variables installed here are always the canonical version. */
2427 }
2428
2429 void
2430 clear_internalvar (struct internalvar *var)
2431 {
2432 /* Clean up old contents. */
2433 switch (var->kind)
2434 {
2435 case INTERNALVAR_VALUE:
2436 value_decref (var->u.value);
2437 break;
2438
2439 case INTERNALVAR_STRING:
2440 xfree (var->u.string);
2441 break;
2442
2443 case INTERNALVAR_MAKE_VALUE:
2444 if (var->u.make_value.functions->destroy != NULL)
2445 var->u.make_value.functions->destroy (var->u.make_value.data);
2446 break;
2447
2448 default:
2449 break;
2450 }
2451
2452 /* Reset to void kind. */
2453 var->kind = INTERNALVAR_VOID;
2454 }
2455
2456 const char *
2457 internalvar_name (const struct internalvar *var)
2458 {
2459 return var->name;
2460 }
2461
2462 static struct internal_function *
2463 create_internal_function (const char *name,
2464 internal_function_fn handler, void *cookie)
2465 {
2466 struct internal_function *ifn = XNEW (struct internal_function);
2467
2468 ifn->name = xstrdup (name);
2469 ifn->handler = handler;
2470 ifn->cookie = cookie;
2471 return ifn;
2472 }
2473
2474 const char *
2475 value_internal_function_name (struct value *val)
2476 {
2477 struct internal_function *ifn;
2478 int result;
2479
2480 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2481 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2482 gdb_assert (result);
2483
2484 return ifn->name;
2485 }
2486
2487 struct value *
2488 call_internal_function (struct gdbarch *gdbarch,
2489 const struct language_defn *language,
2490 struct value *func, int argc, struct value **argv)
2491 {
2492 struct internal_function *ifn;
2493 int result;
2494
2495 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2496 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2497 gdb_assert (result);
2498
2499 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2500 }
2501
2502 /* The 'function' command. This does nothing -- it is just a
2503 placeholder to let "help function NAME" work. This is also used as
2504 the implementation of the sub-command that is created when
2505 registering an internal function. */
2506 static void
2507 function_command (const char *command, int from_tty)
2508 {
2509 /* Do nothing. */
2510 }
2511
2512 /* Helper function that does the work for add_internal_function. */
2513
2514 static struct cmd_list_element *
2515 do_add_internal_function (const char *name, const char *doc,
2516 internal_function_fn handler, void *cookie)
2517 {
2518 struct internal_function *ifn;
2519 struct internalvar *var = lookup_internalvar (name);
2520
2521 ifn = create_internal_function (name, handler, cookie);
2522 set_internalvar_function (var, ifn);
2523
2524 return add_cmd (name, no_class, function_command, doc, &functionlist);
2525 }
2526
2527 /* See value.h. */
2528
2529 void
2530 add_internal_function (const char *name, const char *doc,
2531 internal_function_fn handler, void *cookie)
2532 {
2533 do_add_internal_function (name, doc, handler, cookie);
2534 }
2535
2536 /* See value.h. */
2537
2538 void
2539 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2540 gdb::unique_xmalloc_ptr<char> &&doc,
2541 internal_function_fn handler, void *cookie)
2542 {
2543 struct cmd_list_element *cmd
2544 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2545 doc.release ();
2546 cmd->doc_allocated = 1;
2547 name.release ();
2548 cmd->name_allocated = 1;
2549 }
2550
2551 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2552 prevent cycles / duplicates. */
2553
2554 void
2555 preserve_one_value (struct value *value, struct objfile *objfile,
2556 htab_t copied_types)
2557 {
2558 if (value->type->objfile_owner () == objfile)
2559 value->type = copy_type_recursive (objfile, value->type, copied_types);
2560
2561 if (value->enclosing_type->objfile_owner () == objfile)
2562 value->enclosing_type = copy_type_recursive (objfile,
2563 value->enclosing_type,
2564 copied_types);
2565 }
2566
2567 /* Likewise for internal variable VAR. */
2568
2569 static void
2570 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2571 htab_t copied_types)
2572 {
2573 switch (var->kind)
2574 {
2575 case INTERNALVAR_INTEGER:
2576 if (var->u.integer.type
2577 && var->u.integer.type->objfile_owner () == objfile)
2578 var->u.integer.type
2579 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2580 break;
2581
2582 case INTERNALVAR_VALUE:
2583 preserve_one_value (var->u.value, objfile, copied_types);
2584 break;
2585 }
2586 }
2587
2588 /* Update the internal variables and value history when OBJFILE is
2589 discarded; we must copy the types out of the objfile. New global types
2590 will be created for every convenience variable which currently points to
2591 this objfile's types, and the convenience variables will be adjusted to
2592 use the new global types. */
2593
2594 void
2595 preserve_values (struct objfile *objfile)
2596 {
2597 struct internalvar *var;
2598
2599 /* Create the hash table. We allocate on the objfile's obstack, since
2600 it is soon to be deleted. */
2601 htab_up copied_types = create_copied_types_hash (objfile);
2602
2603 for (const value_ref_ptr &item : value_history)
2604 preserve_one_value (item.get (), objfile, copied_types.get ());
2605
2606 for (var = internalvars; var; var = var->next)
2607 preserve_one_internalvar (var, objfile, copied_types.get ());
2608
2609 preserve_ext_lang_values (objfile, copied_types.get ());
2610 }
2611
2612 static void
2613 show_convenience (const char *ignore, int from_tty)
2614 {
2615 struct gdbarch *gdbarch = get_current_arch ();
2616 struct internalvar *var;
2617 int varseen = 0;
2618 struct value_print_options opts;
2619
2620 get_user_print_options (&opts);
2621 for (var = internalvars; var; var = var->next)
2622 {
2623
2624 if (!varseen)
2625 {
2626 varseen = 1;
2627 }
2628 printf_filtered (("$%s = "), var->name);
2629
2630 try
2631 {
2632 struct value *val;
2633
2634 val = value_of_internalvar (gdbarch, var);
2635 value_print (val, gdb_stdout, &opts);
2636 }
2637 catch (const gdb_exception_error &ex)
2638 {
2639 fprintf_styled (gdb_stdout, metadata_style.style (),
2640 _("<error: %s>"), ex.what ());
2641 }
2642
2643 printf_filtered (("\n"));
2644 }
2645 if (!varseen)
2646 {
2647 /* This text does not mention convenience functions on purpose.
2648 The user can't create them except via Python, and if Python support
2649 is installed this message will never be printed ($_streq will
2650 exist). */
2651 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2652 "Convenience variables have "
2653 "names starting with \"$\";\n"
2654 "use \"set\" as in \"set "
2655 "$foo = 5\" to define them.\n"));
2656 }
2657 }
2658 \f
2659
2660 /* See value.h. */
2661
2662 struct value *
2663 value_from_xmethod (xmethod_worker_up &&worker)
2664 {
2665 struct value *v;
2666
2667 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2668 v->lval = lval_xcallable;
2669 v->location.xm_worker = worker.release ();
2670 v->modifiable = 0;
2671
2672 return v;
2673 }
2674
2675 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2676
2677 struct type *
2678 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2679 {
2680 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2681 && method->lval == lval_xcallable && !argv.empty ());
2682
2683 return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2684 }
2685
2686 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2687
2688 struct value *
2689 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2690 {
2691 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2692 && method->lval == lval_xcallable && !argv.empty ());
2693
2694 return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2695 }
2696 \f
2697 /* Extract a value as a C number (either long or double).
2698 Knows how to convert fixed values to double, or
2699 floating values to long.
2700 Does not deallocate the value. */
2701
2702 LONGEST
2703 value_as_long (struct value *val)
2704 {
2705 /* This coerces arrays and functions, which is necessary (e.g.
2706 in disassemble_command). It also dereferences references, which
2707 I suspect is the most logical thing to do. */
2708 val = coerce_array (val);
2709 return unpack_long (value_type (val), value_contents (val).data ());
2710 }
2711
2712 /* Extract a value as a C pointer. Does not deallocate the value.
2713 Note that val's type may not actually be a pointer; value_as_long
2714 handles all the cases. */
2715 CORE_ADDR
2716 value_as_address (struct value *val)
2717 {
2718 struct gdbarch *gdbarch = value_type (val)->arch ();
2719
2720 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2721 whether we want this to be true eventually. */
2722 #if 0
2723 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2724 non-address (e.g. argument to "signal", "info break", etc.), or
2725 for pointers to char, in which the low bits *are* significant. */
2726 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2727 #else
2728
2729 /* There are several targets (IA-64, PowerPC, and others) which
2730 don't represent pointers to functions as simply the address of
2731 the function's entry point. For example, on the IA-64, a
2732 function pointer points to a two-word descriptor, generated by
2733 the linker, which contains the function's entry point, and the
2734 value the IA-64 "global pointer" register should have --- to
2735 support position-independent code. The linker generates
2736 descriptors only for those functions whose addresses are taken.
2737
2738 On such targets, it's difficult for GDB to convert an arbitrary
2739 function address into a function pointer; it has to either find
2740 an existing descriptor for that function, or call malloc and
2741 build its own. On some targets, it is impossible for GDB to
2742 build a descriptor at all: the descriptor must contain a jump
2743 instruction; data memory cannot be executed; and code memory
2744 cannot be modified.
2745
2746 Upon entry to this function, if VAL is a value of type `function'
2747 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2748 value_address (val) is the address of the function. This is what
2749 you'll get if you evaluate an expression like `main'. The call
2750 to COERCE_ARRAY below actually does all the usual unary
2751 conversions, which includes converting values of type `function'
2752 to `pointer to function'. This is the challenging conversion
2753 discussed above. Then, `unpack_long' will convert that pointer
2754 back into an address.
2755
2756 So, suppose the user types `disassemble foo' on an architecture
2757 with a strange function pointer representation, on which GDB
2758 cannot build its own descriptors, and suppose further that `foo'
2759 has no linker-built descriptor. The address->pointer conversion
2760 will signal an error and prevent the command from running, even
2761 though the next step would have been to convert the pointer
2762 directly back into the same address.
2763
2764 The following shortcut avoids this whole mess. If VAL is a
2765 function, just return its address directly. */
2766 if (value_type (val)->code () == TYPE_CODE_FUNC
2767 || value_type (val)->code () == TYPE_CODE_METHOD)
2768 return value_address (val);
2769
2770 val = coerce_array (val);
2771
2772 /* Some architectures (e.g. Harvard), map instruction and data
2773 addresses onto a single large unified address space. For
2774 instance: An architecture may consider a large integer in the
2775 range 0x10000000 .. 0x1000ffff to already represent a data
2776 addresses (hence not need a pointer to address conversion) while
2777 a small integer would still need to be converted integer to
2778 pointer to address. Just assume such architectures handle all
2779 integer conversions in a single function. */
2780
2781 /* JimB writes:
2782
2783 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2784 must admonish GDB hackers to make sure its behavior matches the
2785 compiler's, whenever possible.
2786
2787 In general, I think GDB should evaluate expressions the same way
2788 the compiler does. When the user copies an expression out of
2789 their source code and hands it to a `print' command, they should
2790 get the same value the compiler would have computed. Any
2791 deviation from this rule can cause major confusion and annoyance,
2792 and needs to be justified carefully. In other words, GDB doesn't
2793 really have the freedom to do these conversions in clever and
2794 useful ways.
2795
2796 AndrewC pointed out that users aren't complaining about how GDB
2797 casts integers to pointers; they are complaining that they can't
2798 take an address from a disassembly listing and give it to `x/i'.
2799 This is certainly important.
2800
2801 Adding an architecture method like integer_to_address() certainly
2802 makes it possible for GDB to "get it right" in all circumstances
2803 --- the target has complete control over how things get done, so
2804 people can Do The Right Thing for their target without breaking
2805 anyone else. The standard doesn't specify how integers get
2806 converted to pointers; usually, the ABI doesn't either, but
2807 ABI-specific code is a more reasonable place to handle it. */
2808
2809 if (!value_type (val)->is_pointer_or_reference ()
2810 && gdbarch_integer_to_address_p (gdbarch))
2811 return gdbarch_integer_to_address (gdbarch, value_type (val),
2812 value_contents (val).data ());
2813
2814 return unpack_long (value_type (val), value_contents (val).data ());
2815 #endif
2816 }
2817 \f
2818 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2819 as a long, or as a double, assuming the raw data is described
2820 by type TYPE. Knows how to convert different sizes of values
2821 and can convert between fixed and floating point. We don't assume
2822 any alignment for the raw data. Return value is in host byte order.
2823
2824 If you want functions and arrays to be coerced to pointers, and
2825 references to be dereferenced, call value_as_long() instead.
2826
2827 C++: It is assumed that the front-end has taken care of
2828 all matters concerning pointers to members. A pointer
2829 to member which reaches here is considered to be equivalent
2830 to an INT (or some size). After all, it is only an offset. */
2831
2832 LONGEST
2833 unpack_long (struct type *type, const gdb_byte *valaddr)
2834 {
2835 if (is_fixed_point_type (type))
2836 type = type->fixed_point_type_base_type ();
2837
2838 enum bfd_endian byte_order = type_byte_order (type);
2839 enum type_code code = type->code ();
2840 int len = TYPE_LENGTH (type);
2841 int nosign = type->is_unsigned ();
2842
2843 switch (code)
2844 {
2845 case TYPE_CODE_TYPEDEF:
2846 return unpack_long (check_typedef (type), valaddr);
2847 case TYPE_CODE_ENUM:
2848 case TYPE_CODE_FLAGS:
2849 case TYPE_CODE_BOOL:
2850 case TYPE_CODE_INT:
2851 case TYPE_CODE_CHAR:
2852 case TYPE_CODE_RANGE:
2853 case TYPE_CODE_MEMBERPTR:
2854 {
2855 LONGEST result;
2856
2857 if (type->bit_size_differs_p ())
2858 {
2859 unsigned bit_off = type->bit_offset ();
2860 unsigned bit_size = type->bit_size ();
2861 if (bit_size == 0)
2862 {
2863 /* unpack_bits_as_long doesn't handle this case the
2864 way we'd like, so handle it here. */
2865 result = 0;
2866 }
2867 else
2868 result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
2869 }
2870 else
2871 {
2872 if (nosign)
2873 result = extract_unsigned_integer (valaddr, len, byte_order);
2874 else
2875 result = extract_signed_integer (valaddr, len, byte_order);
2876 }
2877 if (code == TYPE_CODE_RANGE)
2878 result += type->bounds ()->bias;
2879 return result;
2880 }
2881
2882 case TYPE_CODE_FLT:
2883 case TYPE_CODE_DECFLOAT:
2884 return target_float_to_longest (valaddr, type);
2885
2886 case TYPE_CODE_FIXED_POINT:
2887 {
2888 gdb_mpq vq;
2889 vq.read_fixed_point (gdb::make_array_view (valaddr, len),
2890 byte_order, nosign,
2891 type->fixed_point_scaling_factor ());
2892
2893 gdb_mpz vz;
2894 mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
2895 return vz.as_integer<LONGEST> ();
2896 }
2897
2898 case TYPE_CODE_PTR:
2899 case TYPE_CODE_REF:
2900 case TYPE_CODE_RVALUE_REF:
2901 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2902 whether we want this to be true eventually. */
2903 return extract_typed_address (valaddr, type);
2904
2905 default:
2906 error (_("Value can't be converted to integer."));
2907 }
2908 }
2909
2910 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2911 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2912 We don't assume any alignment for the raw data. Return value is in
2913 host byte order.
2914
2915 If you want functions and arrays to be coerced to pointers, and
2916 references to be dereferenced, call value_as_address() instead.
2917
2918 C++: It is assumed that the front-end has taken care of
2919 all matters concerning pointers to members. A pointer
2920 to member which reaches here is considered to be equivalent
2921 to an INT (or some size). After all, it is only an offset. */
2922
2923 CORE_ADDR
2924 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2925 {
2926 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2927 whether we want this to be true eventually. */
2928 return unpack_long (type, valaddr);
2929 }
2930
2931 bool
2932 is_floating_value (struct value *val)
2933 {
2934 struct type *type = check_typedef (value_type (val));
2935
2936 if (is_floating_type (type))
2937 {
2938 if (!target_float_is_valid (value_contents (val).data (), type))
2939 error (_("Invalid floating value found in program."));
2940 return true;
2941 }
2942
2943 return false;
2944 }
2945
2946 \f
2947 /* Get the value of the FIELDNO'th field (which must be static) of
2948 TYPE. */
2949
2950 struct value *
2951 value_static_field (struct type *type, int fieldno)
2952 {
2953 struct value *retval;
2954
2955 switch (type->field (fieldno).loc_kind ())
2956 {
2957 case FIELD_LOC_KIND_PHYSADDR:
2958 retval = value_at_lazy (type->field (fieldno).type (),
2959 type->field (fieldno).loc_physaddr ());
2960 break;
2961 case FIELD_LOC_KIND_PHYSNAME:
2962 {
2963 const char *phys_name = type->field (fieldno).loc_physname ();
2964 /* type->field (fieldno).name (); */
2965 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2966
2967 if (sym.symbol == NULL)
2968 {
2969 /* With some compilers, e.g. HP aCC, static data members are
2970 reported as non-debuggable symbols. */
2971 struct bound_minimal_symbol msym
2972 = lookup_minimal_symbol (phys_name, NULL, NULL);
2973 struct type *field_type = type->field (fieldno).type ();
2974
2975 if (!msym.minsym)
2976 retval = allocate_optimized_out_value (field_type);
2977 else
2978 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2979 }
2980 else
2981 retval = value_of_variable (sym.symbol, sym.block);
2982 break;
2983 }
2984 default:
2985 gdb_assert_not_reached ("unexpected field location kind");
2986 }
2987
2988 return retval;
2989 }
2990
2991 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2992 You have to be careful here, since the size of the data area for the value
2993 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2994 than the old enclosing type, you have to allocate more space for the
2995 data. */
2996
2997 void
2998 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2999 {
3000 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3001 {
3002 check_type_length_before_alloc (new_encl_type);
3003 val->contents
3004 .reset ((gdb_byte *) xrealloc (val->contents.release (),
3005 TYPE_LENGTH (new_encl_type)));
3006 }
3007
3008 val->enclosing_type = new_encl_type;
3009 }
3010
3011 /* Given a value ARG1 (offset by OFFSET bytes)
3012 of a struct or union type ARG_TYPE,
3013 extract and return the value of one of its (non-static) fields.
3014 FIELDNO says which field. */
3015
3016 struct value *
3017 value_primitive_field (struct value *arg1, LONGEST offset,
3018 int fieldno, struct type *arg_type)
3019 {
3020 struct value *v;
3021 struct type *type;
3022 struct gdbarch *arch = get_value_arch (arg1);
3023 int unit_size = gdbarch_addressable_memory_unit_size (arch);
3024
3025 arg_type = check_typedef (arg_type);
3026 type = arg_type->field (fieldno).type ();
3027
3028 /* Call check_typedef on our type to make sure that, if TYPE
3029 is a TYPE_CODE_TYPEDEF, its length is set to the length
3030 of the target type instead of zero. However, we do not
3031 replace the typedef type by the target type, because we want
3032 to keep the typedef in order to be able to print the type
3033 description correctly. */
3034 check_typedef (type);
3035
3036 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3037 {
3038 /* Handle packed fields.
3039
3040 Create a new value for the bitfield, with bitpos and bitsize
3041 set. If possible, arrange offset and bitpos so that we can
3042 do a single aligned read of the size of the containing type.
3043 Otherwise, adjust offset to the byte containing the first
3044 bit. Assume that the address, offset, and embedded offset
3045 are sufficiently aligned. */
3046
3047 LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
3048 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3049
3050 v = allocate_value_lazy (type);
3051 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3052 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3053 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3054 v->bitpos = bitpos % container_bitsize;
3055 else
3056 v->bitpos = bitpos % 8;
3057 v->offset = (value_embedded_offset (arg1)
3058 + offset
3059 + (bitpos - v->bitpos) / 8);
3060 set_value_parent (v, arg1);
3061 if (!value_lazy (arg1))
3062 value_fetch_lazy (v);
3063 }
3064 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3065 {
3066 /* This field is actually a base subobject, so preserve the
3067 entire object's contents for later references to virtual
3068 bases, etc. */
3069 LONGEST boffset;
3070
3071 /* Lazy register values with offsets are not supported. */
3072 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3073 value_fetch_lazy (arg1);
3074
3075 /* We special case virtual inheritance here because this
3076 requires access to the contents, which we would rather avoid
3077 for references to ordinary fields of unavailable values. */
3078 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3079 boffset = baseclass_offset (arg_type, fieldno,
3080 value_contents (arg1).data (),
3081 value_embedded_offset (arg1),
3082 value_address (arg1),
3083 arg1);
3084 else
3085 boffset = arg_type->field (fieldno).loc_bitpos () / 8;
3086
3087 if (value_lazy (arg1))
3088 v = allocate_value_lazy (value_enclosing_type (arg1));
3089 else
3090 {
3091 v = allocate_value (value_enclosing_type (arg1));
3092 value_contents_copy_raw (v, 0, arg1, 0,
3093 TYPE_LENGTH (value_enclosing_type (arg1)));
3094 }
3095 v->type = type;
3096 v->offset = value_offset (arg1);
3097 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3098 }
3099 else if (NULL != TYPE_DATA_LOCATION (type))
3100 {
3101 /* Field is a dynamic data member. */
3102
3103 gdb_assert (0 == offset);
3104 /* We expect an already resolved data location. */
3105 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3106 /* For dynamic data types defer memory allocation
3107 until we actual access the value. */
3108 v = allocate_value_lazy (type);
3109 }
3110 else
3111 {
3112 /* Plain old data member */
3113 offset += (arg_type->field (fieldno).loc_bitpos ()
3114 / (HOST_CHAR_BIT * unit_size));
3115
3116 /* Lazy register values with offsets are not supported. */
3117 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3118 value_fetch_lazy (arg1);
3119
3120 if (value_lazy (arg1))
3121 v = allocate_value_lazy (type);
3122 else
3123 {
3124 v = allocate_value (type);
3125 value_contents_copy_raw (v, value_embedded_offset (v),
3126 arg1, value_embedded_offset (arg1) + offset,
3127 type_length_units (type));
3128 }
3129 v->offset = (value_offset (arg1) + offset
3130 + value_embedded_offset (arg1));
3131 }
3132 set_value_component_location (v, arg1);
3133 return v;
3134 }
3135
3136 /* Given a value ARG1 of a struct or union type,
3137 extract and return the value of one of its (non-static) fields.
3138 FIELDNO says which field. */
3139
3140 struct value *
3141 value_field (struct value *arg1, int fieldno)
3142 {
3143 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3144 }
3145
3146 /* Return a non-virtual function as a value.
3147 F is the list of member functions which contains the desired method.
3148 J is an index into F which provides the desired method.
3149
3150 We only use the symbol for its address, so be happy with either a
3151 full symbol or a minimal symbol. */
3152
3153 struct value *
3154 value_fn_field (struct value **arg1p, struct fn_field *f,
3155 int j, struct type *type,
3156 LONGEST offset)
3157 {
3158 struct value *v;
3159 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3160 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3161 struct symbol *sym;
3162 struct bound_minimal_symbol msym;
3163
3164 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3165 if (sym != NULL)
3166 {
3167 memset (&msym, 0, sizeof (msym));
3168 }
3169 else
3170 {
3171 gdb_assert (sym == NULL);
3172 msym = lookup_bound_minimal_symbol (physname);
3173 if (msym.minsym == NULL)
3174 return NULL;
3175 }
3176
3177 v = allocate_value (ftype);
3178 VALUE_LVAL (v) = lval_memory;
3179 if (sym)
3180 {
3181 set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3182 }
3183 else
3184 {
3185 /* The minimal symbol might point to a function descriptor;
3186 resolve it to the actual code address instead. */
3187 struct objfile *objfile = msym.objfile;
3188 struct gdbarch *gdbarch = objfile->arch ();
3189
3190 set_value_address (v,
3191 gdbarch_convert_from_func_ptr_addr
3192 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym),
3193 current_inferior ()->top_target ()));
3194 }
3195
3196 if (arg1p)
3197 {
3198 if (type != value_type (*arg1p))
3199 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3200 value_addr (*arg1p)));
3201
3202 /* Move the `this' pointer according to the offset.
3203 VALUE_OFFSET (*arg1p) += offset; */
3204 }
3205
3206 return v;
3207 }
3208
3209 \f
3210
3211 /* See value.h. */
3212
3213 LONGEST
3214 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3215 LONGEST bitpos, LONGEST bitsize)
3216 {
3217 enum bfd_endian byte_order = type_byte_order (field_type);
3218 ULONGEST val;
3219 ULONGEST valmask;
3220 int lsbcount;
3221 LONGEST bytes_read;
3222 LONGEST read_offset;
3223
3224 /* Read the minimum number of bytes required; there may not be
3225 enough bytes to read an entire ULONGEST. */
3226 field_type = check_typedef (field_type);
3227 if (bitsize)
3228 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3229 else
3230 {
3231 bytes_read = TYPE_LENGTH (field_type);
3232 bitsize = 8 * bytes_read;
3233 }
3234
3235 read_offset = bitpos / 8;
3236
3237 val = extract_unsigned_integer (valaddr + read_offset,
3238 bytes_read, byte_order);
3239
3240 /* Extract bits. See comment above. */
3241
3242 if (byte_order == BFD_ENDIAN_BIG)
3243 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3244 else
3245 lsbcount = (bitpos % 8);
3246 val >>= lsbcount;
3247
3248 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3249 If the field is signed, and is negative, then sign extend. */
3250
3251 if (bitsize < 8 * (int) sizeof (val))
3252 {
3253 valmask = (((ULONGEST) 1) << bitsize) - 1;
3254 val &= valmask;
3255 if (!field_type->is_unsigned ())
3256 {
3257 if (val & (valmask ^ (valmask >> 1)))
3258 {
3259 val |= ~valmask;
3260 }
3261 }
3262 }
3263
3264 return val;
3265 }
3266
3267 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3268 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3269 ORIGINAL_VALUE, which must not be NULL. See
3270 unpack_value_bits_as_long for more details. */
3271
3272 int
3273 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3274 LONGEST embedded_offset, int fieldno,
3275 const struct value *val, LONGEST *result)
3276 {
3277 int bitpos = type->field (fieldno).loc_bitpos ();
3278 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3279 struct type *field_type = type->field (fieldno).type ();
3280 int bit_offset;
3281
3282 gdb_assert (val != NULL);
3283
3284 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3285 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3286 || !value_bits_available (val, bit_offset, bitsize))
3287 return 0;
3288
3289 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3290 bitpos, bitsize);
3291 return 1;
3292 }
3293
3294 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3295 object at VALADDR. See unpack_bits_as_long for more details. */
3296
3297 LONGEST
3298 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3299 {
3300 int bitpos = type->field (fieldno).loc_bitpos ();
3301 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3302 struct type *field_type = type->field (fieldno).type ();
3303
3304 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3305 }
3306
3307 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3308 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3309 the contents in DEST_VAL, zero or sign extending if the type of
3310 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3311 VAL. If the VAL's contents required to extract the bitfield from
3312 are unavailable/optimized out, DEST_VAL is correspondingly
3313 marked unavailable/optimized out. */
3314
3315 void
3316 unpack_value_bitfield (struct value *dest_val,
3317 LONGEST bitpos, LONGEST bitsize,
3318 const gdb_byte *valaddr, LONGEST embedded_offset,
3319 const struct value *val)
3320 {
3321 enum bfd_endian byte_order;
3322 int src_bit_offset;
3323 int dst_bit_offset;
3324 struct type *field_type = value_type (dest_val);
3325
3326 byte_order = type_byte_order (field_type);
3327
3328 /* First, unpack and sign extend the bitfield as if it was wholly
3329 valid. Optimized out/unavailable bits are read as zero, but
3330 that's OK, as they'll end up marked below. If the VAL is
3331 wholly-invalid we may have skipped allocating its contents,
3332 though. See allocate_optimized_out_value. */
3333 if (valaddr != NULL)
3334 {
3335 LONGEST num;
3336
3337 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3338 bitpos, bitsize);
3339 store_signed_integer (value_contents_raw (dest_val).data (),
3340 TYPE_LENGTH (field_type), byte_order, num);
3341 }
3342
3343 /* Now copy the optimized out / unavailability ranges to the right
3344 bits. */
3345 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3346 if (byte_order == BFD_ENDIAN_BIG)
3347 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3348 else
3349 dst_bit_offset = 0;
3350 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3351 val, src_bit_offset, bitsize);
3352 }
3353
3354 /* Return a new value with type TYPE, which is FIELDNO field of the
3355 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3356 of VAL. If the VAL's contents required to extract the bitfield
3357 from are unavailable/optimized out, the new value is
3358 correspondingly marked unavailable/optimized out. */
3359
3360 struct value *
3361 value_field_bitfield (struct type *type, int fieldno,
3362 const gdb_byte *valaddr,
3363 LONGEST embedded_offset, const struct value *val)
3364 {
3365 int bitpos = type->field (fieldno).loc_bitpos ();
3366 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3367 struct value *res_val = allocate_value (type->field (fieldno).type ());
3368
3369 unpack_value_bitfield (res_val, bitpos, bitsize,
3370 valaddr, embedded_offset, val);
3371
3372 return res_val;
3373 }
3374
3375 /* Modify the value of a bitfield. ADDR points to a block of memory in
3376 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3377 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3378 indicate which bits (in target bit order) comprise the bitfield.
3379 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3380 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3381
3382 void
3383 modify_field (struct type *type, gdb_byte *addr,
3384 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3385 {
3386 enum bfd_endian byte_order = type_byte_order (type);
3387 ULONGEST oword;
3388 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3389 LONGEST bytesize;
3390
3391 /* Normalize BITPOS. */
3392 addr += bitpos / 8;
3393 bitpos %= 8;
3394
3395 /* If a negative fieldval fits in the field in question, chop
3396 off the sign extension bits. */
3397 if ((~fieldval & ~(mask >> 1)) == 0)
3398 fieldval &= mask;
3399
3400 /* Warn if value is too big to fit in the field in question. */
3401 if (0 != (fieldval & ~mask))
3402 {
3403 /* FIXME: would like to include fieldval in the message, but
3404 we don't have a sprintf_longest. */
3405 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3406
3407 /* Truncate it, otherwise adjoining fields may be corrupted. */
3408 fieldval &= mask;
3409 }
3410
3411 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3412 false valgrind reports. */
3413
3414 bytesize = (bitpos + bitsize + 7) / 8;
3415 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3416
3417 /* Shifting for bit field depends on endianness of the target machine. */
3418 if (byte_order == BFD_ENDIAN_BIG)
3419 bitpos = bytesize * 8 - bitpos - bitsize;
3420
3421 oword &= ~(mask << bitpos);
3422 oword |= fieldval << bitpos;
3423
3424 store_unsigned_integer (addr, bytesize, byte_order, oword);
3425 }
3426 \f
3427 /* Pack NUM into BUF using a target format of TYPE. */
3428
3429 void
3430 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3431 {
3432 enum bfd_endian byte_order = type_byte_order (type);
3433 LONGEST len;
3434
3435 type = check_typedef (type);
3436 len = TYPE_LENGTH (type);
3437
3438 switch (type->code ())
3439 {
3440 case TYPE_CODE_RANGE:
3441 num -= type->bounds ()->bias;
3442 /* Fall through. */
3443 case TYPE_CODE_INT:
3444 case TYPE_CODE_CHAR:
3445 case TYPE_CODE_ENUM:
3446 case TYPE_CODE_FLAGS:
3447 case TYPE_CODE_BOOL:
3448 case TYPE_CODE_MEMBERPTR:
3449 if (type->bit_size_differs_p ())
3450 {
3451 unsigned bit_off = type->bit_offset ();
3452 unsigned bit_size = type->bit_size ();
3453 num &= ((ULONGEST) 1 << bit_size) - 1;
3454 num <<= bit_off;
3455 }
3456 store_signed_integer (buf, len, byte_order, num);
3457 break;
3458
3459 case TYPE_CODE_REF:
3460 case TYPE_CODE_RVALUE_REF:
3461 case TYPE_CODE_PTR:
3462 store_typed_address (buf, type, (CORE_ADDR) num);
3463 break;
3464
3465 case TYPE_CODE_FLT:
3466 case TYPE_CODE_DECFLOAT:
3467 target_float_from_longest (buf, type, num);
3468 break;
3469
3470 default:
3471 error (_("Unexpected type (%d) encountered for integer constant."),
3472 type->code ());
3473 }
3474 }
3475
3476
3477 /* Pack NUM into BUF using a target format of TYPE. */
3478
3479 static void
3480 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3481 {
3482 LONGEST len;
3483 enum bfd_endian byte_order;
3484
3485 type = check_typedef (type);
3486 len = TYPE_LENGTH (type);
3487 byte_order = type_byte_order (type);
3488
3489 switch (type->code ())
3490 {
3491 case TYPE_CODE_INT:
3492 case TYPE_CODE_CHAR:
3493 case TYPE_CODE_ENUM:
3494 case TYPE_CODE_FLAGS:
3495 case TYPE_CODE_BOOL:
3496 case TYPE_CODE_RANGE:
3497 case TYPE_CODE_MEMBERPTR:
3498 if (type->bit_size_differs_p ())
3499 {
3500 unsigned bit_off = type->bit_offset ();
3501 unsigned bit_size = type->bit_size ();
3502 num &= ((ULONGEST) 1 << bit_size) - 1;
3503 num <<= bit_off;
3504 }
3505 store_unsigned_integer (buf, len, byte_order, num);
3506 break;
3507
3508 case TYPE_CODE_REF:
3509 case TYPE_CODE_RVALUE_REF:
3510 case TYPE_CODE_PTR:
3511 store_typed_address (buf, type, (CORE_ADDR) num);
3512 break;
3513
3514 case TYPE_CODE_FLT:
3515 case TYPE_CODE_DECFLOAT:
3516 target_float_from_ulongest (buf, type, num);
3517 break;
3518
3519 default:
3520 error (_("Unexpected type (%d) encountered "
3521 "for unsigned integer constant."),
3522 type->code ());
3523 }
3524 }
3525
3526
3527 /* Create a value of type TYPE that is zero, and return it. */
3528
3529 struct value *
3530 value_zero (struct type *type, enum lval_type lv)
3531 {
3532 struct value *val = allocate_value_lazy (type);
3533
3534 VALUE_LVAL (val) = (lv == lval_computed ? not_lval : lv);
3535 val->is_zero = true;
3536 return val;
3537 }
3538
3539 /* Convert C numbers into newly allocated values. */
3540
3541 struct value *
3542 value_from_longest (struct type *type, LONGEST num)
3543 {
3544 struct value *val = allocate_value (type);
3545
3546 pack_long (value_contents_raw (val).data (), type, num);
3547 return val;
3548 }
3549
3550
3551 /* Convert C unsigned numbers into newly allocated values. */
3552
3553 struct value *
3554 value_from_ulongest (struct type *type, ULONGEST num)
3555 {
3556 struct value *val = allocate_value (type);
3557
3558 pack_unsigned_long (value_contents_raw (val).data (), type, num);
3559
3560 return val;
3561 }
3562
3563
3564 /* Create a value representing a pointer of type TYPE to the address
3565 ADDR. */
3566
3567 struct value *
3568 value_from_pointer (struct type *type, CORE_ADDR addr)
3569 {
3570 struct value *val = allocate_value (type);
3571
3572 store_typed_address (value_contents_raw (val).data (),
3573 check_typedef (type), addr);
3574 return val;
3575 }
3576
3577 /* Create and return a value object of TYPE containing the value D. The
3578 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3579 it is converted to target format. */
3580
3581 struct value *
3582 value_from_host_double (struct type *type, double d)
3583 {
3584 struct value *value = allocate_value (type);
3585 gdb_assert (type->code () == TYPE_CODE_FLT);
3586 target_float_from_host_double (value_contents_raw (value).data (),
3587 value_type (value), d);
3588 return value;
3589 }
3590
3591 /* Create a value of type TYPE whose contents come from VALADDR, if it
3592 is non-null, and whose memory address (in the inferior) is
3593 ADDRESS. The type of the created value may differ from the passed
3594 type TYPE. Make sure to retrieve values new type after this call.
3595 Note that TYPE is not passed through resolve_dynamic_type; this is
3596 a special API intended for use only by Ada. */
3597
3598 struct value *
3599 value_from_contents_and_address_unresolved (struct type *type,
3600 const gdb_byte *valaddr,
3601 CORE_ADDR address)
3602 {
3603 struct value *v;
3604
3605 if (valaddr == NULL)
3606 v = allocate_value_lazy (type);
3607 else
3608 v = value_from_contents (type, valaddr);
3609 VALUE_LVAL (v) = lval_memory;
3610 set_value_address (v, address);
3611 return v;
3612 }
3613
3614 /* Create a value of type TYPE whose contents come from VALADDR, if it
3615 is non-null, and whose memory address (in the inferior) is
3616 ADDRESS. The type of the created value may differ from the passed
3617 type TYPE. Make sure to retrieve values new type after this call. */
3618
3619 struct value *
3620 value_from_contents_and_address (struct type *type,
3621 const gdb_byte *valaddr,
3622 CORE_ADDR address)
3623 {
3624 gdb::array_view<const gdb_byte> view;
3625 if (valaddr != nullptr)
3626 view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
3627 struct type *resolved_type = resolve_dynamic_type (type, view, address);
3628 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3629 struct value *v;
3630
3631 if (valaddr == NULL)
3632 v = allocate_value_lazy (resolved_type);
3633 else
3634 v = value_from_contents (resolved_type, valaddr);
3635 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3636 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3637 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3638 VALUE_LVAL (v) = lval_memory;
3639 set_value_address (v, address);
3640 return v;
3641 }
3642
3643 /* Create a value of type TYPE holding the contents CONTENTS.
3644 The new value is `not_lval'. */
3645
3646 struct value *
3647 value_from_contents (struct type *type, const gdb_byte *contents)
3648 {
3649 struct value *result;
3650
3651 result = allocate_value (type);
3652 memcpy (value_contents_raw (result).data (), contents, TYPE_LENGTH (type));
3653 return result;
3654 }
3655
3656 /* Extract a value from the history file. Input will be of the form
3657 $digits or $$digits. See block comment above 'write_dollar_variable'
3658 for details. */
3659
3660 struct value *
3661 value_from_history_ref (const char *h, const char **endp)
3662 {
3663 int index, len;
3664
3665 if (h[0] == '$')
3666 len = 1;
3667 else
3668 return NULL;
3669
3670 if (h[1] == '$')
3671 len = 2;
3672
3673 /* Find length of numeral string. */
3674 for (; isdigit (h[len]); len++)
3675 ;
3676
3677 /* Make sure numeral string is not part of an identifier. */
3678 if (h[len] == '_' || isalpha (h[len]))
3679 return NULL;
3680
3681 /* Now collect the index value. */
3682 if (h[1] == '$')
3683 {
3684 if (len == 2)
3685 {
3686 /* For some bizarre reason, "$$" is equivalent to "$$1",
3687 rather than to "$$0" as it ought to be! */
3688 index = -1;
3689 *endp += len;
3690 }
3691 else
3692 {
3693 char *local_end;
3694
3695 index = -strtol (&h[2], &local_end, 10);
3696 *endp = local_end;
3697 }
3698 }
3699 else
3700 {
3701 if (len == 1)
3702 {
3703 /* "$" is equivalent to "$0". */
3704 index = 0;
3705 *endp += len;
3706 }
3707 else
3708 {
3709 char *local_end;
3710
3711 index = strtol (&h[1], &local_end, 10);
3712 *endp = local_end;
3713 }
3714 }
3715
3716 return access_value_history (index);
3717 }
3718
3719 /* Get the component value (offset by OFFSET bytes) of a struct or
3720 union WHOLE. Component's type is TYPE. */
3721
3722 struct value *
3723 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3724 {
3725 struct value *v;
3726
3727 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3728 v = allocate_value_lazy (type);
3729 else
3730 {
3731 v = allocate_value (type);
3732 value_contents_copy (v, value_embedded_offset (v),
3733 whole, value_embedded_offset (whole) + offset,
3734 type_length_units (type));
3735 }
3736 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3737 set_value_component_location (v, whole);
3738
3739 return v;
3740 }
3741
3742 struct value *
3743 coerce_ref_if_computed (const struct value *arg)
3744 {
3745 const struct lval_funcs *funcs;
3746
3747 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3748 return NULL;
3749
3750 if (value_lval_const (arg) != lval_computed)
3751 return NULL;
3752
3753 funcs = value_computed_funcs (arg);
3754 if (funcs->coerce_ref == NULL)
3755 return NULL;
3756
3757 return funcs->coerce_ref (arg);
3758 }
3759
3760 /* Look at value.h for description. */
3761
3762 struct value *
3763 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3764 const struct type *original_type,
3765 struct value *original_value,
3766 CORE_ADDR original_value_address)
3767 {
3768 gdb_assert (original_type->is_pointer_or_reference ());
3769
3770 struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
3771 gdb::array_view<const gdb_byte> view;
3772 struct type *resolved_original_target_type
3773 = resolve_dynamic_type (original_target_type, view,
3774 original_value_address);
3775
3776 /* Re-adjust type. */
3777 deprecated_set_value_type (value, resolved_original_target_type);
3778
3779 /* Add embedding info. */
3780 set_value_enclosing_type (value, enc_type);
3781 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3782
3783 /* We may be pointing to an object of some derived type. */
3784 return value_full_object (value, NULL, 0, 0, 0);
3785 }
3786
3787 struct value *
3788 coerce_ref (struct value *arg)
3789 {
3790 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3791 struct value *retval;
3792 struct type *enc_type;
3793
3794 retval = coerce_ref_if_computed (arg);
3795 if (retval)
3796 return retval;
3797
3798 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3799 return arg;
3800
3801 enc_type = check_typedef (value_enclosing_type (arg));
3802 enc_type = TYPE_TARGET_TYPE (enc_type);
3803
3804 CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg).data ());
3805 retval = value_at_lazy (enc_type, addr);
3806 enc_type = value_type (retval);
3807 return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3808 arg, addr);
3809 }
3810
3811 struct value *
3812 coerce_array (struct value *arg)
3813 {
3814 struct type *type;
3815
3816 arg = coerce_ref (arg);
3817 type = check_typedef (value_type (arg));
3818
3819 switch (type->code ())
3820 {
3821 case TYPE_CODE_ARRAY:
3822 if (!type->is_vector () && current_language->c_style_arrays_p ())
3823 arg = value_coerce_array (arg);
3824 break;
3825 case TYPE_CODE_FUNC:
3826 arg = value_coerce_function (arg);
3827 break;
3828 }
3829 return arg;
3830 }
3831 \f
3832
3833 /* Return the return value convention that will be used for the
3834 specified type. */
3835
3836 enum return_value_convention
3837 struct_return_convention (struct gdbarch *gdbarch,
3838 struct value *function, struct type *value_type)
3839 {
3840 enum type_code code = value_type->code ();
3841
3842 if (code == TYPE_CODE_ERROR)
3843 error (_("Function return type unknown."));
3844
3845 /* Probe the architecture for the return-value convention. */
3846 return gdbarch_return_value (gdbarch, function, value_type,
3847 NULL, NULL, NULL);
3848 }
3849
3850 /* Return true if the function returning the specified type is using
3851 the convention of returning structures in memory (passing in the
3852 address as a hidden first parameter). */
3853
3854 int
3855 using_struct_return (struct gdbarch *gdbarch,
3856 struct value *function, struct type *value_type)
3857 {
3858 if (value_type->code () == TYPE_CODE_VOID)
3859 /* A void return value is never in memory. See also corresponding
3860 code in "print_return_value". */
3861 return 0;
3862
3863 return (struct_return_convention (gdbarch, function, value_type)
3864 != RETURN_VALUE_REGISTER_CONVENTION);
3865 }
3866
3867 /* Set the initialized field in a value struct. */
3868
3869 void
3870 set_value_initialized (struct value *val, int status)
3871 {
3872 val->initialized = status;
3873 }
3874
3875 /* Return the initialized field in a value struct. */
3876
3877 int
3878 value_initialized (const struct value *val)
3879 {
3880 return val->initialized;
3881 }
3882
3883 /* Helper for value_fetch_lazy when the value is a bitfield. */
3884
3885 static void
3886 value_fetch_lazy_bitfield (struct value *val)
3887 {
3888 gdb_assert (value_bitsize (val) != 0);
3889
3890 /* To read a lazy bitfield, read the entire enclosing value. This
3891 prevents reading the same block of (possibly volatile) memory once
3892 per bitfield. It would be even better to read only the containing
3893 word, but we have no way to record that just specific bits of a
3894 value have been fetched. */
3895 struct value *parent = value_parent (val);
3896
3897 if (value_lazy (parent))
3898 value_fetch_lazy (parent);
3899
3900 unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3901 value_contents_for_printing (parent).data (),
3902 value_offset (val), parent);
3903 }
3904
3905 /* Helper for value_fetch_lazy when the value is in memory. */
3906
3907 static void
3908 value_fetch_lazy_memory (struct value *val)
3909 {
3910 gdb_assert (VALUE_LVAL (val) == lval_memory);
3911
3912 CORE_ADDR addr = value_address (val);
3913 struct type *type = check_typedef (value_enclosing_type (val));
3914
3915 if (TYPE_LENGTH (type))
3916 read_value_memory (val, 0, value_stack (val),
3917 addr, value_contents_all_raw (val).data (),
3918 type_length_units (type));
3919 }
3920
3921 /* Helper for value_fetch_lazy when the value is in a register. */
3922
3923 static void
3924 value_fetch_lazy_register (struct value *val)
3925 {
3926 struct frame_info *next_frame;
3927 int regnum;
3928 struct type *type = check_typedef (value_type (val));
3929 struct value *new_val = val, *mark = value_mark ();
3930
3931 /* Offsets are not supported here; lazy register values must
3932 refer to the entire register. */
3933 gdb_assert (value_offset (val) == 0);
3934
3935 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3936 {
3937 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3938
3939 next_frame = frame_find_by_id (next_frame_id);
3940 regnum = VALUE_REGNUM (new_val);
3941
3942 gdb_assert (next_frame != NULL);
3943
3944 /* Convertible register routines are used for multi-register
3945 values and for interpretation in different types
3946 (e.g. float or int from a double register). Lazy
3947 register values should have the register's natural type,
3948 so they do not apply. */
3949 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3950 regnum, type));
3951
3952 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3953 Since a "->next" operation was performed when setting
3954 this field, we do not need to perform a "next" operation
3955 again when unwinding the register. That's why
3956 frame_unwind_register_value() is called here instead of
3957 get_frame_register_value(). */
3958 new_val = frame_unwind_register_value (next_frame, regnum);
3959
3960 /* If we get another lazy lval_register value, it means the
3961 register is found by reading it from NEXT_FRAME's next frame.
3962 frame_unwind_register_value should never return a value with
3963 the frame id pointing to NEXT_FRAME. If it does, it means we
3964 either have two consecutive frames with the same frame id
3965 in the frame chain, or some code is trying to unwind
3966 behind get_prev_frame's back (e.g., a frame unwind
3967 sniffer trying to unwind), bypassing its validations. In
3968 any case, it should always be an internal error to end up
3969 in this situation. */
3970 if (VALUE_LVAL (new_val) == lval_register
3971 && value_lazy (new_val)
3972 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3973 internal_error (__FILE__, __LINE__,
3974 _("infinite loop while fetching a register"));
3975 }
3976
3977 /* If it's still lazy (for instance, a saved register on the
3978 stack), fetch it. */
3979 if (value_lazy (new_val))
3980 value_fetch_lazy (new_val);
3981
3982 /* Copy the contents and the unavailability/optimized-out
3983 meta-data from NEW_VAL to VAL. */
3984 set_value_lazy (val, 0);
3985 value_contents_copy (val, value_embedded_offset (val),
3986 new_val, value_embedded_offset (new_val),
3987 type_length_units (type));
3988
3989 if (frame_debug)
3990 {
3991 struct gdbarch *gdbarch;
3992 struct frame_info *frame;
3993 frame = frame_find_by_id (VALUE_NEXT_FRAME_ID (val));
3994 frame = get_prev_frame_always (frame);
3995 regnum = VALUE_REGNUM (val);
3996 gdbarch = get_frame_arch (frame);
3997
3998 string_file debug_file;
3999 fprintf_unfiltered (&debug_file,
4000 "(frame=%d, regnum=%d(%s), ...) ",
4001 frame_relative_level (frame), regnum,
4002 user_reg_map_regnum_to_name (gdbarch, regnum));
4003
4004 fprintf_unfiltered (&debug_file, "->");
4005 if (value_optimized_out (new_val))
4006 {
4007 fprintf_unfiltered (&debug_file, " ");
4008 val_print_optimized_out (new_val, &debug_file);
4009 }
4010 else
4011 {
4012 int i;
4013 const gdb_byte *buf = value_contents (new_val).data ();
4014
4015 if (VALUE_LVAL (new_val) == lval_register)
4016 fprintf_unfiltered (&debug_file, " register=%d",
4017 VALUE_REGNUM (new_val));
4018 else if (VALUE_LVAL (new_val) == lval_memory)
4019 fprintf_unfiltered (&debug_file, " address=%s",
4020 paddress (gdbarch,
4021 value_address (new_val)));
4022 else
4023 fprintf_unfiltered (&debug_file, " computed");
4024
4025 fprintf_unfiltered (&debug_file, " bytes=");
4026 fprintf_unfiltered (&debug_file, "[");
4027 for (i = 0; i < register_size (gdbarch, regnum); i++)
4028 fprintf_unfiltered (&debug_file, "%02x", buf[i]);
4029 fprintf_unfiltered (&debug_file, "]");
4030 }
4031
4032 frame_debug_printf ("%s", debug_file.c_str ());
4033 }
4034
4035 /* Dispose of the intermediate values. This prevents
4036 watchpoints from trying to watch the saved frame pointer. */
4037 value_free_to_mark (mark);
4038 }
4039
4040 /* Load the actual content of a lazy value. Fetch the data from the
4041 user's process and clear the lazy flag to indicate that the data in
4042 the buffer is valid.
4043
4044 If the value is zero-length, we avoid calling read_memory, which
4045 would abort. We mark the value as fetched anyway -- all 0 bytes of
4046 it. */
4047
4048 void
4049 value_fetch_lazy (struct value *val)
4050 {
4051 gdb_assert (value_lazy (val));
4052 allocate_value_contents (val);
4053 /* A value is either lazy, or fully fetched. The
4054 availability/validity is only established as we try to fetch a
4055 value. */
4056 gdb_assert (val->optimized_out.empty ());
4057 gdb_assert (val->unavailable.empty ());
4058 if (val->is_zero)
4059 {
4060 /* Nothing. */
4061 }
4062 else if (value_bitsize (val))
4063 value_fetch_lazy_bitfield (val);
4064 else if (VALUE_LVAL (val) == lval_memory)
4065 value_fetch_lazy_memory (val);
4066 else if (VALUE_LVAL (val) == lval_register)
4067 value_fetch_lazy_register (val);
4068 else if (VALUE_LVAL (val) == lval_computed
4069 && value_computed_funcs (val)->read != NULL)
4070 value_computed_funcs (val)->read (val);
4071 else
4072 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4073
4074 set_value_lazy (val, 0);
4075 }
4076
4077 /* Implementation of the convenience function $_isvoid. */
4078
4079 static struct value *
4080 isvoid_internal_fn (struct gdbarch *gdbarch,
4081 const struct language_defn *language,
4082 void *cookie, int argc, struct value **argv)
4083 {
4084 int ret;
4085
4086 if (argc != 1)
4087 error (_("You must provide one argument for $_isvoid."));
4088
4089 ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
4090
4091 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4092 }
4093
4094 /* Implementation of the convenience function $_creal. Extracts the
4095 real part from a complex number. */
4096
4097 static struct value *
4098 creal_internal_fn (struct gdbarch *gdbarch,
4099 const struct language_defn *language,
4100 void *cookie, int argc, struct value **argv)
4101 {
4102 if (argc != 1)
4103 error (_("You must provide one argument for $_creal."));
4104
4105 value *cval = argv[0];
4106 type *ctype = check_typedef (value_type (cval));
4107 if (ctype->code () != TYPE_CODE_COMPLEX)
4108 error (_("expected a complex number"));
4109 return value_real_part (cval);
4110 }
4111
4112 /* Implementation of the convenience function $_cimag. Extracts the
4113 imaginary part from a complex number. */
4114
4115 static struct value *
4116 cimag_internal_fn (struct gdbarch *gdbarch,
4117 const struct language_defn *language,
4118 void *cookie, int argc,
4119 struct value **argv)
4120 {
4121 if (argc != 1)
4122 error (_("You must provide one argument for $_cimag."));
4123
4124 value *cval = argv[0];
4125 type *ctype = check_typedef (value_type (cval));
4126 if (ctype->code () != TYPE_CODE_COMPLEX)
4127 error (_("expected a complex number"));
4128 return value_imaginary_part (cval);
4129 }
4130
4131 #if GDB_SELF_TEST
4132 namespace selftests
4133 {
4134
4135 /* Test the ranges_contain function. */
4136
4137 static void
4138 test_ranges_contain ()
4139 {
4140 std::vector<range> ranges;
4141 range r;
4142
4143 /* [10, 14] */
4144 r.offset = 10;
4145 r.length = 5;
4146 ranges.push_back (r);
4147
4148 /* [20, 24] */
4149 r.offset = 20;
4150 r.length = 5;
4151 ranges.push_back (r);
4152
4153 /* [2, 6] */
4154 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4155 /* [9, 13] */
4156 SELF_CHECK (ranges_contain (ranges, 9, 5));
4157 /* [10, 11] */
4158 SELF_CHECK (ranges_contain (ranges, 10, 2));
4159 /* [10, 14] */
4160 SELF_CHECK (ranges_contain (ranges, 10, 5));
4161 /* [13, 18] */
4162 SELF_CHECK (ranges_contain (ranges, 13, 6));
4163 /* [14, 18] */
4164 SELF_CHECK (ranges_contain (ranges, 14, 5));
4165 /* [15, 18] */
4166 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4167 /* [16, 19] */
4168 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4169 /* [16, 21] */
4170 SELF_CHECK (ranges_contain (ranges, 16, 6));
4171 /* [21, 21] */
4172 SELF_CHECK (ranges_contain (ranges, 21, 1));
4173 /* [21, 25] */
4174 SELF_CHECK (ranges_contain (ranges, 21, 5));
4175 /* [26, 28] */
4176 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4177 }
4178
4179 /* Check that RANGES contains the same ranges as EXPECTED. */
4180
4181 static bool
4182 check_ranges_vector (gdb::array_view<const range> ranges,
4183 gdb::array_view<const range> expected)
4184 {
4185 return ranges == expected;
4186 }
4187
4188 /* Test the insert_into_bit_range_vector function. */
4189
4190 static void
4191 test_insert_into_bit_range_vector ()
4192 {
4193 std::vector<range> ranges;
4194
4195 /* [10, 14] */
4196 {
4197 insert_into_bit_range_vector (&ranges, 10, 5);
4198 static const range expected[] = {
4199 {10, 5}
4200 };
4201 SELF_CHECK (check_ranges_vector (ranges, expected));
4202 }
4203
4204 /* [10, 14] */
4205 {
4206 insert_into_bit_range_vector (&ranges, 11, 4);
4207 static const range expected = {10, 5};
4208 SELF_CHECK (check_ranges_vector (ranges, expected));
4209 }
4210
4211 /* [10, 14] [20, 24] */
4212 {
4213 insert_into_bit_range_vector (&ranges, 20, 5);
4214 static const range expected[] = {
4215 {10, 5},
4216 {20, 5},
4217 };
4218 SELF_CHECK (check_ranges_vector (ranges, expected));
4219 }
4220
4221 /* [10, 14] [17, 24] */
4222 {
4223 insert_into_bit_range_vector (&ranges, 17, 5);
4224 static const range expected[] = {
4225 {10, 5},
4226 {17, 8},
4227 };
4228 SELF_CHECK (check_ranges_vector (ranges, expected));
4229 }
4230
4231 /* [2, 8] [10, 14] [17, 24] */
4232 {
4233 insert_into_bit_range_vector (&ranges, 2, 7);
4234 static const range expected[] = {
4235 {2, 7},
4236 {10, 5},
4237 {17, 8},
4238 };
4239 SELF_CHECK (check_ranges_vector (ranges, expected));
4240 }
4241
4242 /* [2, 14] [17, 24] */
4243 {
4244 insert_into_bit_range_vector (&ranges, 9, 1);
4245 static const range expected[] = {
4246 {2, 13},
4247 {17, 8},
4248 };
4249 SELF_CHECK (check_ranges_vector (ranges, expected));
4250 }
4251
4252 /* [2, 14] [17, 24] */
4253 {
4254 insert_into_bit_range_vector (&ranges, 9, 1);
4255 static const range expected[] = {
4256 {2, 13},
4257 {17, 8},
4258 };
4259 SELF_CHECK (check_ranges_vector (ranges, expected));
4260 }
4261
4262 /* [2, 33] */
4263 {
4264 insert_into_bit_range_vector (&ranges, 4, 30);
4265 static const range expected = {2, 32};
4266 SELF_CHECK (check_ranges_vector (ranges, expected));
4267 }
4268 }
4269
4270 } /* namespace selftests */
4271 #endif /* GDB_SELF_TEST */
4272
4273 void _initialize_values ();
4274 void
4275 _initialize_values ()
4276 {
4277 cmd_list_element *show_convenience_cmd
4278 = add_cmd ("convenience", no_class, show_convenience, _("\
4279 Debugger convenience (\"$foo\") variables and functions.\n\
4280 Convenience variables are created when you assign them values;\n\
4281 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4282 \n\
4283 A few convenience variables are given values automatically:\n\
4284 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4285 \"$__\" holds the contents of the last address examined with \"x\"."
4286 #ifdef HAVE_PYTHON
4287 "\n\n\
4288 Convenience functions are defined via the Python API."
4289 #endif
4290 ), &showlist);
4291 add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4292
4293 add_cmd ("values", no_set_class, show_values, _("\
4294 Elements of value history around item number IDX (or last ten)."),
4295 &showlist);
4296
4297 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4298 Initialize a convenience variable if necessary.\n\
4299 init-if-undefined VARIABLE = EXPRESSION\n\
4300 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4301 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4302 VARIABLE is already initialized."));
4303
4304 add_prefix_cmd ("function", no_class, function_command, _("\
4305 Placeholder command for showing help on convenience functions."),
4306 &functionlist, 0, &cmdlist);
4307
4308 add_internal_function ("_isvoid", _("\
4309 Check whether an expression is void.\n\
4310 Usage: $_isvoid (expression)\n\
4311 Return 1 if the expression is void, zero otherwise."),
4312 isvoid_internal_fn, NULL);
4313
4314 add_internal_function ("_creal", _("\
4315 Extract the real part of a complex number.\n\
4316 Usage: $_creal (expression)\n\
4317 Return the real part of a complex number, the type depends on the\n\
4318 type of a complex number."),
4319 creal_internal_fn, NULL);
4320
4321 add_internal_function ("_cimag", _("\
4322 Extract the imaginary part of a complex number.\n\
4323 Usage: $_cimag (expression)\n\
4324 Return the imaginary part of a complex number, the type depends on the\n\
4325 type of a complex number."),
4326 cimag_internal_fn, NULL);
4327
4328 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4329 class_support, &max_value_size, _("\
4330 Set maximum sized value gdb will load from the inferior."), _("\
4331 Show maximum sized value gdb will load from the inferior."), _("\
4332 Use this to control the maximum size, in bytes, of a value that gdb\n\
4333 will load from the inferior. Setting this value to 'unlimited'\n\
4334 disables checking.\n\
4335 Setting this does not invalidate already allocated values, it only\n\
4336 prevents future values, larger than this size, from being allocated."),
4337 set_max_value_size,
4338 show_max_value_size,
4339 &setlist, &showlist);
4340 set_show_commands vsize_limit
4341 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4342 &max_value_size, _("\
4343 Set the maximum number of bytes allowed in a variable-size object."), _("\
4344 Show the maximum number of bytes allowed in a variable-size object."), _("\
4345 Attempts to access an object whose size is not a compile-time constant\n\
4346 and exceeds this limit will cause an error."),
4347 NULL, NULL, &setlist, &showlist);
4348 deprecate_cmd (vsize_limit.set, "set max-value-size");
4349
4350 #if GDB_SELF_TEST
4351 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4352 selftests::register_test ("insert_into_bit_range_vector",
4353 selftests::test_insert_into_bit_range_vector);
4354 #endif
4355 }
4356
4357 /* See value.h. */
4358
4359 void
4360 finalize_values ()
4361 {
4362 all_values.clear ();
4363 }