gdb/value.c  (binutils-gdb.git)
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "gdbsupport/selftest.h"
45 #include "gdbsupport/array-view.h"
46 #include "cli/cli-style.h"
47 #include "expop.h"
48 #include "inferior.h"
49
50 /* Definition of a user function. */
51 struct internal_function
52 {
53 /* The name of the function. It is a bit odd to have this in the
54 function itself -- the user might use a differently-named
55 convenience variable to hold the function. */
56 char *name;
57
58 /* The handler. */
59 internal_function_fn handler;
60
61 /* User data for the handler. */
62 void *cookie;
63 };
64
65 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
66
67 struct range
68 {
69 /* Lowest offset in the range. */
70 LONGEST offset;
71
72 /* Length of the range. */
73 LONGEST length;
74
75 /* Returns true if THIS is strictly less than OTHER, useful for
76 searching. We keep ranges sorted by offset and coalesce
77 overlapping and contiguous ranges, so this just compares the
78 starting offset. */
79
80 bool operator< (const range &other) const
81 {
82 return offset < other.offset;
83 }
84
85 /* Returns true if THIS is equal to OTHER. */
86 bool operator== (const range &other) const
87 {
88 return offset == other.offset && length == other.length;
89 }
90 };
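/* For example, a range with offset == 8 and length == 4 covers
   offsets 8, 9, 10 and 11, but not 12, since the interval is
   half-open. */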
91
92 /* Returns true if the ranges defined by [offset1, offset1+len1) and
93 [offset2, offset2+len2) overlap. */
94
95 static int
96 ranges_overlap (LONGEST offset1, LONGEST len1,
97 LONGEST offset2, LONGEST len2)
98 {
99 ULONGEST h, l;
100
101 l = std::max (offset1, offset2);
102 h = std::min (offset1 + len1, offset2 + len2);
103 return (l < h);
104 }
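/* Worked example: for the half-open ranges [4, 10) and [8, 16),
   l = max (4, 8) = 8 and h = min (10, 16) = 10, so l < h and the
   ranges overlap. For [4, 8) and [8, 16), l == h == 8 and they do
   not overlap: merely touching end points does not count. */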
105
106 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 OFFSET+LENGTH). */
108
109 static int
110 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
111 LONGEST length)
112 {
113 range what;
114
115 what.offset = offset;
116 what.length = length;
117
118 /* We keep ranges sorted by offset and coalesce overlapping and
119 contiguous ranges, so to check if a range list contains a given
120 range, we can do a binary search for the position the given range
121 would be inserted if we only considered the starting OFFSET of
122 ranges. We call that position I. Since we also have LENGTH to
123 care for (this is a range after all), we need to check whether the
124 _previous_ range overlaps the given range R. E.g.,
125
126         R
127         |---|
128      |---|          |---|  |------| ... |--|
129      0              1      2            N
130
131      I=1
132
133 In the case above, the binary search would return `I=1', meaning,
134 this OFFSET should be inserted at position 1, and the current
135 position 1 should be pushed further (and become 2). But, `0'
136 overlaps with R.
137
138 Then we need to check whether R overlaps the range at position I
139 itself. E.g.,
140
141                   R
142                   |---|
143      |---|          |---|  |------| ... |--|
144      0              1      2            N
145
146      I=1
147 */
148
149
150 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
151
152 if (i > ranges.begin ())
153 {
154 const struct range &bef = *(i - 1);
155
156 if (ranges_overlap (bef.offset, bef.length, offset, length))
157 return 1;
158 }
159
160 if (i < ranges.end ())
161 {
162 const struct range &r = *i;
163
164 if (ranges_overlap (r.offset, r.length, offset, length))
165 return 1;
166 }
167
168 return 0;
169 }
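/* Usage sketch (illustrative values): with RANGES holding [0, 8)
   and [16, 24), ranges_contain (ranges, 6, 4) returns 1 because
   [6, 10) overlaps the first range, while ranges_contain (ranges,
   8, 8) returns 0 because [8, 16) falls entirely in the gap
   between the two. */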
170
171 static struct cmd_list_element *functionlist;
172
173 /* Note that the fields in this structure are arranged to save a bit
174 of memory. */
175
176 struct value
177 {
178 explicit value (struct type *type_)
179 : modifiable (1),
180 lazy (1),
181 initialized (1),
182 stack (0),
183 is_zero (false),
184 type (type_),
185 enclosing_type (type_)
186 {
187 }
188
189 ~value ()
190 {
191 if (VALUE_LVAL (this) == lval_computed)
192 {
193 const struct lval_funcs *funcs = location.computed.funcs;
194
195 if (funcs->free_closure)
196 funcs->free_closure (this);
197 }
198 else if (VALUE_LVAL (this) == lval_xcallable)
199 delete location.xm_worker;
200 }
201
202 DISABLE_COPY_AND_ASSIGN (value);
203
204 /* Type of value; either not an lval, or one of the various
205 different possible kinds of lval. */
206 enum lval_type lval = not_lval;
207
208 /* Is it modifiable? Only relevant if lval != not_lval. */
209 unsigned int modifiable : 1;
210
211 /* If zero, contents of this value are in the contents field. If
212 nonzero, contents are in inferior. If the lval field is lval_memory,
213 the contents are in inferior memory at location.address plus offset.
214 The lval field may also be lval_register.
215
216 WARNING: This field is used by the code which handles watchpoints
217 (see breakpoint.c) to decide whether a particular value can be
218 watched by hardware watchpoints. If the lazy flag is set for
219 some member of a value chain, it is assumed that this member of
220 the chain doesn't need to be watched as part of watching the
221 value itself. This is how GDB avoids watching the entire struct
222 or array when the user wants to watch a single struct member or
223 array element. If you ever change the way lazy flag is set and
224 reset, be sure to consider this use as well! */
225 unsigned int lazy : 1;
226
227 /* If value is a variable, whether it is initialized or not. */
228 unsigned int initialized : 1;
229
230 /* If value is from the stack. If this is set, read_stack will be
231 used instead of read_memory to enable extra caching. */
232 unsigned int stack : 1;
233
234 /* True if this is a zero value, created by 'value_zero'; false
235 otherwise. */
236 bool is_zero : 1;
237
238 /* Location of value (if lval). */
239 union
240 {
241 /* If lval == lval_memory, this is the address in the inferior */
242 CORE_ADDR address;
243
244 /* If lval == lval_register, the value is from a register. */
245 struct
246 {
247 /* Register number. */
248 int regnum;
249 /* Frame ID of "next" frame to which a register value is relative.
250 If the register value is found relative to frame F, then the
251 frame id of F->next will be stored in next_frame_id. */
252 struct frame_id next_frame_id;
253 } reg;
254
255 /* Pointer to internal variable. */
256 struct internalvar *internalvar;
257
258 /* Pointer to xmethod worker. */
259 struct xmethod_worker *xm_worker;
260
261 /* If lval == lval_computed, this is a set of function pointers
262 to use to access and describe the value, and a closure pointer
263 for them to use. */
264 struct
265 {
266 /* Functions to call. */
267 const struct lval_funcs *funcs;
268
269 /* Closure for those functions to use. */
270 void *closure;
271 } computed;
272 } location {};
273
274 /* Describes offset of a value within lval of a structure in target
275 addressable memory units. Note also the member embedded_offset
276 below. */
277 LONGEST offset = 0;
278
279 /* Only used for bitfields; number of bits contained in them. */
280 LONGEST bitsize = 0;
281
282 /* Only used for bitfields; position of start of field. For
283 little-endian targets, it is the position of the LSB. For
284 big-endian targets, it is the position of the MSB. */
285 LONGEST bitpos = 0;
286
287 /* The number of references to this value. When a value is created,
288 the value chain holds a reference, so REFERENCE_COUNT is 1. If
289 release_value is called, this value is removed from the chain but
290 the caller of release_value now has a reference to this value.
291 The caller must arrange for a call to value_decref later. */
292 int reference_count = 1;
293
294 /* Only used for bitfields; the containing value. This allows a
295 single read from the target when displaying multiple
296 bitfields. */
297 value_ref_ptr parent;
298
299 /* Type of the value. */
300 struct type *type;
301
302 /* If a value represents a C++ object, then the `type' field gives
303 the object's compile-time type. If the object actually belongs
304 to some class derived from `type', perhaps with other base
305 classes and additional members, then `type' is just a subobject
306 of the real thing, and the full object is probably larger than
307 `type' would suggest.
308
309 If `type' is a dynamic class (i.e. one with a vtable), then GDB
310 can actually determine the object's run-time type by looking at
311 the run-time type information in the vtable. When this
312 information is available, we may elect to read in the entire
313 object, for several reasons:
314
315 - When printing the value, the user would probably rather see the
316 full object, not just the limited portion apparent from the
317 compile-time type.
318
319 - If `type' has virtual base classes, then even printing `type'
320 alone may require reaching outside the `type' portion of the
321 object to wherever the virtual base class has been stored.
322
323 When we store the entire object, `enclosing_type' is the run-time
324 type -- the complete object -- and `embedded_offset' is the
325 offset of `type' within that larger type, in target addressable memory
326 units. The value_contents() macro takes `embedded_offset' into account,
327 so most GDB code continues to see the `type' portion of the value, just
328 as the inferior would.
329
330 If `type' is a pointer to an object, then `enclosing_type' is a
331 pointer to the object's run-time type, and `pointed_to_offset' is
332 the offset in target addressable memory units from the full object
333 to the pointed-to object -- that is, the value `embedded_offset' would
334 have if we followed the pointer and fetched the complete object.
335 (I don't really see the point. Why not just determine the
336 run-time type when you indirect, and avoid the special case? The
337 contents don't matter until you indirect anyway.)
338
339 If we're not doing anything fancy, `enclosing_type' is equal to
340 `type', and `embedded_offset' is zero, so everything works
341 normally. */
342 struct type *enclosing_type;
343 LONGEST embedded_offset = 0;
344 LONGEST pointed_to_offset = 0;
345
346 /* Actual contents of the value. Target byte-order. NULL or not
347 valid if lazy is nonzero. */
348 gdb::unique_xmalloc_ptr<gdb_byte> contents;
349
350 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
351 rather than available, since the common and default case is for a
352 value to be available. This is filled in at value read time.
353 The unavailable ranges are tracked in bits. Note that a contents
354 bit that has been optimized out doesn't really exist in the
355 program, so it can't be marked unavailable either. */
356 std::vector<range> unavailable;
357
358 /* Likewise, but for optimized out contents (a chunk of the value of
359 a variable that does not actually exist in the program). If LVAL
360 is lval_register, this is a register ($pc, $sp, etc., never a
361 program variable) that has not been saved in the frame. Not-saved
362 registers and optimized-out program variables are treated pretty
363 much the same, except that not-saved registers have a
364 different string representation and related error strings. */
365 std::vector<range> optimized_out;
366 };
367
368 /* See value.h. */
369
370 struct gdbarch *
371 get_value_arch (const struct value *value)
372 {
373 return value_type (value)->arch ();
374 }
375
376 int
377 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
378 {
379 gdb_assert (!value->lazy);
380
381 return !ranges_contain (value->unavailable, offset, length);
382 }
383
384 int
385 value_bytes_available (const struct value *value,
386 LONGEST offset, LONGEST length)
387 {
388 return value_bits_available (value,
389 offset * TARGET_CHAR_BIT,
390 length * TARGET_CHAR_BIT);
391 }
392
393 int
394 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
395 {
396 gdb_assert (!value->lazy);
397
398 return ranges_contain (value->optimized_out, bit_offset, bit_length);
399 }
400
401 int
402 value_entirely_available (struct value *value)
403 {
404 /* We can only tell whether the whole value is available when we try
405 to read it. */
406 if (value->lazy)
407 value_fetch_lazy (value);
408
409 if (value->unavailable.empty ())
410 return 1;
411 return 0;
412 }
413
414 /* Returns true if VALUE is entirely covered by RANGES. If the value
415 is lazy, it'll be read now; note that RANGES is a reference because
416 reading the value might change its contents. */
417
418 static int
419 value_entirely_covered_by_range_vector (struct value *value,
420 const std::vector<range> &ranges)
421 {
422 /* We can only tell whether the whole value is optimized out /
423 unavailable when we try to read it. */
424 if (value->lazy)
425 value_fetch_lazy (value);
426
427 if (ranges.size () == 1)
428 {
429 const struct range &t = ranges[0];
430
431 if (t.offset == 0
432 && t.length == (TARGET_CHAR_BIT
433 * TYPE_LENGTH (value_enclosing_type (value))))
434 return 1;
435 }
436
437 return 0;
438 }
439
440 int
441 value_entirely_unavailable (struct value *value)
442 {
443 return value_entirely_covered_by_range_vector (value, value->unavailable);
444 }
445
446 int
447 value_entirely_optimized_out (struct value *value)
448 {
449 return value_entirely_covered_by_range_vector (value, value->optimized_out);
450 }
451
452 /* Insert into the vector pointed to by VECTORP the bit range starting at
453 OFFSET bits, and extending for the next LENGTH bits. */
454
455 static void
456 insert_into_bit_range_vector (std::vector<range> *vectorp,
457 LONGEST offset, LONGEST length)
458 {
459 range newr;
460
461 /* Insert the range sorted. If there's overlap or the new range
462 would be contiguous with an existing range, merge. */
463
464 newr.offset = offset;
465 newr.length = length;
466
467 /* Do a binary search for the position the given range would be
468 inserted if we only considered the starting OFFSET of ranges.
469 Call that position I. Since we also have LENGTH to care for
470 (this is a range after all), we need to check whether the
471 _previous_ range overlaps the new range. E.g., calling R the new range:
472
473 #1 - overlaps with previous
474
475      R
476      |-...-|
477    |--|          |---|  |------| ... |--|
478    0             1      2            N
479
480    I=1
481
482 In the case #1 above, the binary search would return `I=1',
483 meaning, this OFFSET should be inserted at position 1, and the
484 current position 1 should be pushed further (and become 2). But,
485 note that `0' overlaps with R, so we want to merge them.
486
487 A similar consideration needs to be taken if the new range would
488 be contiguous with the previous range:
489
490 #2 - contiguous with previous
491
492        R
493        |-...-|
494    |--|          |---|  |------| ... |--|
495    0             1      2            N
496
497    I=1
498
499 If there's no overlap with the previous range, as in:
500
501 #3 - not overlapping and not contiguous
502
503          R
504          |-...-|
505    |--|          |---|  |------| ... |--|
506    0             1      2            N
507
508    I=1
509
510 or if I is 0:
511
512 #4 - R is the range with lowest offset
513
514    R
515    |-...-|
516             |--|          |---|  |------| ... |--|
517             0             1      2            N
518
519    I=0
520
521 ... we just push the new range to I.
522
523 All four of the cases above need to consider that the new range may
524 also overlap several of the ranges that follow, or that R may be
525 contiguous with the following range, and merge. E.g.,
526
527 #5 - overlapping following ranges
528
529    R
530    |---------------------------|
531    |--|          |---|  |------| ... |--|
532    0             1      2            N
533
534    I=0
535
536 or:
537
538          R
539          |--------|
540    |--|          |---|  |------| ... |--|
541    0             1      2            N
542
543    I=1
544
545 */
546
547 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
548 if (i > vectorp->begin ())
549 {
550 struct range &bef = *(i - 1);
551
552 if (ranges_overlap (bef.offset, bef.length, offset, length))
553 {
554 /* #1 */
555 ULONGEST l = std::min (bef.offset, offset);
556 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
557
558 bef.offset = l;
559 bef.length = h - l;
560 i--;
561 }
562 else if (offset == bef.offset + bef.length)
563 {
564 /* #2 */
565 bef.length += length;
566 i--;
567 }
568 else
569 {
570 /* #3 */
571 i = vectorp->insert (i, newr);
572 }
573 }
574 else
575 {
576 /* #4 */
577 i = vectorp->insert (i, newr);
578 }
579
580 /* Check whether the ranges following the one we've just added or
581 touched can be folded in (#5 above). */
582 if (i != vectorp->end () && i + 1 < vectorp->end ())
583 {
584 int removed = 0;
585 auto next = i + 1;
586
587 /* Get the range we just touched. */
588 struct range &t = *i;
589 removed = 0;
590
591 i = next;
592 for (; i < vectorp->end (); i++)
593 {
594 struct range &r = *i;
595 if (r.offset <= t.offset + t.length)
596 {
597 ULONGEST l, h;
598
599 l = std::min (t.offset, r.offset);
600 h = std::max (t.offset + t.length, r.offset + r.length);
601
602 t.offset = l;
603 t.length = h - l;
604
605 removed++;
606 }
607 else
608 {
609 /* If we couldn't merge this one, we won't be able to
610 merge following ones either, since the ranges are
611 always sorted by OFFSET. */
612 break;
613 }
614 }
615
616 if (removed != 0)
617 vectorp->erase (next, next + removed);
618 }
619 }
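/* Worked example (illustrative values): starting from an empty
   vector, inserting (offset 0, length 8), then (16, 8), then
   (8, 4) leaves { [0, 12), [16, 24) }: the third range is
   contiguous with [0, 8) and merges into it (case #2), but does
   not reach [16, 24). A further insertion of (10, 8), i.e.
   [10, 18), overlaps both remaining ranges and collapses the
   vector into the single range [0, 24) (cases #1 and #5). */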
620
621 void
622 mark_value_bits_unavailable (struct value *value,
623 LONGEST offset, LONGEST length)
624 {
625 insert_into_bit_range_vector (&value->unavailable, offset, length);
626 }
627
628 void
629 mark_value_bytes_unavailable (struct value *value,
630 LONGEST offset, LONGEST length)
631 {
632 mark_value_bits_unavailable (value,
633 offset * TARGET_CHAR_BIT,
634 length * TARGET_CHAR_BIT);
635 }
636
637 /* Find the first range in RANGES that overlaps the range defined by
638 OFFSET and LENGTH, starting at element POS in the RANGES vector.
639 Returns the index into RANGES where such an overlapping range was
640 found, or -1 if none was found. */
641
642 static int
643 find_first_range_overlap (const std::vector<range> *ranges, int pos,
644 LONGEST offset, LONGEST length)
645 {
646 int i;
647
648 for (i = pos; i < ranges->size (); i++)
649 {
650 const range &r = (*ranges)[i];
651 if (ranges_overlap (r.offset, r.length, offset, length))
652 return i;
653 }
654
655 return -1;
656 }
657
658 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
659 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
660 return non-zero.
661
662 It must always be the case that:
663 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
664
665 It is assumed that memory can be accessed from:
666 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
667 to:
668 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
669 / TARGET_CHAR_BIT) */
670 static int
671 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
672 const gdb_byte *ptr2, size_t offset2_bits,
673 size_t length_bits)
674 {
675 gdb_assert (offset1_bits % TARGET_CHAR_BIT
676 == offset2_bits % TARGET_CHAR_BIT);
677
678 if (offset1_bits % TARGET_CHAR_BIT != 0)
679 {
680 size_t bits;
681 gdb_byte mask, b1, b2;
682
683 /* The offset from the base pointers PTR1 and PTR2 is not a complete
684 number of bytes. A number of bits up to either the next exact
685 byte boundary, or LENGTH_BITS (whichever comes first) will be
686 compared. */
687 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
688 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
689 mask = (1 << bits) - 1;
690
691 if (length_bits < bits)
692 {
693 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
694 bits = length_bits;
695 }
696
697 /* Now load the two bytes and mask off the bits we care about. */
698 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
699 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
700
701 if (b1 != b2)
702 return 1;
703
704 /* Now update the length and offsets to take account of the bits
705 we've just compared. */
706 length_bits -= bits;
707 offset1_bits += bits;
708 offset2_bits += bits;
709 }
710
711 if (length_bits % TARGET_CHAR_BIT != 0)
712 {
713 size_t bits;
714 size_t o1, o2;
715 gdb_byte mask, b1, b2;
716
717 /* The length is not an exact number of bytes. After the previous
718 IF block, the offsets are now byte aligned, or the
719 length is zero (in which case this code is not reached). Compare
720 a number of bits at the end of the region, starting from an exact
721 byte boundary. */
722 bits = length_bits % TARGET_CHAR_BIT;
723 o1 = offset1_bits + length_bits - bits;
724 o2 = offset2_bits + length_bits - bits;
725
726 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
727 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
728
729 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
730 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
731
732 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
733 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
734
735 if (b1 != b2)
736 return 1;
737
738 length_bits -= bits;
739 }
740
741 if (length_bits > 0)
742 {
743 /* We've now taken care of any stray "bits" at the start or end of
744 the region to compare; the remainder can be covered with a simple
745 memcmp. */
746 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
747 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
748 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
749
750 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
751 ptr2 + offset2_bits / TARGET_CHAR_BIT,
752 length_bits / TARGET_CHAR_BIT);
753 }
754
755 /* Length is zero, regions match. */
756 return 0;
757 }
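/* Worked example, assuming TARGET_CHAR_BIT == 8: a call with
   OFFSET1_BITS == 4, OFFSET2_BITS == 20 and LENGTH_BITS == 12
   first masks both bytes with 0x0f and compares the low nibble of
   PTR1[0] against the low nibble of PTR2[2], then finishes with a
   one-byte memcmp of PTR1[1] against PTR2[3]. */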
758
759 /* Helper struct for find_first_range_overlap_and_match and
760 value_contents_bits_eq. Keep track of which slot of a given ranges
761 vector we last looked at. */
762
763 struct ranges_and_idx
764 {
765 /* The ranges. */
766 const std::vector<range> *ranges;
767
768 /* The range we've last found in RANGES. Given ranges are sorted,
769 we can start the next lookup here. */
770 int idx;
771 };
772
773 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
774 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
775 ranges starting at OFFSET2 bits. Return true if the ranges match
776 and fill in *L and *H with the overlapping window relative to
777 (both) OFFSET1 or OFFSET2. */
778
779 static int
780 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
781 struct ranges_and_idx *rp2,
782 LONGEST offset1, LONGEST offset2,
783 LONGEST length, ULONGEST *l, ULONGEST *h)
784 {
785 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
786 offset1, length);
787 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
788 offset2, length);
789
790 if (rp1->idx == -1 && rp2->idx == -1)
791 {
792 *l = length;
793 *h = length;
794 return 1;
795 }
796 else if (rp1->idx == -1 || rp2->idx == -1)
797 return 0;
798 else
799 {
800 const range *r1, *r2;
801 ULONGEST l1, h1;
802 ULONGEST l2, h2;
803
804 r1 = &(*rp1->ranges)[rp1->idx];
805 r2 = &(*rp2->ranges)[rp2->idx];
806
807 /* Get the unavailable windows intersected by the incoming
808 ranges. The first and last ranges that overlap the argument
809 range may be wider than the incoming argument ranges. */
810 l1 = std::max (offset1, r1->offset);
811 h1 = std::min (offset1 + length, r1->offset + r1->length);
812
813 l2 = std::max (offset2, r2->offset);
814 h2 = std::min (offset2 + length, r2->offset + r2->length);
815
816 /* Make them relative to the respective start offsets, so we can
817 compare them for equality. */
818 l1 -= offset1;
819 h1 -= offset1;
820
821 l2 -= offset2;
822 h2 -= offset2;
823
824 /* Different ranges, no match. */
825 if (l1 != l2 || h1 != h2)
826 return 0;
827
828 *h = h1;
829 *l = l1;
830 return 1;
831 }
832 }
833
834 /* Helper function for value_contents_eq. The only difference is that
835 this function is bit rather than byte based.
836
837 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
838 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
839 Return true if the available bits match. */
840
841 static bool
842 value_contents_bits_eq (const struct value *val1, int offset1,
843 const struct value *val2, int offset2,
844 int length)
845 {
846 /* Each array element corresponds to a ranges source (unavailable,
847 optimized out). '1' is for VAL1, '2' for VAL2. */
848 struct ranges_and_idx rp1[2], rp2[2];
849
850 /* See function description in value.h. */
851 gdb_assert (!val1->lazy && !val2->lazy);
852
853 /* We shouldn't be trying to compare past the end of the values. */
854 gdb_assert (offset1 + length
855 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
856 gdb_assert (offset2 + length
857 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
858
859 memset (&rp1, 0, sizeof (rp1));
860 memset (&rp2, 0, sizeof (rp2));
861 rp1[0].ranges = &val1->unavailable;
862 rp2[0].ranges = &val2->unavailable;
863 rp1[1].ranges = &val1->optimized_out;
864 rp2[1].ranges = &val2->optimized_out;
865
866 while (length > 0)
867 {
868 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
869 int i;
870
871 for (i = 0; i < 2; i++)
872 {
873 ULONGEST l_tmp, h_tmp;
874
875 /* The contents only match if the invalid/unavailable
876 contents ranges match as well. */
877 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
878 offset1, offset2, length,
879 &l_tmp, &h_tmp))
880 return false;
881
882 /* We're interested in the lowest/first range found. */
883 if (i == 0 || l_tmp < l)
884 {
885 l = l_tmp;
886 h = h_tmp;
887 }
888 }
889
890 /* Compare the available/valid contents. */
891 if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
892 val2->contents.get (), offset2, l) != 0)
893 return false;
894
895 length -= h;
896 offset1 += h;
897 offset2 += h;
898 }
899
900 return true;
901 }
902
903 bool
904 value_contents_eq (const struct value *val1, LONGEST offset1,
905 const struct value *val2, LONGEST offset2,
906 LONGEST length)
907 {
908 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
909 val2, offset2 * TARGET_CHAR_BIT,
910 length * TARGET_CHAR_BIT);
911 }
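/* Usage sketch (hypothetical caller): comparing two non-lazy
   values in full could look like

     value_contents_eq (val1, 0, val2, 0,
                        TYPE_LENGTH (value_type (val1)));

   which is true only if the valid contents are equal and the
   unavailable and optimized-out ranges line up as well. */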
912
913
914 /* The value-history records all the values printed by print commands
915 during this session. */
916
917 static std::vector<value_ref_ptr> value_history;
918
919 \f
920 /* List of all value objects currently allocated
921 (except for those released by calls to release_value).
922 This is so they can be freed after each command. */
923
924 static std::vector<value_ref_ptr> all_values;
925
926 /* Allocate a lazy value for type TYPE. Its actual content is
927 "lazily" allocated too: the content field of the return value is
928 NULL; it will be allocated when it is fetched from the target. */
929
930 struct value *
931 allocate_value_lazy (struct type *type)
932 {
933 struct value *val;
934
935 /* Call check_typedef on our type to make sure that, if TYPE
936 is a TYPE_CODE_TYPEDEF, its length is set to the length
937 of the target type instead of zero. However, we do not
938 replace the typedef type by the target type, because we want
939 to keep the typedef in order to be able to set the VAL's type
940 description correctly. */
941 check_typedef (type);
942
943 val = new struct value (type);
944
945 /* Values start out on the all_values chain. */
946 all_values.emplace_back (val);
947
948 return val;
949 }
950
951 /* The maximum size, in bytes, that GDB will try to allocate for a value.
952 The initial value of 64k was not selected for any specific reason; it is
953 just a reasonable starting point. */
954
955 static int max_value_size = 65536; /* 64k bytes */
956
957 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
958 LONGEST, otherwise GDB will not be able to parse integer values from the
959 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
960 be unable to parse "set max-value-size 2".
961
962 As we want a consistent GDB experience across hosts with different sizes
963 of LONGEST, this arbitrary minimum value was selected; so long as it
964 is bigger than LONGEST on all GDB-supported hosts, we're fine. */
965
966 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
967 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
968
969 /* Implement the "set max-value-size" command. */
970
971 static void
972 set_max_value_size (const char *args, int from_tty,
973 struct cmd_list_element *c)
974 {
975 gdb_assert (max_value_size == -1 || max_value_size >= 0);
976
977 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
978 {
979 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
980 error (_("max-value-size set too low, increasing to %d bytes"),
981 max_value_size);
982 }
983 }
984
985 /* Implement the "show max-value-size" command. */
986
987 static void
988 show_max_value_size (struct ui_file *file, int from_tty,
989 struct cmd_list_element *c, const char *value)
990 {
991 if (max_value_size == -1)
992 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
993 else
994 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
995 max_value_size);
996 }
997
998 /* Called before we attempt to allocate or reallocate a buffer for the
999 contents of a value. TYPE is the type of the value for which we are
1000 allocating the buffer. If the buffer is too large (based on the user
1001 controllable setting) then throw an error. If this function returns
1002 then we should attempt to allocate the buffer. */
1003
1004 static void
1005 check_type_length_before_alloc (const struct type *type)
1006 {
1007 ULONGEST length = TYPE_LENGTH (type);
1008
1009 if (max_value_size > -1 && length > max_value_size)
1010 {
1011 if (type->name () != NULL)
1012 error (_("value of type `%s' requires %s bytes, which is more "
1013 "than max-value-size"), type->name (), pulongest (length));
1014 else
1015 error (_("value requires %s bytes, which is more than "
1016 "max-value-size"), pulongest (length));
1017 }
1018 }
1019
1020 /* Allocate the contents of VAL if it has not been allocated yet. */
1021
1022 static void
1023 allocate_value_contents (struct value *val)
1024 {
1025 if (!val->contents)
1026 {
1027 check_type_length_before_alloc (val->enclosing_type);
1028 val->contents.reset
1029 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1030 }
1031 }
1032
1033 /* Allocate a value and its contents for type TYPE. */
1034
1035 struct value *
1036 allocate_value (struct type *type)
1037 {
1038 struct value *val = allocate_value_lazy (type);
1039
1040 allocate_value_contents (val);
1041 val->lazy = 0;
1042 return val;
1043 }
1044
1045 /* Allocate a value that has the correct length
1046 for COUNT repetitions of type TYPE. */
1047
1048 struct value *
1049 allocate_repeat_value (struct type *type, int count)
1050 {
1051 /* Despite the fact that we are really creating an array of TYPE here, we
1052 use the string lower bound as the array lower bound. This seems to
1053 work fine for now. */
1054 int low_bound = current_language->string_lower_bound ();
1055 /* FIXME-type-allocation: need a way to free this type when we are
1056 done with it. */
1057 struct type *array_type
1058 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1059
1060 return allocate_value (array_type);
1061 }
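/* Usage sketch (hypothetical caller): for a C-like language, whose
   string lower bound is 0, allocate_repeat_value (int_type, 4)
   returns a non-lazy value whose type is a four-element array of
   int indexed from 0 to 3. */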
1062
1063 struct value *
1064 allocate_computed_value (struct type *type,
1065 const struct lval_funcs *funcs,
1066 void *closure)
1067 {
1068 struct value *v = allocate_value_lazy (type);
1069
1070 VALUE_LVAL (v) = lval_computed;
1071 v->location.computed.funcs = funcs;
1072 v->location.computed.closure = closure;
1073
1074 return v;
1075 }
1076
1077 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1078
1079 struct value *
1080 allocate_optimized_out_value (struct type *type)
1081 {
1082 struct value *retval = allocate_value_lazy (type);
1083
1084 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1085 set_value_lazy (retval, 0);
1086 return retval;
1087 }
1088
1089 /* Accessor methods. */
1090
1091 struct type *
1092 value_type (const struct value *value)
1093 {
1094 return value->type;
1095 }
1096 void
1097 deprecated_set_value_type (struct value *value, struct type *type)
1098 {
1099 value->type = type;
1100 }
1101
1102 LONGEST
1103 value_offset (const struct value *value)
1104 {
1105 return value->offset;
1106 }
1107 void
1108 set_value_offset (struct value *value, LONGEST offset)
1109 {
1110 value->offset = offset;
1111 }
1112
1113 LONGEST
1114 value_bitpos (const struct value *value)
1115 {
1116 return value->bitpos;
1117 }
1118 void
1119 set_value_bitpos (struct value *value, LONGEST bit)
1120 {
1121 value->bitpos = bit;
1122 }
1123
1124 LONGEST
1125 value_bitsize (const struct value *value)
1126 {
1127 return value->bitsize;
1128 }
1129 void
1130 set_value_bitsize (struct value *value, LONGEST bit)
1131 {
1132 value->bitsize = bit;
1133 }
1134
1135 struct value *
1136 value_parent (const struct value *value)
1137 {
1138 return value->parent.get ();
1139 }
1140
1141 /* See value.h. */
1142
1143 void
1144 set_value_parent (struct value *value, struct value *parent)
1145 {
1146 value->parent = value_ref_ptr::new_reference (parent);
1147 }
1148
1149 gdb::array_view<gdb_byte>
1150 value_contents_raw (struct value *value)
1151 {
1152 struct gdbarch *arch = get_value_arch (value);
1153 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1154
1155 allocate_value_contents (value);
1156
1157 ULONGEST length = TYPE_LENGTH (value_type (value));
1158 return gdb::make_array_view
1159 (value->contents.get () + value->embedded_offset * unit_size, length);
1160 }
1161
1162 gdb::array_view<gdb_byte>
1163 value_contents_all_raw (struct value *value)
1164 {
1165 allocate_value_contents (value);
1166
1167 ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
1168 return gdb::make_array_view (value->contents.get (), length);
1169 }
1170
1171 struct type *
1172 value_enclosing_type (const struct value *value)
1173 {
1174 return value->enclosing_type;
1175 }
1176
1177 /* Look at value.h for description. */
1178
1179 struct type *
1180 value_actual_type (struct value *value, int resolve_simple_types,
1181 int *real_type_found)
1182 {
1183 struct value_print_options opts;
1184 struct type *result;
1185
1186 get_user_print_options (&opts);
1187
1188 if (real_type_found)
1189 *real_type_found = 0;
1190 result = value_type (value);
1191 if (opts.objectprint)
1192 {
1193 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1194 fetch its rtti type. */
1195 if (result->is_pointer_or_reference ()
1196 && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
1197 == TYPE_CODE_STRUCT)
1198 && !value_optimized_out (value))
1199 {
1200 struct type *real_type;
1201
1202 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1203 if (real_type)
1204 {
1205 if (real_type_found)
1206 *real_type_found = 1;
1207 result = real_type;
1208 }
1209 }
1210 else if (resolve_simple_types)
1211 {
1212 if (real_type_found)
1213 *real_type_found = 1;
1214 result = value_enclosing_type (value);
1215 }
1216 }
1217
1218 return result;
1219 }
1220
1221 void
1222 error_value_optimized_out (void)
1223 {
1224 throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
1225 }
1226
1227 static void
1228 require_not_optimized_out (const struct value *value)
1229 {
1230 if (!value->optimized_out.empty ())
1231 {
1232 if (value->lval == lval_register)
1233 throw_error (OPTIMIZED_OUT_ERROR,
1234 _("register has not been saved in frame"));
1235 else
1236 error_value_optimized_out ();
1237 }
1238 }
1239
1240 static void
1241 require_available (const struct value *value)
1242 {
1243 if (!value->unavailable.empty ())
1244 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1245 }
1246
1247 gdb::array_view<const gdb_byte>
1248 value_contents_for_printing (struct value *value)
1249 {
1250 if (value->lazy)
1251 value_fetch_lazy (value);
1252
1253 ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
1254 return gdb::make_array_view (value->contents.get (), length);
1255 }
1256
1257 gdb::array_view<const gdb_byte>
1258 value_contents_for_printing_const (const struct value *value)
1259 {
1260 gdb_assert (!value->lazy);
1261
1262 ULONGEST length = TYPE_LENGTH (value_enclosing_type (value));
1263 return gdb::make_array_view (value->contents.get (), length);
1264 }
1265
1266 gdb::array_view<const gdb_byte>
1267 value_contents_all (struct value *value)
1268 {
1269 gdb::array_view<const gdb_byte> result = value_contents_for_printing (value);
1270 require_not_optimized_out (value);
1271 require_available (value);
1272 return result;
1273 }
1274
1275 /* Copy the ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1276 SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted. */
1277
1278 static void
1279 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1280 const std::vector<range> &src_range, int src_bit_offset,
1281 int bit_length)
1282 {
1283 for (const range &r : src_range)
1284 {
1285 ULONGEST h, l;
1286
1287 l = std::max (r.offset, (LONGEST) src_bit_offset);
1288 h = std::min (r.offset + r.length,
1289 (LONGEST) src_bit_offset + bit_length);
1290
1291 if (l < h)
1292 insert_into_bit_range_vector (dst_range,
1293 dst_bit_offset + (l - src_bit_offset),
1294 h - l);
1295 }
1296 }
1297
1298 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1299 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1300
1301 static void
1302 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1303 const struct value *src, int src_bit_offset,
1304 int bit_length)
1305 {
1306 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1307 src->unavailable, src_bit_offset,
1308 bit_length);
1309 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1310 src->optimized_out, src_bit_offset,
1311 bit_length);
1312 }
1313
1314 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1315 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1316 contents, starting at DST_OFFSET. If unavailable contents are
1317 being copied from SRC, the corresponding DST contents are marked
1318 unavailable accordingly. Neither DST nor SRC may be lazy
1319 values.
1320
1321 It is assumed the contents of DST in the [DST_OFFSET,
1322 DST_OFFSET+LENGTH) range are wholly available. */
1323
1324 static void
1325 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1326 struct value *src, LONGEST src_offset, LONGEST length)
1327 {
1328 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1329 struct gdbarch *arch = get_value_arch (src);
1330 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1331
1332 /* A lazy DST would make this copy operation useless, since as
1333 soon as DST's contents were un-lazied (by a later value_contents
1334 call, say), the contents would be overwritten. A lazy SRC would
1335 mean we'd be copying garbage. */
1336 gdb_assert (!dst->lazy && !src->lazy);
1337
1338 /* The overwritten DST range gets unavailability ORed in, not
1339 replaced. Remember to implement replacing if that ever
1340 turns out to be necessary. */
1341 gdb_assert (value_bytes_available (dst, dst_offset, length));
1342 gdb_assert (!value_bits_any_optimized_out (dst,
1343 TARGET_CHAR_BIT * dst_offset,
1344 TARGET_CHAR_BIT * length));
1345
1346 /* Copy the data. */
1347 gdb::array_view<gdb_byte> dst_contents
1348 = value_contents_all_raw (dst).slice (dst_offset * unit_size,
1349 length * unit_size);
1350 gdb::array_view<const gdb_byte> src_contents
1351 = value_contents_all_raw (src).slice (src_offset * unit_size,
1352 length * unit_size);
1353 copy (src_contents, dst_contents);
1354
1355 /* Copy the meta-data, adjusted. */
1356 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1357 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1358 bit_length = length * unit_size * HOST_CHAR_BIT;
1359
1360 value_ranges_copy_adjusted (dst, dst_bit_offset,
1361 src, src_bit_offset,
1362 bit_length);
1363 }
1364
1365 /* Copy LENGTH bytes of SRC value's (all) contents
1366 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1367 (all) contents, starting at DST_OFFSET. If unavailable contents
1368 are being copied from SRC, the corresponding DST contents are
1369 marked unavailable accordingly. DST must not be lazy. If SRC is
1370 lazy, it will be fetched now.
1371
1372 It is assumed the contents of DST in the [DST_OFFSET,
1373 DST_OFFSET+LENGTH) range are wholly available. */
1374
1375 void
1376 value_contents_copy (struct value *dst, LONGEST dst_offset,
1377 struct value *src, LONGEST src_offset, LONGEST length)
1378 {
1379 if (src->lazy)
1380 value_fetch_lazy (src);
1381
1382 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1383 }
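/* Usage sketch (hypothetical call): value_contents_copy (dst, 0,
   src, 8, 4) copies the 4 addressable units starting at unit 8 of
   SRC's contents into the first 4 units of DST, and marks the
   corresponding parts of DST unavailable or optimized out wherever
   the copied SRC units were. */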
1384
1385 int
1386 value_lazy (const struct value *value)
1387 {
1388 return value->lazy;
1389 }
1390
1391 void
1392 set_value_lazy (struct value *value, int val)
1393 {
1394 value->lazy = val;
1395 }
1396
1397 int
1398 value_stack (const struct value *value)
1399 {
1400 return value->stack;
1401 }
1402
1403 void
1404 set_value_stack (struct value *value, int val)
1405 {
1406 value->stack = val;
1407 }
1408
1409 gdb::array_view<const gdb_byte>
1410 value_contents (struct value *value)
1411 {
1412 gdb::array_view<const gdb_byte> result = value_contents_writeable (value);
1413 require_not_optimized_out (value);
1414 require_available (value);
1415 return result;
1416 }
1417
1418 gdb::array_view<gdb_byte>
1419 value_contents_writeable (struct value *value)
1420 {
1421 if (value->lazy)
1422 value_fetch_lazy (value);
1423 return value_contents_raw (value);
1424 }
1425
1426 int
1427 value_optimized_out (struct value *value)
1428 {
1429 if (value->lazy)
1430 {
1431 /* See if we can compute the result without fetching the
1432 value. */
1433 if (VALUE_LVAL (value) == lval_memory)
1434 return false;
1435 else if (VALUE_LVAL (value) == lval_computed)
1436 {
1437 const struct lval_funcs *funcs = value->location.computed.funcs;
1438
1439 if (funcs->is_optimized_out != nullptr)
1440 return funcs->is_optimized_out (value);
1441 }
1442
1443 /* Fall back to fetching. */
1444 try
1445 {
1446 value_fetch_lazy (value);
1447 }
1448 catch (const gdb_exception_error &ex)
1449 {
1450 switch (ex.error)
1451 {
1452 case MEMORY_ERROR:
1453 case OPTIMIZED_OUT_ERROR:
1454 case NOT_AVAILABLE_ERROR:
1455 /* These can normally happen when we try to access an
1456 optimized out or unavailable register, either in a
1457 physical register or spilled to memory. */
1458 break;
1459 default:
1460 throw;
1461 }
1462 }
1463 }
1464
1465 return !value->optimized_out.empty ();
1466 }
1467
1468 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes and
1469 extending for the following LENGTH bytes. */
1470
1471 void
1472 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1473 {
1474 mark_value_bits_optimized_out (value,
1475 offset * TARGET_CHAR_BIT,
1476 length * TARGET_CHAR_BIT);
1477 }
1478
1479 /* See value.h. */
1480
1481 void
1482 mark_value_bits_optimized_out (struct value *value,
1483 LONGEST offset, LONGEST length)
1484 {
1485 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1486 }
1487
1488 int
1489 value_bits_synthetic_pointer (const struct value *value,
1490 LONGEST offset, LONGEST length)
1491 {
1492 if (value->lval != lval_computed
1493 || !value->location.computed.funcs->check_synthetic_pointer)
1494 return 0;
1495 return value->location.computed.funcs->check_synthetic_pointer (value,
1496 offset,
1497 length);
1498 }
1499
1500 LONGEST
1501 value_embedded_offset (const struct value *value)
1502 {
1503 return value->embedded_offset;
1504 }
1505
1506 void
1507 set_value_embedded_offset (struct value *value, LONGEST val)
1508 {
1509 value->embedded_offset = val;
1510 }
1511
1512 LONGEST
1513 value_pointed_to_offset (const struct value *value)
1514 {
1515 return value->pointed_to_offset;
1516 }
1517
1518 void
1519 set_value_pointed_to_offset (struct value *value, LONGEST val)
1520 {
1521 value->pointed_to_offset = val;
1522 }
1523
1524 const struct lval_funcs *
1525 value_computed_funcs (const struct value *v)
1526 {
1527 gdb_assert (value_lval_const (v) == lval_computed);
1528
1529 return v->location.computed.funcs;
1530 }
1531
1532 void *
1533 value_computed_closure (const struct value *v)
1534 {
1535 gdb_assert (v->lval == lval_computed);
1536
1537 return v->location.computed.closure;
1538 }
1539
1540 enum lval_type *
1541 deprecated_value_lval_hack (struct value *value)
1542 {
1543 return &value->lval;
1544 }
1545
1546 enum lval_type
1547 value_lval_const (const struct value *value)
1548 {
1549 return value->lval;
1550 }
1551
1552 CORE_ADDR
1553 value_address (const struct value *value)
1554 {
1555 if (value->lval != lval_memory)
1556 return 0;
1557 if (value->parent != NULL)
1558 return value_address (value->parent.get ()) + value->offset;
1559 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1560 {
1561 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1562 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1563 }
1564
1565 return value->location.address + value->offset;
1566 }
1567
1568 CORE_ADDR
1569 value_raw_address (const struct value *value)
1570 {
1571 if (value->lval != lval_memory)
1572 return 0;
1573 return value->location.address;
1574 }
1575
1576 void
1577 set_value_address (struct value *value, CORE_ADDR addr)
1578 {
1579 gdb_assert (value->lval == lval_memory);
1580 value->location.address = addr;
1581 }
1582
1583 struct internalvar **
1584 deprecated_value_internalvar_hack (struct value *value)
1585 {
1586 return &value->location.internalvar;
1587 }
1588
1589 struct frame_id *
1590 deprecated_value_next_frame_id_hack (struct value *value)
1591 {
1592 gdb_assert (value->lval == lval_register);
1593 return &value->location.reg.next_frame_id;
1594 }
1595
1596 int *
1597 deprecated_value_regnum_hack (struct value *value)
1598 {
1599 gdb_assert (value->lval == lval_register);
1600 return &value->location.reg.regnum;
1601 }
1602
1603 int
1604 deprecated_value_modifiable (const struct value *value)
1605 {
1606 return value->modifiable;
1607 }
1608 \f
1609 /* Return a mark in the value chain. All values allocated after the
1610 mark is obtained (except for those released) are subject to being freed
1611 if a subsequent value_free_to_mark is passed the mark. */
1612 struct value *
1613 value_mark (void)
1614 {
1615 if (all_values.empty ())
1616 return nullptr;
1617 return all_values.back ().get ();
1618 }
1619
1620 /* See value.h. */
1621
1622 void
1623 value_incref (struct value *val)
1624 {
1625 val->reference_count++;
1626 }
1627
1628 /* Release a reference to VAL, which was acquired with value_incref.
1629 This function is also called to deallocate values from the value
1630 chain. */
1631
1632 void
1633 value_decref (struct value *val)
1634 {
1635 if (val != nullptr)
1636 {
1637 gdb_assert (val->reference_count > 0);
1638 val->reference_count--;
1639 if (val->reference_count == 0)
1640 delete val;
1641 }
1642 }
1643
1644 /* Free all values allocated since MARK was obtained by value_mark
1645 (except for those released). */
1646 void
1647 value_free_to_mark (const struct value *mark)
1648 {
1649 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1650 if (iter == all_values.end ())
1651 all_values.clear ();
1652 else
1653 all_values.erase (iter + 1, all_values.end ());
1654 }
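/* Usage sketch (hypothetical caller): temporary values are
   typically scoped with the mark/free pair,

     struct value *mark = value_mark ();
     ... allocate and use temporary values ...
     value_free_to_mark (mark);

   while a value that must outlive the command is taken off the
   chain with release_value, which hands back an owning
   reference. */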
1655
1656 /* Remove VAL from the chain all_values
1657 so it will not be freed automatically. */
1658
1659 value_ref_ptr
1660 release_value (struct value *val)
1661 {
1662 if (val == nullptr)
1663 return value_ref_ptr ();
1664
1665 std::vector<value_ref_ptr>::reverse_iterator iter;
1666 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1667 {
1668 if (*iter == val)
1669 {
1670 value_ref_ptr result = *iter;
1671 all_values.erase (iter.base () - 1);
1672 return result;
1673 }
1674 }
1675
1676 /* We must always return an owned reference. Normally this happens
1677 because we transfer the reference from the value chain, but in
1678 this case the value was not on the chain. */
1679 return value_ref_ptr::new_reference (val);
1680 }
1681
1682 /* See value.h. */
1683
1684 std::vector<value_ref_ptr>
1685 value_release_to_mark (const struct value *mark)
1686 {
1687 std::vector<value_ref_ptr> result;
1688
1689 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1690 if (iter == all_values.end ())
1691 std::swap (result, all_values);
1692 else
1693 {
1694 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1695 all_values.erase (iter + 1, all_values.end ());
1696 }
1697 std::reverse (result.begin (), result.end ());
1698 return result;
1699 }
1700
1701 /* Return a copy of the value ARG.
1702 It contains the same contents, for the same memory address,
1703 but it's a different block of storage. */
1704
1705 struct value *
1706 value_copy (struct value *arg)
1707 {
1708 struct type *encl_type = value_enclosing_type (arg);
1709 struct value *val;
1710
1711 if (value_lazy (arg))
1712 val = allocate_value_lazy (encl_type);
1713 else
1714 val = allocate_value (encl_type);
1715 val->type = arg->type;
1716 VALUE_LVAL (val) = VALUE_LVAL (arg);
1717 val->location = arg->location;
1718 val->offset = arg->offset;
1719 val->bitpos = arg->bitpos;
1720 val->bitsize = arg->bitsize;
1721 val->lazy = arg->lazy;
1722 val->embedded_offset = value_embedded_offset (arg);
1723 val->pointed_to_offset = arg->pointed_to_offset;
1724 val->modifiable = arg->modifiable;
1725 val->stack = arg->stack;
1726 val->is_zero = arg->is_zero;
1727 val->initialized = arg->initialized;
1728
1729 if (!value_lazy (val))
1730 copy (value_contents_all_raw (arg),
1731 value_contents_all_raw (val));
1732
1733 val->unavailable = arg->unavailable;
1734 val->optimized_out = arg->optimized_out;
1735 val->parent = arg->parent;
1736 if (VALUE_LVAL (val) == lval_computed)
1737 {
1738 const struct lval_funcs *funcs = val->location.computed.funcs;
1739
1740 if (funcs->copy_closure)
1741 val->location.computed.closure = funcs->copy_closure (val);
1742 }
1743 return val;
1744 }
1745
1746 /* Return a "const" and/or "volatile" qualified version of the value V.
1747 If CNST is true, then the returned value will be qualified with
1748 "const".
1749 If VOLTL is true, then the returned value will be qualified with
1750 "volatile". */
1751
1752 struct value *
1753 make_cv_value (int cnst, int voltl, struct value *v)
1754 {
1755 struct type *val_type = value_type (v);
1756 struct type *enclosing_type = value_enclosing_type (v);
1757 struct value *cv_val = value_copy (v);
1758
1759 deprecated_set_value_type (cv_val,
1760 make_cv_type (cnst, voltl, val_type, NULL));
1761 set_value_enclosing_type (cv_val,
1762 make_cv_type (cnst, voltl, enclosing_type, NULL));
1763
1764 return cv_val;
1765 }
1766
1767 /* Return a version of ARG that is non-lvalue. */
1768
1769 struct value *
1770 value_non_lval (struct value *arg)
1771 {
1772 if (VALUE_LVAL (arg) != not_lval)
1773 {
1774 struct type *enc_type = value_enclosing_type (arg);
1775 struct value *val = allocate_value (enc_type);
1776
1777 copy (value_contents_all (arg), value_contents_all_raw (val));
1778 val->type = arg->type;
1779 set_value_embedded_offset (val, value_embedded_offset (arg));
1780 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1781 return val;
1782 }
1783 return arg;
1784 }
1785
1786 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1787
1788 void
1789 value_force_lval (struct value *v, CORE_ADDR addr)
1790 {
1791 gdb_assert (VALUE_LVAL (v) == not_lval);
1792
1793 write_memory (addr, value_contents_raw (v).data (), TYPE_LENGTH (value_type (v)));
1794 v->lval = lval_memory;
1795 v->location.address = addr;
1796 }
1797
1798 void
1799 set_value_component_location (struct value *component,
1800 const struct value *whole)
1801 {
1802 struct type *type;
1803
1804 gdb_assert (whole->lval != lval_xcallable);
1805
1806 if (whole->lval == lval_internalvar)
1807 VALUE_LVAL (component) = lval_internalvar_component;
1808 else
1809 VALUE_LVAL (component) = whole->lval;
1810
1811 component->location = whole->location;
1812 if (whole->lval == lval_computed)
1813 {
1814 const struct lval_funcs *funcs = whole->location.computed.funcs;
1815
1816 if (funcs->copy_closure)
1817 component->location.computed.closure = funcs->copy_closure (whole);
1818 }
1819
1820 /* If the WHOLE value has a dynamically resolved location property then
1821 update the address of the COMPONENT. */
1822 type = value_type (whole);
1823 if (NULL != TYPE_DATA_LOCATION (type)
1824 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1825 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1826
1827 /* Similarly, if the COMPONENT value has a dynamically resolved location
1828 property then update its address. */
1829 type = value_type (component);
1830 if (NULL != TYPE_DATA_LOCATION (type)
1831 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1832 {
1833 /* If the COMPONENT has a dynamic location, and is an
1834 lval_internalvar_component, then we change it to a lval_memory.
1835
1836 Usually a component of an internalvar is created non-lazy, and has
1837 its content immediately copied from the parent internalvar.
1838 However, for components with a dynamic location, the content of
1839 the component is not contained within the parent, but is instead
1840 accessed indirectly. Further, the component will be created as a
1841 lazy value.
1842
1843 By changing the type of the component to lval_memory we ensure
1844 that value_fetch_lazy can successfully load the component.
1845
1846 This solution isn't ideal, but a real fix would require values to
1847 carry around both the parent value contents, and the contents of
1848 any dynamic fields within the parent. This is a substantial
1849 change to how values work in GDB. */
1850 if (VALUE_LVAL (component) == lval_internalvar_component)
1851 {
1852 gdb_assert (value_lazy (component));
1853 VALUE_LVAL (component) = lval_memory;
1854 }
1855 else
1856 gdb_assert (VALUE_LVAL (component) == lval_memory);
1857 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1858 }
1859 }
1860
1861 /* Access to the value history. */
1862
1863 /* Record a new value in the value history.
1864 Returns the absolute history index of the entry. */
1865
1866 int
1867 record_latest_value (struct value *val)
1868 {
1869 /* We don't want this value to have anything to do with the inferior anymore.
1870 In particular, "set $1 = 50" should not affect the variable from which
1871 the value was taken, and fast watchpoints should be able to assume that
1872 a value on the value history never changes. */
1873 if (value_lazy (val))
1874 value_fetch_lazy (val);
1875 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1876 from. This is a bit dubious, because then *&$1 does not just return $1
1877 but the current contents of that location. c'est la vie... */
1878 val->modifiable = 0;
1879
1880 value_history.push_back (release_value (val));
1881
1882 return value_history.size ();
1883 }
1884
1885 /* Return a copy of the value in the history with sequence number NUM. */
1886
1887 struct value *
1888 access_value_history (int num)
1889 {
1890 int absnum = num;
1891
1892 if (absnum <= 0)
1893 absnum += value_history.size ();
1894
1895 if (absnum <= 0)
1896 {
1897 if (num == 0)
1898 error (_("The history is empty."));
1899 else if (num == 1)
1900 error (_("There is only one value in the history."));
1901 else
1902 error (_("History does not go back to $$%d."), -num);
1903 }
1904 if (absnum > value_history.size ())
1905 error (_("History has not yet reached $%d."), absnum);
1906
1907 absnum--;
1908
1909 return value_copy (value_history[absnum].get ());
1910 }
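/* Illustration: a non-positive NUM counts back from the end of the
   history, so with three values recorded, access_value_history (3)
   and access_value_history (0) both return a copy of $3, while
   access_value_history (-2) returns a copy of $1 (i.e. $$2). */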
1911
1912 static void
1913 show_values (const char *num_exp, int from_tty)
1914 {
1915 int i;
1916 struct value *val;
1917 static int num = 1;
1918
1919 if (num_exp)
1920 {
1921 /* "show values +" should print from the stored position.
1922 "show values <exp>" should print around value number <exp>. */
1923 if (num_exp[0] != '+' || num_exp[1] != '\0')
1924 num = parse_and_eval_long (num_exp) - 5;
1925 }
1926 else
1927 {
1928 /* "show values" means print the last 10 values. */
1929 num = value_history.size () - 9;
1930 }
1931
1932 if (num <= 0)
1933 num = 1;
1934
1935 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1936 {
1937 struct value_print_options opts;
1938
1939 val = access_value_history (i);
1940 printf_filtered (("$%d = "), i);
1941 get_user_print_options (&opts);
1942 value_print (val, gdb_stdout, &opts);
1943 printf_filtered (("\n"));
1944 }
1945
1946 /* The next "show values +" should start after what we just printed. */
1947 num += 10;
1948
1949 /* Hitting just return after this command should do the same thing as
1950 "show values +". If num_exp is null, this is unnecessary, since
1951 "show values +" is not useful after "show values". */
1952 if (from_tty && num_exp)
1953 set_repeat_arguments ("+");
1954 }
1955 \f
1956 enum internalvar_kind
1957 {
1958 /* The internal variable is empty. */
1959 INTERNALVAR_VOID,
1960
1961 /* The value of the internal variable is provided directly as
1962 a GDB value object. */
1963 INTERNALVAR_VALUE,
1964
1965 /* A fresh value is computed via a call-back routine on every
1966 access to the internal variable. */
1967 INTERNALVAR_MAKE_VALUE,
1968
1969 /* The internal variable holds a GDB internal convenience function. */
1970 INTERNALVAR_FUNCTION,
1971
1972 /* The variable holds an integer value. */
1973 INTERNALVAR_INTEGER,
1974
1975 /* The variable holds a GDB-provided string. */
1976 INTERNALVAR_STRING,
1977 };
1978
1979 union internalvar_data
1980 {
1981 /* A value object used with INTERNALVAR_VALUE. */
1982 struct value *value;
1983
1984 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1985 struct
1986 {
1987 /* The functions to call. */
1988 const struct internalvar_funcs *functions;
1989
1990 /* The function's user-data. */
1991 void *data;
1992 } make_value;
1993
1994 /* The internal function used with INTERNALVAR_FUNCTION. */
1995 struct
1996 {
1997 struct internal_function *function;
1998 /* True if this is the canonical name for the function. */
1999 int canonical;
2000 } fn;
2001
2002 /* An integer value used with INTERNALVAR_INTEGER. */
2003 struct
2004 {
2005 /* If type is non-NULL, it will be used as the type to generate
2006 a value for this internal variable. If type is NULL, a default
2007 integer type for the architecture is used. */
2008 struct type *type;
2009 LONGEST val;
2010 } integer;
2011
2012 /* A string value used with INTERNALVAR_STRING. */
2013 char *string;
2014 };
2015
2016 /* Internal variables. These are variables within the debugger
2017 that hold values assigned by debugger commands.
2018 The user refers to them with a '$' prefix
2019 that does not appear in the variable names stored internally. */
2020
2021 struct internalvar
2022 {
2023 struct internalvar *next;
2024 char *name;
2025
2026 /* We support various different kinds of content of an internal variable.
2027 enum internalvar_kind specifies the kind, and union internalvar_data
2028 provides the data associated with this particular kind. */
2029
2030 enum internalvar_kind kind;
2031
2032 union internalvar_data u;
2033 };
2034
2035 static struct internalvar *internalvars;
2036
2037 /* If the variable does not already exist, create it and give it the
2038 value given.  If no value is given, the default is zero. */
2039 static void
2040 init_if_undefined_command (const char* args, int from_tty)
2041 {
2042 struct internalvar *intvar = nullptr;
2043
2044 /* Parse the expression - this is taken from set_command(). */
2045 expression_up expr = parse_expression (args);
2046
2047 /* Validate the expression.
2048 Was the expression an assignment?
2049 Or even an expression at all? */
2050 if (expr->first_opcode () != BINOP_ASSIGN)
2051 error (_("Init-if-undefined requires an assignment expression."));
2052
2053 /* Extract the variable from the parsed expression. */
2054 expr::assign_operation *assign
2055 = dynamic_cast<expr::assign_operation *> (expr->op.get ());
2056 if (assign != nullptr)
2057 {
2058 expr::operation *lhs = assign->get_lhs ();
2059 expr::internalvar_operation *ivarop
2060 = dynamic_cast<expr::internalvar_operation *> (lhs);
2061 if (ivarop != nullptr)
2062 intvar = ivarop->get_internalvar ();
2063 }
2064
2065 if (intvar == nullptr)
2066 error (_("The first parameter to init-if-undefined "
2067 "should be a GDB variable."));
2068
2069 /* Only evaluate the expression if the lvalue is void.
2070 This may still fail if the expression is invalid. */
2071 if (intvar->kind == INTERNALVAR_VOID)
2072 evaluate_expression (expr.get ());
2073 }
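/* Usage illustration (hedged): from the GDB prompt,

     (gdb) init-if-undefined $answer = 42

   assigns 42 to $answer only if $answer is still void, i.e. has never
   been given a value; repeating the command later leaves an existing
   value untouched, because the assignment is only evaluated when the
   internalvar's kind is INTERNALVAR_VOID.  */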
2074
2075
2076 /* Look up an internal variable with name NAME. NAME should not
2077 normally include a dollar sign.
2078
2079 If the specified internal variable does not exist,
2080 the return value is NULL. */
2081
2082 struct internalvar *
2083 lookup_only_internalvar (const char *name)
2084 {
2085 struct internalvar *var;
2086
2087 for (var = internalvars; var; var = var->next)
2088 if (strcmp (var->name, name) == 0)
2089 return var;
2090
2091 return NULL;
2092 }
2093
2094 /* Complete NAME by comparing it to the names of internal
2095 variables. */
2096
2097 void
2098 complete_internalvar (completion_tracker &tracker, const char *name)
2099 {
2100 struct internalvar *var;
2101 int len;
2102
2103 len = strlen (name);
2104
2105 for (var = internalvars; var; var = var->next)
2106 if (strncmp (var->name, name, len) == 0)
2107 tracker.add_completion (make_unique_xstrdup (var->name));
2108 }
2109
2110 /* Create an internal variable with name NAME and with a void value.
2111 NAME should not normally include a dollar sign. */
2112
2113 struct internalvar *
2114 create_internalvar (const char *name)
2115 {
2116 struct internalvar *var = XNEW (struct internalvar);
2117
2118 var->name = xstrdup (name);
2119 var->kind = INTERNALVAR_VOID;
2120 var->next = internalvars;
2121 internalvars = var;
2122 return var;
2123 }
2124
2125 /* Create an internal variable with name NAME and register FUN as the
2126 function that value_of_internalvar uses to create a value whenever
2127 this variable is referenced. NAME should not normally include a
2128 dollar sign. DATA is passed uninterpreted to FUN when it is
2129 called. CLEANUP, if not NULL, is called when the internal variable
2130 is destroyed. It is passed DATA as its only argument. */
2131
2132 struct internalvar *
2133 create_internalvar_type_lazy (const char *name,
2134 const struct internalvar_funcs *funcs,
2135 void *data)
2136 {
2137 struct internalvar *var = create_internalvar (name);
2138
2139 var->kind = INTERNALVAR_MAKE_VALUE;
2140 var->u.make_value.functions = funcs;
2141 var->u.make_value.data = data;
2142 return var;
2143 }
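/* Caller sketch (hypothetical; the names "make_my_value" and
   "my_funcs" are illustrative, and the initializer assumes the
   make_value / compile_to_ax / destroy field order declared in
   value.h):

     static struct value *
     make_my_value (struct gdbarch *gdbarch, struct internalvar *var,
                    void *data)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 7);
     }

     static const struct internalvar_funcs my_funcs =
       { make_my_value, NULL, NULL };

     create_internalvar_type_lazy ("_seven", &my_funcs, NULL);

   Each evaluation of "$_seven" then calls make_my_value afresh via
   value_of_internalvar, rather than returning a stored value.  */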
2144
2145 /* See documentation in value.h. */
2146
2147 int
2148 compile_internalvar_to_ax (struct internalvar *var,
2149 struct agent_expr *expr,
2150 struct axs_value *value)
2151 {
2152 if (var->kind != INTERNALVAR_MAKE_VALUE
2153 || var->u.make_value.functions->compile_to_ax == NULL)
2154 return 0;
2155
2156 var->u.make_value.functions->compile_to_ax (var, expr, value,
2157 var->u.make_value.data);
2158 return 1;
2159 }
2160
2161 /* Look up an internal variable with name NAME. NAME should not
2162 normally include a dollar sign.
2163
2164 If the specified internal variable does not exist,
2165 one is created, with a void value. */
2166
2167 struct internalvar *
2168 lookup_internalvar (const char *name)
2169 {
2170 struct internalvar *var;
2171
2172 var = lookup_only_internalvar (name);
2173 if (var)
2174 return var;
2175
2176 return create_internalvar (name);
2177 }
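/* Illustration (hedged): the difference from lookup_only_internalvar
   is creation on miss.  For a name that has never been used,

     lookup_only_internalvar ("never_set")   returns NULL
     lookup_internalvar ("never_set")        creates the variable with
                                             a void value and returns it

   so code that looks up "$never_set" through this function brings the
   variable into existence as a side effect.  */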
2178
2179 /* Return current value of internal variable VAR. For variables that
2180 are not inherently typed, use a value type appropriate for GDBARCH. */
2181
2182 struct value *
2183 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2184 {
2185 struct value *val;
2186 struct trace_state_variable *tsv;
2187
2188 /* If there is a trace state variable of the same name, assume that
2189 is what we really want to see. */
2190 tsv = find_trace_state_variable (var->name);
2191 if (tsv)
2192 {
2193 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2194 &(tsv->value));
2195 if (tsv->value_known)
2196 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2197 tsv->value);
2198 else
2199 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2200 return val;
2201 }
2202
2203 switch (var->kind)
2204 {
2205 case INTERNALVAR_VOID:
2206 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2207 break;
2208
2209 case INTERNALVAR_FUNCTION:
2210 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2211 break;
2212
2213 case INTERNALVAR_INTEGER:
2214 if (!var->u.integer.type)
2215 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2216 var->u.integer.val);
2217 else
2218 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2219 break;
2220
2221 case INTERNALVAR_STRING:
2222 val = value_cstring (var->u.string, strlen (var->u.string),
2223 builtin_type (gdbarch)->builtin_char);
2224 break;
2225
2226 case INTERNALVAR_VALUE:
2227 val = value_copy (var->u.value);
2228 if (value_lazy (val))
2229 value_fetch_lazy (val);
2230 break;
2231
2232 case INTERNALVAR_MAKE_VALUE:
2233 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2234 var->u.make_value.data);
2235 break;
2236
2237 default:
2238 internal_error (__FILE__, __LINE__, _("bad kind"));
2239 }
2240
2241 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2242 on this value go back to affect the original internal variable.
2243
2244 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2245 no underlying modifiable state in the internal variable.
2246
2247 Likewise, if the variable's value is a computed lvalue, we want
2248 references to it to produce another computed lvalue, where
2249 references and assignments actually operate through the
2250 computed value's functions.
2251
2252 This means that internal variables with computed values
2253 behave a little differently from other internal variables:
2254 assignments to them don't just replace the previous value
2255 altogether. At the moment, this seems like the behavior we
2256 want. */
2257
2258 if (var->kind != INTERNALVAR_MAKE_VALUE
2259 && val->lval != lval_computed)
2260 {
2261 VALUE_LVAL (val) = lval_internalvar;
2262 VALUE_INTERNALVAR (val) = var;
2263 }
2264
2265 return val;
2266 }
2267
2268 int
2269 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2270 {
2271 if (var->kind == INTERNALVAR_INTEGER)
2272 {
2273 *result = var->u.integer.val;
2274 return 1;
2275 }
2276
2277 if (var->kind == INTERNALVAR_VALUE)
2278 {
2279 struct type *type = check_typedef (value_type (var->u.value));
2280
2281 if (type->code () == TYPE_CODE_INT)
2282 {
2283 *result = value_as_long (var->u.value);
2284 return 1;
2285 }
2286 }
2287
2288 return 0;
2289 }
2290
2291 static int
2292 get_internalvar_function (struct internalvar *var,
2293 struct internal_function **result)
2294 {
2295 switch (var->kind)
2296 {
2297 case INTERNALVAR_FUNCTION:
2298 *result = var->u.fn.function;
2299 return 1;
2300
2301 default:
2302 return 0;
2303 }
2304 }
2305
2306 void
2307 set_internalvar_component (struct internalvar *var,
2308 LONGEST offset, LONGEST bitpos,
2309 LONGEST bitsize, struct value *newval)
2310 {
2311 gdb_byte *addr;
2312 struct gdbarch *arch;
2313 int unit_size;
2314
2315 switch (var->kind)
2316 {
2317 case INTERNALVAR_VALUE:
2318 addr = value_contents_writeable (var->u.value).data ();
2319 arch = get_value_arch (var->u.value);
2320 unit_size = gdbarch_addressable_memory_unit_size (arch);
2321
2322 if (bitsize)
2323 modify_field (value_type (var->u.value), addr + offset,
2324 value_as_long (newval), bitpos, bitsize);
2325 else
2326 memcpy (addr + offset * unit_size, value_contents (newval).data (),
2327 TYPE_LENGTH (value_type (newval)));
2328 break;
2329
2330 default:
2331 /* We can never get a component of any other kind. */
2332 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2333 }
2334 }
2335
2336 void
2337 set_internalvar (struct internalvar *var, struct value *val)
2338 {
2339 enum internalvar_kind new_kind;
2340 union internalvar_data new_data = { 0 };
2341
2342 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2343 error (_("Cannot overwrite convenience function %s"), var->name);
2344
2345 /* Prepare new contents. */
2346 switch (check_typedef (value_type (val))->code ())
2347 {
2348 case TYPE_CODE_VOID:
2349 new_kind = INTERNALVAR_VOID;
2350 break;
2351
2352 case TYPE_CODE_INTERNAL_FUNCTION:
2353 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2354 new_kind = INTERNALVAR_FUNCTION;
2355 get_internalvar_function (VALUE_INTERNALVAR (val),
2356 &new_data.fn.function);
2357 /* Copies created here are never canonical. */
2358 break;
2359
2360 default:
2361 new_kind = INTERNALVAR_VALUE;
2362 struct value *copy = value_copy (val);
2363 copy->modifiable = 1;
2364
2365 /* Force the value to be fetched from the target now, to avoid problems
2366 later when this internalvar is referenced and the target is gone or
2367 has changed. */
2368 if (value_lazy (copy))
2369 value_fetch_lazy (copy);
2370
2371 /* Release the value from the value chain to prevent it from being
2372 deleted by free_all_values. From here on this function should not
2373 call error () until new_data is installed into the var->u to avoid
2374 leaking memory. */
2375 new_data.value = release_value (copy).release ();
2376
2377 /* Internal variables which are created from values with a dynamic
2378 location don't need the location property of the origin anymore.
2379 The resolved dynamic location is used in preference to any other
2380 address when accessing the value.
2381 If we kept the property, we would still refer to the origin value.
2382 Remove the location property in case it exists. */
2383 value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
2384
2385 break;
2386 }
2387
2388 /* Clean up old contents. */
2389 clear_internalvar (var);
2390
2391 /* Switch over. */
2392 var->kind = new_kind;
2393 var->u = new_data;
2394 /* End code which must not call error(). */
2395 }
2396
2397 void
2398 set_internalvar_integer (struct internalvar *var, LONGEST l)
2399 {
2400 /* Clean up old contents. */
2401 clear_internalvar (var);
2402
2403 var->kind = INTERNALVAR_INTEGER;
2404 var->u.integer.type = NULL;
2405 var->u.integer.val = l;
2406 }
2407
2408 void
2409 set_internalvar_string (struct internalvar *var, const char *string)
2410 {
2411 /* Clean up old contents. */
2412 clear_internalvar (var);
2413
2414 var->kind = INTERNALVAR_STRING;
2415 var->u.string = xstrdup (string);
2416 }
2417
2418 static void
2419 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2420 {
2421 /* Clean up old contents. */
2422 clear_internalvar (var);
2423
2424 var->kind = INTERNALVAR_FUNCTION;
2425 var->u.fn.function = f;
2426 var->u.fn.canonical = 1;
2427 /* Variables installed here are always the canonical version. */
2428 }
2429
2430 void
2431 clear_internalvar (struct internalvar *var)
2432 {
2433 /* Clean up old contents. */
2434 switch (var->kind)
2435 {
2436 case INTERNALVAR_VALUE:
2437 value_decref (var->u.value);
2438 break;
2439
2440 case INTERNALVAR_STRING:
2441 xfree (var->u.string);
2442 break;
2443
2444 case INTERNALVAR_MAKE_VALUE:
2445 if (var->u.make_value.functions->destroy != NULL)
2446 var->u.make_value.functions->destroy (var->u.make_value.data);
2447 break;
2448
2449 default:
2450 break;
2451 }
2452
2453 /* Reset to void kind. */
2454 var->kind = INTERNALVAR_VOID;
2455 }
2456
2457 const char *
2458 internalvar_name (const struct internalvar *var)
2459 {
2460 return var->name;
2461 }
2462
2463 static struct internal_function *
2464 create_internal_function (const char *name,
2465 internal_function_fn handler, void *cookie)
2466 {
2467 struct internal_function *ifn = XNEW (struct internal_function);
2468
2469 ifn->name = xstrdup (name);
2470 ifn->handler = handler;
2471 ifn->cookie = cookie;
2472 return ifn;
2473 }
2474
2475 const char *
2476 value_internal_function_name (struct value *val)
2477 {
2478 struct internal_function *ifn;
2479 int result;
2480
2481 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2482 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2483 gdb_assert (result);
2484
2485 return ifn->name;
2486 }
2487
2488 struct value *
2489 call_internal_function (struct gdbarch *gdbarch,
2490 const struct language_defn *language,
2491 struct value *func, int argc, struct value **argv)
2492 {
2493 struct internal_function *ifn;
2494 int result;
2495
2496 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2497 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2498 gdb_assert (result);
2499
2500 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2501 }
2502
2503 /* The 'function' command. This does nothing -- it is just a
2504 placeholder to let "help function NAME" work. This is also used as
2505 the implementation of the sub-command that is created when
2506 registering an internal function. */
2507 static void
2508 function_command (const char *command, int from_tty)
2509 {
2510 /* Do nothing. */
2511 }
2512
2513 /* Helper function that does the work for add_internal_function. */
2514
2515 static struct cmd_list_element *
2516 do_add_internal_function (const char *name, const char *doc,
2517 internal_function_fn handler, void *cookie)
2518 {
2519 struct internal_function *ifn;
2520 struct internalvar *var = lookup_internalvar (name);
2521
2522 ifn = create_internal_function (name, handler, cookie);
2523 set_internalvar_function (var, ifn);
2524
2525 return add_cmd (name, no_class, function_command, doc, &functionlist);
2526 }
2527
2528 /* See value.h. */
2529
2530 void
2531 add_internal_function (const char *name, const char *doc,
2532 internal_function_fn handler, void *cookie)
2533 {
2534 do_add_internal_function (name, doc, handler, cookie);
2535 }
2536
2537 /* See value.h. */
2538
2539 void
2540 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2541 gdb::unique_xmalloc_ptr<char> &&doc,
2542 internal_function_fn handler, void *cookie)
2543 {
2544 struct cmd_list_element *cmd
2545 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2546 doc.release ();
2547 cmd->doc_allocated = 1;
2548 name.release ();
2549 cmd->name_allocated = 1;
2550 }
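/* Registration sketch (hypothetical; the handler name "double_it" and
   the function name "_double" are illustrative only):

     static struct value *
     double_it (struct gdbarch *gdbarch,
                const struct language_defn *language,
                void *cookie, int argc, struct value **argv)
     {
       if (argc != 1)
         error (_("$_double takes exactly one argument."));
       return value_from_longest (builtin_type (gdbarch)->builtin_long,
                                  2 * value_as_long (argv[0]));
     }

     add_internal_function ("_double", _("Double an integer."),
                            double_it, NULL);

   Afterwards "print $_double (21)" yields 42, and "help function
   _double" shows the documentation string via the command that
   do_add_internal_function registered.  */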
2551
2552 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2553 prevent cycles / duplicates. */
2554
2555 void
2556 preserve_one_value (struct value *value, struct objfile *objfile,
2557 htab_t copied_types)
2558 {
2559 if (value->type->objfile_owner () == objfile)
2560 value->type = copy_type_recursive (objfile, value->type, copied_types);
2561
2562 if (value->enclosing_type->objfile_owner () == objfile)
2563 value->enclosing_type = copy_type_recursive (objfile,
2564 value->enclosing_type,
2565 copied_types);
2566 }
2567
2568 /* Likewise for internal variable VAR. */
2569
2570 static void
2571 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2572 htab_t copied_types)
2573 {
2574 switch (var->kind)
2575 {
2576 case INTERNALVAR_INTEGER:
2577 if (var->u.integer.type
2578 && var->u.integer.type->objfile_owner () == objfile)
2579 var->u.integer.type
2580 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2581 break;
2582
2583 case INTERNALVAR_VALUE:
2584 preserve_one_value (var->u.value, objfile, copied_types);
2585 break;
2586 }
2587 }
2588
2589 /* Update the internal variables and value history when OBJFILE is
2590 discarded; we must copy the types out of the objfile. New global types
2591 will be created for every convenience variable which currently points to
2592 this objfile's types, and the convenience variables will be adjusted to
2593 use the new global types. */
2594
2595 void
2596 preserve_values (struct objfile *objfile)
2597 {
2598 struct internalvar *var;
2599
2600 /* Create the hash table. We allocate on the objfile's obstack, since
2601 it is soon to be deleted. */
2602 htab_up copied_types = create_copied_types_hash (objfile);
2603
2604 for (const value_ref_ptr &item : value_history)
2605 preserve_one_value (item.get (), objfile, copied_types.get ());
2606
2607 for (var = internalvars; var; var = var->next)
2608 preserve_one_internalvar (var, objfile, copied_types.get ());
2609
2610 preserve_ext_lang_values (objfile, copied_types.get ());
2611 }
2612
2613 static void
2614 show_convenience (const char *ignore, int from_tty)
2615 {
2616 struct gdbarch *gdbarch = get_current_arch ();
2617 struct internalvar *var;
2618 int varseen = 0;
2619 struct value_print_options opts;
2620
2621 get_user_print_options (&opts);
2622 for (var = internalvars; var; var = var->next)
2623 {
2624
2625 if (!varseen)
2626 {
2627 varseen = 1;
2628 }
2629 printf_filtered (("$%s = "), var->name);
2630
2631 try
2632 {
2633 struct value *val;
2634
2635 val = value_of_internalvar (gdbarch, var);
2636 value_print (val, gdb_stdout, &opts);
2637 }
2638 catch (const gdb_exception_error &ex)
2639 {
2640 fprintf_styled (gdb_stdout, metadata_style.style (),
2641 _("<error: %s>"), ex.what ());
2642 }
2643
2644 printf_filtered (("\n"));
2645 }
2646 if (!varseen)
2647 {
2648 /* This text does not mention convenience functions on purpose.
2649 The user can't create them except via Python, and if Python support
2650 is installed this message will never be printed ($_streq will
2651 exist). */
2652 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2653 "Convenience variables have "
2654 "names starting with \"$\";\n"
2655 "use \"set\" as in \"set "
2656 "$foo = 5\" to define them.\n"));
2657 }
2658 }
2659 \f
2660
2661 /* See value.h. */
2662
2663 struct value *
2664 value_from_xmethod (xmethod_worker_up &&worker)
2665 {
2666 struct value *v;
2667
2668 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2669 v->lval = lval_xcallable;
2670 v->location.xm_worker = worker.release ();
2671 v->modifiable = 0;
2672
2673 return v;
2674 }
2675
2676 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2677
2678 struct type *
2679 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2680 {
2681 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2682 && method->lval == lval_xcallable && !argv.empty ());
2683
2684 return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2685 }
2686
2687 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2688
2689 struct value *
2690 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2691 {
2692 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2693 && method->lval == lval_xcallable && !argv.empty ());
2694
2695 return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2696 }
2697 \f
2698 /* Extract a value as a C number (either long or double).
2699 Knows how to convert fixed values to double, or
2700 floating values to long.
2701 Does not deallocate the value. */
2702
2703 LONGEST
2704 value_as_long (struct value *val)
2705 {
2706 /* This coerces arrays and functions, which is necessary (e.g.
2707 in disassemble_command). It also dereferences references, which
2708 I suspect is the most logical thing to do. */
2709 val = coerce_array (val);
2710 return unpack_long (value_type (val), value_contents (val).data ());
2711 }
2712
2713 /* Extract a value as a C pointer. Does not deallocate the value.
2714 Note that val's type may not actually be a pointer; value_as_long
2715 handles all the cases. */
2716 CORE_ADDR
2717 value_as_address (struct value *val)
2718 {
2719 struct gdbarch *gdbarch = value_type (val)->arch ();
2720
2721 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2722 whether we want this to be true eventually. */
2723 #if 0
2724 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2725 non-address (e.g. argument to "signal", "info break", etc.), or
2726 for pointers to char, in which the low bits *are* significant. */
2727 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2728 #else
2729
2730 /* There are several targets (IA-64, PowerPC, and others) which
2731 don't represent pointers to functions as simply the address of
2732 the function's entry point. For example, on the IA-64, a
2733 function pointer points to a two-word descriptor, generated by
2734 the linker, which contains the function's entry point, and the
2735 value the IA-64 "global pointer" register should have --- to
2736 support position-independent code. The linker generates
2737 descriptors only for those functions whose addresses are taken.
2738
2739 On such targets, it's difficult for GDB to convert an arbitrary
2740 function address into a function pointer; it has to either find
2741 an existing descriptor for that function, or call malloc and
2742 build its own. On some targets, it is impossible for GDB to
2743 build a descriptor at all: the descriptor must contain a jump
2744 instruction; data memory cannot be executed; and code memory
2745 cannot be modified.
2746
2747 Upon entry to this function, if VAL is a value of type `function'
2748 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2749 value_address (val) is the address of the function. This is what
2750 you'll get if you evaluate an expression like `main'. The call
2751 to COERCE_ARRAY below actually does all the usual unary
2752 conversions, which includes converting values of type `function'
2753 to `pointer to function'. This is the challenging conversion
2754 discussed above. Then, `unpack_long' will convert that pointer
2755 back into an address.
2756
2757 So, suppose the user types `disassemble foo' on an architecture
2758 with a strange function pointer representation, on which GDB
2759 cannot build its own descriptors, and suppose further that `foo'
2760 has no linker-built descriptor. The address->pointer conversion
2761 will signal an error and prevent the command from running, even
2762 though the next step would have been to convert the pointer
2763 directly back into the same address.
2764
2765 The following shortcut avoids this whole mess. If VAL is a
2766 function, just return its address directly. */
2767 if (value_type (val)->code () == TYPE_CODE_FUNC
2768 || value_type (val)->code () == TYPE_CODE_METHOD)
2769 return value_address (val);
2770
2771 val = coerce_array (val);
2772
2773 /* Some architectures (e.g. Harvard) map instruction and data
2774 addresses onto a single large unified address space. For
2775 instance: an architecture may consider a large integer in the
2776 range 0x10000000 .. 0x1000ffff to already represent a data
2777 address (hence not need a pointer-to-address conversion) while
2778 a small integer would still need to be converted from integer to
2779 pointer to address. Just assume such architectures handle all
2780 integer conversions in a single function. */
2781
2782 /* JimB writes:
2783
2784 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2785 must admonish GDB hackers to make sure its behavior matches the
2786 compiler's, whenever possible.
2787
2788 In general, I think GDB should evaluate expressions the same way
2789 the compiler does. When the user copies an expression out of
2790 their source code and hands it to a `print' command, they should
2791 get the same value the compiler would have computed. Any
2792 deviation from this rule can cause major confusion and annoyance,
2793 and needs to be justified carefully. In other words, GDB doesn't
2794 really have the freedom to do these conversions in clever and
2795 useful ways.
2796
2797 AndrewC pointed out that users aren't complaining about how GDB
2798 casts integers to pointers; they are complaining that they can't
2799 take an address from a disassembly listing and give it to `x/i'.
2800 This is certainly important.
2801
2802 Adding an architecture method like integer_to_address() certainly
2803 makes it possible for GDB to "get it right" in all circumstances
2804 --- the target has complete control over how things get done, so
2805 people can Do The Right Thing for their target without breaking
2806 anyone else. The standard doesn't specify how integers get
2807 converted to pointers; usually, the ABI doesn't either, but
2808 ABI-specific code is a more reasonable place to handle it. */
2809
2810 if (!value_type (val)->is_pointer_or_reference ()
2811 && gdbarch_integer_to_address_p (gdbarch))
2812 return gdbarch_integer_to_address (gdbarch, value_type (val),
2813 value_contents (val).data ());
2814
2815 return unpack_long (value_type (val), value_contents (val).data ());
2816 #endif
2817 }
2818 \f
2819 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2820 as a long, or as a double, assuming the raw data is described
2821 by type TYPE. Knows how to convert different sizes of values
2822 and can convert between fixed and floating point. We don't assume
2823 any alignment for the raw data. Return value is in host byte order.
2824
2825 If you want functions and arrays to be coerced to pointers, and
2826 references to be dereferenced, call value_as_long() instead.
2827
2828 C++: It is assumed that the front-end has taken care of
2829 all matters concerning pointers to members. A pointer
2830 to member which reaches here is considered to be equivalent
2831 to an INT (or some size). After all, it is only an offset. */
2832
2833 LONGEST
2834 unpack_long (struct type *type, const gdb_byte *valaddr)
2835 {
2836 if (is_fixed_point_type (type))
2837 type = type->fixed_point_type_base_type ();
2838
2839 enum bfd_endian byte_order = type_byte_order (type);
2840 enum type_code code = type->code ();
2841 int len = TYPE_LENGTH (type);
2842 int nosign = type->is_unsigned ();
2843
2844 switch (code)
2845 {
2846 case TYPE_CODE_TYPEDEF:
2847 return unpack_long (check_typedef (type), valaddr);
2848 case TYPE_CODE_ENUM:
2849 case TYPE_CODE_FLAGS:
2850 case TYPE_CODE_BOOL:
2851 case TYPE_CODE_INT:
2852 case TYPE_CODE_CHAR:
2853 case TYPE_CODE_RANGE:
2854 case TYPE_CODE_MEMBERPTR:
2855 {
2856 LONGEST result;
2857
2858 if (type->bit_size_differs_p ())
2859 {
2860 unsigned bit_off = type->bit_offset ();
2861 unsigned bit_size = type->bit_size ();
2862 if (bit_size == 0)
2863 {
2864 /* unpack_bits_as_long doesn't handle this case the
2865 way we'd like, so handle it here. */
2866 result = 0;
2867 }
2868 else
2869 result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
2870 }
2871 else
2872 {
2873 if (nosign)
2874 result = extract_unsigned_integer (valaddr, len, byte_order);
2875 else
2876 result = extract_signed_integer (valaddr, len, byte_order);
2877 }
2878 if (code == TYPE_CODE_RANGE)
2879 result += type->bounds ()->bias;
2880 return result;
2881 }
2882
2883 case TYPE_CODE_FLT:
2884 case TYPE_CODE_DECFLOAT:
2885 return target_float_to_longest (valaddr, type);
2886
2887 case TYPE_CODE_FIXED_POINT:
2888 {
2889 gdb_mpq vq;
2890 vq.read_fixed_point (gdb::make_array_view (valaddr, len),
2891 byte_order, nosign,
2892 type->fixed_point_scaling_factor ());
2893
2894 gdb_mpz vz;
2895 mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
2896 return vz.as_integer<LONGEST> ();
2897 }
2898
2899 case TYPE_CODE_PTR:
2900 case TYPE_CODE_REF:
2901 case TYPE_CODE_RVALUE_REF:
2902 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2903 whether we want this to be true eventually. */
2904 return extract_typed_address (valaddr, type);
2905
2906 default:
2907 error (_("Value can't be converted to integer."));
2908 }
2909 }
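/* Worked example (hedged; assumes a 4-byte little-endian integer
   type, e.g. builtin_type (gdbarch)->builtin_int32):

     gdb_byte buf[4] = { 0x2a, 0x00, 0x00, 0x00 };
     LONGEST v = unpack_long (builtin_type (gdbarch)->builtin_int32, buf);

   yields v == 42 via extract_signed_integer.  For a TYPE_CODE_RANGE
   type the range's bias is then added on top, and for TYPE_CODE_FLT
   the bytes go through target_float_to_longest instead.  */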
2910
2911 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2912 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2913 We don't assume any alignment for the raw data. Return value is in
2914 host byte order.
2915
2916 If you want functions and arrays to be coerced to pointers, and
2917 references to be dereferenced, call value_as_address() instead.
2918
2919 C++: It is assumed that the front-end has taken care of
2920 all matters concerning pointers to members. A pointer
2921 to member which reaches here is considered to be equivalent
2922 to an INT (or some size). After all, it is only an offset. */
2923
2924 CORE_ADDR
2925 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2926 {
2927 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2928 whether we want this to be true eventually. */
2929 return unpack_long (type, valaddr);
2930 }
2931
2932 bool
2933 is_floating_value (struct value *val)
2934 {
2935 struct type *type = check_typedef (value_type (val));
2936
2937 if (is_floating_type (type))
2938 {
2939 if (!target_float_is_valid (value_contents (val).data (), type))
2940 error (_("Invalid floating value found in program."));
2941 return true;
2942 }
2943
2944 return false;
2945 }
2946
2947 \f
2948 /* Get the value of the FIELDNO'th field (which must be static) of
2949 TYPE. */
2950
2951 struct value *
2952 value_static_field (struct type *type, int fieldno)
2953 {
2954 struct value *retval;
2955
2956 switch (type->field (fieldno).loc_kind ())
2957 {
2958 case FIELD_LOC_KIND_PHYSADDR:
2959 retval = value_at_lazy (type->field (fieldno).type (),
2960 type->field (fieldno).loc_physaddr ());
2961 break;
2962 case FIELD_LOC_KIND_PHYSNAME:
2963 {
2964 const char *phys_name = type->field (fieldno).loc_physname ();
2965 /* type->field (fieldno).name (); */
2966 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2967
2968 if (sym.symbol == NULL)
2969 {
2970 /* With some compilers, e.g. HP aCC, static data members are
2971 reported as non-debuggable symbols. */
2972 struct bound_minimal_symbol msym
2973 = lookup_minimal_symbol (phys_name, NULL, NULL);
2974 struct type *field_type = type->field (fieldno).type ();
2975
2976 if (!msym.minsym)
2977 retval = allocate_optimized_out_value (field_type);
2978 else
2979 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2980 }
2981 else
2982 retval = value_of_variable (sym.symbol, sym.block);
2983 break;
2984 }
2985 default:
2986 gdb_assert_not_reached ("unexpected field location kind");
2987 }
2988
2989 return retval;
2990 }
2991
2992 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2993 You have to be careful here, since the size of the data area for the value
2994 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2995 than the old enclosing type, you have to allocate more space for the
2996 data. */
2997
2998 void
2999 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
3000 {
3001 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
3002 {
3003 check_type_length_before_alloc (new_encl_type);
3004 val->contents
3005 .reset ((gdb_byte *) xrealloc (val->contents.release (),
3006 TYPE_LENGTH (new_encl_type)));
3007 }
3008
3009 val->enclosing_type = new_encl_type;
3010 }
3011
3012 /* Given a value ARG1 (offset by OFFSET bytes)
3013 of a struct or union type ARG_TYPE,
3014 extract and return the value of one of its (non-static) fields.
3015 FIELDNO says which field. */
3016
3017 struct value *
3018 value_primitive_field (struct value *arg1, LONGEST offset,
3019 int fieldno, struct type *arg_type)
3020 {
3021 struct value *v;
3022 struct type *type;
3023 struct gdbarch *arch = get_value_arch (arg1);
3024 int unit_size = gdbarch_addressable_memory_unit_size (arch);
3025
3026 arg_type = check_typedef (arg_type);
3027 type = arg_type->field (fieldno).type ();
3028
3029 /* Call check_typedef on our type to make sure that, if TYPE
3030 is a TYPE_CODE_TYPEDEF, its length is set to the length
3031 of the target type instead of zero. However, we do not
3032 replace the typedef type by the target type, because we want
3033 to keep the typedef in order to be able to print the type
3034 description correctly. */
3035 check_typedef (type);
3036
3037 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
3038 {
3039 /* Handle packed fields.
3040
3041 Create a new value for the bitfield, with bitpos and bitsize
3042 set. If possible, arrange offset and bitpos so that we can
3043 do a single aligned read of the size of the containing type.
3044 Otherwise, adjust offset to the byte containing the first
3045 bit. Assume that the address, offset, and embedded offset
3046 are sufficiently aligned. */
3047
3048 LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
3049 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3050
3051 v = allocate_value_lazy (type);
3052 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3053 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3054 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3055 v->bitpos = bitpos % container_bitsize;
3056 else
3057 v->bitpos = bitpos % 8;
3058 v->offset = (value_embedded_offset (arg1)
3059 + offset
3060 + (bitpos - v->bitpos) / 8);
3061 set_value_parent (v, arg1);
3062 if (!value_lazy (arg1))
3063 value_fetch_lazy (v);
3064 }
3065 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3066 {
3067 /* This field is actually a base subobject, so preserve the
3068 entire object's contents for later references to virtual
3069 bases, etc. */
3070 LONGEST boffset;
3071
3072 /* Lazy register values with offsets are not supported. */
3073 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3074 value_fetch_lazy (arg1);
3075
3076 /* We special case virtual inheritance here because this
3077 requires access to the contents, which we would rather avoid
3078 for references to ordinary fields of unavailable values. */
3079 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3080 boffset = baseclass_offset (arg_type, fieldno,
3081 value_contents (arg1).data (),
3082 value_embedded_offset (arg1),
3083 value_address (arg1),
3084 arg1);
3085 else
3086 boffset = arg_type->field (fieldno).loc_bitpos () / 8;
3087
3088 if (value_lazy (arg1))
3089 v = allocate_value_lazy (value_enclosing_type (arg1));
3090 else
3091 {
3092 v = allocate_value (value_enclosing_type (arg1));
3093 value_contents_copy_raw (v, 0, arg1, 0,
3094 TYPE_LENGTH (value_enclosing_type (arg1)));
3095 }
3096 v->type = type;
3097 v->offset = value_offset (arg1);
3098 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3099 }
3100 else if (NULL != TYPE_DATA_LOCATION (type))
3101 {
3102 /* Field is a dynamic data member. */
3103
3104 gdb_assert (0 == offset);
3105 /* We expect an already resolved data location. */
3106 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3107 /* For dynamic data types, defer memory allocation
3108 until we actually access the value. */
3109 v = allocate_value_lazy (type);
3110 }
3111 else
3112 {
3113 /* Plain old data member */
3114 offset += (arg_type->field (fieldno).loc_bitpos ()
3115 / (HOST_CHAR_BIT * unit_size));
3116
3117 /* Lazy register values with offsets are not supported. */
3118 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3119 value_fetch_lazy (arg1);
3120
3121 if (value_lazy (arg1))
3122 v = allocate_value_lazy (type);
3123 else
3124 {
3125 v = allocate_value (type);
3126 value_contents_copy_raw (v, value_embedded_offset (v),
3127 arg1, value_embedded_offset (arg1) + offset,
3128 type_length_units (type));
3129 }
3130 v->offset = (value_offset (arg1) + offset
3131 + value_embedded_offset (arg1));
3132 }
3133 set_value_component_location (v, arg1);
3134 return v;
3135 }
3136
3137 /* Given a value ARG1 of a struct or union type,
3138 extract and return the value of one of its (non-static) fields.
3139 FIELDNO says which field. */
3140
3141 struct value *
3142 value_field (struct value *arg1, int fieldno)
3143 {
3144 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3145 }
3146
3147 /* Return a non-virtual function as a value.
3148 F is the list of member functions which contains the desired method.
3149 J is an index into F which provides the desired method.
3150
3151 We only use the symbol for its address, so be happy with either a
3152 full symbol or a minimal symbol. */
3153
3154 struct value *
3155 value_fn_field (struct value **arg1p, struct fn_field *f,
3156 int j, struct type *type,
3157 LONGEST offset)
3158 {
3159 struct value *v;
3160 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3161 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3162 struct symbol *sym;
3163 struct bound_minimal_symbol msym;
3164
3165 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3166 if (sym != NULL)
3167 {
3168 memset (&msym, 0, sizeof (msym));
3169 }
3170 else
3171 {
3172 gdb_assert (sym == NULL);
3173 msym = lookup_bound_minimal_symbol (physname);
3174 if (msym.minsym == NULL)
3175 return NULL;
3176 }
3177
3178 v = allocate_value (ftype);
3179 VALUE_LVAL (v) = lval_memory;
3180 if (sym)
3181 {
3182 set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3183 }
3184 else
3185 {
3186 /* The minimal symbol might point to a function descriptor;
3187 resolve it to the actual code address instead. */
3188 struct objfile *objfile = msym.objfile;
3189 struct gdbarch *gdbarch = objfile->arch ();
3190
3191 set_value_address (v,
3192 gdbarch_convert_from_func_ptr_addr
3193 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym),
3194 current_inferior ()->top_target ()));
3195 }
3196
3197 if (arg1p)
3198 {
3199 if (type != value_type (*arg1p))
3200 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3201 value_addr (*arg1p)));
3202
3203 /* Move the `this' pointer according to the offset.
3204 VALUE_OFFSET (*arg1p) += offset; */
3205 }
3206
3207 return v;
3208 }
3209
3210 \f
3211
3212 /* See value.h. */
3213
3214 LONGEST
3215 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3216 LONGEST bitpos, LONGEST bitsize)
3217 {
3218 enum bfd_endian byte_order = type_byte_order (field_type);
3219 ULONGEST val;
3220 ULONGEST valmask;
3221 int lsbcount;
3222 LONGEST bytes_read;
3223 LONGEST read_offset;
3224
3225 /* Read the minimum number of bytes required; there may not be
3226 enough bytes to read an entire ULONGEST. */
3227 field_type = check_typedef (field_type);
3228 if (bitsize)
3229 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3230 else
3231 {
3232 bytes_read = TYPE_LENGTH (field_type);
3233 bitsize = 8 * bytes_read;
3234 }
3235
3236 read_offset = bitpos / 8;
3237
3238 val = extract_unsigned_integer (valaddr + read_offset,
3239 bytes_read, byte_order);
3240
3241 /* Extract bits. See comment above. */
3242
3243 if (byte_order == BFD_ENDIAN_BIG)
3244 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3245 else
3246 lsbcount = (bitpos % 8);
3247 val >>= lsbcount;
3248
3249 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3250 If the field is signed, and is negative, then sign extend. */
3251
3252 if (bitsize < 8 * (int) sizeof (val))
3253 {
3254 valmask = (((ULONGEST) 1) << bitsize) - 1;
3255 val &= valmask;
3256 if (!field_type->is_unsigned ())
3257 {
3258 if (val & (valmask ^ (valmask >> 1)))
3259 {
3260 val |= ~valmask;
3261 }
3262 }
3263 }
3264
3265 return val;
3266 }
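/* Worked example (hedged): a 3-bit field starting at bit 5 of a
   little-endian byte stream, with VALADDR pointing at bytes
   { 0xe0, 0x01 }:

     bytes_read  = ((5 % 8) + 3 + 7) / 8  ->  1
     read_offset = 5 / 8                  ->  0
     val         = 0xe0; lsbcount = 5; val >>= 5  ->  7
     valmask     = 0x7, so the result is 7, or -1 after sign
                   extension if FIELD_TYPE is signed.

   The second byte is never read, which is the point of computing the
   minimum number of bytes above.  */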
3267
3268 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3269 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3270 ORIGINAL_VALUE, which must not be NULL. See
3271 unpack_value_bits_as_long for more details. */
3272
3273 int
3274 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3275 LONGEST embedded_offset, int fieldno,
3276 const struct value *val, LONGEST *result)
3277 {
3278 int bitpos = type->field (fieldno).loc_bitpos ();
3279 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3280 struct type *field_type = type->field (fieldno).type ();
3281 int bit_offset;
3282
3283 gdb_assert (val != NULL);
3284
3285 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3286 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3287 || !value_bits_available (val, bit_offset, bitsize))
3288 return 0;
3289
3290 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3291 bitpos, bitsize);
3292 return 1;
3293 }
3294
3295 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3296 object at VALADDR. See unpack_bits_as_long for more details. */
3297
3298 LONGEST
3299 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3300 {
3301 int bitpos = type->field (fieldno).loc_bitpos ();
3302 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3303 struct type *field_type = type->field (fieldno).type ();
3304
3305 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3306 }
3307
3308 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3309 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3310 the contents in DEST_VAL, zero or sign extending if the type of
3311 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3312 VAL. If the VAL's contents required to extract the bitfield from
3313 are unavailable/optimized out, DEST_VAL is correspondingly
3314 marked unavailable/optimized out. */
3315
3316 void
3317 unpack_value_bitfield (struct value *dest_val,
3318 LONGEST bitpos, LONGEST bitsize,
3319 const gdb_byte *valaddr, LONGEST embedded_offset,
3320 const struct value *val)
3321 {
3322 enum bfd_endian byte_order;
3323 int src_bit_offset;
3324 int dst_bit_offset;
3325 struct type *field_type = value_type (dest_val);
3326
3327 byte_order = type_byte_order (field_type);
3328
3329 /* First, unpack and sign extend the bitfield as if it was wholly
3330 valid. Optimized out/unavailable bits are read as zero, but
3331 that's OK, as they'll end up marked below. If the VAL is
3332 wholly-invalid we may have skipped allocating its contents,
3333 though. See allocate_optimized_out_value. */
3334 if (valaddr != NULL)
3335 {
3336 LONGEST num;
3337
3338 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3339 bitpos, bitsize);
3340 store_signed_integer (value_contents_raw (dest_val).data (),
3341 TYPE_LENGTH (field_type), byte_order, num);
3342 }
3343
3344 /* Now copy the optimized out / unavailability ranges to the right
3345 bits. */
3346 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3347 if (byte_order == BFD_ENDIAN_BIG)
3348 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3349 else
3350 dst_bit_offset = 0;
3351 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3352 val, src_bit_offset, bitsize);
3353 }
3354
3355 /* Return a new value with type TYPE, which is FIELDNO field of the
3356 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3357 of VAL. If the VAL's contents required to extract the bitfield
3358 from are unavailable/optimized out, the new value is
3359 correspondingly marked unavailable/optimized out. */
3360
3361 struct value *
3362 value_field_bitfield (struct type *type, int fieldno,
3363 const gdb_byte *valaddr,
3364 LONGEST embedded_offset, const struct value *val)
3365 {
3366 int bitpos = type->field (fieldno).loc_bitpos ();
3367 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3368 struct value *res_val = allocate_value (type->field (fieldno).type ());
3369
3370 unpack_value_bitfield (res_val, bitpos, bitsize,
3371 valaddr, embedded_offset, val);
3372
3373 return res_val;
3374 }
3375
3376 /* Modify the value of a bitfield. ADDR points to a block of memory in
3377 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3378 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3379 indicate which bits (in target bit order) comprise the bitfield.
3380 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3381 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3382
3383 void
3384 modify_field (struct type *type, gdb_byte *addr,
3385 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3386 {
3387 enum bfd_endian byte_order = type_byte_order (type);
3388 ULONGEST oword;
3389 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3390 LONGEST bytesize;
3391
3392 /* Normalize BITPOS. */
3393 addr += bitpos / 8;
3394 bitpos %= 8;
3395
3396 /* If a negative fieldval fits in the field in question, chop
3397 off the sign extension bits. */
3398 if ((~fieldval & ~(mask >> 1)) == 0)
3399 fieldval &= mask;
3400
3401 /* Warn if value is too big to fit in the field in question. */
3402 if (0 != (fieldval & ~mask))
3403 {
3404 /* FIXME: would like to include fieldval in the message, but
3405 we don't have a sprintf_longest. */
3406 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3407
3408 /* Truncate it, otherwise adjoining fields may be corrupted. */
3409 fieldval &= mask;
3410 }
3411
3412 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3413 false valgrind reports. */
3414
3415 bytesize = (bitpos + bitsize + 7) / 8;
3416 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3417
3418 /* Shifting for bit field depends on endianness of the target machine. */
3419 if (byte_order == BFD_ENDIAN_BIG)
3420 bitpos = bytesize * 8 - bitpos - bitsize;
3421
3422 oword &= ~(mask << bitpos);
3423 oword |= fieldval << bitpos;
3424
3425 store_unsigned_integer (addr, bytesize, byte_order, oword);
3426 }
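/* Worked example (hedged): storing FIELDVAL 5 into a 3-bit field at
   BITPOS 5 of a little-endian buffer whose first byte is 0x00:

     mask     = 0x7
     bytesize = (5 + 3 + 7) / 8  ->  1, so only one byte is touched
     oword    = 0x00; oword &= ~(0x7 << 5); oword |= 5 << 5
     result   : the byte becomes 0xa0, neighbouring bytes are untouched

   A FIELDVAL of -3 is first reduced to 0x5 by the mask, since -3 fits
   in a signed 3-bit field.  */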
3427 \f
3428 /* Pack NUM into BUF using a target format of TYPE. */
3429
3430 void
3431 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3432 {
3433 enum bfd_endian byte_order = type_byte_order (type);
3434 LONGEST len;
3435
3436 type = check_typedef (type);
3437 len = TYPE_LENGTH (type);
3438
3439 switch (type->code ())
3440 {
3441 case TYPE_CODE_RANGE:
3442 num -= type->bounds ()->bias;
3443 /* Fall through. */
3444 case TYPE_CODE_INT:
3445 case TYPE_CODE_CHAR:
3446 case TYPE_CODE_ENUM:
3447 case TYPE_CODE_FLAGS:
3448 case TYPE_CODE_BOOL:
3449 case TYPE_CODE_MEMBERPTR:
3450 if (type->bit_size_differs_p ())
3451 {
3452 unsigned bit_off = type->bit_offset ();
3453 unsigned bit_size = type->bit_size ();
3454 num &= ((ULONGEST) 1 << bit_size) - 1;
3455 num <<= bit_off;
3456 }
3457 store_signed_integer (buf, len, byte_order, num);
3458 break;
3459
3460 case TYPE_CODE_REF:
3461 case TYPE_CODE_RVALUE_REF:
3462 case TYPE_CODE_PTR:
3463 store_typed_address (buf, type, (CORE_ADDR) num);
3464 break;
3465
3466 case TYPE_CODE_FLT:
3467 case TYPE_CODE_DECFLOAT:
3468 target_float_from_longest (buf, type, num);
3469 break;
3470
3471 default:
3472 error (_("Unexpected type (%d) encountered for integer constant."),
3473 type->code ());
3474 }
3475 }
3476
3477
3478 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3479
3480 static void
3481 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3482 {
3483 LONGEST len;
3484 enum bfd_endian byte_order;
3485
3486 type = check_typedef (type);
3487 len = TYPE_LENGTH (type);
3488 byte_order = type_byte_order (type);
3489
3490 switch (type->code ())
3491 {
3492 case TYPE_CODE_INT:
3493 case TYPE_CODE_CHAR:
3494 case TYPE_CODE_ENUM:
3495 case TYPE_CODE_FLAGS:
3496 case TYPE_CODE_BOOL:
3497 case TYPE_CODE_RANGE:
3498 case TYPE_CODE_MEMBERPTR:
3499 if (type->bit_size_differs_p ())
3500 {
3501 unsigned bit_off = type->bit_offset ();
3502 unsigned bit_size = type->bit_size ();
3503 num &= ((ULONGEST) 1 << bit_size) - 1;
3504 num <<= bit_off;
3505 }
3506 store_unsigned_integer (buf, len, byte_order, num);
3507 break;
3508
3509 case TYPE_CODE_REF:
3510 case TYPE_CODE_RVALUE_REF:
3511 case TYPE_CODE_PTR:
3512 store_typed_address (buf, type, (CORE_ADDR) num);
3513 break;
3514
3515 case TYPE_CODE_FLT:
3516 case TYPE_CODE_DECFLOAT:
3517 target_float_from_ulongest (buf, type, num);
3518 break;
3519
3520 default:
3521 error (_("Unexpected type (%d) encountered "
3522 "for unsigned integer constant."),
3523 type->code ());
3524 }
3525 }
3526
3527
3528 /* Create a value of type TYPE that is zero, and return it. */
3529
3530 struct value *
3531 value_zero (struct type *type, enum lval_type lv)
3532 {
3533 struct value *val = allocate_value_lazy (type);
3534
3535 VALUE_LVAL (val) = (lv == lval_computed ? not_lval : lv);
3536 val->is_zero = true;
3537 return val;
3538 }
3539
3540 /* Convert C numbers into newly allocated values. */
3541
3542 struct value *
3543 value_from_longest (struct type *type, LONGEST num)
3544 {
3545 struct value *val = allocate_value (type);
3546
3547 pack_long (value_contents_raw (val).data (), type, num);
3548 return val;
3549 }
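/* Usage sketch (hedged; "gdbarch" stands for whatever architecture is
   in scope at the call site):

     struct value *v
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);

   allocates the value and lets pack_long store 42 into its contents
   buffer in target byte order, so value_as_long (v) later reads back
   42 regardless of host/target endianness.  */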
3550
3551
3552 /* Convert C unsigned numbers into newly allocated values. */
3553
3554 struct value *
3555 value_from_ulongest (struct type *type, ULONGEST num)
3556 {
3557 struct value *val = allocate_value (type);
3558
3559 pack_unsigned_long (value_contents_raw (val).data (), type, num);
3560
3561 return val;
3562 }
3563
3564
3565 /* Create a value representing a pointer of type TYPE to the address
3566 ADDR. */
3567
3568 struct value *
3569 value_from_pointer (struct type *type, CORE_ADDR addr)
3570 {
3571 struct value *val = allocate_value (type);
3572
3573 store_typed_address (value_contents_raw (val).data (),
3574 check_typedef (type), addr);
3575 return val;
3576 }
3577
3578 /* Create and return a value object of TYPE containing the value D. The
3579 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3580 it is converted to target format. */
3581
3582 struct value *
3583 value_from_host_double (struct type *type, double d)
3584 {
3585 struct value *value = allocate_value (type);
3586 gdb_assert (type->code () == TYPE_CODE_FLT);
3587 target_float_from_host_double (value_contents_raw (value).data (),
3588 value_type (value), d);
3589 return value;
3590 }
3591
3592 /* Create a value of type TYPE whose contents come from VALADDR, if it
3593 is non-null, and whose memory address (in the inferior) is
3594 ADDRESS. The type of the created value may differ from the passed
3595 type TYPE. Make sure to retrieve the value's new type after this call.
3596 Note that TYPE is not passed through resolve_dynamic_type; this is
3597 a special API intended for use only by Ada. */
3598
3599 struct value *
3600 value_from_contents_and_address_unresolved (struct type *type,
3601 const gdb_byte *valaddr,
3602 CORE_ADDR address)
3603 {
3604 struct value *v;
3605
3606 if (valaddr == NULL)
3607 v = allocate_value_lazy (type);
3608 else
3609 v = value_from_contents (type, valaddr);
3610 VALUE_LVAL (v) = lval_memory;
3611 set_value_address (v, address);
3612 return v;
3613 }
3614
3615 /* Create a value of type TYPE whose contents come from VALADDR, if it
3616 is non-null, and whose memory address (in the inferior) is
3617 ADDRESS. The type of the created value may differ from the passed
3618 type TYPE. Make sure to retrieve the value's new type after this call. */
3619
3620 struct value *
3621 value_from_contents_and_address (struct type *type,
3622 const gdb_byte *valaddr,
3623 CORE_ADDR address)
3624 {
3625 gdb::array_view<const gdb_byte> view;
3626 if (valaddr != nullptr)
3627 view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
3628 struct type *resolved_type = resolve_dynamic_type (type, view, address);
3629 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3630 struct value *v;
3631
3632 if (valaddr == NULL)
3633 v = allocate_value_lazy (resolved_type);
3634 else
3635 v = value_from_contents (resolved_type, valaddr);
3636 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3637 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3638 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3639 VALUE_LVAL (v) = lval_memory;
3640 set_value_address (v, address);
3641 return v;
3642 }
3643
3644 /* Create a value of type TYPE holding the contents CONTENTS.
3645 The new value is `not_lval'. */
3646
3647 struct value *
3648 value_from_contents (struct type *type, const gdb_byte *contents)
3649 {
3650 struct value *result;
3651
3652 result = allocate_value (type);
3653 memcpy (value_contents_raw (result).data (), contents, TYPE_LENGTH (type));
3654 return result;
3655 }
3656
3657 /* Extract a value from the value history. Input will be of the form
3658 $digits or $$digits. See block comment above 'write_dollar_variable'
3659 for details. */
3660
3661 struct value *
3662 value_from_history_ref (const char *h, const char **endp)
3663 {
3664 int index, len;
3665
3666 if (h[0] == '$')
3667 len = 1;
3668 else
3669 return NULL;
3670
3671 if (h[1] == '$')
3672 len = 2;
3673
3674 /* Find length of numeral string. */
3675 for (; isdigit (h[len]); len++)
3676 ;
3677
3678 /* Make sure numeral string is not part of an identifier. */
3679 if (h[len] == '_' || isalpha (h[len]))
3680 return NULL;
3681
3682 /* Now collect the index value. */
3683 if (h[1] == '$')
3684 {
3685 if (len == 2)
3686 {
3687 /* For some bizarre reason, "$$" is equivalent to "$$1",
3688 rather than to "$$0" as it ought to be! */
3689 index = -1;
3690 *endp += len;
3691 }
3692 else
3693 {
3694 char *local_end;
3695
3696 index = -strtol (&h[2], &local_end, 10);
3697 *endp = local_end;
3698 }
3699 }
3700 else
3701 {
3702 if (len == 1)
3703 {
3704 /* "$" is equivalent to "$0". */
3705 index = 0;
3706 *endp += len;
3707 }
3708 else
3709 {
3710 char *local_end;
3711
3712 index = strtol (&h[1], &local_end, 10);
3713 *endp = local_end;
3714 }
3715 }
3716
3717 return access_value_history (index);
3718 }
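/* Illustrative mapping of the history references accepted above
   (ENDP updates omitted):

     value_from_history_ref ("$",    &end)  => access_value_history (0)
     value_from_history_ref ("$3",   &end)  => access_value_history (3)
     value_from_history_ref ("$$",   &end)  => access_value_history (-1)
     value_from_history_ref ("$$2",  &end)  => access_value_history (-2)
     value_from_history_ref ("$foo", &end)  => NULL (not a history reference)  */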
3719
3720 /* Get the component value (offset by OFFSET bytes) of a struct or
3721 union WHOLE. The component's type is TYPE. */
3722
3723 struct value *
3724 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3725 {
3726 struct value *v;
3727
3728 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3729 v = allocate_value_lazy (type);
3730 else
3731 {
3732 v = allocate_value (type);
3733 value_contents_copy (v, value_embedded_offset (v),
3734 whole, value_embedded_offset (whole) + offset,
3735 type_length_units (type));
3736 }
3737 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3738 set_value_component_location (v, whole);
3739
3740 return v;
3741 }
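/* Usage sketch; FIELD_TYPE and BYTE_OFF are assumed to describe a
   member of WHOLE's type:

     struct value *member
       = value_from_component (whole, field_type, byte_off);

   If WHOLE is a lazy lval_memory value, the component is created lazy
   as well, so no target memory is read until the member's contents
   are actually needed.  */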
3742
3743 struct value *
3744 coerce_ref_if_computed (const struct value *arg)
3745 {
3746 const struct lval_funcs *funcs;
3747
3748 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3749 return NULL;
3750
3751 if (value_lval_const (arg) != lval_computed)
3752 return NULL;
3753
3754 funcs = value_computed_funcs (arg);
3755 if (funcs->coerce_ref == NULL)
3756 return NULL;
3757
3758 return funcs->coerce_ref (arg);
3759 }
3760
3761 /* See value.h for a description. */
3762
3763 struct value *
3764 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3765 const struct type *original_type,
3766 struct value *original_value,
3767 CORE_ADDR original_value_address)
3768 {
3769 gdb_assert (original_type->is_pointer_or_reference ());
3770
3771 struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
3772 gdb::array_view<const gdb_byte> view;
3773 struct type *resolved_original_target_type
3774 = resolve_dynamic_type (original_target_type, view,
3775 original_value_address);
3776
3777 /* Re-adjust type. */
3778 deprecated_set_value_type (value, resolved_original_target_type);
3779
3780 /* Add embedding info. */
3781 set_value_enclosing_type (value, enc_type);
3782 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3783
3784 /* We may be pointing to an object of some derived type. */
3785 return value_full_object (value, NULL, 0, 0, 0);
3786 }
3787
3788 struct value *
3789 coerce_ref (struct value *arg)
3790 {
3791 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3792 struct value *retval;
3793 struct type *enc_type;
3794
3795 retval = coerce_ref_if_computed (arg);
3796 if (retval)
3797 return retval;
3798
3799 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3800 return arg;
3801
3802 enc_type = check_typedef (value_enclosing_type (arg));
3803 enc_type = TYPE_TARGET_TYPE (enc_type);
3804
3805 CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg).data ());
3806 retval = value_at_lazy (enc_type, addr);
3807 enc_type = value_type (retval);
3808 return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3809 arg, addr);
3810 }
3811
3812 struct value *
3813 coerce_array (struct value *arg)
3814 {
3815 struct type *type;
3816
3817 arg = coerce_ref (arg);
3818 type = check_typedef (value_type (arg));
3819
3820 switch (type->code ())
3821 {
3822 case TYPE_CODE_ARRAY:
3823 if (!type->is_vector () && current_language->c_style_arrays_p ())
3824 arg = value_coerce_array (arg);
3825 break;
3826 case TYPE_CODE_FUNC:
3827 arg = value_coerce_function (arg);
3828 break;
3829 }
3830 return arg;
3831 }
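/* Illustrative behavior of coerce_ref and coerce_array above for a
   C-like language (the inferior declarations are assumptions):

     // `arr' declared as `int arr[4]':
     arg = coerce_array (arr_val);    // result has type `int *'

     // `func' declared as `int func (void)':
     arg = coerce_array (func_val);   // result is a pointer to `func'

     // `ref' declared as `int &ref':
     arg = coerce_ref (ref_val);      // result is the referenced `int'

   Vector types, and languages without C-style arrays, are left
   untouched by coerce_array, as the switch above shows.  */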
3832 \f
3833
3834 /* Return the return value convention that will be used for the
3835 specified type. */
3836
3837 enum return_value_convention
3838 struct_return_convention (struct gdbarch *gdbarch,
3839 struct value *function, struct type *value_type)
3840 {
3841 enum type_code code = value_type->code ();
3842
3843 if (code == TYPE_CODE_ERROR)
3844 error (_("Function return type unknown."));
3845
3846 /* Probe the architecture for the return-value convention. */
3847 return gdbarch_return_value (gdbarch, function, value_type,
3848 NULL, NULL, NULL);
3849 }
3850
3851 /* Return true if the function returning the specified type is using
3852 the convention of returning structures in memory (passing in the
3853 address as a hidden first parameter). */
3854
3855 int
3856 using_struct_return (struct gdbarch *gdbarch,
3857 struct value *function, struct type *value_type)
3858 {
3859 if (value_type->code () == TYPE_CODE_VOID)
3860 /* A void return value is never in memory. See also corresponding
3861 code in "print_return_value". */
3862 return 0;
3863
3864 return (struct_return_convention (gdbarch, function, value_type)
3865 != RETURN_VALUE_REGISTER_CONVENTION);
3866 }
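/* Sketch of how a caller of the predicate above typically branches
   (FUNCTION and VALTYPE are assumed to describe the callee):

     if (using_struct_return (gdbarch, function, valtype))
       {
         // The ABI returns VALTYPE in memory; the caller passes the
         // address of suitable storage as a hidden first argument.
       }
     else
       {
         // The result comes back in registers (or VALTYPE is void).
       }
*/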
3867
3868 /* Set the initialized field in a value struct. */
3869
3870 void
3871 set_value_initialized (struct value *val, int status)
3872 {
3873 val->initialized = status;
3874 }
3875
3876 /* Return the initialized field in a value struct. */
3877
3878 int
3879 value_initialized (const struct value *val)
3880 {
3881 return val->initialized;
3882 }
3883
3884 /* Helper for value_fetch_lazy when the value is a bitfield. */
3885
3886 static void
3887 value_fetch_lazy_bitfield (struct value *val)
3888 {
3889 gdb_assert (value_bitsize (val) != 0);
3890
3891 /* To read a lazy bitfield, read the entire enclosing value. This
3892 prevents reading the same block of (possibly volatile) memory once
3893 per bitfield. It would be even better to read only the containing
3894 word, but we have no way to record that just specific bits of a
3895 value have been fetched. */
3896 struct value *parent = value_parent (val);
3897
3898 if (value_lazy (parent))
3899 value_fetch_lazy (parent);
3900
3901 unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3902 value_contents_for_printing (parent).data (),
3903 value_offset (val), parent);
3904 }
3905
3906 /* Helper for value_fetch_lazy when the value is in memory. */
3907
3908 static void
3909 value_fetch_lazy_memory (struct value *val)
3910 {
3911 gdb_assert (VALUE_LVAL (val) == lval_memory);
3912
3913 CORE_ADDR addr = value_address (val);
3914 struct type *type = check_typedef (value_enclosing_type (val));
3915
3916 if (TYPE_LENGTH (type))
3917 read_value_memory (val, 0, value_stack (val),
3918 addr, value_contents_all_raw (val).data (),
3919 type_length_units (type));
3920 }
3921
3922 /* Helper for value_fetch_lazy when the value is in a register. */
3923
3924 static void
3925 value_fetch_lazy_register (struct value *val)
3926 {
3927 struct frame_info *next_frame;
3928 int regnum;
3929 struct type *type = check_typedef (value_type (val));
3930 struct value *new_val = val, *mark = value_mark ();
3931
3932 /* Offsets are not supported here; lazy register values must
3933 refer to the entire register. */
3934 gdb_assert (value_offset (val) == 0);
3935
3936 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3937 {
3938 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3939
3940 next_frame = frame_find_by_id (next_frame_id);
3941 regnum = VALUE_REGNUM (new_val);
3942
3943 gdb_assert (next_frame != NULL);
3944
3945 /* Convertible register routines are used for multi-register
3946 values and for interpretation in different types
3947 (e.g. float or int from a double register). Lazy
3948 register values should have the register's natural type,
3949 so they do not apply. */
3950 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3951 regnum, type));
3952
3953 /* NEXT_FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3954 Since a "->next" operation was performed when setting
3955 this field, we do not need to perform a "next" operation
3956 again when unwinding the register. That's why
3957 frame_unwind_register_value() is called here instead of
3958 get_frame_register_value(). */
3959 new_val = frame_unwind_register_value (next_frame, regnum);
3960
3961 /* If we get another lazy lval_register value, it means the
3962 register is found by reading it from NEXT_FRAME's next frame.
3963 frame_unwind_register_value should never return a value with
3964 the frame id pointing to NEXT_FRAME. If it does, it means we
3965 either have two consecutive frames with the same frame id
3966 in the frame chain, or some code is trying to unwind
3967 behind get_prev_frame's back (e.g., a frame unwind
3968 sniffer trying to unwind), bypassing its validations. In
3969 any case, it should always be an internal error to end up
3970 in this situation. */
3971 if (VALUE_LVAL (new_val) == lval_register
3972 && value_lazy (new_val)
3973 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3974 internal_error (__FILE__, __LINE__,
3975 _("infinite loop while fetching a register"));
3976 }
3977
3978 /* If it's still lazy (for instance, a saved register on the
3979 stack), fetch it. */
3980 if (value_lazy (new_val))
3981 value_fetch_lazy (new_val);
3982
3983 /* Copy the contents and the unavailability/optimized-out
3984 meta-data from NEW_VAL to VAL. */
3985 set_value_lazy (val, 0);
3986 value_contents_copy (val, value_embedded_offset (val),
3987 new_val, value_embedded_offset (new_val),
3988 type_length_units (type));
3989
3990 if (frame_debug)
3991 {
3992 struct gdbarch *gdbarch;
3993 struct frame_info *frame;
3994 frame = frame_find_by_id (VALUE_NEXT_FRAME_ID (val));
3995 frame = get_prev_frame_always (frame);
3996 regnum = VALUE_REGNUM (val);
3997 gdbarch = get_frame_arch (frame);
3998
3999 string_file debug_file;
4000 fprintf_unfiltered (&debug_file,
4001 "(frame=%d, regnum=%d(%s), ...) ",
4002 frame_relative_level (frame), regnum,
4003 user_reg_map_regnum_to_name (gdbarch, regnum));
4004
4005 fprintf_unfiltered (&debug_file, "->");
4006 if (value_optimized_out (new_val))
4007 {
4008 fprintf_unfiltered (&debug_file, " ");
4009 val_print_optimized_out (new_val, &debug_file);
4010 }
4011 else
4012 {
4013 int i;
4014 gdb::array_view<const gdb_byte> buf = value_contents (new_val);
4015
4016 if (VALUE_LVAL (new_val) == lval_register)
4017 fprintf_unfiltered (&debug_file, " register=%d",
4018 VALUE_REGNUM (new_val));
4019 else if (VALUE_LVAL (new_val) == lval_memory)
4020 fprintf_unfiltered (&debug_file, " address=%s",
4021 paddress (gdbarch,
4022 value_address (new_val)));
4023 else
4024 fprintf_unfiltered (&debug_file, " computed");
4025
4026 fprintf_unfiltered (&debug_file, " bytes=");
4027 fprintf_unfiltered (&debug_file, "[");
4028 for (i = 0; i < register_size (gdbarch, regnum); i++)
4029 fprintf_unfiltered (&debug_file, "%02x", buf[i]);
4030 fprintf_unfiltered (&debug_file, "]");
4031 }
4032
4033 frame_debug_printf ("%s", debug_file.c_str ());
4034 }
4035
4036 /* Dispose of the intermediate values. This prevents
4037 watchpoints from trying to watch the saved frame pointer. */
4038 value_free_to_mark (mark);
4039 }
4040
4041 /* Load the actual content of a lazy value. Fetch the data from the
4042 user's process and clear the lazy flag to indicate that the data in
4043 the buffer is valid.
4044
4045 If the value is zero-length, we avoid calling read_memory, which
4046 would abort. We mark the value as fetched anyway -- all 0 bytes of
4047 it. */
4048
4049 void
4050 value_fetch_lazy (struct value *val)
4051 {
4052 gdb_assert (value_lazy (val));
4053 allocate_value_contents (val);
4054 /* A value is either lazy, or fully fetched. The
4055 availability/validity is only established as we try to fetch a
4056 value. */
4057 gdb_assert (val->optimized_out.empty ());
4058 gdb_assert (val->unavailable.empty ());
4059 if (val->is_zero)
4060 {
4061 /* Nothing. */
4062 }
4063 else if (value_bitsize (val))
4064 value_fetch_lazy_bitfield (val);
4065 else if (VALUE_LVAL (val) == lval_memory)
4066 value_fetch_lazy_memory (val);
4067 else if (VALUE_LVAL (val) == lval_register)
4068 value_fetch_lazy_register (val);
4069 else if (VALUE_LVAL (val) == lval_computed
4070 && value_computed_funcs (val)->read != NULL)
4071 value_computed_funcs (val)->read (val);
4072 else
4073 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4074
4075 set_value_lazy (val, 0);
4076 }
4077
4078 /* Implementation of the convenience function $_isvoid. */
4079
4080 static struct value *
4081 isvoid_internal_fn (struct gdbarch *gdbarch,
4082 const struct language_defn *language,
4083 void *cookie, int argc, struct value **argv)
4084 {
4085 int ret;
4086
4087 if (argc != 1)
4088 error (_("You must provide one argument for $_isvoid."));
4089
4090 ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
4091
4092 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4093 }
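/* The function above is registered in _initialize_values below via
   add_internal_function; a typical CLI session might look like:

     (gdb) print $_isvoid ((void) 0)
     $1 = 1
     (gdb) print $_isvoid (1 + 1)
     $2 = 0
*/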
4094
4095 /* Implementation of the convenience function $_creal. Extracts the
4096 real part from a complex number. */
4097
4098 static struct value *
4099 creal_internal_fn (struct gdbarch *gdbarch,
4100 const struct language_defn *language,
4101 void *cookie, int argc, struct value **argv)
4102 {
4103 if (argc != 1)
4104 error (_("You must provide one argument for $_creal."));
4105
4106 value *cval = argv[0];
4107 type *ctype = check_typedef (value_type (cval));
4108 if (ctype->code () != TYPE_CODE_COMPLEX)
4109 error (_("expected a complex number"));
4110 return value_real_part (cval);
4111 }
4112
4113 /* Implementation of the convenience function $_cimag. Extracts the
4114 imaginary part from a complex number. */
4115
4116 static struct value *
4117 cimag_internal_fn (struct gdbarch *gdbarch,
4118 const struct language_defn *language,
4119 void *cookie, int argc,
4120 struct value **argv)
4121 {
4122 if (argc != 1)
4123 error (_("You must provide one argument for $_cimag."));
4124
4125 value *cval = argv[0];
4126 type *ctype = check_typedef (value_type (cval));
4127 if (ctype->code () != TYPE_CODE_COMPLEX)
4128 error (_("expected a complex number"));
4129 return value_imaginary_part (cval);
4130 }
4131
4132 #if GDB_SELF_TEST
4133 namespace selftests
4134 {
4135
4136 /* Test the ranges_contain function. */
4137
4138 static void
4139 test_ranges_contain ()
4140 {
4141 std::vector<range> ranges;
4142 range r;
4143
4144 /* [10, 14] */
4145 r.offset = 10;
4146 r.length = 5;
4147 ranges.push_back (r);
4148
4149 /* [20, 24] */
4150 r.offset = 20;
4151 r.length = 5;
4152 ranges.push_back (r);
4153
4154 /* [2, 6] */
4155 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4156 /* [9, 13] */
4157 SELF_CHECK (ranges_contain (ranges, 9, 5));
4158 /* [10, 11] */
4159 SELF_CHECK (ranges_contain (ranges, 10, 2));
4160 /* [10, 14] */
4161 SELF_CHECK (ranges_contain (ranges, 10, 5));
4162 /* [13, 18] */
4163 SELF_CHECK (ranges_contain (ranges, 13, 6));
4164 /* [14, 18] */
4165 SELF_CHECK (ranges_contain (ranges, 14, 5));
4166 /* [15, 18] */
4167 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4168 /* [16, 19] */
4169 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4170 /* [16, 21] */
4171 SELF_CHECK (ranges_contain (ranges, 16, 6));
4172 /* [21, 21] */
4173 SELF_CHECK (ranges_contain (ranges, 21, 1));
4174 /* [21, 25] */
4175 SELF_CHECK (ranges_contain (ranges, 21, 5));
4176 /* [26, 28] */
4177 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4178 }
4179
4180 /* Check that RANGES contains the same ranges as EXPECTED. */
4181
4182 static bool
4183 check_ranges_vector (gdb::array_view<const range> ranges,
4184 gdb::array_view<const range> expected)
4185 {
4186 return ranges == expected;
4187 }
4188
4189 /* Test the insert_into_bit_range_vector function. */
4190
4191 static void
4192 test_insert_into_bit_range_vector ()
4193 {
4194 std::vector<range> ranges;
4195
4196 /* [10, 14] */
4197 {
4198 insert_into_bit_range_vector (&ranges, 10, 5);
4199 static const range expected[] = {
4200 {10, 5}
4201 };
4202 SELF_CHECK (check_ranges_vector (ranges, expected));
4203 }
4204
4205 /* [10, 14] */
4206 {
4207 insert_into_bit_range_vector (&ranges, 11, 4);
4208 static const range expected = {10, 5};
4209 SELF_CHECK (check_ranges_vector (ranges, expected));
4210 }
4211
4212 /* [10, 14] [20, 24] */
4213 {
4214 insert_into_bit_range_vector (&ranges, 20, 5);
4215 static const range expected[] = {
4216 {10, 5},
4217 {20, 5},
4218 };
4219 SELF_CHECK (check_ranges_vector (ranges, expected));
4220 }
4221
4222 /* [10, 14] [17, 24] */
4223 {
4224 insert_into_bit_range_vector (&ranges, 17, 5);
4225 static const range expected[] = {
4226 {10, 5},
4227 {17, 8},
4228 };
4229 SELF_CHECK (check_ranges_vector (ranges, expected));
4230 }
4231
4232 /* [2, 8] [10, 14] [17, 24] */
4233 {
4234 insert_into_bit_range_vector (&ranges, 2, 7);
4235 static const range expected[] = {
4236 {2, 7},
4237 {10, 5},
4238 {17, 8},
4239 };
4240 SELF_CHECK (check_ranges_vector (ranges, expected));
4241 }
4242
4243 /* [2, 14] [17, 24] */
4244 {
4245 insert_into_bit_range_vector (&ranges, 9, 1);
4246 static const range expected[] = {
4247 {2, 13},
4248 {17, 8},
4249 };
4250 SELF_CHECK (check_ranges_vector (ranges, expected));
4251 }
4252
4253 /* [2, 14] [17, 24] */
4254 {
4255 insert_into_bit_range_vector (&ranges, 9, 1);
4256 static const range expected[] = {
4257 {2, 13},
4258 {17, 8},
4259 };
4260 SELF_CHECK (check_ranges_vector (ranges, expected));
4261 }
4262
4263 /* [2, 33] */
4264 {
4265 insert_into_bit_range_vector (&ranges, 4, 30);
4266 static const range expected = {2, 32};
4267 SELF_CHECK (check_ranges_vector (ranges, expected));
4268 }
4269 }
4270
4271 } /* namespace selftests */
4272 #endif /* GDB_SELF_TEST */
4273
4274 void _initialize_values ();
4275 void
4276 _initialize_values ()
4277 {
4278 cmd_list_element *show_convenience_cmd
4279 = add_cmd ("convenience", no_class, show_convenience, _("\
4280 Debugger convenience (\"$foo\") variables and functions.\n\
4281 Convenience variables are created when you assign them values;\n\
4282 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4283 \n\
4284 A few convenience variables are given values automatically:\n\
4285 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4286 \"$__\" holds the contents of the last address examined with \"x\"."
4287 #ifdef HAVE_PYTHON
4288 "\n\n\
4289 Convenience functions are defined via the Python API."
4290 #endif
4291 ), &showlist);
4292 add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4293
4294 add_cmd ("values", no_set_class, show_values, _("\
4295 Elements of value history around item number IDX (or last ten)."),
4296 &showlist);
4297
4298 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4299 Initialize a convenience variable if necessary.\n\
4300 init-if-undefined VARIABLE = EXPRESSION\n\
4301 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4302 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4303 VARIABLE is already initialized."));
4304
4305 add_prefix_cmd ("function", no_class, function_command, _("\
4306 Placeholder command for showing help on convenience functions."),
4307 &functionlist, 0, &cmdlist);
4308
4309 add_internal_function ("_isvoid", _("\
4310 Check whether an expression is void.\n\
4311 Usage: $_isvoid (expression)\n\
4312 Return 1 if the expression is void, zero otherwise."),
4313 isvoid_internal_fn, NULL);
4314
4315 add_internal_function ("_creal", _("\
4316 Extract the real part of a complex number.\n\
4317 Usage: $_creal (expression)\n\
4318 Return the real part of a complex number; the type of the result\n\
4319 depends on the type of the complex number."),
4320 creal_internal_fn, NULL);
4321
4322 add_internal_function ("_cimag", _("\
4323 Extract the imaginary part of a complex number.\n\
4324 Usage: $_cimag (expression)\n\
4325 Return the imaginary part of a complex number; the type of the result\n\
4326 depends on the type of the complex number."),
4327 cimag_internal_fn, NULL);
4328
4329 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4330 class_support, &max_value_size, _("\
4331 Set the maximum size of a value gdb will load from the inferior."), _("\
4332 Show the maximum size of a value gdb will load from the inferior."), _("\
4333 Use this to control the maximum size, in bytes, of a value that gdb\n\
4334 will load from the inferior. Setting this value to 'unlimited'\n\
4335 disables checking.\n\
4336 Setting this does not invalidate already allocated values; it only\n\
4337 prevents future values larger than this size from being allocated."),
4338 set_max_value_size,
4339 show_max_value_size,
4340 &setlist, &showlist);
4341 set_show_commands vsize_limit
4342 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4343 &max_value_size, _("\
4344 Set the maximum number of bytes allowed in a variable-size object."), _("\
4345 Show the maximum number of bytes allowed in a variable-size object."), _("\
4346 Attempts to access an object whose size is not a compile-time constant\n\
4347 and exceeds this limit will cause an error."),
4348 NULL, NULL, &setlist, &showlist);
4349 deprecate_cmd (vsize_limit.set, "set max-value-size");
4350
4351 #if GDB_SELF_TEST
4352 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4353 selftests::register_test ("insert_into_bit_range_vector",
4354 selftests::test_insert_into_bit_range_vector);
4355 #endif
4356 }
4357
4358 /* See value.h. */
4359
4360 void
4361 finalize_values ()
4362 {
4363 all_values.clear ();
4364 }