/* Source file: gdb/dwarf2/expr.c (binutils-gdb.git).  */
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2022 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "block.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2/expr.h"
30 #include "dwarf2/loc.h"
31 #include "dwarf2/read.h"
32 #include "frame.h"
33 #include "gdbsupport/underlying.h"
34 #include "gdbarch.h"
35 #include "objfiles.h"
36
37 /* Cookie for gdbarch data. */
38
39 static struct gdbarch_data *dwarf_arch_cookie;
40
41 /* This holds gdbarch-specific types used by the DWARF expression
42 evaluator. See comments in execute_stack_op. */
43
struct dwarf_gdbarch_types
{
  /* Lazily-created signed address types for 2-, 4- and 8-byte address
     sizes, in that order; see address_type.  */
  struct type *dw_types[3];
};
48
49 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
50
51 static void *
52 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
53 {
54 struct dwarf_gdbarch_types *types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
56
57 /* The types themselves are lazily initialized. */
58
59 return types;
60 }
61
62 /* Ensure that a FRAME is defined, throw an exception otherwise. */
63
64 static void
65 ensure_have_frame (frame_info *frame, const char *op_name)
66 {
67 if (frame == nullptr)
68 throw_error (GENERIC_ERROR,
69 _("%s evaluation requires a frame."), op_name);
70 }
71
72 /* Ensure that a PER_CU is defined and throw an exception otherwise. */
73
74 static void
75 ensure_have_per_cu (dwarf2_per_cu_data *per_cu, const char* op_name)
76 {
77 if (per_cu == nullptr)
78 throw_error (GENERIC_ERROR,
79 _("%s evaluation requires a compilation unit."), op_name);
80 }
81
82 /* Return the number of bytes overlapping a contiguous chunk of N_BITS
83 bits whose first bit is located at bit offset START. */
84
85 static size_t
86 bits_to_bytes (ULONGEST start, ULONGEST n_bits)
87 {
88 return (start % HOST_CHAR_BIT + n_bits + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT;
89 }
90
91 /* See expr.h. */
92
93 CORE_ADDR
94 read_addr_from_reg (frame_info *frame, int reg)
95 {
96 struct gdbarch *gdbarch = get_frame_arch (frame);
97 int regnum = dwarf_reg_to_regnum_or_error (gdbarch, reg);
98
99 return address_from_register (regnum, frame);
100 }
101
/* Closure attached to a computed (pieced) value built from DW_OP_piece /
   DW_OP_bit_piece location descriptions.  Shared between a value and
   its copies via reference counting (see copy_pieced_value_closure and
   free_pieced_value_closure).  */

struct piece_closure
{
  /* Reference count.  */
  int refc = 0;

  /* The objfile from which this closure's expression came.  */
  dwarf2_per_objfile *per_objfile = nullptr;

  /* The CU from which this closure's expression came.  */
  dwarf2_per_cu_data *per_cu = nullptr;

  /* The pieces describing this variable.  */
  std::vector<dwarf_expr_piece> pieces;

  /* Frame ID of frame to which a register value is relative, used
     only by DWARF_VALUE_REGISTER.  */
  struct frame_id frame_id;
};
120
121 /* Allocate a closure for a value formed from separately-described
122 PIECES. */
123
124 static piece_closure *
125 allocate_piece_closure (dwarf2_per_cu_data *per_cu,
126 dwarf2_per_objfile *per_objfile,
127 std::vector<dwarf_expr_piece> &&pieces,
128 frame_info *frame)
129 {
130 piece_closure *c = new piece_closure;
131
132 c->refc = 1;
133 /* We must capture this here due to sharing of DWARF state. */
134 c->per_objfile = per_objfile;
135 c->per_cu = per_cu;
136 c->pieces = std::move (pieces);
137 if (frame == nullptr)
138 c->frame_id = null_frame_id;
139 else
140 c->frame_id = get_frame_id (frame);
141
142 for (dwarf_expr_piece &piece : c->pieces)
143 if (piece.location == DWARF_VALUE_STACK)
144 value_incref (piece.v.value);
145
146 return c;
147 }
148
149 /* Read or write a pieced value V. If FROM != NULL, operate in "write
150 mode": copy FROM into the pieces comprising V. If FROM == NULL,
151 operate in "read mode": fetch the contents of the (lazy) value V by
152 composing it from its pieces. If CHECK_OPTIMIZED is true, then no
153 reading or writing is done; instead the return value of this
154 function is true if any piece is optimized out. When
155 CHECK_OPTIMIZED is true, FROM must be nullptr. */
156
static bool
rw_pieced_value (value *v, value *from, bool check_optimized)
{
  int i;
  /* OFFSET is the current bit position within V's contents;
     MAX_OFFSET is the bit position one past the last bit to
     transfer.  */
  LONGEST offset = 0, max_offset;
  gdb_byte *v_contents;
  const gdb_byte *from_contents;
  piece_closure *c
    = (piece_closure *) value_computed_closure (v);
  gdb::byte_vector buffer;
  bool bits_big_endian = type_byte_order (value_type (v)) == BFD_ENDIAN_BIG;

  /* CHECK_OPTIMIZED implies read mode (see function comment).  */
  gdb_assert (!check_optimized || from == nullptr);
  if (from != nullptr)
    {
      from_contents = value_contents (from).data ();
      v_contents = nullptr;
    }
  else
    {
      if (value_type (v) != value_enclosing_type (v))
	internal_error (__FILE__, __LINE__,
			_("Should not be able to create a lazy value with "
			  "an enclosing type"));
      if (check_optimized)
	v_contents = nullptr;
      else
	v_contents = value_contents_raw (v).data ();
      from_contents = nullptr;
    }

  /* Number of leading bits within the pieces that do not belong to the
     (possibly offset / bitfield) value V.  */
  ULONGEST bits_to_skip = 8 * value_offset (v);
  if (value_bitsize (v))
    {
      bits_to_skip += (8 * value_offset (value_parent (v))
		       + value_bitpos (v));
      if (from != nullptr
	  && (type_byte_order (value_type (from))
	      == BFD_ENDIAN_BIG))
	{
	  /* Use the least significant bits of FROM.  */
	  max_offset = 8 * TYPE_LENGTH (value_type (from));
	  offset = max_offset - value_bitsize (v);
	}
      else
	max_offset = value_bitsize (v);
    }
  else
    max_offset = 8 * TYPE_LENGTH (value_type (v));

  /* Advance to the first non-skipped piece.  */
  for (i = 0; i < c->pieces.size () && bits_to_skip >= c->pieces[i].size; i++)
    bits_to_skip -= c->pieces[i].size;

  /* Transfer the remaining pieces one at a time; BITS_TO_SKIP is reset
     to zero after the first (partially skipped) piece.  */
  for (; i < c->pieces.size () && offset < max_offset; i++)
    {
      dwarf_expr_piece *p = &c->pieces[i];
      size_t this_size_bits, this_size;

      this_size_bits = p->size - bits_to_skip;
      if (this_size_bits > max_offset - offset)
	this_size_bits = max_offset - offset;

      switch (p->location)
	{
	case DWARF_VALUE_REGISTER:
	  {
	    frame_info *frame = frame_find_by_id (c->frame_id);
	    gdbarch *arch = get_frame_arch (frame);
	    int gdb_regnum = dwarf_reg_to_regnum_or_error (arch, p->v.regno);
	    ULONGEST reg_bits = 8 * register_size (arch, gdb_regnum);
	    int optim, unavail;

	    if (gdbarch_byte_order (arch) == BFD_ENDIAN_BIG
		&& p->offset + p->size < reg_bits)
	      {
		/* Big-endian, and we want less than full size.  */
		bits_to_skip += reg_bits - (p->offset + p->size);
	      }
	    else
	      bits_to_skip += p->offset;

	    this_size = bits_to_bytes (bits_to_skip, this_size_bits);
	    buffer.resize (this_size);

	    if (from == nullptr)
	      {
		/* Read mode.  */
		if (!get_frame_register_bytes (frame, gdb_regnum,
					       bits_to_skip / 8,
					       buffer, &optim, &unavail))
		  {
		    if (optim)
		      {
			if (check_optimized)
			  return true;
			mark_value_bits_optimized_out (v, offset,
						       this_size_bits);
		      }
		    if (unavail && !check_optimized)
		      mark_value_bits_unavailable (v, offset,
						   this_size_bits);
		    break;
		  }

		if (!check_optimized)
		  copy_bitwise (v_contents, offset,
				buffer.data (), bits_to_skip % 8,
				this_size_bits, bits_big_endian);
	      }
	    else
	      {
		/* Write mode.  */
		if (bits_to_skip % 8 != 0 || this_size_bits % 8 != 0)
		  {
		    /* Data is copied non-byte-aligned into the register.
		       Need some bits from original register value.  */
		    get_frame_register_bytes (frame, gdb_regnum,
					      bits_to_skip / 8,
					      buffer, &optim, &unavail);
		    if (optim)
		      throw_error (OPTIMIZED_OUT_ERROR,
				   _("Can't do read-modify-write to "
				     "update bitfield; containing word "
				     "has been optimized out"));
		    if (unavail)
		      throw_error (NOT_AVAILABLE_ERROR,
				   _("Can't do read-modify-write to "
				     "update bitfield; containing word "
				     "is unavailable"));
		  }

		copy_bitwise (buffer.data (), bits_to_skip % 8,
			      from_contents, offset,
			      this_size_bits, bits_big_endian);
		put_frame_register_bytes (frame, gdb_regnum,
					  bits_to_skip / 8,
					  buffer);
	      }
	  }
	  break;

	case DWARF_VALUE_MEMORY:
	  {
	    /* Memory pieces can never be optimized out.  */
	    if (check_optimized)
	      break;

	    bits_to_skip += p->offset;

	    CORE_ADDR start_addr = p->v.mem.addr + bits_to_skip / 8;

	    if (bits_to_skip % 8 == 0 && this_size_bits % 8 == 0
		&& offset % 8 == 0)
	      {
		/* Everything is byte-aligned; no buffer needed.  */
		if (from != nullptr)
		  write_memory_with_notification (start_addr,
						  (from_contents
						   + offset / 8),
						  this_size_bits / 8);
		else
		  read_value_memory (v, offset,
				     p->v.mem.in_stack_memory,
				     p->v.mem.addr + bits_to_skip / 8,
				     v_contents + offset / 8,
				     this_size_bits / 8);
		break;
	      }

	    this_size = bits_to_bytes (bits_to_skip, this_size_bits);
	    buffer.resize (this_size);

	    if (from == nullptr)
	      {
		/* Read mode.  */
		read_value_memory (v, offset,
				   p->v.mem.in_stack_memory,
				   p->v.mem.addr + bits_to_skip / 8,
				   buffer.data (), this_size);
		copy_bitwise (v_contents, offset,
			      buffer.data (), bits_to_skip % 8,
			      this_size_bits, bits_big_endian);
	      }
	    else
	      {
		/* Write mode.  */
		if (bits_to_skip % 8 != 0 || this_size_bits % 8 != 0)
		  {
		    if (this_size <= 8)
		      {
			/* Perform a single read for small sizes.  */
			read_memory (start_addr, buffer.data (),
				     this_size);
		      }
		    else
		      {
			/* Only the first and last bytes can possibly have
			   any bits reused.  */
			read_memory (start_addr, buffer.data (), 1);
			read_memory (start_addr + this_size - 1,
				     &buffer[this_size - 1], 1);
		      }
		  }

		copy_bitwise (buffer.data (), bits_to_skip % 8,
			      from_contents, offset,
			      this_size_bits, bits_big_endian);
		write_memory_with_notification (start_addr,
						buffer.data (),
						this_size);
	      }
	  }
	  break;

	case DWARF_VALUE_STACK:
	  {
	    if (check_optimized)
	      break;

	    /* Stack values are read-only; writes mark the bits as
	       optimized out rather than erroring.  */
	    if (from != nullptr)
	      {
		mark_value_bits_optimized_out (v, offset, this_size_bits);
		break;
	      }

	    gdbarch *objfile_gdbarch = c->per_objfile->objfile->arch ();
	    ULONGEST stack_value_size_bits
	      = 8 * TYPE_LENGTH (value_type (p->v.value));

	    /* Use zeroes if piece reaches beyond stack value.  */
	    if (p->offset + p->size > stack_value_size_bits)
	      break;

	    /* Piece is anchored at least significant bit end.  */
	    if (gdbarch_byte_order (objfile_gdbarch) == BFD_ENDIAN_BIG)
	      bits_to_skip += stack_value_size_bits - p->offset - p->size;
	    else
	      bits_to_skip += p->offset;

	    copy_bitwise (v_contents, offset,
			  value_contents_all (p->v.value).data (),
			  bits_to_skip,
			  this_size_bits, bits_big_endian);
	  }
	  break;

	case DWARF_VALUE_LITERAL:
	  {
	    if (check_optimized)
	      break;

	    /* Literals are read-only, like stack values above.  */
	    if (from != nullptr)
	      {
		mark_value_bits_optimized_out (v, offset, this_size_bits);
		break;
	      }

	    ULONGEST literal_size_bits = 8 * p->v.literal.length;
	    size_t n = this_size_bits;

	    /* Cut off at the end of the implicit value.  */
	    bits_to_skip += p->offset;
	    if (bits_to_skip >= literal_size_bits)
	      break;
	    if (n > literal_size_bits - bits_to_skip)
	      n = literal_size_bits - bits_to_skip;

	    copy_bitwise (v_contents, offset,
			  p->v.literal.data, bits_to_skip,
			  n, bits_big_endian);
	  }
	  break;

	case DWARF_VALUE_IMPLICIT_POINTER:
	  if (from != nullptr)
	    {
	      mark_value_bits_optimized_out (v, offset, this_size_bits);
	      break;
	    }

	  /* These bits show up as zeros -- but do not cause the value to
	     be considered optimized-out.  */
	  break;

	case DWARF_VALUE_OPTIMIZED_OUT:
	  if (check_optimized)
	    return true;
	  mark_value_bits_optimized_out (v, offset, this_size_bits);
	  break;

	default:
	  internal_error (__FILE__, __LINE__, _("invalid location type"));
	}

      offset += this_size_bits;
      bits_to_skip = 0;
    }

  return false;
}
457
/* The lval_funcs read method: fetch the contents of the (lazy) pieced
   value V by composing it from its pieces.  */

static void
read_pieced_value (value *v)
{
  rw_pieced_value (v, nullptr, false);
}
463
/* The lval_funcs write method: scatter the contents of FROM into the
   pieces comprising TO.  */

static void
write_pieced_value (value *to, value *from)
{
  rw_pieced_value (to, from, false);
}
469
/* The lval_funcs is_optimized_out method: true if any piece of V is
   optimized out, without reading or writing anything.  */

static bool
is_optimized_out_pieced_value (value *v)
{
  return rw_pieced_value (v, nullptr, true);
}
475
476 /* An implementation of an lval_funcs method to see whether a value is
477 a synthetic pointer. */
478
static int
check_pieced_synthetic_pointer (const value *value, LONGEST bit_offset,
				int bit_length)
{
  piece_closure *c = (piece_closure *) value_computed_closure (value);
  int i;

  /* Account for the value's own offset (and bit position, for
     bitfields) so BIT_OFFSET is relative to the start of the
     pieces.  */
  bit_offset += 8 * value_offset (value);
  if (value_bitsize (value))
    bit_offset += value_bitpos (value);

  /* Walk the pieces overlapping [BIT_OFFSET, BIT_OFFSET + BIT_LENGTH);
     every one of them must be an implicit pointer.  */
  for (i = 0; i < c->pieces.size () && bit_length > 0; i++)
    {
      dwarf_expr_piece *p = &c->pieces[i];
      size_t this_size_bits = p->size;

      if (bit_offset > 0)
	{
	  /* Skip pieces wholly before the requested range.  */
	  if (bit_offset >= this_size_bits)
	    {
	      bit_offset -= this_size_bits;
	      continue;
	    }

	  bit_length -= this_size_bits - bit_offset;
	  bit_offset = 0;
	}
      else
	bit_length -= this_size_bits;

      if (p->location != DWARF_VALUE_IMPLICIT_POINTER)
	return 0;
    }

  return 1;
}
515
516 /* An implementation of an lval_funcs method to indirect through a
517 pointer. This handles the synthetic pointer case when needed. */
518
static value *
indirect_pieced_value (value *value)
{
  piece_closure *c
    = (piece_closure *) value_computed_closure (value);
  int i;
  dwarf_expr_piece *piece = NULL;

  /* Only pointer-typed values can be indirected here.  */
  struct type *type = check_typedef (value_type (value));
  if (type->code () != TYPE_CODE_PTR)
    return NULL;

  int bit_length = 8 * TYPE_LENGTH (type);
  LONGEST bit_offset = 8 * value_offset (value);
  if (value_bitsize (value))
    bit_offset += value_bitpos (value);

  /* Locate the single implicit-pointer piece covering the pointer's
     bits; bail out if the covering piece is not an implicit pointer or
     if the pointer spans more than one piece.  */
  for (i = 0; i < c->pieces.size () && bit_length > 0; i++)
    {
      dwarf_expr_piece *p = &c->pieces[i];
      size_t this_size_bits = p->size;

      if (bit_offset > 0)
	{
	  if (bit_offset >= this_size_bits)
	    {
	      bit_offset -= this_size_bits;
	      continue;
	    }

	  bit_length -= this_size_bits - bit_offset;
	  bit_offset = 0;
	}
      else
	bit_length -= this_size_bits;

      if (p->location != DWARF_VALUE_IMPLICIT_POINTER)
	return NULL;

      if (bit_length != 0)
	error (_("Invalid use of DW_OP_implicit_pointer"));

      piece = p;
      break;
    }

  gdb_assert (piece != NULL && c->per_cu != nullptr);
  frame_info *frame = get_selected_frame (_("No frame selected."));

  /* This is an offset requested by GDB, such as value subscripts.
     However, due to how synthetic pointers are implemented, this is
     always presented to us as a pointer type.  This means we have to
     sign-extend it manually as appropriate.  Use raw
     extract_signed_integer directly rather than value_as_address and
     sign extend afterwards on architectures that would need it
     (mostly everywhere except MIPS, which has signed addresses) as
     the later would go through gdbarch_pointer_to_address and thus
     return a CORE_ADDR with high bits set on architectures that
     encode address spaces and other things in CORE_ADDR.  */
  bfd_endian byte_order = gdbarch_byte_order (get_frame_arch (frame));
  LONGEST byte_offset
    = extract_signed_integer (value_contents (value), byte_order);
  byte_offset += piece->v.ptr.offset;

  return indirect_synthetic_pointer (piece->v.ptr.die_sect_off,
				     byte_offset, c->per_cu,
				     c->per_objfile, frame, type);
}
587
588 /* Implementation of the coerce_ref method of lval_funcs for synthetic C++
589 references. */
590
591 static value *
592 coerce_pieced_ref (const value *value)
593 {
594 struct type *type = check_typedef (value_type (value));
595
596 if (value_bits_synthetic_pointer (value, value_embedded_offset (value),
597 TARGET_CHAR_BIT * TYPE_LENGTH (type)))
598 {
599 const piece_closure *closure
600 = (piece_closure *) value_computed_closure (value);
601 frame_info *frame
602 = get_selected_frame (_("No frame selected."));
603
604 /* gdb represents synthetic pointers as pieced values with a single
605 piece. */
606 gdb_assert (closure != NULL);
607 gdb_assert (closure->pieces.size () == 1);
608
609 return indirect_synthetic_pointer
610 (closure->pieces[0].v.ptr.die_sect_off,
611 closure->pieces[0].v.ptr.offset,
612 closure->per_cu, closure->per_objfile, frame, type);
613 }
614 else
615 {
616 /* Else: not a synthetic reference; do nothing. */
617 return NULL;
618 }
619 }
620
/* The lval_funcs copy_closure method: share the closure between values
   by bumping its reference count.  */

static void *
copy_pieced_value_closure (const value *v)
{
  piece_closure *c = (piece_closure *) value_computed_closure (v);

  ++c->refc;
  return c;
}
629
630 static void
631 free_pieced_value_closure (value *v)
632 {
633 piece_closure *c = (piece_closure *) value_computed_closure (v);
634
635 --c->refc;
636 if (c->refc == 0)
637 {
638 for (dwarf_expr_piece &p : c->pieces)
639 if (p.location == DWARF_VALUE_STACK)
640 value_decref (p.v.value);
641
642 delete c;
643 }
644 }
645
646 /* Functions for accessing a variable described by DW_OP_piece. */
647 static const struct lval_funcs pieced_value_funcs = {
648 read_pieced_value,
649 write_pieced_value,
650 is_optimized_out_pieced_value,
651 indirect_pieced_value,
652 coerce_pieced_ref,
653 check_pieced_synthetic_pointer,
654 copy_pieced_value_closure,
655 free_pieced_value_closure
656 };
657
/* Given section offset SECT_OFF and compilation unit data PER_CU,
   execute the "variable value" operation on the DIE found at SECT_OFF,
   returning its value.  */

static value *
sect_variable_value (sect_offset sect_off,
		     dwarf2_per_cu_data *per_cu,
		     dwarf2_per_objfile *per_objfile)
{
  const char *var_name = nullptr;
  struct type *die_type
    = dwarf2_fetch_die_type_sect_off (sect_off, per_cu, per_objfile,
				      &var_name);

  if (die_type == NULL)
    error (_("Bad DW_OP_GNU_variable_value DIE."));

  /* Note: Things still work when the following test is removed.  This
     test and error is here to conform to the proposed specification.  */
  if (die_type->code () != TYPE_CODE_INT
      && die_type->code () != TYPE_CODE_ENUM
      && die_type->code () != TYPE_CODE_RANGE
      && die_type->code () != TYPE_CODE_PTR)
    error (_("Type of DW_OP_GNU_variable_value DIE must be an integer or pointer."));

  /* If the DIE has a name, try computing the variable's value directly
     from the symbol table first.  */
  if (var_name != nullptr)
    {
      value *result = compute_var_value (var_name);
      if (result != nullptr)
	return result;
    }

  /* Otherwise treat the DIE as the target of a synthetic pointer and
     indirect through it.  */
  struct type *type = lookup_pointer_type (die_type);
  frame_info *frame = get_selected_frame (_("No frame selected."));
  return indirect_synthetic_pointer (sect_off, 0, per_cu, per_objfile, frame,
				     type, true);
}
695
696 /* Return the type used for DWARF operations where the type is
697 unspecified in the DWARF spec. Only certain sizes are
698 supported. */
699
700 struct type *
701 dwarf_expr_context::address_type () const
702 {
703 gdbarch *arch = this->m_per_objfile->objfile->arch ();
704 dwarf_gdbarch_types *types
705 = (dwarf_gdbarch_types *) gdbarch_data (arch, dwarf_arch_cookie);
706 int ndx;
707
708 if (this->m_addr_size == 2)
709 ndx = 0;
710 else if (this->m_addr_size == 4)
711 ndx = 1;
712 else if (this->m_addr_size == 8)
713 ndx = 2;
714 else
715 error (_("Unsupported address size in DWARF expressions: %d bits"),
716 8 * this->m_addr_size);
717
718 if (types->dw_types[ndx] == NULL)
719 types->dw_types[ndx]
720 = arch_integer_type (arch, 8 * this->m_addr_size,
721 0, "<signed DWARF address type>");
722
723 return types->dw_types[ndx];
724 }
725
726 /* Create a new context for the expression evaluator. */
727
/* Create a new context for the expression evaluator.  PER_OBJFILE is
   the objfile the expression comes from; ADDR_SIZE is the address size
   in bytes used when interpreting address-sized operands (see
   address_type).  */

dwarf_expr_context::dwarf_expr_context (dwarf2_per_objfile *per_objfile,
					int addr_size)
: m_addr_size (addr_size),
  m_per_objfile (per_objfile)
{
}
734
735 /* Push VALUE onto the stack. */
736
/* Push VALUE onto the evaluation stack.  IN_STACK_MEMORY records
   whether the value is known to live in target stack memory (later
   retrieved via fetch_in_stack_memory).  */

void
dwarf_expr_context::push (struct value *value, bool in_stack_memory)
{
  this->m_stack.emplace_back (value, in_stack_memory);
}
742
743 /* Push VALUE onto the stack. */
744
/* Push the address VALUE onto the stack, wrapped in the default signed
   DWARF address type for this context (see address_type).  */

void
dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
{
  push (value_from_ulongest (address_type (), value), in_stack_memory);
}
750
751 /* Pop the top item off of the stack. */
752
/* Pop the top item off of the stack; error on underflow.  */

void
dwarf_expr_context::pop ()
{
  if (this->m_stack.empty ())
    error (_("dwarf expression stack underflow"));

  this->m_stack.pop_back ();
}
761
762 /* Retrieve the N'th item on the stack. */
763
struct value *
dwarf_expr_context::fetch (int n)
{
  /* N counts down from the top of the stack: N == 0 is the most
     recently pushed value.  */
  if (this->m_stack.size () <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %zu elements on it."),
	   n, this->m_stack.size ());
  return this->m_stack[this->m_stack.size () - (1 + n)].value;
}
773
774 /* See expr.h. */
775
/* Return, via START/LENGTH, the DWARF block computing the frame base of
   the current frame's (non-inlined) function, for DW_OP_fbreg.  */

void
dwarf_expr_context::get_frame_base (const gdb_byte **start,
				    size_t * length)
{
  ensure_have_frame (this->m_frame, "DW_OP_fbreg");

  const block *bl = get_frame_block (this->m_frame, NULL);

  if (bl == NULL)
    error (_("frame address is not available."));

  /* Use block_linkage_function, which returns a real (not inlined)
     function, instead of get_frame_function, which may return an
     inlined function.  */
  symbol *framefunc = block_linkage_function (bl);

  /* If we found a frame-relative symbol then it was certainly within
     some function associated with a frame.  If we can't find the frame,
     something has gone wrong.  */
  gdb_assert (framefunc != NULL);

  func_get_frame_base_dwarf_block (framefunc,
				   get_frame_address_in_block (this->m_frame),
				   start, length);
}
801
802 /* See expr.h. */
803
804 struct type *
805 dwarf_expr_context::get_base_type (cu_offset die_cu_off)
806 {
807 if (this->m_per_cu == nullptr)
808 return builtin_type (this->m_per_objfile->objfile->arch ())->builtin_int;
809
810 struct type *result = dwarf2_get_die_type (die_cu_off, this->m_per_cu,
811 this->m_per_objfile);
812
813 if (result == nullptr)
814 error (_("Could not find type for operation"));
815
816 return result;
817 }
818
819 /* See expr.h. */
820
/* Execute DW_OP_call: evaluate the location expression of the DIE at
   DIE_CU_OFF within the current CU, on the current stack.  */

void
dwarf_expr_context::dwarf_call (cu_offset die_cu_off)
{
  ensure_have_per_cu (this->m_per_cu, "DW_OP_call");

  frame_info *frame = this->m_frame;

  /* The PC is computed lazily; a frame is only required if the callee's
     location actually depends on it.  */
  auto get_pc_from_frame = [frame] ()
    {
      ensure_have_frame (frame, "DW_OP_call");
      return get_frame_address_in_block (frame);
    };

  dwarf2_locexpr_baton block
    = dwarf2_fetch_die_loc_cu_off (die_cu_off, this->m_per_cu,
				   this->m_per_objfile, get_pc_from_frame);

  /* DW_OP_call_ref is currently not supported.  */
  gdb_assert (block.per_cu == this->m_per_cu);

  this->eval (block.data, block.size);
}
843
844 /* See expr.h. */
845
846 void
847 dwarf_expr_context::read_mem (gdb_byte *buf, CORE_ADDR addr,
848 size_t length)
849 {
850 if (length == 0)
851 return;
852
853 /* Prefer the passed-in memory, if it exists. */
854 if (this->m_addr_info != nullptr)
855 {
856 CORE_ADDR offset = addr - this->m_addr_info->addr;
857
858 if (offset < this->m_addr_info->valaddr.size ()
859 && offset + length <= this->m_addr_info->valaddr.size ())
860 {
861 memcpy (buf, this->m_addr_info->valaddr.data (), length);
862 return;
863 }
864 }
865
866 read_memory (addr, buf, length);
867 }
868
869 /* See expr.h. */
870
/* Execute DW_OP_entry_value: evaluate, in the CALLER's context, the
   expression describing parameter KIND/KIND_U at the call site, and
   leave the result on the stack.  DEREF_SIZE selects between the
   parameter's value expression (-1) and its data-value expression.  */

void
dwarf_expr_context::push_dwarf_reg_entry_value (call_site_parameter_kind kind,
						call_site_parameter_u kind_u,
						int deref_size)
{
  ensure_have_per_cu (this->m_per_cu, "DW_OP_entry_value");
  ensure_have_frame (this->m_frame, "DW_OP_entry_value");

  dwarf2_per_cu_data *caller_per_cu;
  dwarf2_per_objfile *caller_per_objfile;
  frame_info *caller_frame = get_prev_frame (this->m_frame);
  call_site_parameter *parameter
    = dwarf_expr_reg_to_entry_parameter (this->m_frame, kind, kind_u,
					 &caller_per_cu,
					 &caller_per_objfile);
  const gdb_byte *data_src
    = deref_size == -1 ? parameter->value : parameter->data_value;
  size_t size
    = deref_size == -1 ? parameter->value_size : parameter->data_value_size;

  /* DEREF_SIZE size is not verified here.  */
  if (data_src == nullptr)
    throw_error (NO_ENTRY_VALUE_ERROR,
		 _("Cannot resolve DW_AT_call_data_value"));

  /* We are about to evaluate an expression in the context of the caller
     of the current frame.  This evaluation context may be different from
     the current (callee's) context), so temporarily set the caller's context.

     It is possible for the caller to be from a different objfile from the
     callee if the call is made through a function pointer.  */
  scoped_restore save_frame = make_scoped_restore (&this->m_frame,
						   caller_frame);
  scoped_restore save_per_cu = make_scoped_restore (&this->m_per_cu,
						    caller_per_cu);
  scoped_restore save_addr_info = make_scoped_restore (&this->m_addr_info,
						       nullptr);
  scoped_restore save_per_objfile = make_scoped_restore (&this->m_per_objfile,
							 caller_per_objfile);

  /* The caller's CU may use a different address size.  */
  scoped_restore save_addr_size = make_scoped_restore (&this->m_addr_size);
  this->m_addr_size = this->m_per_cu->addr_size ();

  this->eval (data_src, size);
}
916
917 /* See expr.h. */
918
/* Convert the result of evaluation into a GDB value.  TYPE is the
   result's type (defaults to the DWARF address type), SUBOBJ_TYPE /
   SUBOBJ_OFFSET select a sub-object of the result, and AS_LVAL chooses
   between a location description and an rvalue.  */

value *
dwarf_expr_context::fetch_result (struct type *type, struct type *subobj_type,
				  LONGEST subobj_offset, bool as_lval)
{
  value *retval = nullptr;
  gdbarch *arch = this->m_per_objfile->objfile->arch ();

  if (type == nullptr)
    type = address_type ();

  if (subobj_type == nullptr)
    subobj_type = type;

  /* A composite (pieced) result becomes a computed value backed by
     pieced_value_funcs.  */
  if (this->m_pieces.size () > 0)
    {
      ULONGEST bit_size = 0;

      for (dwarf_expr_piece &piece : this->m_pieces)
	bit_size += piece.size;
      /* Complain if the expression is larger than the size of the
	 outer type.  */
      if (bit_size > 8 * TYPE_LENGTH (type))
	invalid_synthetic_pointer ();

      piece_closure *c
	= allocate_piece_closure (this->m_per_cu, this->m_per_objfile,
				  std::move (this->m_pieces), this->m_frame);
      retval = allocate_computed_value (subobj_type,
					&pieced_value_funcs, c);
      set_value_offset (retval, subobj_offset);
    }
  else
    {
      /* If AS_LVAL is false, means that the implicit conversion
	 from a location description to value is expected.  */
      if (!as_lval)
	this->m_location = DWARF_VALUE_STACK;

      switch (this->m_location)
	{
	case DWARF_VALUE_REGISTER:
	  {
	    gdbarch *f_arch = get_frame_arch (this->m_frame);
	    int dwarf_regnum
	      = longest_to_int (value_as_long (this->fetch (0)));
	    int gdb_regnum = dwarf_reg_to_regnum_or_error (f_arch,
							   dwarf_regnum);

	    if (subobj_offset != 0)
	      error (_("cannot use offset on synthetic pointer to register"));

	    gdb_assert (this->m_frame != NULL);

	    retval = value_from_register (subobj_type, gdb_regnum,
					  this->m_frame);
	    if (value_optimized_out (retval))
	      {
		/* This means the register has undefined value / was
		   not saved.  As we're computing the location of some
		   variable etc. in the program, not a value for
		   inspecting a register ($pc, $sp, etc.), return a
		   generic optimized out value instead, so that we show
		   <optimized out> instead of <not saved>.  */
		value *tmp = allocate_value (subobj_type);
		value_contents_copy (tmp, 0, retval, 0,
				     TYPE_LENGTH (subobj_type));
		retval = tmp;
	      }
	  }
	  break;

	case DWARF_VALUE_MEMORY:
	  {
	    struct type *ptr_type;
	    CORE_ADDR address = this->fetch_address (0);
	    bool in_stack_memory = this->fetch_in_stack_memory (0);

	    /* DW_OP_deref_size (and possibly other operations too) may
	       create a pointer instead of an address.  Ideally, the
	       pointer to address conversion would be performed as part
	       of those operations, but the type of the object to
	       which the address refers is not known at the time of
	       the operation.  Therefore, we do the conversion here
	       since the type is readily available.  */

	    switch (subobj_type->code ())
	      {
	      case TYPE_CODE_FUNC:
	      case TYPE_CODE_METHOD:
		ptr_type = builtin_type (arch)->builtin_func_ptr;
		break;
	      default:
		ptr_type = builtin_type (arch)->builtin_data_ptr;
		break;
	      }
	    address = value_as_address (value_from_pointer (ptr_type, address));

	    retval = value_at_lazy (subobj_type,
				    address + subobj_offset);
	    if (in_stack_memory)
	      set_value_stack (retval, 1);
	  }
	  break;

	case DWARF_VALUE_STACK:
	  {
	    value *val = this->fetch (0);
	    size_t n = TYPE_LENGTH (value_type (val));
	    size_t len = TYPE_LENGTH (subobj_type);
	    size_t max = TYPE_LENGTH (type);

	    if (subobj_offset + len > max)
	      invalid_synthetic_pointer ();

	    retval = allocate_value (subobj_type);

	    /* The given offset is relative to the actual object.  */
	    if (gdbarch_byte_order (arch) == BFD_ENDIAN_BIG)
	      subobj_offset += n - max;

	    copy (value_contents_all (val).slice (subobj_offset, len),
		  value_contents_raw (retval));
	  }
	  break;

	case DWARF_VALUE_LITERAL:
	  {
	    size_t n = TYPE_LENGTH (subobj_type);

	    if (subobj_offset + n > this->m_len)
	      invalid_synthetic_pointer ();

	    retval = allocate_value (subobj_type);
	    bfd_byte *contents = value_contents_raw (retval).data ();
	    memcpy (contents, this->m_data + subobj_offset, n);
	  }
	  break;

	case DWARF_VALUE_OPTIMIZED_OUT:
	  retval = allocate_optimized_out_value (subobj_type);
	  break;

	/* DWARF_VALUE_IMPLICIT_POINTER was converted to a pieced
	   operation by execute_stack_op.  */
	case DWARF_VALUE_IMPLICIT_POINTER:
	  /* DWARF_VALUE_OPTIMIZED_OUT can't occur in this context --
	     it can only be encountered when making a piece.  */
	default:
	  internal_error (__FILE__, __LINE__, _("invalid location type"));
	}
    }

  set_value_initialized (retval, this->m_initialized);

  return retval;
}
1075
1076 /* See expr.h. */
1077
/* Evaluate the expression at ADDR (LEN bytes) in the context given by
   PER_CU, FRAME and ADDR_INFO, then convert the result via
   fetch_result (see its comment for TYPE / SUBOBJ_TYPE /
   SUBOBJ_OFFSET / AS_LVAL).  */

value *
dwarf_expr_context::evaluate (const gdb_byte *addr, size_t len, bool as_lval,
			      dwarf2_per_cu_data *per_cu, frame_info *frame,
			      const struct property_addr_info *addr_info,
			      struct type *type, struct type *subobj_type,
			      LONGEST subobj_offset)
{
  /* Record the evaluation context consulted by the individual
     operations during eval.  */
  this->m_per_cu = per_cu;
  this->m_frame = frame;
  this->m_addr_info = addr_info;

  eval (addr, len);
  return fetch_result (type, subobj_type, subobj_offset, as_lval);
}
1092
1093 /* Require that TYPE be an integral type; throw an exception if not. */
1094
1095 static void
1096 dwarf_require_integral (struct type *type)
1097 {
1098 if (type->code () != TYPE_CODE_INT
1099 && type->code () != TYPE_CODE_CHAR
1100 && type->code () != TYPE_CODE_BOOL)
1101 error (_("integral type expected in DWARF expression"));
1102 }
1103
1104 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
1105 type. */
1106
1107 static struct type *
1108 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
1109 {
1110 switch (TYPE_LENGTH (type))
1111 {
1112 case 1:
1113 return builtin_type (gdbarch)->builtin_uint8;
1114 case 2:
1115 return builtin_type (gdbarch)->builtin_uint16;
1116 case 4:
1117 return builtin_type (gdbarch)->builtin_uint32;
1118 case 8:
1119 return builtin_type (gdbarch)->builtin_uint64;
1120 default:
1121 error (_("no unsigned variant found for type, while evaluating "
1122 "DWARF expression"));
1123 }
1124 }
1125
1126 /* Return the signed form of TYPE. TYPE is necessarily an integral
1127 type. */
1128
1129 static struct type *
1130 get_signed_type (struct gdbarch *gdbarch, struct type *type)
1131 {
1132 switch (TYPE_LENGTH (type))
1133 {
1134 case 1:
1135 return builtin_type (gdbarch)->builtin_int8;
1136 case 2:
1137 return builtin_type (gdbarch)->builtin_int16;
1138 case 4:
1139 return builtin_type (gdbarch)->builtin_int32;
1140 case 8:
1141 return builtin_type (gdbarch)->builtin_int64;
1142 default:
1143 error (_("no signed variant found for type, while evaluating "
1144 "DWARF expression"));
1145 }
1146 }
1147
1148 /* Retrieve the N'th item on the stack, converted to an address. */
1149
1150 CORE_ADDR
1151 dwarf_expr_context::fetch_address (int n)
1152 {
1153 gdbarch *arch = this->m_per_objfile->objfile->arch ();
1154 value *result_val = fetch (n);
1155 bfd_endian byte_order = gdbarch_byte_order (arch);
1156 ULONGEST result;
1157
1158 dwarf_require_integral (value_type (result_val));
1159 result = extract_unsigned_integer (value_contents (result_val), byte_order);
1160
1161 /* For most architectures, calling extract_unsigned_integer() alone
1162 is sufficient for extracting an address. However, some
1163 architectures (e.g. MIPS) use signed addresses and using
1164 extract_unsigned_integer() will not produce a correct
1165 result. Make sure we invoke gdbarch_integer_to_address()
1166 for those architectures which require it. */
1167 if (gdbarch_integer_to_address_p (arch))
1168 {
1169 gdb_byte *buf = (gdb_byte *) alloca (this->m_addr_size);
1170 type *int_type = get_unsigned_type (arch,
1171 value_type (result_val));
1172
1173 store_unsigned_integer (buf, this->m_addr_size, byte_order, result);
1174 return gdbarch_integer_to_address (arch, int_type, buf);
1175 }
1176
1177 return (CORE_ADDR) result;
1178 }
1179
1180 /* Retrieve the in_stack_memory flag of the N'th item on the stack. */
1181
1182 bool
1183 dwarf_expr_context::fetch_in_stack_memory (int n)
1184 {
1185 if (this->m_stack.size () <= n)
1186 error (_("Asked for position %d of stack, "
1187 "stack only has %zu elements on it."),
1188 n, this->m_stack.size ());
1189 return this->m_stack[this->m_stack.size () - (1 + n)].in_stack_memory;
1190 }
1191
1192 /* Return true if the expression stack is empty. */
1193
1194 bool
1195 dwarf_expr_context::stack_empty_p () const
1196 {
1197 return m_stack.empty ();
1198 }
1199
1200 /* Add a new piece to the dwarf_expr_context's piece list. */
1201 void
1202 dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
1203 {
1204 this->m_pieces.emplace_back ();
1205 dwarf_expr_piece &p = this->m_pieces.back ();
1206
1207 p.location = this->m_location;
1208 p.size = size;
1209 p.offset = offset;
1210
1211 if (p.location == DWARF_VALUE_LITERAL)
1212 {
1213 p.v.literal.data = this->m_data;
1214 p.v.literal.length = this->m_len;
1215 }
1216 else if (stack_empty_p ())
1217 {
1218 p.location = DWARF_VALUE_OPTIMIZED_OUT;
1219 /* Also reset the context's location, for our callers. This is
1220 a somewhat strange approach, but this lets us avoid setting
1221 the location to DWARF_VALUE_MEMORY in all the individual
1222 cases in the evaluator. */
1223 this->m_location = DWARF_VALUE_OPTIMIZED_OUT;
1224 }
1225 else if (p.location == DWARF_VALUE_MEMORY)
1226 {
1227 p.v.mem.addr = fetch_address (0);
1228 p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
1229 }
1230 else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
1231 {
1232 p.v.ptr.die_sect_off = (sect_offset) this->m_len;
1233 p.v.ptr.offset = value_as_long (fetch (0));
1234 }
1235 else if (p.location == DWARF_VALUE_REGISTER)
1236 p.v.regno = value_as_long (fetch (0));
1237 else
1238 {
1239 p.v.value = fetch (0);
1240 }
1241 }
1242
1243 /* Evaluate the expression at ADDR (LEN bytes long). */
1244
1245 void
1246 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
1247 {
1248 int old_recursion_depth = this->m_recursion_depth;
1249
1250 execute_stack_op (addr, addr + len);
1251
1252 /* RECURSION_DEPTH becomes invalid if an exception was thrown here. */
1253
1254 gdb_assert (this->m_recursion_depth == old_recursion_depth);
1255 }
1256
1257 /* Helper to read a uleb128 value or throw an error. */
1258
1259 const gdb_byte *
1260 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
1261 uint64_t *r)
1262 {
1263 buf = gdb_read_uleb128 (buf, buf_end, r);
1264 if (buf == NULL)
1265 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
1266 return buf;
1267 }
1268
1269 /* Helper to read a sleb128 value or throw an error. */
1270
1271 const gdb_byte *
1272 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
1273 int64_t *r)
1274 {
1275 buf = gdb_read_sleb128 (buf, buf_end, r);
1276 if (buf == NULL)
1277 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
1278 return buf;
1279 }
1280
1281 const gdb_byte *
1282 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
1283 {
1284 buf = gdb_skip_leb128 (buf, buf_end);
1285 if (buf == NULL)
1286 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
1287 return buf;
1288 }
1289 \f
1290
1291 /* Check that the current operator is either at the end of an
1292 expression, or that it is followed by a composition operator or by
1293 DW_OP_GNU_uninit (which should terminate the expression). */
1294
1295 void
1296 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
1297 const char *op_name)
1298 {
1299 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
1300 && *op_ptr != DW_OP_GNU_uninit)
1301 error (_("DWARF-2 expression error: `%s' operations must be "
1302 "used either alone or in conjunction with DW_OP_piece "
1303 "or DW_OP_bit_piece."),
1304 op_name);
1305 }
1306
1307 /* Return true iff the types T1 and T2 are "the same". This only does
1308 checks that might reasonably be needed to compare DWARF base
1309 types. */
1310
1311 static int
1312 base_types_equal_p (struct type *t1, struct type *t2)
1313 {
1314 if (t1->code () != t2->code ())
1315 return 0;
1316 if (t1->is_unsigned () != t2->is_unsigned ())
1317 return 0;
1318 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
1319 }
1320
1321 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
1322 DWARF register number. Otherwise return -1. */
1323
1324 int
1325 dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
1326 {
1327 uint64_t dwarf_reg;
1328
1329 if (buf_end <= buf)
1330 return -1;
1331 if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
1332 {
1333 if (buf_end - buf != 1)
1334 return -1;
1335 return *buf - DW_OP_reg0;
1336 }
1337
1338 if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
1339 {
1340 buf++;
1341 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1342 if (buf == NULL)
1343 return -1;
1344 buf = gdb_skip_leb128 (buf, buf_end);
1345 if (buf == NULL)
1346 return -1;
1347 }
1348 else if (*buf == DW_OP_regx)
1349 {
1350 buf++;
1351 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1352 if (buf == NULL)
1353 return -1;
1354 }
1355 else
1356 return -1;
1357 if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
1358 return -1;
1359 return dwarf_reg;
1360 }
1361
1362 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
1363 DW_OP_deref* return the DWARF register number. Otherwise return -1.
1364 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
1365 size from DW_OP_deref_size. */
1366
1367 int
1368 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
1369 CORE_ADDR *deref_size_return)
1370 {
1371 uint64_t dwarf_reg;
1372 int64_t offset;
1373
1374 if (buf_end <= buf)
1375 return -1;
1376
1377 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
1378 {
1379 dwarf_reg = *buf - DW_OP_breg0;
1380 buf++;
1381 if (buf >= buf_end)
1382 return -1;
1383 }
1384 else if (*buf == DW_OP_bregx)
1385 {
1386 buf++;
1387 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1388 if (buf == NULL)
1389 return -1;
1390 if ((int) dwarf_reg != dwarf_reg)
1391 return -1;
1392 }
1393 else
1394 return -1;
1395
1396 buf = gdb_read_sleb128 (buf, buf_end, &offset);
1397 if (buf == NULL)
1398 return -1;
1399 if (offset != 0)
1400 return -1;
1401
1402 if (*buf == DW_OP_deref)
1403 {
1404 buf++;
1405 *deref_size_return = -1;
1406 }
1407 else if (*buf == DW_OP_deref_size)
1408 {
1409 buf++;
1410 if (buf >= buf_end)
1411 return -1;
1412 *deref_size_return = *buf++;
1413 }
1414 else
1415 return -1;
1416
1417 if (buf != buf_end)
1418 return -1;
1419
1420 return dwarf_reg;
1421 }
1422
1423 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
1424 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
1425
1426 int
1427 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
1428 CORE_ADDR *fb_offset_return)
1429 {
1430 int64_t fb_offset;
1431
1432 if (buf_end <= buf)
1433 return 0;
1434
1435 if (*buf != DW_OP_fbreg)
1436 return 0;
1437 buf++;
1438
1439 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
1440 if (buf == NULL)
1441 return 0;
1442 *fb_offset_return = fb_offset;
1443 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
1444 return 0;
1445
1446 return 1;
1447 }
1448
1449 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
1450 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
1451 The matched SP register number depends on GDBARCH. */
1452
1453 int
1454 dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
1455 const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
1456 {
1457 uint64_t dwarf_reg;
1458 int64_t sp_offset;
1459
1460 if (buf_end <= buf)
1461 return 0;
1462 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
1463 {
1464 dwarf_reg = *buf - DW_OP_breg0;
1465 buf++;
1466 }
1467 else
1468 {
1469 if (*buf != DW_OP_bregx)
1470 return 0;
1471 buf++;
1472 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1473 if (buf == NULL)
1474 return 0;
1475 }
1476
1477 if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
1478 != gdbarch_sp_regnum (gdbarch))
1479 return 0;
1480
1481 buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
1482 if (buf == NULL)
1483 return 0;
1484 *sp_offset_return = sp_offset;
1485 if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
1486 return 0;
1487
1488 return 1;
1489 }
1490
1491 /* The engine for the expression evaluator. Using the context in this
1492 object, evaluate the expression between OP_PTR and OP_END. */
1493
1494 void
1495 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
1496 const gdb_byte *op_end)
1497 {
1498 gdbarch *arch = this->m_per_objfile->objfile->arch ();
1499 bfd_endian byte_order = gdbarch_byte_order (arch);
1500 /* Old-style "untyped" DWARF values need special treatment in a
1501 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
1502 a special type for these values so we can distinguish them from
1503 values that have an explicit type, because explicitly-typed
1504 values do not need special treatment. This special type must be
1505 different (in the `==' sense) from any base type coming from the
1506 CU. */
1507 type *address_type = this->address_type ();
1508
1509 this->m_location = DWARF_VALUE_MEMORY;
1510 this->m_initialized = 1; /* Default is initialized. */
1511
1512 if (this->m_recursion_depth > this->m_max_recursion_depth)
1513 error (_("DWARF-2 expression error: Loop detected (%d)."),
1514 this->m_recursion_depth);
1515 this->m_recursion_depth++;
1516
1517 while (op_ptr < op_end)
1518 {
1519 dwarf_location_atom op = (dwarf_location_atom) *op_ptr++;
1520 ULONGEST result;
1521 /* Assume the value is not in stack memory.
1522 Code that knows otherwise sets this to true.
1523 Some arithmetic on stack addresses can probably be assumed to still
1524 be a stack address, but we skip this complication for now.
1525 This is just an optimization, so it's always ok to punt
1526 and leave this as false. */
1527 bool in_stack_memory = false;
1528 uint64_t uoffset, reg;
1529 int64_t offset;
1530 value *result_val = NULL;
1531
1532 /* The DWARF expression might have a bug causing an infinite
1533 loop. In that case, quitting is the only way out. */
1534 QUIT;
1535
1536 switch (op)
1537 {
1538 case DW_OP_lit0:
1539 case DW_OP_lit1:
1540 case DW_OP_lit2:
1541 case DW_OP_lit3:
1542 case DW_OP_lit4:
1543 case DW_OP_lit5:
1544 case DW_OP_lit6:
1545 case DW_OP_lit7:
1546 case DW_OP_lit8:
1547 case DW_OP_lit9:
1548 case DW_OP_lit10:
1549 case DW_OP_lit11:
1550 case DW_OP_lit12:
1551 case DW_OP_lit13:
1552 case DW_OP_lit14:
1553 case DW_OP_lit15:
1554 case DW_OP_lit16:
1555 case DW_OP_lit17:
1556 case DW_OP_lit18:
1557 case DW_OP_lit19:
1558 case DW_OP_lit20:
1559 case DW_OP_lit21:
1560 case DW_OP_lit22:
1561 case DW_OP_lit23:
1562 case DW_OP_lit24:
1563 case DW_OP_lit25:
1564 case DW_OP_lit26:
1565 case DW_OP_lit27:
1566 case DW_OP_lit28:
1567 case DW_OP_lit29:
1568 case DW_OP_lit30:
1569 case DW_OP_lit31:
1570 result = op - DW_OP_lit0;
1571 result_val = value_from_ulongest (address_type, result);
1572 break;
1573
1574 case DW_OP_addr:
1575 result = extract_unsigned_integer (op_ptr,
1576 this->m_addr_size, byte_order);
1577 op_ptr += this->m_addr_size;
1578 /* Some versions of GCC emit DW_OP_addr before
1579 DW_OP_GNU_push_tls_address. In this case the value is an
1580 index, not an address. We don't support things like
1581 branching between the address and the TLS op. */
1582 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
1583 result += this->m_per_objfile->objfile->text_section_offset ();
1584 result_val = value_from_ulongest (address_type, result);
1585 break;
1586
1587 case DW_OP_addrx:
1588 case DW_OP_GNU_addr_index:
1589 ensure_have_per_cu (this->m_per_cu, "DW_OP_addrx");
1590
1591 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1592 result = dwarf2_read_addr_index (this->m_per_cu, this->m_per_objfile,
1593 uoffset);
1594 result += this->m_per_objfile->objfile->text_section_offset ();
1595 result_val = value_from_ulongest (address_type, result);
1596 break;
1597 case DW_OP_GNU_const_index:
1598 ensure_have_per_cu (this->m_per_cu, "DW_OP_GNU_const_index");
1599
1600 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1601 result = dwarf2_read_addr_index (this->m_per_cu, this->m_per_objfile,
1602 uoffset);
1603 result_val = value_from_ulongest (address_type, result);
1604 break;
1605
1606 case DW_OP_const1u:
1607 result = extract_unsigned_integer (op_ptr, 1, byte_order);
1608 result_val = value_from_ulongest (address_type, result);
1609 op_ptr += 1;
1610 break;
1611 case DW_OP_const1s:
1612 result = extract_signed_integer (op_ptr, 1, byte_order);
1613 result_val = value_from_ulongest (address_type, result);
1614 op_ptr += 1;
1615 break;
1616 case DW_OP_const2u:
1617 result = extract_unsigned_integer (op_ptr, 2, byte_order);
1618 result_val = value_from_ulongest (address_type, result);
1619 op_ptr += 2;
1620 break;
1621 case DW_OP_const2s:
1622 result = extract_signed_integer (op_ptr, 2, byte_order);
1623 result_val = value_from_ulongest (address_type, result);
1624 op_ptr += 2;
1625 break;
1626 case DW_OP_const4u:
1627 result = extract_unsigned_integer (op_ptr, 4, byte_order);
1628 result_val = value_from_ulongest (address_type, result);
1629 op_ptr += 4;
1630 break;
1631 case DW_OP_const4s:
1632 result = extract_signed_integer (op_ptr, 4, byte_order);
1633 result_val = value_from_ulongest (address_type, result);
1634 op_ptr += 4;
1635 break;
1636 case DW_OP_const8u:
1637 result = extract_unsigned_integer (op_ptr, 8, byte_order);
1638 result_val = value_from_ulongest (address_type, result);
1639 op_ptr += 8;
1640 break;
1641 case DW_OP_const8s:
1642 result = extract_signed_integer (op_ptr, 8, byte_order);
1643 result_val = value_from_ulongest (address_type, result);
1644 op_ptr += 8;
1645 break;
1646 case DW_OP_constu:
1647 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1648 result = uoffset;
1649 result_val = value_from_ulongest (address_type, result);
1650 break;
1651 case DW_OP_consts:
1652 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1653 result = offset;
1654 result_val = value_from_ulongest (address_type, result);
1655 break;
1656
1657 /* The DW_OP_reg operations are required to occur alone in
1658 location expressions. */
1659 case DW_OP_reg0:
1660 case DW_OP_reg1:
1661 case DW_OP_reg2:
1662 case DW_OP_reg3:
1663 case DW_OP_reg4:
1664 case DW_OP_reg5:
1665 case DW_OP_reg6:
1666 case DW_OP_reg7:
1667 case DW_OP_reg8:
1668 case DW_OP_reg9:
1669 case DW_OP_reg10:
1670 case DW_OP_reg11:
1671 case DW_OP_reg12:
1672 case DW_OP_reg13:
1673 case DW_OP_reg14:
1674 case DW_OP_reg15:
1675 case DW_OP_reg16:
1676 case DW_OP_reg17:
1677 case DW_OP_reg18:
1678 case DW_OP_reg19:
1679 case DW_OP_reg20:
1680 case DW_OP_reg21:
1681 case DW_OP_reg22:
1682 case DW_OP_reg23:
1683 case DW_OP_reg24:
1684 case DW_OP_reg25:
1685 case DW_OP_reg26:
1686 case DW_OP_reg27:
1687 case DW_OP_reg28:
1688 case DW_OP_reg29:
1689 case DW_OP_reg30:
1690 case DW_OP_reg31:
1691 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
1692
1693 result = op - DW_OP_reg0;
1694 result_val = value_from_ulongest (address_type, result);
1695 this->m_location = DWARF_VALUE_REGISTER;
1696 break;
1697
1698 case DW_OP_regx:
1699 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1700 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
1701
1702 result = reg;
1703 result_val = value_from_ulongest (address_type, result);
1704 this->m_location = DWARF_VALUE_REGISTER;
1705 break;
1706
1707 case DW_OP_implicit_value:
1708 {
1709 uint64_t len;
1710
1711 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1712 if (op_ptr + len > op_end)
1713 error (_("DW_OP_implicit_value: too few bytes available."));
1714 this->m_len = len;
1715 this->m_data = op_ptr;
1716 this->m_location = DWARF_VALUE_LITERAL;
1717 op_ptr += len;
1718 dwarf_expr_require_composition (op_ptr, op_end,
1719 "DW_OP_implicit_value");
1720 }
1721 goto no_push;
1722
1723 case DW_OP_stack_value:
1724 this->m_location = DWARF_VALUE_STACK;
1725 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
1726 goto no_push;
1727
1728 case DW_OP_implicit_pointer:
1729 case DW_OP_GNU_implicit_pointer:
1730 {
1731 int64_t len;
1732 ensure_have_per_cu (this->m_per_cu, "DW_OP_implicit_pointer");
1733
1734 int ref_addr_size = this->m_per_cu->ref_addr_size ();
1735
1736 /* The referred-to DIE of sect_offset kind. */
1737 this->m_len = extract_unsigned_integer (op_ptr, ref_addr_size,
1738 byte_order);
1739 op_ptr += ref_addr_size;
1740
1741 /* The byte offset into the data. */
1742 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
1743 result = (ULONGEST) len;
1744 result_val = value_from_ulongest (address_type, result);
1745
1746 this->m_location = DWARF_VALUE_IMPLICIT_POINTER;
1747 dwarf_expr_require_composition (op_ptr, op_end,
1748 "DW_OP_implicit_pointer");
1749 }
1750 break;
1751
1752 case DW_OP_breg0:
1753 case DW_OP_breg1:
1754 case DW_OP_breg2:
1755 case DW_OP_breg3:
1756 case DW_OP_breg4:
1757 case DW_OP_breg5:
1758 case DW_OP_breg6:
1759 case DW_OP_breg7:
1760 case DW_OP_breg8:
1761 case DW_OP_breg9:
1762 case DW_OP_breg10:
1763 case DW_OP_breg11:
1764 case DW_OP_breg12:
1765 case DW_OP_breg13:
1766 case DW_OP_breg14:
1767 case DW_OP_breg15:
1768 case DW_OP_breg16:
1769 case DW_OP_breg17:
1770 case DW_OP_breg18:
1771 case DW_OP_breg19:
1772 case DW_OP_breg20:
1773 case DW_OP_breg21:
1774 case DW_OP_breg22:
1775 case DW_OP_breg23:
1776 case DW_OP_breg24:
1777 case DW_OP_breg25:
1778 case DW_OP_breg26:
1779 case DW_OP_breg27:
1780 case DW_OP_breg28:
1781 case DW_OP_breg29:
1782 case DW_OP_breg30:
1783 case DW_OP_breg31:
1784 {
1785 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1786 ensure_have_frame (this->m_frame, "DW_OP_breg");
1787
1788 result = read_addr_from_reg (this->m_frame, op - DW_OP_breg0);
1789 result += offset;
1790 result_val = value_from_ulongest (address_type, result);
1791 }
1792 break;
1793 case DW_OP_bregx:
1794 {
1795 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1796 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1797 ensure_have_frame (this->m_frame, "DW_OP_bregx");
1798
1799 result = read_addr_from_reg (this->m_frame, reg);
1800 result += offset;
1801 result_val = value_from_ulongest (address_type, result);
1802 }
1803 break;
1804 case DW_OP_fbreg:
1805 {
1806 const gdb_byte *datastart;
1807 size_t datalen;
1808
1809 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1810
1811 /* Rather than create a whole new context, we simply
1812 backup the current stack locally and install a new empty stack,
1813 then reset it afterwards, effectively erasing whatever the
1814 recursive call put there. */
1815 std::vector<dwarf_stack_value> saved_stack = std::move (this->m_stack);
1816 this->m_stack.clear ();
1817
1818 /* FIXME: cagney/2003-03-26: This code should be using
1819 get_frame_base_address(), and then implement a dwarf2
1820 specific this_base method. */
1821 this->get_frame_base (&datastart, &datalen);
1822 eval (datastart, datalen);
1823 if (this->m_location == DWARF_VALUE_MEMORY)
1824 result = fetch_address (0);
1825 else if (this->m_location == DWARF_VALUE_REGISTER)
1826 result
1827 = read_addr_from_reg (this->m_frame, value_as_long (fetch (0)));
1828 else
1829 error (_("Not implemented: computing frame "
1830 "base using explicit value operator"));
1831 result = result + offset;
1832 result_val = value_from_ulongest (address_type, result);
1833 in_stack_memory = true;
1834
1835 /* Restore the content of the original stack. */
1836 this->m_stack = std::move (saved_stack);
1837
1838 this->m_location = DWARF_VALUE_MEMORY;
1839 }
1840 break;
1841
1842 case DW_OP_dup:
1843 result_val = fetch (0);
1844 in_stack_memory = fetch_in_stack_memory (0);
1845 break;
1846
1847 case DW_OP_drop:
1848 pop ();
1849 goto no_push;
1850
1851 case DW_OP_pick:
1852 offset = *op_ptr++;
1853 result_val = fetch (offset);
1854 in_stack_memory = fetch_in_stack_memory (offset);
1855 break;
1856
1857 case DW_OP_swap:
1858 {
1859 if (this->m_stack.size () < 2)
1860 error (_("Not enough elements for "
1861 "DW_OP_swap. Need 2, have %zu."),
1862 this->m_stack.size ());
1863
1864 dwarf_stack_value &t1 = this->m_stack[this->m_stack.size () - 1];
1865 dwarf_stack_value &t2 = this->m_stack[this->m_stack.size () - 2];
1866 std::swap (t1, t2);
1867 goto no_push;
1868 }
1869
1870 case DW_OP_over:
1871 result_val = fetch (1);
1872 in_stack_memory = fetch_in_stack_memory (1);
1873 break;
1874
1875 case DW_OP_rot:
1876 {
1877 if (this->m_stack.size () < 3)
1878 error (_("Not enough elements for "
1879 "DW_OP_rot. Need 3, have %zu."),
1880 this->m_stack.size ());
1881
1882 dwarf_stack_value temp = this->m_stack[this->m_stack.size () - 1];
1883 this->m_stack[this->m_stack.size () - 1]
1884 = this->m_stack[this->m_stack.size () - 2];
1885 this->m_stack[this->m_stack.size () - 2]
1886 = this->m_stack[this->m_stack.size () - 3];
1887 this->m_stack[this->m_stack.size () - 3] = temp;
1888 goto no_push;
1889 }
1890
1891 case DW_OP_deref:
1892 case DW_OP_deref_size:
1893 case DW_OP_deref_type:
1894 case DW_OP_GNU_deref_type:
1895 {
1896 int addr_size = (op == DW_OP_deref ? this->m_addr_size : *op_ptr++);
1897 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
1898 CORE_ADDR addr = fetch_address (0);
1899 struct type *type;
1900
1901 pop ();
1902
1903 if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
1904 {
1905 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1906 cu_offset type_die_cu_off = (cu_offset) uoffset;
1907 type = get_base_type (type_die_cu_off);
1908 }
1909 else
1910 type = address_type;
1911
1912 this->read_mem (buf, addr, addr_size);
1913
1914 /* If the size of the object read from memory is different
1915 from the type length, we need to zero-extend it. */
1916 if (TYPE_LENGTH (type) != addr_size)
1917 {
1918 ULONGEST datum =
1919 extract_unsigned_integer (buf, addr_size, byte_order);
1920
1921 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
1922 store_unsigned_integer (buf, TYPE_LENGTH (type),
1923 byte_order, datum);
1924 }
1925
1926 result_val = value_from_contents_and_address (type, buf, addr);
1927 break;
1928 }
1929
1930 case DW_OP_abs:
1931 case DW_OP_neg:
1932 case DW_OP_not:
1933 case DW_OP_plus_uconst:
1934 {
1935 /* Unary operations. */
1936 result_val = fetch (0);
1937 pop ();
1938
1939 switch (op)
1940 {
1941 case DW_OP_abs:
1942 if (value_less (result_val,
1943 value_zero (value_type (result_val), not_lval)))
1944 result_val = value_neg (result_val);
1945 break;
1946 case DW_OP_neg:
1947 result_val = value_neg (result_val);
1948 break;
1949 case DW_OP_not:
1950 dwarf_require_integral (value_type (result_val));
1951 result_val = value_complement (result_val);
1952 break;
1953 case DW_OP_plus_uconst:
1954 dwarf_require_integral (value_type (result_val));
1955 result = value_as_long (result_val);
1956 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1957 result += reg;
1958 result_val = value_from_ulongest (address_type, result);
1959 break;
1960 }
1961 }
1962 break;
1963
1964 case DW_OP_and:
1965 case DW_OP_div:
1966 case DW_OP_minus:
1967 case DW_OP_mod:
1968 case DW_OP_mul:
1969 case DW_OP_or:
1970 case DW_OP_plus:
1971 case DW_OP_shl:
1972 case DW_OP_shr:
1973 case DW_OP_shra:
1974 case DW_OP_xor:
1975 case DW_OP_le:
1976 case DW_OP_ge:
1977 case DW_OP_eq:
1978 case DW_OP_lt:
1979 case DW_OP_gt:
1980 case DW_OP_ne:
1981 {
1982 /* Binary operations. */
1983 struct value *first, *second;
1984
1985 second = fetch (0);
1986 pop ();
1987
1988 first = fetch (0);
1989 pop ();
1990
1991 if (! base_types_equal_p (value_type (first), value_type (second)))
1992 error (_("Incompatible types on DWARF stack"));
1993
1994 switch (op)
1995 {
1996 case DW_OP_and:
1997 dwarf_require_integral (value_type (first));
1998 dwarf_require_integral (value_type (second));
1999 result_val = value_binop (first, second, BINOP_BITWISE_AND);
2000 break;
2001 case DW_OP_div:
2002 result_val = value_binop (first, second, BINOP_DIV);
2003 break;
2004 case DW_OP_minus:
2005 result_val = value_binop (first, second, BINOP_SUB);
2006 break;
2007 case DW_OP_mod:
2008 {
2009 int cast_back = 0;
2010 struct type *orig_type = value_type (first);
2011
2012 /* We have to special-case "old-style" untyped values
2013 -- these must have mod computed using unsigned
2014 math. */
2015 if (orig_type == address_type)
2016 {
2017 struct type *utype = get_unsigned_type (arch, orig_type);
2018
2019 cast_back = 1;
2020 first = value_cast (utype, first);
2021 second = value_cast (utype, second);
2022 }
2023 /* Note that value_binop doesn't handle float or
2024 decimal float here. This seems unimportant. */
2025 result_val = value_binop (first, second, BINOP_MOD);
2026 if (cast_back)
2027 result_val = value_cast (orig_type, result_val);
2028 }
2029 break;
2030 case DW_OP_mul:
2031 result_val = value_binop (first, second, BINOP_MUL);
2032 break;
2033 case DW_OP_or:
2034 dwarf_require_integral (value_type (first));
2035 dwarf_require_integral (value_type (second));
2036 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
2037 break;
2038 case DW_OP_plus:
2039 result_val = value_binop (first, second, BINOP_ADD);
2040 break;
2041 case DW_OP_shl:
2042 dwarf_require_integral (value_type (first));
2043 dwarf_require_integral (value_type (second));
2044 result_val = value_binop (first, second, BINOP_LSH);
2045 break;
2046 case DW_OP_shr:
2047 dwarf_require_integral (value_type (first));
2048 dwarf_require_integral (value_type (second));
2049 if (!value_type (first)->is_unsigned ())
2050 {
2051 struct type *utype
2052 = get_unsigned_type (arch, value_type (first));
2053
2054 first = value_cast (utype, first);
2055 }
2056
2057 result_val = value_binop (first, second, BINOP_RSH);
2058 /* Make sure we wind up with the same type we started
2059 with. */
2060 if (value_type (result_val) != value_type (second))
2061 result_val = value_cast (value_type (second), result_val);
2062 break;
2063 case DW_OP_shra:
2064 dwarf_require_integral (value_type (first));
2065 dwarf_require_integral (value_type (second));
2066 if (value_type (first)->is_unsigned ())
2067 {
2068 struct type *stype
2069 = get_signed_type (arch, value_type (first));
2070
2071 first = value_cast (stype, first);
2072 }
2073
2074 result_val = value_binop (first, second, BINOP_RSH);
2075 /* Make sure we wind up with the same type we started
2076 with. */
2077 if (value_type (result_val) != value_type (second))
2078 result_val = value_cast (value_type (second), result_val);
2079 break;
2080 case DW_OP_xor:
2081 dwarf_require_integral (value_type (first));
2082 dwarf_require_integral (value_type (second));
2083 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
2084 break;
2085 case DW_OP_le:
2086 /* A <= B is !(B < A). */
2087 result = ! value_less (second, first);
2088 result_val = value_from_ulongest (address_type, result);
2089 break;
2090 case DW_OP_ge:
2091 /* A >= B is !(A < B). */
2092 result = ! value_less (first, second);
2093 result_val = value_from_ulongest (address_type, result);
2094 break;
2095 case DW_OP_eq:
2096 result = value_equal (first, second);
2097 result_val = value_from_ulongest (address_type, result);
2098 break;
2099 case DW_OP_lt:
2100 result = value_less (first, second);
2101 result_val = value_from_ulongest (address_type, result);
2102 break;
2103 case DW_OP_gt:
2104 /* A > B is B < A. */
2105 result = value_less (second, first);
2106 result_val = value_from_ulongest (address_type, result);
2107 break;
2108 case DW_OP_ne:
2109 result = ! value_equal (first, second);
2110 result_val = value_from_ulongest (address_type, result);
2111 break;
2112 default:
2113 internal_error (__FILE__, __LINE__,
2114 _("Can't be reached."));
2115 }
2116 }
2117 break;
2118
2119 case DW_OP_call_frame_cfa:
2120 ensure_have_frame (this->m_frame, "DW_OP_call_frame_cfa");
2121
2122 result = dwarf2_frame_cfa (this->m_frame);
2123 result_val = value_from_ulongest (address_type, result);
2124 in_stack_memory = true;
2125 break;
2126
2127 case DW_OP_GNU_push_tls_address:
2128 case DW_OP_form_tls_address:
2129 /* Variable is at a constant offset in the thread-local
2130 storage block into the objfile for the current thread and
2131 the dynamic linker module containing this expression. Here
2132 we return returns the offset from that base. The top of the
2133 stack has the offset from the beginning of the thread
2134 control block at which the variable is located. Nothing
2135 should follow this operator, so the top of stack would be
2136 returned. */
2137 result = value_as_long (fetch (0));
2138 pop ();
2139 result = target_translate_tls_address (this->m_per_objfile->objfile,
2140 result);
2141 result_val = value_from_ulongest (address_type, result);
2142 break;
2143
2144 case DW_OP_skip:
2145 offset = extract_signed_integer (op_ptr, 2, byte_order);
2146 op_ptr += 2;
2147 op_ptr += offset;
2148 goto no_push;
2149
2150 case DW_OP_bra:
2151 {
2152 struct value *val;
2153
2154 offset = extract_signed_integer (op_ptr, 2, byte_order);
2155 op_ptr += 2;
2156 val = fetch (0);
2157 dwarf_require_integral (value_type (val));
2158 if (value_as_long (val) != 0)
2159 op_ptr += offset;
2160 pop ();
2161 }
2162 goto no_push;
2163
2164 case DW_OP_nop:
2165 goto no_push;
2166
2167 case DW_OP_piece:
2168 {
2169 uint64_t size;
2170
2171 /* Record the piece. */
2172 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
2173 add_piece (8 * size, 0);
2174
2175 /* Pop off the address/regnum, and reset the location
2176 type. */
2177 if (this->m_location != DWARF_VALUE_LITERAL
2178 && this->m_location != DWARF_VALUE_OPTIMIZED_OUT)
2179 pop ();
2180 this->m_location = DWARF_VALUE_MEMORY;
2181 }
2182 goto no_push;
2183
2184 case DW_OP_bit_piece:
2185 {
2186 uint64_t size, uleb_offset;
2187
2188 /* Record the piece. */
2189 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
2190 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
2191 add_piece (size, uleb_offset);
2192
2193 /* Pop off the address/regnum, and reset the location
2194 type. */
2195 if (this->m_location != DWARF_VALUE_LITERAL
2196 && this->m_location != DWARF_VALUE_OPTIMIZED_OUT)
2197 pop ();
2198 this->m_location = DWARF_VALUE_MEMORY;
2199 }
2200 goto no_push;
2201
2202 case DW_OP_GNU_uninit:
2203 if (op_ptr != op_end)
2204 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
2205 "be the very last op."));
2206
2207 this->m_initialized = 0;
2208 goto no_push;
2209
2210 case DW_OP_call2:
2211 {
2212 cu_offset cu_off
2213 = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
2214 op_ptr += 2;
2215 this->dwarf_call (cu_off);
2216 }
2217 goto no_push;
2218
2219 case DW_OP_call4:
2220 {
2221 cu_offset cu_off
2222 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
2223 op_ptr += 4;
2224 this->dwarf_call (cu_off);
2225 }
2226 goto no_push;
2227
2228 case DW_OP_GNU_variable_value:
2229 {
2230 ensure_have_per_cu (this->m_per_cu, "DW_OP_GNU_variable_value");
2231 int ref_addr_size = this->m_per_cu->ref_addr_size ();
2232
2233 sect_offset sect_off
2234 = (sect_offset) extract_unsigned_integer (op_ptr,
2235 ref_addr_size,
2236 byte_order);
2237 op_ptr += ref_addr_size;
2238 result_val = sect_variable_value (sect_off, this->m_per_cu,
2239 this->m_per_objfile);
2240 result_val = value_cast (address_type, result_val);
2241 }
2242 break;
2243
2244 case DW_OP_entry_value:
2245 case DW_OP_GNU_entry_value:
2246 {
2247 uint64_t len;
2248 CORE_ADDR deref_size;
2249 union call_site_parameter_u kind_u;
2250
2251 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
2252 if (op_ptr + len > op_end)
2253 error (_("DW_OP_entry_value: too few bytes available."));
2254
2255 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
2256 if (kind_u.dwarf_reg != -1)
2257 {
2258 op_ptr += len;
2259 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
2260 kind_u,
2261 -1 /* deref_size */);
2262 goto no_push;
2263 }
2264
2265 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
2266 op_ptr + len,
2267 &deref_size);
2268 if (kind_u.dwarf_reg != -1)
2269 {
2270 if (deref_size == -1)
2271 deref_size = this->m_addr_size;
2272 op_ptr += len;
2273 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
2274 kind_u, deref_size);
2275 goto no_push;
2276 }
2277
2278 error (_("DWARF-2 expression error: DW_OP_entry_value is "
2279 "supported only for single DW_OP_reg* "
2280 "or for DW_OP_breg*(0)+DW_OP_deref*"));
2281 }
2282
2283 case DW_OP_GNU_parameter_ref:
2284 {
2285 union call_site_parameter_u kind_u;
2286
2287 kind_u.param_cu_off
2288 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
2289 op_ptr += 4;
2290 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
2291 kind_u,
2292 -1 /* deref_size */);
2293 }
2294 goto no_push;
2295
2296 case DW_OP_const_type:
2297 case DW_OP_GNU_const_type:
2298 {
2299 int n;
2300 const gdb_byte *data;
2301 struct type *type;
2302
2303 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
2304 cu_offset type_die_cu_off = (cu_offset) uoffset;
2305
2306 n = *op_ptr++;
2307 data = op_ptr;
2308 op_ptr += n;
2309
2310 type = get_base_type (type_die_cu_off);
2311
2312 if (TYPE_LENGTH (type) != n)
2313 error (_("DW_OP_const_type has different sizes for type and data"));
2314
2315 result_val = value_from_contents (type, data);
2316 }
2317 break;
2318
2319 case DW_OP_regval_type:
2320 case DW_OP_GNU_regval_type:
2321 {
2322 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
2323 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
2324 cu_offset type_die_cu_off = (cu_offset) uoffset;
2325
2326 ensure_have_frame (this->m_frame, "DW_OP_regval_type");
2327
2328 struct type *type = get_base_type (type_die_cu_off);
2329 int regnum
2330 = dwarf_reg_to_regnum_or_error (get_frame_arch (this->m_frame),
2331 reg);
2332 result_val = value_from_register (type, regnum, this->m_frame);
2333 }
2334 break;
2335
2336 case DW_OP_convert:
2337 case DW_OP_GNU_convert:
2338 case DW_OP_reinterpret:
2339 case DW_OP_GNU_reinterpret:
2340 {
2341 struct type *type;
2342
2343 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
2344 cu_offset type_die_cu_off = (cu_offset) uoffset;
2345
2346 if (to_underlying (type_die_cu_off) == 0)
2347 type = address_type;
2348 else
2349 type = get_base_type (type_die_cu_off);
2350
2351 result_val = fetch (0);
2352 pop ();
2353
2354 if (op == DW_OP_convert || op == DW_OP_GNU_convert)
2355 result_val = value_cast (type, result_val);
2356 else if (type == value_type (result_val))
2357 {
2358 /* Nothing. */
2359 }
2360 else if (TYPE_LENGTH (type)
2361 != TYPE_LENGTH (value_type (result_val)))
2362 error (_("DW_OP_reinterpret has wrong size"));
2363 else
2364 result_val
2365 = value_from_contents (type,
2366 value_contents_all (result_val).data ());
2367 }
2368 break;
2369
2370 case DW_OP_push_object_address:
2371 /* Return the address of the object we are currently observing. */
2372 if (this->m_addr_info == nullptr
2373 || (this->m_addr_info->valaddr.data () == nullptr
2374 && this->m_addr_info->addr == 0))
2375 error (_("Location address is not set."));
2376
2377 result_val
2378 = value_from_ulongest (address_type, this->m_addr_info->addr);
2379 break;
2380
2381 default:
2382 error (_("Unhandled dwarf expression opcode 0x%x"), op);
2383 }
2384
2385 /* Most things push a result value. */
2386 gdb_assert (result_val != NULL);
2387 push (result_val, in_stack_memory);
2388 no_push:
2389 ;
2390 }
2391
2392 /* To simplify our main caller, if the result is an implicit
2393 pointer, then make a pieced value. This is ok because we can't
2394 have implicit pointers in contexts where pieces are invalid. */
2395 if (this->m_location == DWARF_VALUE_IMPLICIT_POINTER)
2396 add_piece (8 * this->m_addr_size, 0);
2397
2398 this->m_recursion_depth--;
2399 gdb_assert (this->m_recursion_depth >= 0);
2400 }
2401
2402 void _initialize_dwarf2expr ();
2403 void
2404 _initialize_dwarf2expr ()
2405 {
2406 dwarf_arch_cookie
2407 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
2408 }