/* Initial conversion of dwarf_expr_ctx
   (binutils-gdb.git / gdb / dwarf2expr.c)  */
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2016 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2expr.h"
29 #include "dwarf2loc.h"
30
31 /* Local prototypes. */
32
33 static void execute_stack_op (struct dwarf_expr_context *,
34 const gdb_byte *, const gdb_byte *);
35
36 /* Cookie for gdbarch data. */
37
38 static struct gdbarch_data *dwarf_arch_cookie;
39
40 /* This holds gdbarch-specific types used by the DWARF expression
41 evaluator. See comments in execute_stack_op. */
42
struct dwarf_gdbarch_types
{
  /* Signed integer types for 2-, 4- and 8-byte addresses, indexed in
     that order.  Slots start out NULL and are created lazily by
     dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
47
48 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
49
50 static void *
51 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
52 {
53 struct dwarf_gdbarch_types *types
54 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
55
56 /* The types themselves are lazily initialized. */
57
58 return types;
59 }
60
61 /* Return the type used for DWARF operations where the type is
62 unspecified in the DWARF spec. Only certain sizes are
63 supported. */
64
65 static struct type *
66 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
67 {
68 struct dwarf_gdbarch_types *types
69 = (struct dwarf_gdbarch_types *) gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90 }
91
92 /* Create a new context for the expression evaluator. */
93
94 dwarf_expr_context::dwarf_expr_context ()
95 : stack (NULL),
96 stack_len (0),
97 stack_allocated (10),
98 gdbarch (NULL),
99 addr_size (0),
100 ref_addr_size (0),
101 offset (0),
102 recursion_depth (0),
103 max_recursion_depth (0x100),
104 location (DWARF_VALUE_MEMORY),
105 len (0),
106 data (NULL),
107 initialized (0),
108 num_pieces (0),
109 pieces (NULL)
110 {
111 this->stack = XNEWVEC (struct dwarf_stack_value, this->stack_allocated);
112 }
113
114 /* Clean up a dwarf_expr_context. */
115
116 dwarf_expr_context::~dwarf_expr_context ()
117 {
118 xfree (this->stack);
119 xfree (this->pieces);
120 }
121
122 /* Expand the memory allocated to CTX's stack to contain at least
123 NEED more elements than are currently used. */
124
125 static void
126 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
127 {
128 if (ctx->stack_len + need > ctx->stack_allocated)
129 {
130 size_t newlen = ctx->stack_len + need + 10;
131
132 ctx->stack = XRESIZEVEC (struct dwarf_stack_value, ctx->stack, newlen);
133 ctx->stack_allocated = newlen;
134 }
135 }
136
137 /* Push VALUE onto CTX's stack. */
138
139 static void
140 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
141 int in_stack_memory)
142 {
143 struct dwarf_stack_value *v;
144
145 dwarf_expr_grow_stack (ctx, 1);
146 v = &ctx->stack[ctx->stack_len++];
147 v->value = value;
148 v->in_stack_memory = in_stack_memory;
149 }
150
151 /* Push VALUE onto CTX's stack. */
152
153 void
154 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
155 int in_stack_memory)
156 {
157 dwarf_expr_push (ctx,
158 value_from_ulongest (dwarf_expr_address_type (ctx), value),
159 in_stack_memory);
160 }
161
162 /* Pop the top item off of CTX's stack. */
163
164 static void
165 dwarf_expr_pop (struct dwarf_expr_context *ctx)
166 {
167 if (ctx->stack_len <= 0)
168 error (_("dwarf expression stack underflow"));
169 ctx->stack_len--;
170 }
171
172 /* Retrieve the N'th item on CTX's stack. */
173
174 struct value *
175 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
176 {
177 if (ctx->stack_len <= n)
178 error (_("Asked for position %d of stack, "
179 "stack only has %d elements on it."),
180 n, ctx->stack_len);
181 return ctx->stack[ctx->stack_len - (1 + n)].value;
182 }
183
184 /* Require that TYPE be an integral type; throw an exception if not. */
185
186 static void
187 dwarf_require_integral (struct type *type)
188 {
189 if (TYPE_CODE (type) != TYPE_CODE_INT
190 && TYPE_CODE (type) != TYPE_CODE_CHAR
191 && TYPE_CODE (type) != TYPE_CODE_BOOL)
192 error (_("integral type expected in DWARF expression"));
193 }
194
195 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
196 type. */
197
198 static struct type *
199 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
200 {
201 switch (TYPE_LENGTH (type))
202 {
203 case 1:
204 return builtin_type (gdbarch)->builtin_uint8;
205 case 2:
206 return builtin_type (gdbarch)->builtin_uint16;
207 case 4:
208 return builtin_type (gdbarch)->builtin_uint32;
209 case 8:
210 return builtin_type (gdbarch)->builtin_uint64;
211 default:
212 error (_("no unsigned variant found for type, while evaluating "
213 "DWARF expression"));
214 }
215 }
216
217 /* Return the signed form of TYPE. TYPE is necessarily an integral
218 type. */
219
220 static struct type *
221 get_signed_type (struct gdbarch *gdbarch, struct type *type)
222 {
223 switch (TYPE_LENGTH (type))
224 {
225 case 1:
226 return builtin_type (gdbarch)->builtin_int8;
227 case 2:
228 return builtin_type (gdbarch)->builtin_int16;
229 case 4:
230 return builtin_type (gdbarch)->builtin_int32;
231 case 8:
232 return builtin_type (gdbarch)->builtin_int64;
233 default:
234 error (_("no signed variant found for type, while evaluating "
235 "DWARF expression"));
236 }
237 }
238
239 /* Retrieve the N'th item on CTX's stack, converted to an address. */
240
241 CORE_ADDR
242 dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
243 {
244 struct value *result_val = dwarf_expr_fetch (ctx, n);
245 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
246 ULONGEST result;
247
248 dwarf_require_integral (value_type (result_val));
249 result = extract_unsigned_integer (value_contents (result_val),
250 TYPE_LENGTH (value_type (result_val)),
251 byte_order);
252
253 /* For most architectures, calling extract_unsigned_integer() alone
254 is sufficient for extracting an address. However, some
255 architectures (e.g. MIPS) use signed addresses and using
256 extract_unsigned_integer() will not produce a correct
257 result. Make sure we invoke gdbarch_integer_to_address()
258 for those architectures which require it. */
259 if (gdbarch_integer_to_address_p (ctx->gdbarch))
260 {
261 gdb_byte *buf = (gdb_byte *) alloca (ctx->addr_size);
262 struct type *int_type = get_unsigned_type (ctx->gdbarch,
263 value_type (result_val));
264
265 store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
266 return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
267 }
268
269 return (CORE_ADDR) result;
270 }
271
272 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
273
274 int
275 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
276 {
277 if (ctx->stack_len <= n)
278 error (_("Asked for position %d of stack, "
279 "stack only has %d elements on it."),
280 n, ctx->stack_len);
281 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
282 }
283
284 /* Return true if the expression stack is empty. */
285
286 static int
287 dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
288 {
289 return ctx->stack_len == 0;
290 }
291
292 /* Add a new piece to CTX's piece list. */
293 static void
294 add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
295 {
296 struct dwarf_expr_piece *p;
297
298 ctx->num_pieces++;
299
300 ctx->pieces
301 = XRESIZEVEC (struct dwarf_expr_piece, ctx->pieces, ctx->num_pieces);
302
303 p = &ctx->pieces[ctx->num_pieces - 1];
304 p->location = ctx->location;
305 p->size = size;
306 p->offset = offset;
307
308 if (p->location == DWARF_VALUE_LITERAL)
309 {
310 p->v.literal.data = ctx->data;
311 p->v.literal.length = ctx->len;
312 }
313 else if (dwarf_expr_stack_empty_p (ctx))
314 {
315 p->location = DWARF_VALUE_OPTIMIZED_OUT;
316 /* Also reset the context's location, for our callers. This is
317 a somewhat strange approach, but this lets us avoid setting
318 the location to DWARF_VALUE_MEMORY in all the individual
319 cases in the evaluator. */
320 ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
321 }
322 else if (p->location == DWARF_VALUE_MEMORY)
323 {
324 p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
325 p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
326 }
327 else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
328 {
329 p->v.ptr.die.sect_off = ctx->len;
330 p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
331 }
332 else if (p->location == DWARF_VALUE_REGISTER)
333 p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
334 else
335 {
336 p->v.value = dwarf_expr_fetch (ctx, 0);
337 }
338 }
339
340 /* Evaluate the expression at ADDR (LEN bytes long) using the context
341 CTX. */
342
343 void
344 dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
345 size_t len)
346 {
347 int old_recursion_depth = ctx->recursion_depth;
348
349 execute_stack_op (ctx, addr, addr + len);
350
351 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
352
353 gdb_assert (ctx->recursion_depth == old_recursion_depth);
354 }
355
356 /* Helper to read a uleb128 value or throw an error. */
357
358 const gdb_byte *
359 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
360 uint64_t *r)
361 {
362 buf = gdb_read_uleb128 (buf, buf_end, r);
363 if (buf == NULL)
364 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
365 return buf;
366 }
367
368 /* Helper to read a sleb128 value or throw an error. */
369
370 const gdb_byte *
371 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
372 int64_t *r)
373 {
374 buf = gdb_read_sleb128 (buf, buf_end, r);
375 if (buf == NULL)
376 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
377 return buf;
378 }
379
380 const gdb_byte *
381 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
382 {
383 buf = gdb_skip_leb128 (buf, buf_end);
384 if (buf == NULL)
385 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
386 return buf;
387 }
388 \f
389
390 /* Check that the current operator is either at the end of an
391 expression, or that it is followed by a composition operator or by
392 DW_OP_GNU_uninit (which should terminate the expression). */
393
394 void
395 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
396 const char *op_name)
397 {
398 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
399 && *op_ptr != DW_OP_GNU_uninit)
400 error (_("DWARF-2 expression error: `%s' operations must be "
401 "used either alone or in conjunction with DW_OP_piece "
402 "or DW_OP_bit_piece."),
403 op_name);
404 }
405
406 /* Return true iff the types T1 and T2 are "the same". This only does
407 checks that might reasonably be needed to compare DWARF base
408 types. */
409
410 static int
411 base_types_equal_p (struct type *t1, struct type *t2)
412 {
413 if (TYPE_CODE (t1) != TYPE_CODE (t2))
414 return 0;
415 if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
416 return 0;
417 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
418 }
419
420 /* A convenience function to call get_base_type on CTX and return the
421 result. DIE is the DIE whose type we need. SIZE is non-zero if
422 this function should verify that the resulting type has the correct
423 size. */
424
425 static struct type *
426 dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
427 {
428 struct type *result;
429
430 if (ctx->funcs->get_base_type)
431 {
432 result = ctx->funcs->get_base_type (ctx, die);
433 if (result == NULL)
434 error (_("Could not find type for DW_OP_GNU_const_type"));
435 if (size != 0 && TYPE_LENGTH (result) != size)
436 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
437 }
438 else
439 /* Anything will do. */
440 result = builtin_type (ctx->gdbarch)->builtin_int;
441
442 return result;
443 }
444
445 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
446 DWARF register number. Otherwise return -1. */
447
448 int
449 dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
450 {
451 uint64_t dwarf_reg;
452
453 if (buf_end <= buf)
454 return -1;
455 if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
456 {
457 if (buf_end - buf != 1)
458 return -1;
459 return *buf - DW_OP_reg0;
460 }
461
462 if (*buf == DW_OP_GNU_regval_type)
463 {
464 buf++;
465 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
466 if (buf == NULL)
467 return -1;
468 buf = gdb_skip_leb128 (buf, buf_end);
469 if (buf == NULL)
470 return -1;
471 }
472 else if (*buf == DW_OP_regx)
473 {
474 buf++;
475 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
476 if (buf == NULL)
477 return -1;
478 }
479 else
480 return -1;
481 if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
482 return -1;
483 return dwarf_reg;
484 }
485
486 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
487 DW_OP_deref* return the DWARF register number. Otherwise return -1.
488 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
489 size from DW_OP_deref_size. */
490
491 int
492 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
493 CORE_ADDR *deref_size_return)
494 {
495 uint64_t dwarf_reg;
496 int64_t offset;
497
498 if (buf_end <= buf)
499 return -1;
500
501 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
502 {
503 dwarf_reg = *buf - DW_OP_breg0;
504 buf++;
505 if (buf >= buf_end)
506 return -1;
507 }
508 else if (*buf == DW_OP_bregx)
509 {
510 buf++;
511 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
512 if (buf == NULL)
513 return -1;
514 if ((int) dwarf_reg != dwarf_reg)
515 return -1;
516 }
517 else
518 return -1;
519
520 buf = gdb_read_sleb128 (buf, buf_end, &offset);
521 if (buf == NULL)
522 return -1;
523 if (offset != 0)
524 return -1;
525
526 if (*buf == DW_OP_deref)
527 {
528 buf++;
529 *deref_size_return = -1;
530 }
531 else if (*buf == DW_OP_deref_size)
532 {
533 buf++;
534 if (buf >= buf_end)
535 return -1;
536 *deref_size_return = *buf++;
537 }
538 else
539 return -1;
540
541 if (buf != buf_end)
542 return -1;
543
544 return dwarf_reg;
545 }
546
547 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
548 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
549
550 int
551 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
552 CORE_ADDR *fb_offset_return)
553 {
554 int64_t fb_offset;
555
556 if (buf_end <= buf)
557 return 0;
558
559 if (*buf != DW_OP_fbreg)
560 return 0;
561 buf++;
562
563 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
564 if (buf == NULL)
565 return 0;
566 *fb_offset_return = fb_offset;
567 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
568 return 0;
569
570 return 1;
571 }
572
573 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
574 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
575 The matched SP register number depends on GDBARCH. */
576
577 int
578 dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
579 const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
580 {
581 uint64_t dwarf_reg;
582 int64_t sp_offset;
583
584 if (buf_end <= buf)
585 return 0;
586 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
587 {
588 dwarf_reg = *buf - DW_OP_breg0;
589 buf++;
590 }
591 else
592 {
593 if (*buf != DW_OP_bregx)
594 return 0;
595 buf++;
596 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
597 if (buf == NULL)
598 return 0;
599 }
600
601 if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
602 != gdbarch_sp_regnum (gdbarch))
603 return 0;
604
605 buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
606 if (buf == NULL)
607 return 0;
608 *sp_offset_return = sp_offset;
609 if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
610 return 0;
611
612 return 1;
613 }
614
615 /* The engine for the expression evaluator. Using the context in CTX,
616 evaluate the expression between OP_PTR and OP_END. */
617
618 static void
619 execute_stack_op (struct dwarf_expr_context *ctx,
620 const gdb_byte *op_ptr, const gdb_byte *op_end)
621 {
622 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
623 /* Old-style "untyped" DWARF values need special treatment in a
624 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
625 a special type for these values so we can distinguish them from
626 values that have an explicit type, because explicitly-typed
627 values do not need special treatment. This special type must be
628 different (in the `==' sense) from any base type coming from the
629 CU. */
630 struct type *address_type = dwarf_expr_address_type (ctx);
631
632 ctx->location = DWARF_VALUE_MEMORY;
633 ctx->initialized = 1; /* Default is initialized. */
634
635 if (ctx->recursion_depth > ctx->max_recursion_depth)
636 error (_("DWARF-2 expression error: Loop detected (%d)."),
637 ctx->recursion_depth);
638 ctx->recursion_depth++;
639
640 while (op_ptr < op_end)
641 {
642 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
643 ULONGEST result;
644 /* Assume the value is not in stack memory.
645 Code that knows otherwise sets this to 1.
646 Some arithmetic on stack addresses can probably be assumed to still
647 be a stack address, but we skip this complication for now.
648 This is just an optimization, so it's always ok to punt
649 and leave this as 0. */
650 int in_stack_memory = 0;
651 uint64_t uoffset, reg;
652 int64_t offset;
653 struct value *result_val = NULL;
654
655 /* The DWARF expression might have a bug causing an infinite
656 loop. In that case, quitting is the only way out. */
657 QUIT;
658
659 switch (op)
660 {
661 case DW_OP_lit0:
662 case DW_OP_lit1:
663 case DW_OP_lit2:
664 case DW_OP_lit3:
665 case DW_OP_lit4:
666 case DW_OP_lit5:
667 case DW_OP_lit6:
668 case DW_OP_lit7:
669 case DW_OP_lit8:
670 case DW_OP_lit9:
671 case DW_OP_lit10:
672 case DW_OP_lit11:
673 case DW_OP_lit12:
674 case DW_OP_lit13:
675 case DW_OP_lit14:
676 case DW_OP_lit15:
677 case DW_OP_lit16:
678 case DW_OP_lit17:
679 case DW_OP_lit18:
680 case DW_OP_lit19:
681 case DW_OP_lit20:
682 case DW_OP_lit21:
683 case DW_OP_lit22:
684 case DW_OP_lit23:
685 case DW_OP_lit24:
686 case DW_OP_lit25:
687 case DW_OP_lit26:
688 case DW_OP_lit27:
689 case DW_OP_lit28:
690 case DW_OP_lit29:
691 case DW_OP_lit30:
692 case DW_OP_lit31:
693 result = op - DW_OP_lit0;
694 result_val = value_from_ulongest (address_type, result);
695 break;
696
697 case DW_OP_addr:
698 result = extract_unsigned_integer (op_ptr,
699 ctx->addr_size, byte_order);
700 op_ptr += ctx->addr_size;
701 /* Some versions of GCC emit DW_OP_addr before
702 DW_OP_GNU_push_tls_address. In this case the value is an
703 index, not an address. We don't support things like
704 branching between the address and the TLS op. */
705 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
706 result += ctx->offset;
707 result_val = value_from_ulongest (address_type, result);
708 break;
709
710 case DW_OP_GNU_addr_index:
711 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
712 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
713 result += ctx->offset;
714 result_val = value_from_ulongest (address_type, result);
715 break;
716 case DW_OP_GNU_const_index:
717 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
718 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
719 result_val = value_from_ulongest (address_type, result);
720 break;
721
722 case DW_OP_const1u:
723 result = extract_unsigned_integer (op_ptr, 1, byte_order);
724 result_val = value_from_ulongest (address_type, result);
725 op_ptr += 1;
726 break;
727 case DW_OP_const1s:
728 result = extract_signed_integer (op_ptr, 1, byte_order);
729 result_val = value_from_ulongest (address_type, result);
730 op_ptr += 1;
731 break;
732 case DW_OP_const2u:
733 result = extract_unsigned_integer (op_ptr, 2, byte_order);
734 result_val = value_from_ulongest (address_type, result);
735 op_ptr += 2;
736 break;
737 case DW_OP_const2s:
738 result = extract_signed_integer (op_ptr, 2, byte_order);
739 result_val = value_from_ulongest (address_type, result);
740 op_ptr += 2;
741 break;
742 case DW_OP_const4u:
743 result = extract_unsigned_integer (op_ptr, 4, byte_order);
744 result_val = value_from_ulongest (address_type, result);
745 op_ptr += 4;
746 break;
747 case DW_OP_const4s:
748 result = extract_signed_integer (op_ptr, 4, byte_order);
749 result_val = value_from_ulongest (address_type, result);
750 op_ptr += 4;
751 break;
752 case DW_OP_const8u:
753 result = extract_unsigned_integer (op_ptr, 8, byte_order);
754 result_val = value_from_ulongest (address_type, result);
755 op_ptr += 8;
756 break;
757 case DW_OP_const8s:
758 result = extract_signed_integer (op_ptr, 8, byte_order);
759 result_val = value_from_ulongest (address_type, result);
760 op_ptr += 8;
761 break;
762 case DW_OP_constu:
763 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
764 result = uoffset;
765 result_val = value_from_ulongest (address_type, result);
766 break;
767 case DW_OP_consts:
768 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
769 result = offset;
770 result_val = value_from_ulongest (address_type, result);
771 break;
772
773 /* The DW_OP_reg operations are required to occur alone in
774 location expressions. */
775 case DW_OP_reg0:
776 case DW_OP_reg1:
777 case DW_OP_reg2:
778 case DW_OP_reg3:
779 case DW_OP_reg4:
780 case DW_OP_reg5:
781 case DW_OP_reg6:
782 case DW_OP_reg7:
783 case DW_OP_reg8:
784 case DW_OP_reg9:
785 case DW_OP_reg10:
786 case DW_OP_reg11:
787 case DW_OP_reg12:
788 case DW_OP_reg13:
789 case DW_OP_reg14:
790 case DW_OP_reg15:
791 case DW_OP_reg16:
792 case DW_OP_reg17:
793 case DW_OP_reg18:
794 case DW_OP_reg19:
795 case DW_OP_reg20:
796 case DW_OP_reg21:
797 case DW_OP_reg22:
798 case DW_OP_reg23:
799 case DW_OP_reg24:
800 case DW_OP_reg25:
801 case DW_OP_reg26:
802 case DW_OP_reg27:
803 case DW_OP_reg28:
804 case DW_OP_reg29:
805 case DW_OP_reg30:
806 case DW_OP_reg31:
807 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
808
809 result = op - DW_OP_reg0;
810 result_val = value_from_ulongest (address_type, result);
811 ctx->location = DWARF_VALUE_REGISTER;
812 break;
813
814 case DW_OP_regx:
815 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
816 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
817
818 result = reg;
819 result_val = value_from_ulongest (address_type, result);
820 ctx->location = DWARF_VALUE_REGISTER;
821 break;
822
823 case DW_OP_implicit_value:
824 {
825 uint64_t len;
826
827 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
828 if (op_ptr + len > op_end)
829 error (_("DW_OP_implicit_value: too few bytes available."));
830 ctx->len = len;
831 ctx->data = op_ptr;
832 ctx->location = DWARF_VALUE_LITERAL;
833 op_ptr += len;
834 dwarf_expr_require_composition (op_ptr, op_end,
835 "DW_OP_implicit_value");
836 }
837 goto no_push;
838
839 case DW_OP_stack_value:
840 ctx->location = DWARF_VALUE_STACK;
841 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
842 goto no_push;
843
844 case DW_OP_GNU_implicit_pointer:
845 {
846 int64_t len;
847
848 if (ctx->ref_addr_size == -1)
849 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
850 "is not allowed in frame context"));
851
852 /* The referred-to DIE of sect_offset kind. */
853 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
854 byte_order);
855 op_ptr += ctx->ref_addr_size;
856
857 /* The byte offset into the data. */
858 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
859 result = (ULONGEST) len;
860 result_val = value_from_ulongest (address_type, result);
861
862 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
863 dwarf_expr_require_composition (op_ptr, op_end,
864 "DW_OP_GNU_implicit_pointer");
865 }
866 break;
867
868 case DW_OP_breg0:
869 case DW_OP_breg1:
870 case DW_OP_breg2:
871 case DW_OP_breg3:
872 case DW_OP_breg4:
873 case DW_OP_breg5:
874 case DW_OP_breg6:
875 case DW_OP_breg7:
876 case DW_OP_breg8:
877 case DW_OP_breg9:
878 case DW_OP_breg10:
879 case DW_OP_breg11:
880 case DW_OP_breg12:
881 case DW_OP_breg13:
882 case DW_OP_breg14:
883 case DW_OP_breg15:
884 case DW_OP_breg16:
885 case DW_OP_breg17:
886 case DW_OP_breg18:
887 case DW_OP_breg19:
888 case DW_OP_breg20:
889 case DW_OP_breg21:
890 case DW_OP_breg22:
891 case DW_OP_breg23:
892 case DW_OP_breg24:
893 case DW_OP_breg25:
894 case DW_OP_breg26:
895 case DW_OP_breg27:
896 case DW_OP_breg28:
897 case DW_OP_breg29:
898 case DW_OP_breg30:
899 case DW_OP_breg31:
900 {
901 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
902 result = (ctx->funcs->read_addr_from_reg) (ctx->baton,
903 op - DW_OP_breg0);
904 result += offset;
905 result_val = value_from_ulongest (address_type, result);
906 }
907 break;
908 case DW_OP_bregx:
909 {
910 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
911 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
912 result = (ctx->funcs->read_addr_from_reg) (ctx->baton, reg);
913 result += offset;
914 result_val = value_from_ulongest (address_type, result);
915 }
916 break;
917 case DW_OP_fbreg:
918 {
919 const gdb_byte *datastart;
920 size_t datalen;
921 unsigned int before_stack_len;
922
923 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
924 /* Rather than create a whole new context, we simply
925 record the stack length before execution, then reset it
926 afterwards, effectively erasing whatever the recursive
927 call put there. */
928 before_stack_len = ctx->stack_len;
929 /* FIXME: cagney/2003-03-26: This code should be using
930 get_frame_base_address(), and then implement a dwarf2
931 specific this_base method. */
932 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
933 dwarf_expr_eval (ctx, datastart, datalen);
934 if (ctx->location == DWARF_VALUE_MEMORY)
935 result = dwarf_expr_fetch_address (ctx, 0);
936 else if (ctx->location == DWARF_VALUE_REGISTER)
937 result = (ctx->funcs->read_addr_from_reg)
938 (ctx->baton,
939 value_as_long (dwarf_expr_fetch (ctx, 0)));
940 else
941 error (_("Not implemented: computing frame "
942 "base using explicit value operator"));
943 result = result + offset;
944 result_val = value_from_ulongest (address_type, result);
945 in_stack_memory = 1;
946 ctx->stack_len = before_stack_len;
947 ctx->location = DWARF_VALUE_MEMORY;
948 }
949 break;
950
951 case DW_OP_dup:
952 result_val = dwarf_expr_fetch (ctx, 0);
953 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
954 break;
955
956 case DW_OP_drop:
957 dwarf_expr_pop (ctx);
958 goto no_push;
959
960 case DW_OP_pick:
961 offset = *op_ptr++;
962 result_val = dwarf_expr_fetch (ctx, offset);
963 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
964 break;
965
966 case DW_OP_swap:
967 {
968 struct dwarf_stack_value t1, t2;
969
970 if (ctx->stack_len < 2)
971 error (_("Not enough elements for "
972 "DW_OP_swap. Need 2, have %d."),
973 ctx->stack_len);
974 t1 = ctx->stack[ctx->stack_len - 1];
975 t2 = ctx->stack[ctx->stack_len - 2];
976 ctx->stack[ctx->stack_len - 1] = t2;
977 ctx->stack[ctx->stack_len - 2] = t1;
978 goto no_push;
979 }
980
981 case DW_OP_over:
982 result_val = dwarf_expr_fetch (ctx, 1);
983 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
984 break;
985
986 case DW_OP_rot:
987 {
988 struct dwarf_stack_value t1, t2, t3;
989
990 if (ctx->stack_len < 3)
991 error (_("Not enough elements for "
992 "DW_OP_rot. Need 3, have %d."),
993 ctx->stack_len);
994 t1 = ctx->stack[ctx->stack_len - 1];
995 t2 = ctx->stack[ctx->stack_len - 2];
996 t3 = ctx->stack[ctx->stack_len - 3];
997 ctx->stack[ctx->stack_len - 1] = t2;
998 ctx->stack[ctx->stack_len - 2] = t3;
999 ctx->stack[ctx->stack_len - 3] = t1;
1000 goto no_push;
1001 }
1002
1003 case DW_OP_deref:
1004 case DW_OP_deref_size:
1005 case DW_OP_GNU_deref_type:
1006 {
1007 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
1008 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
1009 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
1010 struct type *type;
1011
1012 dwarf_expr_pop (ctx);
1013
1014 if (op == DW_OP_GNU_deref_type)
1015 {
1016 cu_offset type_die;
1017
1018 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1019 type_die.cu_off = uoffset;
1020 type = dwarf_get_base_type (ctx, type_die, 0);
1021 }
1022 else
1023 type = address_type;
1024
1025 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
1026
1027 /* If the size of the object read from memory is different
1028 from the type length, we need to zero-extend it. */
1029 if (TYPE_LENGTH (type) != addr_size)
1030 {
1031 ULONGEST result =
1032 extract_unsigned_integer (buf, addr_size, byte_order);
1033
1034 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
1035 store_unsigned_integer (buf, TYPE_LENGTH (type),
1036 byte_order, result);
1037 }
1038
1039 result_val = value_from_contents_and_address (type, buf, addr);
1040 break;
1041 }
1042
1043 case DW_OP_abs:
1044 case DW_OP_neg:
1045 case DW_OP_not:
1046 case DW_OP_plus_uconst:
1047 {
1048 /* Unary operations. */
1049 result_val = dwarf_expr_fetch (ctx, 0);
1050 dwarf_expr_pop (ctx);
1051
1052 switch (op)
1053 {
1054 case DW_OP_abs:
1055 if (value_less (result_val,
1056 value_zero (value_type (result_val), not_lval)))
1057 result_val = value_neg (result_val);
1058 break;
1059 case DW_OP_neg:
1060 result_val = value_neg (result_val);
1061 break;
1062 case DW_OP_not:
1063 dwarf_require_integral (value_type (result_val));
1064 result_val = value_complement (result_val);
1065 break;
1066 case DW_OP_plus_uconst:
1067 dwarf_require_integral (value_type (result_val));
1068 result = value_as_long (result_val);
1069 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1070 result += reg;
1071 result_val = value_from_ulongest (address_type, result);
1072 break;
1073 }
1074 }
1075 break;
1076
1077 case DW_OP_and:
1078 case DW_OP_div:
1079 case DW_OP_minus:
1080 case DW_OP_mod:
1081 case DW_OP_mul:
1082 case DW_OP_or:
1083 case DW_OP_plus:
1084 case DW_OP_shl:
1085 case DW_OP_shr:
1086 case DW_OP_shra:
1087 case DW_OP_xor:
1088 case DW_OP_le:
1089 case DW_OP_ge:
1090 case DW_OP_eq:
1091 case DW_OP_lt:
1092 case DW_OP_gt:
1093 case DW_OP_ne:
1094 {
1095 /* Binary operations. */
1096 struct value *first, *second;
1097
1098 second = dwarf_expr_fetch (ctx, 0);
1099 dwarf_expr_pop (ctx);
1100
1101 first = dwarf_expr_fetch (ctx, 0);
1102 dwarf_expr_pop (ctx);
1103
1104 if (! base_types_equal_p (value_type (first), value_type (second)))
1105 error (_("Incompatible types on DWARF stack"));
1106
1107 switch (op)
1108 {
1109 case DW_OP_and:
1110 dwarf_require_integral (value_type (first));
1111 dwarf_require_integral (value_type (second));
1112 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1113 break;
1114 case DW_OP_div:
1115 result_val = value_binop (first, second, BINOP_DIV);
1116 break;
1117 case DW_OP_minus:
1118 result_val = value_binop (first, second, BINOP_SUB);
1119 break;
1120 case DW_OP_mod:
1121 {
1122 int cast_back = 0;
1123 struct type *orig_type = value_type (first);
1124
1125 /* We have to special-case "old-style" untyped values
1126 -- these must have mod computed using unsigned
1127 math. */
1128 if (orig_type == address_type)
1129 {
1130 struct type *utype
1131 = get_unsigned_type (ctx->gdbarch, orig_type);
1132
1133 cast_back = 1;
1134 first = value_cast (utype, first);
1135 second = value_cast (utype, second);
1136 }
1137 /* Note that value_binop doesn't handle float or
1138 decimal float here. This seems unimportant. */
1139 result_val = value_binop (first, second, BINOP_MOD);
1140 if (cast_back)
1141 result_val = value_cast (orig_type, result_val);
1142 }
1143 break;
1144 case DW_OP_mul:
1145 result_val = value_binop (first, second, BINOP_MUL);
1146 break;
1147 case DW_OP_or:
1148 dwarf_require_integral (value_type (first));
1149 dwarf_require_integral (value_type (second));
1150 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1151 break;
1152 case DW_OP_plus:
1153 result_val = value_binop (first, second, BINOP_ADD);
1154 break;
1155 case DW_OP_shl:
1156 dwarf_require_integral (value_type (first));
1157 dwarf_require_integral (value_type (second));
1158 result_val = value_binop (first, second, BINOP_LSH);
1159 break;
1160 case DW_OP_shr:
1161 dwarf_require_integral (value_type (first));
1162 dwarf_require_integral (value_type (second));
1163 if (!TYPE_UNSIGNED (value_type (first)))
1164 {
1165 struct type *utype
1166 = get_unsigned_type (ctx->gdbarch, value_type (first));
1167
1168 first = value_cast (utype, first);
1169 }
1170
1171 result_val = value_binop (first, second, BINOP_RSH);
1172 /* Make sure we wind up with the same type we started
1173 with. */
1174 if (value_type (result_val) != value_type (second))
1175 result_val = value_cast (value_type (second), result_val);
1176 break;
1177 case DW_OP_shra:
1178 dwarf_require_integral (value_type (first));
1179 dwarf_require_integral (value_type (second));
1180 if (TYPE_UNSIGNED (value_type (first)))
1181 {
1182 struct type *stype
1183 = get_signed_type (ctx->gdbarch, value_type (first));
1184
1185 first = value_cast (stype, first);
1186 }
1187
1188 result_val = value_binop (first, second, BINOP_RSH);
1189 /* Make sure we wind up with the same type we started
1190 with. */
1191 if (value_type (result_val) != value_type (second))
1192 result_val = value_cast (value_type (second), result_val);
1193 break;
1194 case DW_OP_xor:
1195 dwarf_require_integral (value_type (first));
1196 dwarf_require_integral (value_type (second));
1197 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1198 break;
1199 case DW_OP_le:
1200 /* A <= B is !(B < A). */
1201 result = ! value_less (second, first);
1202 result_val = value_from_ulongest (address_type, result);
1203 break;
1204 case DW_OP_ge:
1205 /* A >= B is !(A < B). */
1206 result = ! value_less (first, second);
1207 result_val = value_from_ulongest (address_type, result);
1208 break;
1209 case DW_OP_eq:
1210 result = value_equal (first, second);
1211 result_val = value_from_ulongest (address_type, result);
1212 break;
1213 case DW_OP_lt:
1214 result = value_less (first, second);
1215 result_val = value_from_ulongest (address_type, result);
1216 break;
1217 case DW_OP_gt:
1218 /* A > B is B < A. */
1219 result = value_less (second, first);
1220 result_val = value_from_ulongest (address_type, result);
1221 break;
1222 case DW_OP_ne:
1223 result = ! value_equal (first, second);
1224 result_val = value_from_ulongest (address_type, result);
1225 break;
1226 default:
1227 internal_error (__FILE__, __LINE__,
1228 _("Can't be reached."));
1229 }
1230 }
1231 break;
1232
1233 case DW_OP_call_frame_cfa:
1234 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1235 result_val = value_from_ulongest (address_type, result);
1236 in_stack_memory = 1;
1237 break;
1238
1239 case DW_OP_GNU_push_tls_address:
1240 case DW_OP_form_tls_address:
1241 /* Variable is at a constant offset in the thread-local
1242 storage block into the objfile for the current thread and
1243 the dynamic linker module containing this expression. Here
	     we return the offset from that base.  The top of the
1245 stack has the offset from the beginning of the thread
1246 control block at which the variable is located. Nothing
1247 should follow this operator, so the top of stack would be
1248 returned. */
1249 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1250 dwarf_expr_pop (ctx);
1251 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1252 result_val = value_from_ulongest (address_type, result);
1253 break;
1254
1255 case DW_OP_skip:
1256 offset = extract_signed_integer (op_ptr, 2, byte_order);
1257 op_ptr += 2;
1258 op_ptr += offset;
1259 goto no_push;
1260
1261 case DW_OP_bra:
1262 {
1263 struct value *val;
1264
1265 offset = extract_signed_integer (op_ptr, 2, byte_order);
1266 op_ptr += 2;
1267 val = dwarf_expr_fetch (ctx, 0);
1268 dwarf_require_integral (value_type (val));
1269 if (value_as_long (val) != 0)
1270 op_ptr += offset;
1271 dwarf_expr_pop (ctx);
1272 }
1273 goto no_push;
1274
1275 case DW_OP_nop:
1276 goto no_push;
1277
1278 case DW_OP_piece:
1279 {
1280 uint64_t size;
1281
1282 /* Record the piece. */
1283 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1284 add_piece (ctx, 8 * size, 0);
1285
1286 /* Pop off the address/regnum, and reset the location
1287 type. */
1288 if (ctx->location != DWARF_VALUE_LITERAL
1289 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1290 dwarf_expr_pop (ctx);
1291 ctx->location = DWARF_VALUE_MEMORY;
1292 }
1293 goto no_push;
1294
1295 case DW_OP_bit_piece:
1296 {
1297 uint64_t size, offset;
1298
1299 /* Record the piece. */
1300 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1301 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1302 add_piece (ctx, size, offset);
1303
1304 /* Pop off the address/regnum, and reset the location
1305 type. */
1306 if (ctx->location != DWARF_VALUE_LITERAL
1307 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1308 dwarf_expr_pop (ctx);
1309 ctx->location = DWARF_VALUE_MEMORY;
1310 }
1311 goto no_push;
1312
1313 case DW_OP_GNU_uninit:
1314 if (op_ptr != op_end)
1315 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1316 "be the very last op."));
1317
1318 ctx->initialized = 0;
1319 goto no_push;
1320
1321 case DW_OP_call2:
1322 {
1323 cu_offset offset;
1324
1325 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1326 op_ptr += 2;
1327 ctx->funcs->dwarf_call (ctx, offset);
1328 }
1329 goto no_push;
1330
1331 case DW_OP_call4:
1332 {
1333 cu_offset offset;
1334
1335 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1336 op_ptr += 4;
1337 ctx->funcs->dwarf_call (ctx, offset);
1338 }
1339 goto no_push;
1340
1341 case DW_OP_GNU_entry_value:
1342 {
1343 uint64_t len;
1344 CORE_ADDR deref_size;
1345 union call_site_parameter_u kind_u;
1346
1347 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1348 if (op_ptr + len > op_end)
1349 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1350
1351 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1352 if (kind_u.dwarf_reg != -1)
1353 {
1354 op_ptr += len;
1355 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1356 CALL_SITE_PARAMETER_DWARF_REG,
1357 kind_u,
1358 -1 /* deref_size */);
1359 goto no_push;
1360 }
1361
1362 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1363 op_ptr + len,
1364 &deref_size);
1365 if (kind_u.dwarf_reg != -1)
1366 {
1367 if (deref_size == -1)
1368 deref_size = ctx->addr_size;
1369 op_ptr += len;
1370 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1371 CALL_SITE_PARAMETER_DWARF_REG,
1372 kind_u, deref_size);
1373 goto no_push;
1374 }
1375
1376 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1377 "supported only for single DW_OP_reg* "
1378 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1379 }
1380
1381 case DW_OP_GNU_parameter_ref:
1382 {
1383 union call_site_parameter_u kind_u;
1384
1385 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
1386 byte_order);
1387 op_ptr += 4;
1388 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1389 CALL_SITE_PARAMETER_PARAM_OFFSET,
1390 kind_u,
1391 -1 /* deref_size */);
1392 }
1393 goto no_push;
1394
1395 case DW_OP_GNU_const_type:
1396 {
1397 cu_offset type_die;
1398 int n;
1399 const gdb_byte *data;
1400 struct type *type;
1401
1402 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1403 type_die.cu_off = uoffset;
1404 n = *op_ptr++;
1405 data = op_ptr;
1406 op_ptr += n;
1407
1408 type = dwarf_get_base_type (ctx, type_die, n);
1409 result_val = value_from_contents (type, data);
1410 }
1411 break;
1412
1413 case DW_OP_GNU_regval_type:
1414 {
1415 cu_offset type_die;
1416 struct type *type;
1417
1418 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1419 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1420 type_die.cu_off = uoffset;
1421
1422 type = dwarf_get_base_type (ctx, type_die, 0);
1423 result_val = ctx->funcs->get_reg_value (ctx->baton, type, reg);
1424 }
1425 break;
1426
1427 case DW_OP_GNU_convert:
1428 case DW_OP_GNU_reinterpret:
1429 {
1430 cu_offset type_die;
1431 struct type *type;
1432
1433 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1434 type_die.cu_off = uoffset;
1435
1436 if (type_die.cu_off == 0)
1437 type = address_type;
1438 else
1439 type = dwarf_get_base_type (ctx, type_die, 0);
1440
1441 result_val = dwarf_expr_fetch (ctx, 0);
1442 dwarf_expr_pop (ctx);
1443
1444 if (op == DW_OP_GNU_convert)
1445 result_val = value_cast (type, result_val);
1446 else if (type == value_type (result_val))
1447 {
1448 /* Nothing. */
1449 }
1450 else if (TYPE_LENGTH (type)
1451 != TYPE_LENGTH (value_type (result_val)))
1452 error (_("DW_OP_GNU_reinterpret has wrong size"));
1453 else
1454 result_val
1455 = value_from_contents (type,
1456 value_contents_all (result_val));
1457 }
1458 break;
1459
1460 case DW_OP_push_object_address:
1461 /* Return the address of the object we are currently observing. */
1462 result = (ctx->funcs->get_object_address) (ctx->baton);
1463 result_val = value_from_ulongest (address_type, result);
1464 break;
1465
1466 default:
1467 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1468 }
1469
1470 /* Most things push a result value. */
1471 gdb_assert (result_val != NULL);
1472 dwarf_expr_push (ctx, result_val, in_stack_memory);
1473 no_push:
1474 ;
1475 }
1476
1477 /* To simplify our main caller, if the result is an implicit
1478 pointer, then make a pieced value. This is ok because we can't
1479 have implicit pointers in contexts where pieces are invalid. */
1480 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1481 add_piece (ctx, 8 * ctx->addr_size, 0);
1482
1483 abort_expression:
1484 ctx->recursion_depth--;
1485 gdb_assert (ctx->recursion_depth >= 0);
1486 }
1487
1488 /* Stub dwarf_expr_context_funcs.get_frame_base implementation. */
1489
1490 void
1491 ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
1492 {
1493 error (_("%s is invalid in this context"), "DW_OP_fbreg");
1494 }
1495
1496 /* Stub dwarf_expr_context_funcs.get_frame_cfa implementation. */
1497
1498 CORE_ADDR
1499 ctx_no_get_frame_cfa (void *baton)
1500 {
1501 error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
1502 }
1503
1504 /* Stub dwarf_expr_context_funcs.get_frame_pc implementation. */
1505
1506 CORE_ADDR
1507 ctx_no_get_frame_pc (void *baton)
1508 {
1509 error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
1510 }
1511
1512 /* Stub dwarf_expr_context_funcs.get_tls_address implementation. */
1513
1514 CORE_ADDR
1515 ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
1516 {
1517 error (_("%s is invalid in this context"), "DW_OP_form_tls_address");
1518 }
1519
1520 /* Stub dwarf_expr_context_funcs.dwarf_call implementation. */
1521
1522 void
1523 ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
1524 {
1525 error (_("%s is invalid in this context"), "DW_OP_call*");
1526 }
1527
/* Stub dwarf_expr_context_funcs.get_base_type implementation.

   Installed by contexts that cannot look up DWARF base types (as
   required by DW_OP_GNU_const_type and related typed operations);
   it unconditionally reports an error.  CTX and DIE are ignored.  */

struct type *
ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
{
  error (_("Support for typed DWARF is not supported in this context"));
}
1535
/* Stub dwarf_expr_context_funcs.push_dwarf_block_entry_value
   implementation.

   Unlike the other stubs this uses internal_error rather than error:
   reaching it means GDB itself routed a DW_OP_GNU_entry_value to a
   context that cannot handle it, which is a bug rather than bad user
   input.  All arguments are ignored.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   enum call_site_parameter_kind kind,
				   union call_site_parameter_u kind_u,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1548
1549 /* Stub dwarf_expr_context_funcs.get_addr_index implementation. */
1550
1551 CORE_ADDR
1552 ctx_no_get_addr_index (void *baton, unsigned int index)
1553 {
1554 error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
1555 }
1556
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_dwarf2expr;

void
_initialize_dwarf2expr (void)
{
  /* Register the per-gdbarch data slot used by the expression
     evaluator; dwarf_gdbarch_types_init allocates the per-arch
     struct (the types inside it are filled in lazily).  */
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}