2010-05-24 Michael Snyder <msnyder@vmware.com>
[binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36 static struct type *unsigned_address_type (struct gdbarch *, int);
37
38 /* Create a new context for the expression evaluator. */
39
40 struct dwarf_expr_context *
41 new_dwarf_expr_context (void)
42 {
43 struct dwarf_expr_context *retval;
44
45 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
46 retval->stack_len = 0;
47 retval->stack_allocated = 10;
48 retval->stack = xmalloc (retval->stack_allocated
49 * sizeof (struct dwarf_stack_value));
50 retval->num_pieces = 0;
51 retval->pieces = 0;
52 retval->max_recursion_depth = 0x100;
53 return retval;
54 }
55
56 /* Release the memory allocated to CTX. */
57
58 void
59 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
60 {
61 xfree (ctx->stack);
62 xfree (ctx->pieces);
63 xfree (ctx);
64 }
65
/* Cleanup-callback adapter: forwards its opaque argument to
   free_dwarf_expr_context.  */

static void
free_dwarf_expr_context_cleanup (void *arg)
{
  struct dwarf_expr_context *ctx = arg;

  free_dwarf_expr_context (ctx);
}
73
74 /* Return a cleanup that calls free_dwarf_expr_context. */
75
76 struct cleanup *
77 make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
78 {
79 return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
80 }
81
82 /* Expand the memory allocated to CTX's stack to contain at least
83 NEED more elements than are currently used. */
84
85 static void
86 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
87 {
88 if (ctx->stack_len + need > ctx->stack_allocated)
89 {
90 size_t newlen = ctx->stack_len + need + 10;
91
92 ctx->stack = xrealloc (ctx->stack,
93 newlen * sizeof (struct dwarf_stack_value));
94 ctx->stack_allocated = newlen;
95 }
96 }
97
98 /* Push VALUE onto CTX's stack. */
99
100 void
101 dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value,
102 int in_stack_memory)
103 {
104 struct dwarf_stack_value *v;
105
106 dwarf_expr_grow_stack (ctx, 1);
107 v = &ctx->stack[ctx->stack_len++];
108 v->value = value;
109 v->in_stack_memory = in_stack_memory;
110 }
111
112 /* Pop the top item off of CTX's stack. */
113
114 void
115 dwarf_expr_pop (struct dwarf_expr_context *ctx)
116 {
117 if (ctx->stack_len <= 0)
118 error (_("dwarf expression stack underflow"));
119 ctx->stack_len--;
120 }
121
122 /* Retrieve the N'th item on CTX's stack. */
123
124 CORE_ADDR
125 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
126 {
127 if (ctx->stack_len <= n)
128 error (_("Asked for position %d of stack, stack only has %d elements on it."),
129 n, ctx->stack_len);
130 return ctx->stack[ctx->stack_len - (1 + n)].value;
131
132 }
133
134 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
135
136 int
137 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
138 {
139 if (ctx->stack_len <= n)
140 error (_("Asked for position %d of stack, stack only has %d elements on it."),
141 n, ctx->stack_len);
142 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
143
144 }
145
146 /* Return true if the expression stack is empty. */
147
148 static int
149 dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
150 {
151 return ctx->stack_len == 0;
152 }
153
154 /* Add a new piece to CTX's piece list. */
155 static void
156 add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
157 {
158 struct dwarf_expr_piece *p;
159
160 ctx->num_pieces++;
161
162 ctx->pieces = xrealloc (ctx->pieces,
163 (ctx->num_pieces
164 * sizeof (struct dwarf_expr_piece)));
165
166 p = &ctx->pieces[ctx->num_pieces - 1];
167 p->location = ctx->location;
168 p->size = size;
169 p->offset = offset;
170
171 if (p->location == DWARF_VALUE_LITERAL)
172 {
173 p->v.literal.data = ctx->data;
174 p->v.literal.length = ctx->len;
175 }
176 else if (dwarf_expr_stack_empty_p (ctx))
177 {
178 p->location = DWARF_VALUE_OPTIMIZED_OUT;
179 /* Also reset the context's location, for our callers. This is
180 a somewhat strange approach, but this lets us avoid setting
181 the location to DWARF_VALUE_MEMORY in all the individual
182 cases in the evaluator. */
183 ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
184 }
185 else
186 {
187 p->v.expr.value = dwarf_expr_fetch (ctx, 0);
188 p->v.expr.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
189 }
190 }
191
192 /* Evaluate the expression at ADDR (LEN bytes long) using the context
193 CTX. */
194
195 void
196 dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
197 {
198 int old_recursion_depth = ctx->recursion_depth;
199
200 execute_stack_op (ctx, addr, addr + len);
201
202 /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here. */
203
204 gdb_assert (ctx->recursion_depth == old_recursion_depth);
205 }
206
207 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
208 by R, and return the new value of BUF. Verify that it doesn't extend
209 past BUF_END. */
210
211 gdb_byte *
212 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
213 {
214 unsigned shift = 0;
215 ULONGEST result = 0;
216 gdb_byte byte;
217
218 while (1)
219 {
220 if (buf >= buf_end)
221 error (_("read_uleb128: Corrupted DWARF expression."));
222
223 byte = *buf++;
224 result |= (byte & 0x7f) << shift;
225 if ((byte & 0x80) == 0)
226 break;
227 shift += 7;
228 }
229 *r = result;
230 return buf;
231 }
232
233 /* Decode the signed LEB128 constant at BUF into the variable pointed to
234 by R, and return the new value of BUF. Verify that it doesn't extend
235 past BUF_END. */
236
237 gdb_byte *
238 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
239 {
240 unsigned shift = 0;
241 LONGEST result = 0;
242 gdb_byte byte;
243
244 while (1)
245 {
246 if (buf >= buf_end)
247 error (_("read_sleb128: Corrupted DWARF expression."));
248
249 byte = *buf++;
250 result |= (byte & 0x7f) << shift;
251 shift += 7;
252 if ((byte & 0x80) == 0)
253 break;
254 }
255 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
256 result |= -(1 << shift);
257
258 *r = result;
259 return buf;
260 }
261
262 /* Read an address of size ADDR_SIZE from BUF, and verify that it
263 doesn't extend past BUF_END. */
264
265 CORE_ADDR
266 dwarf2_read_address (struct gdbarch *gdbarch, gdb_byte *buf,
267 gdb_byte *buf_end, int addr_size)
268 {
269 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
270
271 if (buf_end - buf < addr_size)
272 error (_("dwarf2_read_address: Corrupted DWARF expression."));
273
274 /* For most architectures, calling extract_unsigned_integer() alone
275 is sufficient for extracting an address. However, some
276 architectures (e.g. MIPS) use signed addresses and using
277 extract_unsigned_integer() will not produce a correct
278 result. Make sure we invoke gdbarch_integer_to_address()
279 for those architectures which require it.
280
281 The use of `unsigned_address_type' in the code below refers to
282 the type of buf and has no bearing on the signedness of the
283 address being returned. */
284
285 if (gdbarch_integer_to_address_p (gdbarch))
286 return gdbarch_integer_to_address
287 (gdbarch, unsigned_address_type (gdbarch, addr_size), buf);
288
289 return extract_unsigned_integer (buf, addr_size, byte_order);
290 }
291
292 /* Return the type of an address of size ADDR_SIZE,
293 for unsigned arithmetic. */
294
295 static struct type *
296 unsigned_address_type (struct gdbarch *gdbarch, int addr_size)
297 {
298 switch (addr_size)
299 {
300 case 2:
301 return builtin_type (gdbarch)->builtin_uint16;
302 case 4:
303 return builtin_type (gdbarch)->builtin_uint32;
304 case 8:
305 return builtin_type (gdbarch)->builtin_uint64;
306 default:
307 internal_error (__FILE__, __LINE__,
308 _("Unsupported address size.\n"));
309 }
310 }
311
312 /* Return the type of an address of size ADDR_SIZE,
313 for signed arithmetic. */
314
315 static struct type *
316 signed_address_type (struct gdbarch *gdbarch, int addr_size)
317 {
318 switch (addr_size)
319 {
320 case 2:
321 return builtin_type (gdbarch)->builtin_int16;
322 case 4:
323 return builtin_type (gdbarch)->builtin_int32;
324 case 8:
325 return builtin_type (gdbarch)->builtin_int64;
326 default:
327 internal_error (__FILE__, __LINE__,
328 _("Unsupported address size.\n"));
329 }
330 }
331 \f
332
333 /* Check that the current operator is either at the end of an
334 expression, or that it is followed by a composition operator. */
335
336 static void
337 require_composition (gdb_byte *op_ptr, gdb_byte *op_end, const char *op_name)
338 {
339 /* It seems like DW_OP_GNU_uninit should be handled here. However,
340 it doesn't seem to make sense for DW_OP_*_value, and it was not
341 checked at the other place that this function is called. */
342 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
343 error (_("DWARF-2 expression error: `%s' operations must be "
344 "used either alone or in conjuction with DW_OP_piece "
345 "or DW_OP_bit_piece."),
346 op_name);
347 }
348
/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  Results are
   pushed on CTX's value stack and/or recorded in CTX's piece list,
   and CTX->location tells the caller how to interpret the top of
   stack.  DW_OP_fbreg re-enters the evaluator via dwarf_expr_eval, so
   a recursion-depth counter guards against looping expressions.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
		  gdb_byte *op_ptr, gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);

  ctx->location = DWARF_VALUE_MEMORY;
  ctx->initialized = 1;		/* Default is initialized.  */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
	   ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      CORE_ADDR result;
      /* Assume the value is not in stack memory.
	 Code that knows otherwise sets this to 1.
	 Some arithmetic on stack addresses can probably be assumed to still
	 be a stack address, but we skip this complication for now.
	 This is just an optimization, so it's always ok to punt
	 and leave this as 0.  */
      int in_stack_memory = 0;
      ULONGEST uoffset, reg;
      LONGEST offset;

      switch (op)
	{
	/* Small literal constants: the value is encoded in the opcode
	   itself.  */
	case DW_OP_lit0:
	case DW_OP_lit1:
	case DW_OP_lit2:
	case DW_OP_lit3:
	case DW_OP_lit4:
	case DW_OP_lit5:
	case DW_OP_lit6:
	case DW_OP_lit7:
	case DW_OP_lit8:
	case DW_OP_lit9:
	case DW_OP_lit10:
	case DW_OP_lit11:
	case DW_OP_lit12:
	case DW_OP_lit13:
	case DW_OP_lit14:
	case DW_OP_lit15:
	case DW_OP_lit16:
	case DW_OP_lit17:
	case DW_OP_lit18:
	case DW_OP_lit19:
	case DW_OP_lit20:
	case DW_OP_lit21:
	case DW_OP_lit22:
	case DW_OP_lit23:
	case DW_OP_lit24:
	case DW_OP_lit25:
	case DW_OP_lit26:
	case DW_OP_lit27:
	case DW_OP_lit28:
	case DW_OP_lit29:
	case DW_OP_lit30:
	case DW_OP_lit31:
	  result = op - DW_OP_lit0;
	  break;

	case DW_OP_addr:
	  result = dwarf2_read_address (ctx->gdbarch,
					op_ptr, op_end, ctx->addr_size);
	  op_ptr += ctx->addr_size;
	  break;

	/* Fixed-size constants of 1, 2, 4 and 8 bytes, signed and
	   unsigned.  */
	case DW_OP_const1u:
	  result = extract_unsigned_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const1s:
	  result = extract_signed_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const2u:
	  result = extract_unsigned_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const2s:
	  result = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const4u:
	  result = extract_unsigned_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const4s:
	  result = extract_signed_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const8u:
	  result = extract_unsigned_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	case DW_OP_const8s:
	  result = extract_signed_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	case DW_OP_constu:
	  op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
	  result = uoffset;
	  break;
	case DW_OP_consts:
	  op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	  result = offset;
	  break;

	/* The DW_OP_reg operations are required to occur alone in
	   location expressions.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  /* Unlike require_composition below, DW_OP_GNU_uninit is
	     also tolerated after a register location.  */
	  if (op_ptr != op_end
	      && *op_ptr != DW_OP_piece
	      && *op_ptr != DW_OP_bit_piece
	      && *op_ptr != DW_OP_GNU_uninit)
	    error (_("DWARF-2 expression error: DW_OP_reg operations must be "
		     "used either alone or in conjuction with DW_OP_piece "
		     "or DW_OP_bit_piece."));

	  result = op - DW_OP_reg0;
	  ctx->location = DWARF_VALUE_REGISTER;
	  break;

	case DW_OP_regx:
	  op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	  require_composition (op_ptr, op_end, "DW_OP_regx");

	  result = reg;
	  ctx->location = DWARF_VALUE_REGISTER;
	  break;

	case DW_OP_implicit_value:
	  {
	    ULONGEST len;

	    op_ptr = read_uleb128 (op_ptr, op_end, &len);
	    if (op_ptr + len > op_end)
	      error (_("DW_OP_implicit_value: too few bytes available."));
	    /* Record the literal bytes in the context; add_piece (or
	       the caller) picks them up from ctx->data/ctx->len.  */
	    ctx->len = len;
	    ctx->data = op_ptr;
	    ctx->location = DWARF_VALUE_LITERAL;
	    op_ptr += len;
	    require_composition (op_ptr, op_end, "DW_OP_implicit_value");
	  }
	  goto no_push;

	case DW_OP_stack_value:
	  ctx->location = DWARF_VALUE_STACK;
	  require_composition (op_ptr, op_end, "DW_OP_stack_value");
	  goto no_push;

	/* Register-relative addresses: register value plus an SLEB128
	   offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  {
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
	    result += offset;
	  }
	  break;
	case DW_OP_bregx:
	  {
	    op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, reg);
	    result += offset;
	  }
	  break;
	case DW_OP_fbreg:
	  {
	    gdb_byte *datastart;
	    size_t datalen;
	    unsigned int before_stack_len;

	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    /* Rather than create a whole new context, we simply
	       record the stack length before execution, then reset it
	       afterwards, effectively erasing whatever the recursive
	       call put there.  */
	    before_stack_len = ctx->stack_len;
	    /* FIXME: cagney/2003-03-26: This code should be using
	       get_frame_base_address(), and then implement a dwarf2
	       specific this_base method.  */
	    (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
	    dwarf_expr_eval (ctx, datastart, datalen);
	    if (ctx->location == DWARF_VALUE_LITERAL
		|| ctx->location == DWARF_VALUE_STACK)
	      error (_("Not implemented: computing frame base using explicit value operator"));
	    result = dwarf_expr_fetch (ctx, 0);
	    if (ctx->location == DWARF_VALUE_REGISTER)
	      result = (ctx->read_reg) (ctx->baton, result);
	    result = result + offset;
	    in_stack_memory = 1;
	    ctx->stack_len = before_stack_len;
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  break;

	case DW_OP_dup:
	  result = dwarf_expr_fetch (ctx, 0);
	  in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
	  break;

	case DW_OP_drop:
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_pick:
	  /* The stack index is a single unsigned byte operand.  */
	  offset = *op_ptr++;
	  result = dwarf_expr_fetch (ctx, offset);
	  in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
	  break;

	case DW_OP_swap:
	  {
	    struct dwarf_stack_value t1, t2;

	    if (ctx->stack_len < 2)
	       error (_("Not enough elements for DW_OP_swap. Need 2, have %d."),
		      ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t1;
	    goto no_push;
	  }

	case DW_OP_over:
	  result = dwarf_expr_fetch (ctx, 1);
	  in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
	  break;

	case DW_OP_rot:
	  {
	    struct dwarf_stack_value t1, t2, t3;

	    if (ctx->stack_len < 3)
	       error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
		      ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    t3 = ctx->stack[ctx->stack_len - 3];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t3;
	    ctx->stack[ctx->stack_len - 3] = t1;
	    goto no_push;
	  }

	case DW_OP_deref:
	case DW_OP_deref_size:
	case DW_OP_abs:
	case DW_OP_neg:
	case DW_OP_not:
	case DW_OP_plus_uconst:
	  /* Unary operations.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);

	  switch (op)
	    {
	    case DW_OP_deref:
	      {
		gdb_byte *buf = alloca (ctx->addr_size);

		(ctx->read_mem) (ctx->baton, buf, result, ctx->addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + ctx->addr_size,
					      ctx->addr_size);
	      }
	      break;

	    case DW_OP_deref_size:
	      {
		/* Like DW_OP_deref, but the load size is an explicit
		   one-byte operand rather than the address size.  */
		int addr_size = *op_ptr++;
		gdb_byte *buf = alloca (addr_size);

		(ctx->read_mem) (ctx->baton, buf, result, addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + addr_size,
					      addr_size);
	      }
	      break;

	    case DW_OP_abs:
	      /* NOTE(review): the sign test truncates RESULT to a
		 32-bit signed int; presumably adequate for 32-bit
		 targets, but looks suspect for 64-bit addresses --
		 confirm.  */
	      if ((signed int) result < 0)
		result = -result;
	      break;
	    case DW_OP_neg:
	      result = -result;
	      break;
	    case DW_OP_not:
	      result = ~result;
	      break;
	    case DW_OP_plus_uconst:
	      op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	      result += reg;
	      break;
	    }
	  break;

	case DW_OP_and:
	case DW_OP_div:
	case DW_OP_minus:
	case DW_OP_mod:
	case DW_OP_mul:
	case DW_OP_or:
	case DW_OP_plus:
	case DW_OP_shl:
	case DW_OP_shr:
	case DW_OP_shra:
	case DW_OP_xor:
	case DW_OP_le:
	case DW_OP_ge:
	case DW_OP_eq:
	case DW_OP_lt:
	case DW_OP_gt:
	case DW_OP_ne:
	  {
	    /* Binary operations.  Use the value engine to do computations in
	       the right width.  */
	    CORE_ADDR first, second;
	    enum exp_opcode binop;
	    struct value *val1 = NULL, *val2 = NULL;
	    struct type *stype, *utype;

	    second = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    first = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    utype = unsigned_address_type (ctx->gdbarch, ctx->addr_size);
	    stype = signed_address_type (ctx->gdbarch, ctx->addr_size);

	    /* Operands default to unsigned (set below); operators
	       that need signed semantics wrap one or both operands in
	       the signed address type here.  */
	    switch (op)
	      {
	      case DW_OP_and:
		binop = BINOP_BITWISE_AND;
		break;
	      case DW_OP_div:
		binop = BINOP_DIV;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_minus:
		binop = BINOP_SUB;
		break;
	      case DW_OP_mod:
		binop = BINOP_MOD;
		break;
	      case DW_OP_mul:
		binop = BINOP_MUL;
		break;
	      case DW_OP_or:
		binop = BINOP_BITWISE_IOR;
		break;
	      case DW_OP_plus:
		binop = BINOP_ADD;
		break;
	      case DW_OP_shl:
		binop = BINOP_LSH;
		break;
	      case DW_OP_shr:
		binop = BINOP_RSH;
		break;
	      case DW_OP_shra:
		/* Arithmetic right shift: same BINOP_RSH, but a
		   signed left operand.  */
		binop = BINOP_RSH;
		val1 = value_from_longest (stype, first);
		break;
	      case DW_OP_xor:
		binop = BINOP_BITWISE_XOR;
		break;
	      case DW_OP_le:
		binop = BINOP_LEQ;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_ge:
		binop = BINOP_GEQ;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_eq:
		binop = BINOP_EQUAL;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_lt:
		binop = BINOP_LESS;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_gt:
		binop = BINOP_GTR;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      case DW_OP_ne:
		binop = BINOP_NOTEQUAL;
		val1 = value_from_longest (stype, first);
		val2 = value_from_longest (stype, second);
		break;
	      default:
		internal_error (__FILE__, __LINE__,
				_("Can't be reached."));
	      }

	    /* We use unsigned operands by default.  */
	    if (val1 == NULL)
	      val1 = value_from_longest (utype, first);
	    if (val2 == NULL)
	      val2 = value_from_longest (utype, second);

	    result = value_as_long (value_binop (val1, val2, binop));
	  }
	  break;

	case DW_OP_call_frame_cfa:
	  result = (ctx->get_frame_cfa) (ctx->baton);
	  in_stack_memory = 1;
	  break;

	case DW_OP_GNU_push_tls_address:
	  /* Variable is at a constant offset in the thread-local
	     storage block into the objfile for the current thread and
	     the dynamic linker module containing this expression.  Here
	     we return the offset from that base.  The top of the
	     stack has the offset from the beginning of the thread
	     control block at which the variable is located.  Nothing
	     should follow this operator, so the top of stack would be
	     returned.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);
	  result = (ctx->get_tls_address) (ctx->baton, result);
	  break;

	case DW_OP_skip:
	  /* Unconditional branch: signed 16-bit offset relative to the
	     next opcode.  The target is not bounds-checked here.  */
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  op_ptr += offset;
	  goto no_push;

	case DW_OP_bra:
	  /* Conditional branch: taken when the popped top of stack is
	     nonzero.  */
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  if (dwarf_expr_fetch (ctx, 0) != 0)
	    op_ptr += offset;
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_nop:
	  goto no_push;

	case DW_OP_piece:
	  {
	    ULONGEST size;

	    /* Record the piece.  The operand is in bytes; add_piece
	       takes bits.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    add_piece (ctx, 8 * size, 0);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (ctx->location != DWARF_VALUE_LITERAL
		&& ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
	      dwarf_expr_pop (ctx);
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;

	case DW_OP_bit_piece:
	  {
	    ULONGEST size, offset;

	    /* Record the piece.  Both operands are already in bits.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    op_ptr = read_uleb128 (op_ptr, op_end, &offset);
	    add_piece (ctx, size, offset);

	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (ctx->location != DWARF_VALUE_LITERAL
		&& ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
	      dwarf_expr_pop (ctx);
	    ctx->location = DWARF_VALUE_MEMORY;
	  }
	  goto no_push;

	case DW_OP_GNU_uninit:
	  if (op_ptr != op_end)
	    error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
		   "be the very last op."));

	  ctx->initialized = 0;
	  goto no_push;

	default:
	  error (_("Unhandled dwarf expression opcode 0x%x"), op);
	}

      /* Most things push a result value.  */
      dwarf_expr_push (ctx, result, in_stack_memory);
    no_push:;
    }

  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}