]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/dwarf2expr.c
Copyright updates for 2007.
[thirdparty/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
23
24 #include "defs.h"
25 #include "symtab.h"
26 #include "gdbtypes.h"
27 #include "value.h"
28 #include "gdbcore.h"
29 #include "elf/dwarf2.h"
30 #include "dwarf2expr.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36
37 /* Create a new context for the expression evaluator. */
38
39 struct dwarf_expr_context *
40 new_dwarf_expr_context (void)
41 {
42 struct dwarf_expr_context *retval;
43 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
44 retval->stack_len = 0;
45 retval->stack_allocated = 10;
46 retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR));
47 retval->num_pieces = 0;
48 retval->pieces = 0;
49 return retval;
50 }
51
52 /* Release the memory allocated to CTX. */
53
54 void
55 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
56 {
57 xfree (ctx->stack);
58 xfree (ctx->pieces);
59 xfree (ctx);
60 }
61
62 /* Expand the memory allocated to CTX's stack to contain at least
63 NEED more elements than are currently used. */
64
65 static void
66 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
67 {
68 if (ctx->stack_len + need > ctx->stack_allocated)
69 {
70 size_t newlen = ctx->stack_len + need + 10;
71 ctx->stack = xrealloc (ctx->stack,
72 newlen * sizeof (CORE_ADDR));
73 ctx->stack_allocated = newlen;
74 }
75 }
76
77 /* Push VALUE onto CTX's stack. */
78
79 void
80 dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value)
81 {
82 dwarf_expr_grow_stack (ctx, 1);
83 ctx->stack[ctx->stack_len++] = value;
84 }
85
86 /* Pop the top item off of CTX's stack. */
87
88 void
89 dwarf_expr_pop (struct dwarf_expr_context *ctx)
90 {
91 if (ctx->stack_len <= 0)
92 error (_("dwarf expression stack underflow"));
93 ctx->stack_len--;
94 }
95
96 /* Retrieve the N'th item on CTX's stack. */
97
98 CORE_ADDR
99 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
100 {
101 if (ctx->stack_len <= n)
102 error (_("Asked for position %d of stack, stack only has %d elements on it."),
103 n, ctx->stack_len);
104 return ctx->stack[ctx->stack_len - (1 + n)];
105
106 }
107
108 /* Add a new piece to CTX's piece list. */
109 static void
110 add_piece (struct dwarf_expr_context *ctx,
111 int in_reg, CORE_ADDR value, ULONGEST size)
112 {
113 struct dwarf_expr_piece *p;
114
115 ctx->num_pieces++;
116
117 if (ctx->pieces)
118 ctx->pieces = xrealloc (ctx->pieces,
119 (ctx->num_pieces
120 * sizeof (struct dwarf_expr_piece)));
121 else
122 ctx->pieces = xmalloc (ctx->num_pieces
123 * sizeof (struct dwarf_expr_piece));
124
125 p = &ctx->pieces[ctx->num_pieces - 1];
126 p->in_reg = in_reg;
127 p->value = value;
128 p->size = size;
129 }
130
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  This is a thin wrapper around execute_stack_op; results are
   left on CTX's stack and/or in CTX->pieces.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
{
  execute_stack_op (ctx, addr, addr + len);
}
139
140 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
141 by R, and return the new value of BUF. Verify that it doesn't extend
142 past BUF_END. */
143
144 gdb_byte *
145 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
146 {
147 unsigned shift = 0;
148 ULONGEST result = 0;
149 gdb_byte byte;
150
151 while (1)
152 {
153 if (buf >= buf_end)
154 error (_("read_uleb128: Corrupted DWARF expression."));
155
156 byte = *buf++;
157 result |= (byte & 0x7f) << shift;
158 if ((byte & 0x80) == 0)
159 break;
160 shift += 7;
161 }
162 *r = result;
163 return buf;
164 }
165
166 /* Decode the signed LEB128 constant at BUF into the variable pointed to
167 by R, and return the new value of BUF. Verify that it doesn't extend
168 past BUF_END. */
169
170 gdb_byte *
171 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
172 {
173 unsigned shift = 0;
174 LONGEST result = 0;
175 gdb_byte byte;
176
177 while (1)
178 {
179 if (buf >= buf_end)
180 error (_("read_sleb128: Corrupted DWARF expression."));
181
182 byte = *buf++;
183 result |= (byte & 0x7f) << shift;
184 shift += 7;
185 if ((byte & 0x80) == 0)
186 break;
187 }
188 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
189 result |= -(1 << shift);
190
191 *r = result;
192 return buf;
193 }
194
195 /* Read an address from BUF, and verify that it doesn't extend past
196 BUF_END. The address is returned, and *BYTES_READ is set to the
197 number of bytes read from BUF. */
198
199 CORE_ADDR
200 dwarf2_read_address (gdb_byte *buf, gdb_byte *buf_end, int *bytes_read)
201 {
202 CORE_ADDR result;
203
204 if (buf_end - buf < TARGET_ADDR_BIT / TARGET_CHAR_BIT)
205 error (_("dwarf2_read_address: Corrupted DWARF expression."));
206
207 *bytes_read = TARGET_ADDR_BIT / TARGET_CHAR_BIT;
208 /* NOTE: cagney/2003-05-22: This extract is assuming that a DWARF 2
209 address is always unsigned. That may or may not be true. */
210 result = extract_unsigned_integer (buf, TARGET_ADDR_BIT / TARGET_CHAR_BIT);
211 return result;
212 }
213
214 /* Return the type of an address, for unsigned arithmetic. */
215
216 static struct type *
217 unsigned_address_type (void)
218 {
219 switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
220 {
221 case 2:
222 return builtin_type_uint16;
223 case 4:
224 return builtin_type_uint32;
225 case 8:
226 return builtin_type_uint64;
227 default:
228 internal_error (__FILE__, __LINE__,
229 _("Unsupported address size.\n"));
230 }
231 }
232
233 /* Return the type of an address, for signed arithmetic. */
234
235 static struct type *
236 signed_address_type (void)
237 {
238 switch (TARGET_ADDR_BIT / TARGET_CHAR_BIT)
239 {
240 case 2:
241 return builtin_type_int16;
242 case 4:
243 return builtin_type_int32;
244 case 8:
245 return builtin_type_int64;
246 default:
247 internal_error (__FILE__, __LINE__,
248 _("Unsupported address size.\n"));
249 }
250 }
251 \f
252 /* The engine for the expression evaluator. Using the context in CTX,
253 evaluate the expression between OP_PTR and OP_END. */
254
255 static void
256 execute_stack_op (struct dwarf_expr_context *ctx,
257 gdb_byte *op_ptr, gdb_byte *op_end)
258 {
259 ctx->in_reg = 0;
260
261 while (op_ptr < op_end)
262 {
263 enum dwarf_location_atom op = *op_ptr++;
264 CORE_ADDR result;
265 ULONGEST uoffset, reg;
266 LONGEST offset;
267 int bytes_read;
268
269 switch (op)
270 {
271 case DW_OP_lit0:
272 case DW_OP_lit1:
273 case DW_OP_lit2:
274 case DW_OP_lit3:
275 case DW_OP_lit4:
276 case DW_OP_lit5:
277 case DW_OP_lit6:
278 case DW_OP_lit7:
279 case DW_OP_lit8:
280 case DW_OP_lit9:
281 case DW_OP_lit10:
282 case DW_OP_lit11:
283 case DW_OP_lit12:
284 case DW_OP_lit13:
285 case DW_OP_lit14:
286 case DW_OP_lit15:
287 case DW_OP_lit16:
288 case DW_OP_lit17:
289 case DW_OP_lit18:
290 case DW_OP_lit19:
291 case DW_OP_lit20:
292 case DW_OP_lit21:
293 case DW_OP_lit22:
294 case DW_OP_lit23:
295 case DW_OP_lit24:
296 case DW_OP_lit25:
297 case DW_OP_lit26:
298 case DW_OP_lit27:
299 case DW_OP_lit28:
300 case DW_OP_lit29:
301 case DW_OP_lit30:
302 case DW_OP_lit31:
303 result = op - DW_OP_lit0;
304 break;
305
306 case DW_OP_addr:
307 result = dwarf2_read_address (op_ptr, op_end, &bytes_read);
308 op_ptr += bytes_read;
309 break;
310
311 case DW_OP_const1u:
312 result = extract_unsigned_integer (op_ptr, 1);
313 op_ptr += 1;
314 break;
315 case DW_OP_const1s:
316 result = extract_signed_integer (op_ptr, 1);
317 op_ptr += 1;
318 break;
319 case DW_OP_const2u:
320 result = extract_unsigned_integer (op_ptr, 2);
321 op_ptr += 2;
322 break;
323 case DW_OP_const2s:
324 result = extract_signed_integer (op_ptr, 2);
325 op_ptr += 2;
326 break;
327 case DW_OP_const4u:
328 result = extract_unsigned_integer (op_ptr, 4);
329 op_ptr += 4;
330 break;
331 case DW_OP_const4s:
332 result = extract_signed_integer (op_ptr, 4);
333 op_ptr += 4;
334 break;
335 case DW_OP_const8u:
336 result = extract_unsigned_integer (op_ptr, 8);
337 op_ptr += 8;
338 break;
339 case DW_OP_const8s:
340 result = extract_signed_integer (op_ptr, 8);
341 op_ptr += 8;
342 break;
343 case DW_OP_constu:
344 op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
345 result = uoffset;
346 break;
347 case DW_OP_consts:
348 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
349 result = offset;
350 break;
351
352 /* The DW_OP_reg operations are required to occur alone in
353 location expressions. */
354 case DW_OP_reg0:
355 case DW_OP_reg1:
356 case DW_OP_reg2:
357 case DW_OP_reg3:
358 case DW_OP_reg4:
359 case DW_OP_reg5:
360 case DW_OP_reg6:
361 case DW_OP_reg7:
362 case DW_OP_reg8:
363 case DW_OP_reg9:
364 case DW_OP_reg10:
365 case DW_OP_reg11:
366 case DW_OP_reg12:
367 case DW_OP_reg13:
368 case DW_OP_reg14:
369 case DW_OP_reg15:
370 case DW_OP_reg16:
371 case DW_OP_reg17:
372 case DW_OP_reg18:
373 case DW_OP_reg19:
374 case DW_OP_reg20:
375 case DW_OP_reg21:
376 case DW_OP_reg22:
377 case DW_OP_reg23:
378 case DW_OP_reg24:
379 case DW_OP_reg25:
380 case DW_OP_reg26:
381 case DW_OP_reg27:
382 case DW_OP_reg28:
383 case DW_OP_reg29:
384 case DW_OP_reg30:
385 case DW_OP_reg31:
386 if (op_ptr != op_end && *op_ptr != DW_OP_piece)
387 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
388 "used either alone or in conjuction with DW_OP_piece."));
389
390 result = op - DW_OP_reg0;
391 ctx->in_reg = 1;
392
393 break;
394
395 case DW_OP_regx:
396 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
397 if (op_ptr != op_end && *op_ptr != DW_OP_piece)
398 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
399 "used either alone or in conjuction with DW_OP_piece."));
400
401 result = reg;
402 ctx->in_reg = 1;
403 break;
404
405 case DW_OP_breg0:
406 case DW_OP_breg1:
407 case DW_OP_breg2:
408 case DW_OP_breg3:
409 case DW_OP_breg4:
410 case DW_OP_breg5:
411 case DW_OP_breg6:
412 case DW_OP_breg7:
413 case DW_OP_breg8:
414 case DW_OP_breg9:
415 case DW_OP_breg10:
416 case DW_OP_breg11:
417 case DW_OP_breg12:
418 case DW_OP_breg13:
419 case DW_OP_breg14:
420 case DW_OP_breg15:
421 case DW_OP_breg16:
422 case DW_OP_breg17:
423 case DW_OP_breg18:
424 case DW_OP_breg19:
425 case DW_OP_breg20:
426 case DW_OP_breg21:
427 case DW_OP_breg22:
428 case DW_OP_breg23:
429 case DW_OP_breg24:
430 case DW_OP_breg25:
431 case DW_OP_breg26:
432 case DW_OP_breg27:
433 case DW_OP_breg28:
434 case DW_OP_breg29:
435 case DW_OP_breg30:
436 case DW_OP_breg31:
437 {
438 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
439 result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
440 result += offset;
441 }
442 break;
443 case DW_OP_bregx:
444 {
445 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
446 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
447 result = (ctx->read_reg) (ctx->baton, reg);
448 result += offset;
449 }
450 break;
451 case DW_OP_fbreg:
452 {
453 gdb_byte *datastart;
454 size_t datalen;
455 unsigned int before_stack_len;
456
457 op_ptr = read_sleb128 (op_ptr, op_end, &offset);
458 /* Rather than create a whole new context, we simply
459 record the stack length before execution, then reset it
460 afterwards, effectively erasing whatever the recursive
461 call put there. */
462 before_stack_len = ctx->stack_len;
463 /* FIXME: cagney/2003-03-26: This code should be using
464 get_frame_base_address(), and then implement a dwarf2
465 specific this_base method. */
466 (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
467 dwarf_expr_eval (ctx, datastart, datalen);
468 result = dwarf_expr_fetch (ctx, 0);
469 if (ctx->in_reg)
470 result = (ctx->read_reg) (ctx->baton, result);
471 result = result + offset;
472 ctx->stack_len = before_stack_len;
473 ctx->in_reg = 0;
474 }
475 break;
476 case DW_OP_dup:
477 result = dwarf_expr_fetch (ctx, 0);
478 break;
479
480 case DW_OP_drop:
481 dwarf_expr_pop (ctx);
482 goto no_push;
483
484 case DW_OP_pick:
485 offset = *op_ptr++;
486 result = dwarf_expr_fetch (ctx, offset);
487 break;
488
489 case DW_OP_over:
490 result = dwarf_expr_fetch (ctx, 1);
491 break;
492
493 case DW_OP_rot:
494 {
495 CORE_ADDR t1, t2, t3;
496
497 if (ctx->stack_len < 3)
498 error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
499 ctx->stack_len);
500 t1 = ctx->stack[ctx->stack_len - 1];
501 t2 = ctx->stack[ctx->stack_len - 2];
502 t3 = ctx->stack[ctx->stack_len - 3];
503 ctx->stack[ctx->stack_len - 1] = t2;
504 ctx->stack[ctx->stack_len - 2] = t3;
505 ctx->stack[ctx->stack_len - 3] = t1;
506 goto no_push;
507 }
508
509 case DW_OP_deref:
510 case DW_OP_deref_size:
511 case DW_OP_abs:
512 case DW_OP_neg:
513 case DW_OP_not:
514 case DW_OP_plus_uconst:
515 /* Unary operations. */
516 result = dwarf_expr_fetch (ctx, 0);
517 dwarf_expr_pop (ctx);
518
519 switch (op)
520 {
521 case DW_OP_deref:
522 {
523 gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
524 int bytes_read;
525
526 (ctx->read_mem) (ctx->baton, buf, result,
527 TARGET_ADDR_BIT / TARGET_CHAR_BIT);
528 result = dwarf2_read_address (buf,
529 buf + (TARGET_ADDR_BIT
530 / TARGET_CHAR_BIT),
531 &bytes_read);
532 }
533 break;
534
535 case DW_OP_deref_size:
536 {
537 gdb_byte *buf = alloca (TARGET_ADDR_BIT / TARGET_CHAR_BIT);
538 int bytes_read;
539
540 (ctx->read_mem) (ctx->baton, buf, result, *op_ptr++);
541 result = dwarf2_read_address (buf,
542 buf + (TARGET_ADDR_BIT
543 / TARGET_CHAR_BIT),
544 &bytes_read);
545 }
546 break;
547
548 case DW_OP_abs:
549 if ((signed int) result < 0)
550 result = -result;
551 break;
552 case DW_OP_neg:
553 result = -result;
554 break;
555 case DW_OP_not:
556 result = ~result;
557 break;
558 case DW_OP_plus_uconst:
559 op_ptr = read_uleb128 (op_ptr, op_end, &reg);
560 result += reg;
561 break;
562 }
563 break;
564
565 case DW_OP_and:
566 case DW_OP_div:
567 case DW_OP_minus:
568 case DW_OP_mod:
569 case DW_OP_mul:
570 case DW_OP_or:
571 case DW_OP_plus:
572 case DW_OP_shl:
573 case DW_OP_shr:
574 case DW_OP_shra:
575 case DW_OP_xor:
576 case DW_OP_le:
577 case DW_OP_ge:
578 case DW_OP_eq:
579 case DW_OP_lt:
580 case DW_OP_gt:
581 case DW_OP_ne:
582 {
583 /* Binary operations. Use the value engine to do computations in
584 the right width. */
585 CORE_ADDR first, second;
586 enum exp_opcode binop;
587 struct value *val1, *val2;
588
589 second = dwarf_expr_fetch (ctx, 0);
590 dwarf_expr_pop (ctx);
591
592 first = dwarf_expr_fetch (ctx, 0);
593 dwarf_expr_pop (ctx);
594
595 val1 = value_from_longest (unsigned_address_type (), first);
596 val2 = value_from_longest (unsigned_address_type (), second);
597
598 switch (op)
599 {
600 case DW_OP_and:
601 binop = BINOP_BITWISE_AND;
602 break;
603 case DW_OP_div:
604 binop = BINOP_DIV;
605 break;
606 case DW_OP_minus:
607 binop = BINOP_SUB;
608 break;
609 case DW_OP_mod:
610 binop = BINOP_MOD;
611 break;
612 case DW_OP_mul:
613 binop = BINOP_MUL;
614 break;
615 case DW_OP_or:
616 binop = BINOP_BITWISE_IOR;
617 break;
618 case DW_OP_plus:
619 binop = BINOP_ADD;
620 break;
621 case DW_OP_shl:
622 binop = BINOP_LSH;
623 break;
624 case DW_OP_shr:
625 binop = BINOP_RSH;
626 break;
627 case DW_OP_shra:
628 binop = BINOP_RSH;
629 val1 = value_from_longest (signed_address_type (), first);
630 break;
631 case DW_OP_xor:
632 binop = BINOP_BITWISE_XOR;
633 break;
634 case DW_OP_le:
635 binop = BINOP_LEQ;
636 break;
637 case DW_OP_ge:
638 binop = BINOP_GEQ;
639 break;
640 case DW_OP_eq:
641 binop = BINOP_EQUAL;
642 break;
643 case DW_OP_lt:
644 binop = BINOP_LESS;
645 break;
646 case DW_OP_gt:
647 binop = BINOP_GTR;
648 break;
649 case DW_OP_ne:
650 binop = BINOP_NOTEQUAL;
651 break;
652 default:
653 internal_error (__FILE__, __LINE__,
654 _("Can't be reached."));
655 }
656 result = value_as_long (value_binop (val1, val2, binop));
657 }
658 break;
659
660 case DW_OP_GNU_push_tls_address:
661 /* Variable is at a constant offset in the thread-local
662 storage block into the objfile for the current thread and
663 the dynamic linker module containing this expression. Here
664 we return returns the offset from that base. The top of the
665 stack has the offset from the beginning of the thread
666 control block at which the variable is located. Nothing
667 should follow this operator, so the top of stack would be
668 returned. */
669 result = dwarf_expr_fetch (ctx, 0);
670 dwarf_expr_pop (ctx);
671 result = (ctx->get_tls_address) (ctx->baton, result);
672 break;
673
674 case DW_OP_skip:
675 offset = extract_signed_integer (op_ptr, 2);
676 op_ptr += 2;
677 op_ptr += offset;
678 goto no_push;
679
680 case DW_OP_bra:
681 offset = extract_signed_integer (op_ptr, 2);
682 op_ptr += 2;
683 if (dwarf_expr_fetch (ctx, 0) != 0)
684 op_ptr += offset;
685 dwarf_expr_pop (ctx);
686 goto no_push;
687
688 case DW_OP_nop:
689 goto no_push;
690
691 case DW_OP_piece:
692 {
693 ULONGEST size;
694 CORE_ADDR addr_or_regnum;
695
696 /* Record the piece. */
697 op_ptr = read_uleb128 (op_ptr, op_end, &size);
698 addr_or_regnum = dwarf_expr_fetch (ctx, 0);
699 add_piece (ctx, ctx->in_reg, addr_or_regnum, size);
700
701 /* Pop off the address/regnum, and clear the in_reg flag. */
702 dwarf_expr_pop (ctx);
703 ctx->in_reg = 0;
704 }
705 goto no_push;
706
707 default:
708 error (_("Unhandled dwarf expression opcode 0x%x"), op);
709 }
710
711 /* Most things push a result value. */
712 dwarf_expr_push (ctx, result);
713 no_push:;
714 }
715 }