/* Extracted from binutils-gdb.git, gdb/dwarf2expr.c.  (gitweb page
   navigation header removed; it was not part of the source file.)  */
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2003, 2005, 2007-2012 Free Software Foundation,
4 Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 const gdb_byte *, const gdb_byte *);
36
37 /* Cookie for gdbarch data. */
38
39 static struct gdbarch_data *dwarf_arch_cookie;
40
41 /* This holds gdbarch-specific types used by the DWARF expression
42 evaluator. See comments in execute_stack_op. */
43
struct dwarf_gdbarch_types
{
  /* Lazily-created signed integer types for 2-, 4- and 8-byte
     addresses respectively; see dwarf_expr_address_type.  */
  struct type *dw_types[3];
};
48
49 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
50
51 static void *
52 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
53 {
54 struct dwarf_gdbarch_types *types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
56
57 /* The types themselves are lazily initialized. */
58
59 return types;
60 }
61
62 /* Return the type used for DWARF operations where the type is
63 unspecified in the DWARF spec. Only certain sizes are
64 supported. */
65
66 static struct type *
67 dwarf_expr_address_type (struct dwarf_expr_context *ctx)
68 {
69 struct dwarf_gdbarch_types *types = gdbarch_data (ctx->gdbarch,
70 dwarf_arch_cookie);
71 int ndx;
72
73 if (ctx->addr_size == 2)
74 ndx = 0;
75 else if (ctx->addr_size == 4)
76 ndx = 1;
77 else if (ctx->addr_size == 8)
78 ndx = 2;
79 else
80 error (_("Unsupported address size in DWARF expressions: %d bits"),
81 8 * ctx->addr_size);
82
83 if (types->dw_types[ndx] == NULL)
84 types->dw_types[ndx]
85 = arch_integer_type (ctx->gdbarch,
86 8 * ctx->addr_size,
87 0, "<signed DWARF address type>");
88
89 return types->dw_types[ndx];
90 }
91
92 /* Create a new context for the expression evaluator. */
93
94 struct dwarf_expr_context *
95 new_dwarf_expr_context (void)
96 {
97 struct dwarf_expr_context *retval;
98
99 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
100 retval->stack_len = 0;
101 retval->stack_allocated = 10;
102 retval->stack = xmalloc (retval->stack_allocated
103 * sizeof (struct dwarf_stack_value));
104 retval->num_pieces = 0;
105 retval->pieces = 0;
106 retval->max_recursion_depth = 0x100;
107 return retval;
108 }
109
110 /* Release the memory allocated to CTX. */
111
112 void
113 free_dwarf_expr_context (struct dwarf_expr_context *ctx)
114 {
115 xfree (ctx->stack);
116 xfree (ctx->pieces);
117 xfree (ctx);
118 }
119
120 /* Helper for make_cleanup_free_dwarf_expr_context. */
121
static void
free_dwarf_expr_context_cleanup (void *arg)
{
  /* Cleanup callbacks receive their datum as void *; recover the
     context and free it.  */
  struct dwarf_expr_context *ctx = arg;

  free_dwarf_expr_context (ctx);
}
127
128 /* Return a cleanup that calls free_dwarf_expr_context. */
129
struct cleanup *
make_cleanup_free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  /* CTX is released by free_dwarf_expr_context when the returned
     cleanup is run; the caller gives up ownership at that point.  */
  return make_cleanup (free_dwarf_expr_context_cleanup, ctx);
}
135
136 /* Expand the memory allocated to CTX's stack to contain at least
137 NEED more elements than are currently used. */
138
139 static void
140 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
141 {
142 if (ctx->stack_len + need > ctx->stack_allocated)
143 {
144 size_t newlen = ctx->stack_len + need + 10;
145
146 ctx->stack = xrealloc (ctx->stack,
147 newlen * sizeof (struct dwarf_stack_value));
148 ctx->stack_allocated = newlen;
149 }
150 }
151
152 /* Push VALUE onto CTX's stack. */
153
154 static void
155 dwarf_expr_push (struct dwarf_expr_context *ctx, struct value *value,
156 int in_stack_memory)
157 {
158 struct dwarf_stack_value *v;
159
160 dwarf_expr_grow_stack (ctx, 1);
161 v = &ctx->stack[ctx->stack_len++];
162 v->value = value;
163 v->in_stack_memory = in_stack_memory;
164 }
165
166 /* Push VALUE onto CTX's stack. */
167
168 void
169 dwarf_expr_push_address (struct dwarf_expr_context *ctx, CORE_ADDR value,
170 int in_stack_memory)
171 {
172 dwarf_expr_push (ctx,
173 value_from_ulongest (dwarf_expr_address_type (ctx), value),
174 in_stack_memory);
175 }
176
177 /* Pop the top item off of CTX's stack. */
178
179 static void
180 dwarf_expr_pop (struct dwarf_expr_context *ctx)
181 {
182 if (ctx->stack_len <= 0)
183 error (_("dwarf expression stack underflow"));
184 ctx->stack_len--;
185 }
186
187 /* Retrieve the N'th item on CTX's stack. */
188
189 struct value *
190 dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
191 {
192 if (ctx->stack_len <= n)
193 error (_("Asked for position %d of stack, "
194 "stack only has %d elements on it."),
195 n, ctx->stack_len);
196 return ctx->stack[ctx->stack_len - (1 + n)].value;
197 }
198
199 /* Require that TYPE be an integral type; throw an exception if not. */
200
201 static void
202 dwarf_require_integral (struct type *type)
203 {
204 if (TYPE_CODE (type) != TYPE_CODE_INT
205 && TYPE_CODE (type) != TYPE_CODE_CHAR
206 && TYPE_CODE (type) != TYPE_CODE_BOOL)
207 error (_("integral type expected in DWARF expression"));
208 }
209
210 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
211 type. */
212
213 static struct type *
214 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
215 {
216 switch (TYPE_LENGTH (type))
217 {
218 case 1:
219 return builtin_type (gdbarch)->builtin_uint8;
220 case 2:
221 return builtin_type (gdbarch)->builtin_uint16;
222 case 4:
223 return builtin_type (gdbarch)->builtin_uint32;
224 case 8:
225 return builtin_type (gdbarch)->builtin_uint64;
226 default:
227 error (_("no unsigned variant found for type, while evaluating "
228 "DWARF expression"));
229 }
230 }
231
232 /* Return the signed form of TYPE. TYPE is necessarily an integral
233 type. */
234
235 static struct type *
236 get_signed_type (struct gdbarch *gdbarch, struct type *type)
237 {
238 switch (TYPE_LENGTH (type))
239 {
240 case 1:
241 return builtin_type (gdbarch)->builtin_int8;
242 case 2:
243 return builtin_type (gdbarch)->builtin_int16;
244 case 4:
245 return builtin_type (gdbarch)->builtin_int32;
246 case 8:
247 return builtin_type (gdbarch)->builtin_int64;
248 default:
249 error (_("no signed variant found for type, while evaluating "
250 "DWARF expression"));
251 }
252 }
253
254 /* Retrieve the N'th item on CTX's stack, converted to an address. */
255
CORE_ADDR
dwarf_expr_fetch_address (struct dwarf_expr_context *ctx, int n)
{
  struct value *result_val = dwarf_expr_fetch (ctx, n);
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
  ULONGEST result;

  /* Only integral stack values may be interpreted as addresses.  */
  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (ctx->gdbarch))
    {
      gdb_byte *buf = alloca (ctx->addr_size);
      struct type *int_type = get_unsigned_type (ctx->gdbarch,
						 value_type (result_val));

      /* Re-serialize the integer at the expression's address size and
	 let the arch perform the integer-to-address conversion.  */
      store_unsigned_integer (buf, ctx->addr_size, byte_order, result);
      return gdbarch_integer_to_address (ctx->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
286
287 /* Retrieve the in_stack_memory flag of the N'th item on CTX's stack. */
288
289 int
290 dwarf_expr_fetch_in_stack_memory (struct dwarf_expr_context *ctx, int n)
291 {
292 if (ctx->stack_len <= n)
293 error (_("Asked for position %d of stack, "
294 "stack only has %d elements on it."),
295 n, ctx->stack_len);
296 return ctx->stack[ctx->stack_len - (1 + n)].in_stack_memory;
297 }
298
299 /* Return true if the expression stack is empty. */
300
301 static int
302 dwarf_expr_stack_empty_p (struct dwarf_expr_context *ctx)
303 {
304 return ctx->stack_len == 0;
305 }
306
307 /* Add a new piece to CTX's piece list. */
static void
add_piece (struct dwarf_expr_context *ctx, ULONGEST size, ULONGEST offset)
{
  struct dwarf_expr_piece *p;

  /* Grow the piece array by one and point P at the new slot.  */
  ctx->num_pieces++;

  ctx->pieces = xrealloc (ctx->pieces,
			  (ctx->num_pieces
			   * sizeof (struct dwarf_expr_piece)));

  p = &ctx->pieces[ctx->num_pieces - 1];
  p->location = ctx->location;
  p->size = size;
  p->offset = offset;

  /* Fill in the location-kind-specific part of the piece.  Note the
     order of the tests: an empty stack overrides every kind except
     DWARF_VALUE_LITERAL, whose payload lives in CTX rather than on
     the stack.  */
  if (p->location == DWARF_VALUE_LITERAL)
    {
      p->v.literal.data = ctx->data;
      p->v.literal.length = ctx->len;
    }
  else if (dwarf_expr_stack_empty_p (ctx))
    {
      /* An empty stack means this piece's value was never computed;
	 treat it as optimized out.  */
      p->location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      ctx->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p->location == DWARF_VALUE_MEMORY)
    {
      /* The address (and its stack-memory flag) is on top of the
	 stack.  */
      p->v.mem.addr = dwarf_expr_fetch_address (ctx, 0);
      p->v.mem.in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
    }
  else if (p->location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* CTX->len was set to the referenced DIE's offset by the
	 DW_OP_GNU_implicit_pointer handler.  */
      p->v.ptr.die.cu_off = ctx->len;
      p->v.ptr.offset = value_as_long (dwarf_expr_fetch (ctx, 0));
    }
  else if (p->location == DWARF_VALUE_REGISTER)
    p->v.regno = value_as_long (dwarf_expr_fetch (ctx, 0));
  else
    {
      /* DWARF_VALUE_STACK: keep the computed value itself.  */
      p->v.value = dwarf_expr_fetch (ctx, 0);
    }
}
355
356 /* Evaluate the expression at ADDR (LEN bytes long) using the context
357 CTX. */
358
void
dwarf_expr_eval (struct dwarf_expr_context *ctx, const gdb_byte *addr,
		 size_t len)
{
  /* Record the depth so we can verify that execute_stack_op restores
     it on normal return.  */
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
371
372 /* Helper to read a uleb128 value or throw an error. */
373
374 const gdb_byte *
375 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
376 uint64_t *r)
377 {
378 buf = gdb_read_uleb128 (buf, buf_end, r);
379 if (buf == NULL)
380 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
381 return buf;
382 }
383
384 /* Helper to read a sleb128 value or throw an error. */
385
386 const gdb_byte *
387 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
388 int64_t *r)
389 {
390 buf = gdb_read_sleb128 (buf, buf_end, r);
391 if (buf == NULL)
392 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
393 return buf;
394 }
395
396 const gdb_byte *
397 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
398 {
399 buf = gdb_skip_leb128 (buf, buf_end);
400 if (buf == NULL)
401 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
402 return buf;
403 }
404 \f
405
406 /* Check that the current operator is either at the end of an
407 expression, or that it is followed by a composition operator. */
408
409 void
410 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
411 const char *op_name)
412 {
413 /* It seems like DW_OP_GNU_uninit should be handled here. However,
414 it doesn't seem to make sense for DW_OP_*_value, and it was not
415 checked at the other place that this function is called. */
416 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece)
417 error (_("DWARF-2 expression error: `%s' operations must be "
418 "used either alone or in conjunction with DW_OP_piece "
419 "or DW_OP_bit_piece."),
420 op_name);
421 }
422
423 /* Return true iff the types T1 and T2 are "the same". This only does
424 checks that might reasonably be needed to compare DWARF base
425 types. */
426
static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  /* Two base types match when code, signedness and size all
     agree.  */
  return (TYPE_CODE (t1) == TYPE_CODE (t2)
	  && TYPE_UNSIGNED (t1) == TYPE_UNSIGNED (t2)
	  && TYPE_LENGTH (t1) == TYPE_LENGTH (t2));
}
436
437 /* A convenience function to call get_base_type on CTX and return the
438 result. DIE is the DIE whose type we need. SIZE is non-zero if
439 this function should verify that the resulting type has the correct
440 size. */
441
442 static struct type *
443 dwarf_get_base_type (struct dwarf_expr_context *ctx, cu_offset die, int size)
444 {
445 struct type *result;
446
447 if (ctx->funcs->get_base_type)
448 {
449 result = ctx->funcs->get_base_type (ctx, die);
450 if (result == NULL)
451 error (_("Could not find type for DW_OP_GNU_const_type"));
452 if (size != 0 && TYPE_LENGTH (result) != size)
453 error (_("DW_OP_GNU_const_type has different sizes for type and data"));
454 }
455 else
456 /* Anything will do. */
457 result = builtin_type (ctx->gdbarch)->builtin_int;
458
459 return result;
460 }
461
462 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
463 DWARF register number. Otherwise return -1. */
464
int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* DW_OP_reg0..31 encode the register in the opcode itself and
	 must be the only operation in the block.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_GNU_regval_type)
    {
      /* Register number followed by a type DIE offset; only the
	 register number matters here, so skip the offset.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      /* Register number as a ULEB128 operand.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* Reject trailing bytes, and register numbers that do not fit in
     an int (the round-trip cast check).  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
502
503 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
504 DW_OP_deref* return the DWARF register number. Otherwise return -1.
505 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
506 size from DW_OP_deref_size. */
507
508 int
509 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
510 CORE_ADDR *deref_size_return)
511 {
512 uint64_t dwarf_reg;
513 int64_t offset;
514
515 if (buf_end <= buf)
516 return -1;
517
518 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
519 {
520 dwarf_reg = *buf - DW_OP_breg0;
521 buf++;
522 if (buf >= buf_end)
523 return -1;
524 }
525 else if (*buf == DW_OP_bregx)
526 {
527 buf++;
528 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
529 if (buf == NULL)
530 return -1;
531 if ((int) dwarf_reg != dwarf_reg)
532 return -1;
533 }
534 else
535 return -1;
536
537 buf = gdb_read_sleb128 (buf, buf_end, &offset);
538 if (buf == NULL)
539 return -1;
540 if (offset != 0)
541 return -1;
542
543 if (*buf == DW_OP_deref)
544 {
545 buf++;
546 *deref_size_return = -1;
547 }
548 else if (*buf == DW_OP_deref_size)
549 {
550 buf++;
551 if (buf >= buf_end)
552 return -1;
553 *deref_size_return = *buf++;
554 }
555 else
556 return -1;
557
558 if (buf != buf_end)
559 return -1;
560
561 return dwarf_reg;
562 }
563
564 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
565 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
566
int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
			  CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  /* The block must consist of exactly DW_OP_fbreg OFFSET.  */
  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  /* Reject trailing bytes, and offsets that do not survive the
     round-trip through CORE_ADDR.  */
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
589
590 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
591 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
592 The matched SP register number depends on GDBARCH. */
593
int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  /* The block must start with DW_OP_breg0..31 or DW_OP_bregx naming
     some register.  */
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* The named register must be GDBARCH's stack pointer.  */
  if (gdbarch_dwarf2_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  /* Reject trailing bytes, and offsets that do not survive the
     round-trip through CORE_ADDR.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
631
632 /* The engine for the expression evaluator. Using the context in CTX,
633 evaluate the expression between OP_PTR and OP_END. */
634
635 static void
636 execute_stack_op (struct dwarf_expr_context *ctx,
637 const gdb_byte *op_ptr, const gdb_byte *op_end)
638 {
639 enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);
640 /* Old-style "untyped" DWARF values need special treatment in a
641 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
642 a special type for these values so we can distinguish them from
643 values that have an explicit type, because explicitly-typed
644 values do not need special treatment. This special type must be
645 different (in the `==' sense) from any base type coming from the
646 CU. */
647 struct type *address_type = dwarf_expr_address_type (ctx);
648
649 ctx->location = DWARF_VALUE_MEMORY;
650 ctx->initialized = 1; /* Default is initialized. */
651
652 if (ctx->recursion_depth > ctx->max_recursion_depth)
653 error (_("DWARF-2 expression error: Loop detected (%d)."),
654 ctx->recursion_depth);
655 ctx->recursion_depth++;
656
657 while (op_ptr < op_end)
658 {
659 enum dwarf_location_atom op = *op_ptr++;
660 ULONGEST result;
661 /* Assume the value is not in stack memory.
662 Code that knows otherwise sets this to 1.
663 Some arithmetic on stack addresses can probably be assumed to still
664 be a stack address, but we skip this complication for now.
665 This is just an optimization, so it's always ok to punt
666 and leave this as 0. */
667 int in_stack_memory = 0;
668 uint64_t uoffset, reg;
669 int64_t offset;
670 struct value *result_val = NULL;
671
672 /* The DWARF expression might have a bug causing an infinite
673 loop. In that case, quitting is the only way out. */
674 QUIT;
675
676 switch (op)
677 {
678 case DW_OP_lit0:
679 case DW_OP_lit1:
680 case DW_OP_lit2:
681 case DW_OP_lit3:
682 case DW_OP_lit4:
683 case DW_OP_lit5:
684 case DW_OP_lit6:
685 case DW_OP_lit7:
686 case DW_OP_lit8:
687 case DW_OP_lit9:
688 case DW_OP_lit10:
689 case DW_OP_lit11:
690 case DW_OP_lit12:
691 case DW_OP_lit13:
692 case DW_OP_lit14:
693 case DW_OP_lit15:
694 case DW_OP_lit16:
695 case DW_OP_lit17:
696 case DW_OP_lit18:
697 case DW_OP_lit19:
698 case DW_OP_lit20:
699 case DW_OP_lit21:
700 case DW_OP_lit22:
701 case DW_OP_lit23:
702 case DW_OP_lit24:
703 case DW_OP_lit25:
704 case DW_OP_lit26:
705 case DW_OP_lit27:
706 case DW_OP_lit28:
707 case DW_OP_lit29:
708 case DW_OP_lit30:
709 case DW_OP_lit31:
710 result = op - DW_OP_lit0;
711 result_val = value_from_ulongest (address_type, result);
712 break;
713
714 case DW_OP_addr:
715 result = extract_unsigned_integer (op_ptr,
716 ctx->addr_size, byte_order);
717 op_ptr += ctx->addr_size;
718 /* Some versions of GCC emit DW_OP_addr before
719 DW_OP_GNU_push_tls_address. In this case the value is an
720 index, not an address. We don't support things like
721 branching between the address and the TLS op. */
722 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
723 result += ctx->offset;
724 result_val = value_from_ulongest (address_type, result);
725 break;
726
727 case DW_OP_GNU_addr_index:
728 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
729 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
730 result += ctx->offset;
731 result_val = value_from_ulongest (address_type, result);
732 break;
733 case DW_OP_GNU_const_index:
734 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
735 result = (ctx->funcs->get_addr_index) (ctx->baton, uoffset);
736 result_val = value_from_ulongest (address_type, result);
737 break;
738
739 case DW_OP_const1u:
740 result = extract_unsigned_integer (op_ptr, 1, byte_order);
741 result_val = value_from_ulongest (address_type, result);
742 op_ptr += 1;
743 break;
744 case DW_OP_const1s:
745 result = extract_signed_integer (op_ptr, 1, byte_order);
746 result_val = value_from_ulongest (address_type, result);
747 op_ptr += 1;
748 break;
749 case DW_OP_const2u:
750 result = extract_unsigned_integer (op_ptr, 2, byte_order);
751 result_val = value_from_ulongest (address_type, result);
752 op_ptr += 2;
753 break;
754 case DW_OP_const2s:
755 result = extract_signed_integer (op_ptr, 2, byte_order);
756 result_val = value_from_ulongest (address_type, result);
757 op_ptr += 2;
758 break;
759 case DW_OP_const4u:
760 result = extract_unsigned_integer (op_ptr, 4, byte_order);
761 result_val = value_from_ulongest (address_type, result);
762 op_ptr += 4;
763 break;
764 case DW_OP_const4s:
765 result = extract_signed_integer (op_ptr, 4, byte_order);
766 result_val = value_from_ulongest (address_type, result);
767 op_ptr += 4;
768 break;
769 case DW_OP_const8u:
770 result = extract_unsigned_integer (op_ptr, 8, byte_order);
771 result_val = value_from_ulongest (address_type, result);
772 op_ptr += 8;
773 break;
774 case DW_OP_const8s:
775 result = extract_signed_integer (op_ptr, 8, byte_order);
776 result_val = value_from_ulongest (address_type, result);
777 op_ptr += 8;
778 break;
779 case DW_OP_constu:
780 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
781 result = uoffset;
782 result_val = value_from_ulongest (address_type, result);
783 break;
784 case DW_OP_consts:
785 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
786 result = offset;
787 result_val = value_from_ulongest (address_type, result);
788 break;
789
790 /* The DW_OP_reg operations are required to occur alone in
791 location expressions. */
792 case DW_OP_reg0:
793 case DW_OP_reg1:
794 case DW_OP_reg2:
795 case DW_OP_reg3:
796 case DW_OP_reg4:
797 case DW_OP_reg5:
798 case DW_OP_reg6:
799 case DW_OP_reg7:
800 case DW_OP_reg8:
801 case DW_OP_reg9:
802 case DW_OP_reg10:
803 case DW_OP_reg11:
804 case DW_OP_reg12:
805 case DW_OP_reg13:
806 case DW_OP_reg14:
807 case DW_OP_reg15:
808 case DW_OP_reg16:
809 case DW_OP_reg17:
810 case DW_OP_reg18:
811 case DW_OP_reg19:
812 case DW_OP_reg20:
813 case DW_OP_reg21:
814 case DW_OP_reg22:
815 case DW_OP_reg23:
816 case DW_OP_reg24:
817 case DW_OP_reg25:
818 case DW_OP_reg26:
819 case DW_OP_reg27:
820 case DW_OP_reg28:
821 case DW_OP_reg29:
822 case DW_OP_reg30:
823 case DW_OP_reg31:
824 if (op_ptr != op_end
825 && *op_ptr != DW_OP_piece
826 && *op_ptr != DW_OP_bit_piece
827 && *op_ptr != DW_OP_GNU_uninit)
828 error (_("DWARF-2 expression error: DW_OP_reg operations must be "
829 "used either alone or in conjunction with DW_OP_piece "
830 "or DW_OP_bit_piece."));
831
832 result = op - DW_OP_reg0;
833 result_val = value_from_ulongest (address_type, result);
834 ctx->location = DWARF_VALUE_REGISTER;
835 break;
836
837 case DW_OP_regx:
838 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
839 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
840
841 result = reg;
842 result_val = value_from_ulongest (address_type, result);
843 ctx->location = DWARF_VALUE_REGISTER;
844 break;
845
846 case DW_OP_implicit_value:
847 {
848 uint64_t len;
849
850 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
851 if (op_ptr + len > op_end)
852 error (_("DW_OP_implicit_value: too few bytes available."));
853 ctx->len = len;
854 ctx->data = op_ptr;
855 ctx->location = DWARF_VALUE_LITERAL;
856 op_ptr += len;
857 dwarf_expr_require_composition (op_ptr, op_end,
858 "DW_OP_implicit_value");
859 }
860 goto no_push;
861
862 case DW_OP_stack_value:
863 ctx->location = DWARF_VALUE_STACK;
864 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
865 goto no_push;
866
867 case DW_OP_GNU_implicit_pointer:
868 {
869 int64_t len;
870
871 if (ctx->ref_addr_size == -1)
872 error (_("DWARF-2 expression error: DW_OP_GNU_implicit_pointer "
873 "is not allowed in frame context"));
874
875 /* The referred-to DIE of cu_offset kind. */
876 ctx->len = extract_unsigned_integer (op_ptr, ctx->ref_addr_size,
877 byte_order);
878 op_ptr += ctx->ref_addr_size;
879
880 /* The byte offset into the data. */
881 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
882 result = (ULONGEST) len;
883 result_val = value_from_ulongest (address_type, result);
884
885 ctx->location = DWARF_VALUE_IMPLICIT_POINTER;
886 dwarf_expr_require_composition (op_ptr, op_end,
887 "DW_OP_GNU_implicit_pointer");
888 }
889 break;
890
891 case DW_OP_breg0:
892 case DW_OP_breg1:
893 case DW_OP_breg2:
894 case DW_OP_breg3:
895 case DW_OP_breg4:
896 case DW_OP_breg5:
897 case DW_OP_breg6:
898 case DW_OP_breg7:
899 case DW_OP_breg8:
900 case DW_OP_breg9:
901 case DW_OP_breg10:
902 case DW_OP_breg11:
903 case DW_OP_breg12:
904 case DW_OP_breg13:
905 case DW_OP_breg14:
906 case DW_OP_breg15:
907 case DW_OP_breg16:
908 case DW_OP_breg17:
909 case DW_OP_breg18:
910 case DW_OP_breg19:
911 case DW_OP_breg20:
912 case DW_OP_breg21:
913 case DW_OP_breg22:
914 case DW_OP_breg23:
915 case DW_OP_breg24:
916 case DW_OP_breg25:
917 case DW_OP_breg26:
918 case DW_OP_breg27:
919 case DW_OP_breg28:
920 case DW_OP_breg29:
921 case DW_OP_breg30:
922 case DW_OP_breg31:
923 {
924 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
925 result = (ctx->funcs->read_reg) (ctx->baton, op - DW_OP_breg0);
926 result += offset;
927 result_val = value_from_ulongest (address_type, result);
928 }
929 break;
930 case DW_OP_bregx:
931 {
932 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
933 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
934 result = (ctx->funcs->read_reg) (ctx->baton, reg);
935 result += offset;
936 result_val = value_from_ulongest (address_type, result);
937 }
938 break;
939 case DW_OP_fbreg:
940 {
941 const gdb_byte *datastart;
942 size_t datalen;
943 unsigned int before_stack_len;
944
945 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
946 /* Rather than create a whole new context, we simply
947 record the stack length before execution, then reset it
948 afterwards, effectively erasing whatever the recursive
949 call put there. */
950 before_stack_len = ctx->stack_len;
951 /* FIXME: cagney/2003-03-26: This code should be using
952 get_frame_base_address(), and then implement a dwarf2
953 specific this_base method. */
954 (ctx->funcs->get_frame_base) (ctx->baton, &datastart, &datalen);
955 dwarf_expr_eval (ctx, datastart, datalen);
956 if (ctx->location == DWARF_VALUE_MEMORY)
957 result = dwarf_expr_fetch_address (ctx, 0);
958 else if (ctx->location == DWARF_VALUE_REGISTER)
959 result = (ctx->funcs->read_reg) (ctx->baton,
960 value_as_long (dwarf_expr_fetch (ctx, 0)));
961 else
962 error (_("Not implemented: computing frame "
963 "base using explicit value operator"));
964 result = result + offset;
965 result_val = value_from_ulongest (address_type, result);
966 in_stack_memory = 1;
967 ctx->stack_len = before_stack_len;
968 ctx->location = DWARF_VALUE_MEMORY;
969 }
970 break;
971
972 case DW_OP_dup:
973 result_val = dwarf_expr_fetch (ctx, 0);
974 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 0);
975 break;
976
977 case DW_OP_drop:
978 dwarf_expr_pop (ctx);
979 goto no_push;
980
981 case DW_OP_pick:
982 offset = *op_ptr++;
983 result_val = dwarf_expr_fetch (ctx, offset);
984 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, offset);
985 break;
986
987 case DW_OP_swap:
988 {
989 struct dwarf_stack_value t1, t2;
990
991 if (ctx->stack_len < 2)
992 error (_("Not enough elements for "
993 "DW_OP_swap. Need 2, have %d."),
994 ctx->stack_len);
995 t1 = ctx->stack[ctx->stack_len - 1];
996 t2 = ctx->stack[ctx->stack_len - 2];
997 ctx->stack[ctx->stack_len - 1] = t2;
998 ctx->stack[ctx->stack_len - 2] = t1;
999 goto no_push;
1000 }
1001
1002 case DW_OP_over:
1003 result_val = dwarf_expr_fetch (ctx, 1);
1004 in_stack_memory = dwarf_expr_fetch_in_stack_memory (ctx, 1);
1005 break;
1006
1007 case DW_OP_rot:
1008 {
1009 struct dwarf_stack_value t1, t2, t3;
1010
1011 if (ctx->stack_len < 3)
1012 error (_("Not enough elements for "
1013 "DW_OP_rot. Need 3, have %d."),
1014 ctx->stack_len);
1015 t1 = ctx->stack[ctx->stack_len - 1];
1016 t2 = ctx->stack[ctx->stack_len - 2];
1017 t3 = ctx->stack[ctx->stack_len - 3];
1018 ctx->stack[ctx->stack_len - 1] = t2;
1019 ctx->stack[ctx->stack_len - 2] = t3;
1020 ctx->stack[ctx->stack_len - 3] = t1;
1021 goto no_push;
1022 }
1023
1024 case DW_OP_deref:
1025 case DW_OP_deref_size:
1026 case DW_OP_GNU_deref_type:
1027 {
1028 int addr_size = (op == DW_OP_deref ? ctx->addr_size : *op_ptr++);
1029 gdb_byte *buf = alloca (addr_size);
1030 CORE_ADDR addr = dwarf_expr_fetch_address (ctx, 0);
1031 struct type *type;
1032
1033 dwarf_expr_pop (ctx);
1034
1035 if (op == DW_OP_GNU_deref_type)
1036 {
1037 cu_offset type_die;
1038
1039 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1040 type_die.cu_off = uoffset;
1041 type = dwarf_get_base_type (ctx, type_die, 0);
1042 }
1043 else
1044 type = address_type;
1045
1046 (ctx->funcs->read_mem) (ctx->baton, buf, addr, addr_size);
1047
1048 /* If the size of the object read from memory is different
1049 from the type length, we need to zero-extend it. */
1050 if (TYPE_LENGTH (type) != addr_size)
1051 {
1052 ULONGEST result =
1053 extract_unsigned_integer (buf, addr_size, byte_order);
1054
1055 buf = alloca (TYPE_LENGTH (type));
1056 store_unsigned_integer (buf, TYPE_LENGTH (type),
1057 byte_order, result);
1058 }
1059
1060 result_val = value_from_contents_and_address (type, buf, addr);
1061 break;
1062 }
1063
1064 case DW_OP_abs:
1065 case DW_OP_neg:
1066 case DW_OP_not:
1067 case DW_OP_plus_uconst:
1068 {
1069 /* Unary operations. */
1070 result_val = dwarf_expr_fetch (ctx, 0);
1071 dwarf_expr_pop (ctx);
1072
1073 switch (op)
1074 {
1075 case DW_OP_abs:
1076 if (value_less (result_val,
1077 value_zero (value_type (result_val), not_lval)))
1078 result_val = value_neg (result_val);
1079 break;
1080 case DW_OP_neg:
1081 result_val = value_neg (result_val);
1082 break;
1083 case DW_OP_not:
1084 dwarf_require_integral (value_type (result_val));
1085 result_val = value_complement (result_val);
1086 break;
1087 case DW_OP_plus_uconst:
1088 dwarf_require_integral (value_type (result_val));
1089 result = value_as_long (result_val);
1090 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1091 result += reg;
1092 result_val = value_from_ulongest (address_type, result);
1093 break;
1094 }
1095 }
1096 break;
1097
1098 case DW_OP_and:
1099 case DW_OP_div:
1100 case DW_OP_minus:
1101 case DW_OP_mod:
1102 case DW_OP_mul:
1103 case DW_OP_or:
1104 case DW_OP_plus:
1105 case DW_OP_shl:
1106 case DW_OP_shr:
1107 case DW_OP_shra:
1108 case DW_OP_xor:
1109 case DW_OP_le:
1110 case DW_OP_ge:
1111 case DW_OP_eq:
1112 case DW_OP_lt:
1113 case DW_OP_gt:
1114 case DW_OP_ne:
1115 {
1116 /* Binary operations. */
1117 struct value *first, *second;
1118
1119 second = dwarf_expr_fetch (ctx, 0);
1120 dwarf_expr_pop (ctx);
1121
1122 first = dwarf_expr_fetch (ctx, 0);
1123 dwarf_expr_pop (ctx);
1124
1125 if (! base_types_equal_p (value_type (first), value_type (second)))
1126 error (_("Incompatible types on DWARF stack"));
1127
1128 switch (op)
1129 {
1130 case DW_OP_and:
1131 dwarf_require_integral (value_type (first));
1132 dwarf_require_integral (value_type (second));
1133 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1134 break;
1135 case DW_OP_div:
1136 result_val = value_binop (first, second, BINOP_DIV);
1137 break;
1138 case DW_OP_minus:
1139 result_val = value_binop (first, second, BINOP_SUB);
1140 break;
1141 case DW_OP_mod:
1142 {
1143 int cast_back = 0;
1144 struct type *orig_type = value_type (first);
1145
1146 /* We have to special-case "old-style" untyped values
1147 -- these must have mod computed using unsigned
1148 math. */
1149 if (orig_type == address_type)
1150 {
1151 struct type *utype
1152 = get_unsigned_type (ctx->gdbarch, orig_type);
1153
1154 cast_back = 1;
1155 first = value_cast (utype, first);
1156 second = value_cast (utype, second);
1157 }
1158 /* Note that value_binop doesn't handle float or
1159 decimal float here. This seems unimportant. */
1160 result_val = value_binop (first, second, BINOP_MOD);
1161 if (cast_back)
1162 result_val = value_cast (orig_type, result_val);
1163 }
1164 break;
1165 case DW_OP_mul:
1166 result_val = value_binop (first, second, BINOP_MUL);
1167 break;
1168 case DW_OP_or:
1169 dwarf_require_integral (value_type (first));
1170 dwarf_require_integral (value_type (second));
1171 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1172 break;
1173 case DW_OP_plus:
1174 result_val = value_binop (first, second, BINOP_ADD);
1175 break;
1176 case DW_OP_shl:
1177 dwarf_require_integral (value_type (first));
1178 dwarf_require_integral (value_type (second));
1179 result_val = value_binop (first, second, BINOP_LSH);
1180 break;
1181 case DW_OP_shr:
1182 dwarf_require_integral (value_type (first));
1183 dwarf_require_integral (value_type (second));
1184 if (!TYPE_UNSIGNED (value_type (first)))
1185 {
1186 struct type *utype
1187 = get_unsigned_type (ctx->gdbarch, value_type (first));
1188
1189 first = value_cast (utype, first);
1190 }
1191
1192 result_val = value_binop (first, second, BINOP_RSH);
1193 /* Make sure we wind up with the same type we started
1194 with. */
1195 if (value_type (result_val) != value_type (second))
1196 result_val = value_cast (value_type (second), result_val);
1197 break;
1198 case DW_OP_shra:
1199 dwarf_require_integral (value_type (first));
1200 dwarf_require_integral (value_type (second));
1201 if (TYPE_UNSIGNED (value_type (first)))
1202 {
1203 struct type *stype
1204 = get_signed_type (ctx->gdbarch, value_type (first));
1205
1206 first = value_cast (stype, first);
1207 }
1208
1209 result_val = value_binop (first, second, BINOP_RSH);
1210 /* Make sure we wind up with the same type we started
1211 with. */
1212 if (value_type (result_val) != value_type (second))
1213 result_val = value_cast (value_type (second), result_val);
1214 break;
1215 case DW_OP_xor:
1216 dwarf_require_integral (value_type (first));
1217 dwarf_require_integral (value_type (second));
1218 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1219 break;
1220 case DW_OP_le:
1221 /* A <= B is !(B < A). */
1222 result = ! value_less (second, first);
1223 result_val = value_from_ulongest (address_type, result);
1224 break;
1225 case DW_OP_ge:
1226 /* A >= B is !(A < B). */
1227 result = ! value_less (first, second);
1228 result_val = value_from_ulongest (address_type, result);
1229 break;
1230 case DW_OP_eq:
1231 result = value_equal (first, second);
1232 result_val = value_from_ulongest (address_type, result);
1233 break;
1234 case DW_OP_lt:
1235 result = value_less (first, second);
1236 result_val = value_from_ulongest (address_type, result);
1237 break;
1238 case DW_OP_gt:
1239 /* A > B is B < A. */
1240 result = value_less (second, first);
1241 result_val = value_from_ulongest (address_type, result);
1242 break;
1243 case DW_OP_ne:
1244 result = ! value_equal (first, second);
1245 result_val = value_from_ulongest (address_type, result);
1246 break;
1247 default:
1248 internal_error (__FILE__, __LINE__,
1249 _("Can't be reached."));
1250 }
1251 }
1252 break;
1253
1254 case DW_OP_call_frame_cfa:
1255 result = (ctx->funcs->get_frame_cfa) (ctx->baton);
1256 result_val = value_from_ulongest (address_type, result);
1257 in_stack_memory = 1;
1258 break;
1259
1260 case DW_OP_GNU_push_tls_address:
1261 /* Variable is at a constant offset in the thread-local
1262 storage block into the objfile for the current thread and
1263 the dynamic linker module containing this expression. Here
1264 we return the offset from that base. The top of the
1265 stack has the offset from the beginning of the thread
1266 control block at which the variable is located. Nothing
1267 should follow this operator, so the top of stack would be
1268 returned. */
1269 result = value_as_long (dwarf_expr_fetch (ctx, 0));
1270 dwarf_expr_pop (ctx);
1271 result = (ctx->funcs->get_tls_address) (ctx->baton, result);
1272 result_val = value_from_ulongest (address_type, result);
1273 break;
1274
1275 case DW_OP_skip:
1276 offset = extract_signed_integer (op_ptr, 2, byte_order);
1277 op_ptr += 2;
1278 op_ptr += offset;
1279 goto no_push;
1280
1281 case DW_OP_bra:
1282 {
1283 struct value *val;
1284
1285 offset = extract_signed_integer (op_ptr, 2, byte_order);
1286 op_ptr += 2;
1287 val = dwarf_expr_fetch (ctx, 0);
1288 dwarf_require_integral (value_type (val));
1289 if (value_as_long (val) != 0)
1290 op_ptr += offset;
1291 dwarf_expr_pop (ctx);
1292 }
1293 goto no_push;
1294
1295 case DW_OP_nop:
1296 goto no_push;
1297
1298 case DW_OP_piece:
1299 {
1300 uint64_t size;
1301
1302 /* Record the piece. */
1303 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1304 add_piece (ctx, 8 * size, 0);
1305
1306 /* Pop off the address/regnum, and reset the location
1307 type. */
1308 if (ctx->location != DWARF_VALUE_LITERAL
1309 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1310 dwarf_expr_pop (ctx);
1311 ctx->location = DWARF_VALUE_MEMORY;
1312 }
1313 goto no_push;
1314
1315 case DW_OP_bit_piece:
1316 {
1317 uint64_t size, offset;
1318
1319 /* Record the piece. */
1320 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1321 op_ptr = safe_read_uleb128 (op_ptr, op_end, &offset);
1322 add_piece (ctx, size, offset);
1323
1324 /* Pop off the address/regnum, and reset the location
1325 type. */
1326 if (ctx->location != DWARF_VALUE_LITERAL
1327 && ctx->location != DWARF_VALUE_OPTIMIZED_OUT)
1328 dwarf_expr_pop (ctx);
1329 ctx->location = DWARF_VALUE_MEMORY;
1330 }
1331 goto no_push;
1332
1333 case DW_OP_GNU_uninit:
1334 if (op_ptr != op_end)
1335 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1336 "be the very last op."));
1337
1338 ctx->initialized = 0;
1339 goto no_push;
1340
1341 case DW_OP_call2:
1342 {
1343 cu_offset offset;
1344
1345 offset.cu_off = extract_unsigned_integer (op_ptr, 2, byte_order);
1346 op_ptr += 2;
1347 ctx->funcs->dwarf_call (ctx, offset);
1348 }
1349 goto no_push;
1350
1351 case DW_OP_call4:
1352 {
1353 cu_offset offset;
1354
1355 offset.cu_off = extract_unsigned_integer (op_ptr, 4, byte_order);
1356 op_ptr += 4;
1357 ctx->funcs->dwarf_call (ctx, offset);
1358 }
1359 goto no_push;
1360
1361 case DW_OP_GNU_entry_value:
1362 {
1363 uint64_t len;
1364 CORE_ADDR deref_size;
1365 union call_site_parameter_u kind_u;
1366
1367 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1368 if (op_ptr + len > op_end)
1369 error (_("DW_OP_GNU_entry_value: too few bytes available."));
1370
1371 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1372 if (kind_u.dwarf_reg != -1)
1373 {
1374 op_ptr += len;
1375 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1376 CALL_SITE_PARAMETER_DWARF_REG,
1377 kind_u,
1378 -1 /* deref_size */);
1379 goto no_push;
1380 }
1381
1382 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1383 op_ptr + len,
1384 &deref_size);
1385 if (kind_u.dwarf_reg != -1)
1386 {
1387 if (deref_size == -1)
1388 deref_size = ctx->addr_size;
1389 op_ptr += len;
1390 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1391 CALL_SITE_PARAMETER_DWARF_REG,
1392 kind_u, deref_size);
1393 goto no_push;
1394 }
1395
1396 error (_("DWARF-2 expression error: DW_OP_GNU_entry_value is "
1397 "supported only for single DW_OP_reg* "
1398 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1399 }
1400
1401 case DW_OP_GNU_parameter_ref:
1402 {
1403 union call_site_parameter_u kind_u;
1404
1405 kind_u.param_offset.cu_off = extract_unsigned_integer (op_ptr, 4,
1406 byte_order);
1407 op_ptr += 4;
1408 ctx->funcs->push_dwarf_reg_entry_value (ctx,
1409 CALL_SITE_PARAMETER_PARAM_OFFSET,
1410 kind_u,
1411 -1 /* deref_size */);
1412 }
1413 goto no_push;
1414
1415 case DW_OP_GNU_const_type:
1416 {
1417 cu_offset type_die;
1418 int n;
1419 const gdb_byte *data;
1420 struct type *type;
1421
1422 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1423 type_die.cu_off = uoffset;
1424 n = *op_ptr++;
1425 data = op_ptr;
1426 op_ptr += n;
1427
1428 type = dwarf_get_base_type (ctx, type_die, n);
1429 result_val = value_from_contents (type, data);
1430 }
1431 break;
1432
1433 case DW_OP_GNU_regval_type:
1434 {
1435 cu_offset type_die;
1436 struct type *type;
1437
1438 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1439 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1440 type_die.cu_off = uoffset;
1441
1442 type = dwarf_get_base_type (ctx, type_die, 0);
1443 result = (ctx->funcs->read_reg) (ctx->baton, reg);
1444 result_val = value_from_ulongest (address_type, result);
1445 result_val = value_from_contents (type,
1446 value_contents_all (result_val));
1447 }
1448 break;
1449
1450 case DW_OP_GNU_convert:
1451 case DW_OP_GNU_reinterpret:
1452 {
1453 cu_offset type_die;
1454 struct type *type;
1455
1456 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1457 type_die.cu_off = uoffset;
1458
1459 if (type_die.cu_off == 0)
1460 type = address_type;
1461 else
1462 type = dwarf_get_base_type (ctx, type_die, 0);
1463
1464 result_val = dwarf_expr_fetch (ctx, 0);
1465 dwarf_expr_pop (ctx);
1466
1467 if (op == DW_OP_GNU_convert)
1468 result_val = value_cast (type, result_val);
1469 else if (type == value_type (result_val))
1470 {
1471 /* Nothing. */
1472 }
1473 else if (TYPE_LENGTH (type)
1474 != TYPE_LENGTH (value_type (result_val)))
1475 error (_("DW_OP_GNU_reinterpret has wrong size"));
1476 else
1477 result_val
1478 = value_from_contents (type,
1479 value_contents_all (result_val));
1480 }
1481 break;
1482
1483 default:
1484 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1485 }
1486
1487 /* Most things push a result value. */
1488 gdb_assert (result_val != NULL);
1489 dwarf_expr_push (ctx, result_val, in_stack_memory);
1490 no_push:
1491 ;
1492 }
1493
1494 /* To simplify our main caller, if the result is an implicit
1495 pointer, then make a pieced value. This is ok because we can't
1496 have implicit pointers in contexts where pieces are invalid. */
1497 if (ctx->location == DWARF_VALUE_IMPLICIT_POINTER)
1498 add_piece (ctx, 8 * ctx->addr_size, 0);
1499
1500 abort_expression:
1501 ctx->recursion_depth--;
1502 gdb_assert (ctx->recursion_depth >= 0);
1503 }
1504
/* Stub dwarf_expr_context_funcs.get_frame_base implementation.
   Installed in expression contexts that have no frame, so any
   DW_OP_fbreg operation is rejected with an error.  BATON is unused;
   START and LENGTH are never written.  NOTE(review): error throws and
   does not return, so no output parameters need to be filled in.  */

void
ctx_no_get_frame_base (void *baton, const gdb_byte **start, size_t *length)
{
  error (_("%s is invalid in this context"), "DW_OP_fbreg");
}
1512
/* Stub dwarf_expr_context_funcs.get_frame_cfa implementation.
   Installed in expression contexts that have no frame, so
   DW_OP_call_frame_cfa cannot be evaluated; always reports an error.
   BATON is unused.  NOTE(review): error throws and does not return,
   which is why no CORE_ADDR value is produced.  */

CORE_ADDR
ctx_no_get_frame_cfa (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_call_frame_cfa");
}
1520
/* Stub dwarf_expr_context_funcs.get_frame_pc implementation.
   The error message names DW_OP_GNU_implicit_pointer because that is
   the operation whose evaluation needs the frame's PC.  BATON is
   unused.  NOTE(review): error throws and does not return, which is
   why no CORE_ADDR value is produced.  */

CORE_ADDR
ctx_no_get_frame_pc (void *baton)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_implicit_pointer");
}
1528
/* Stub dwarf_expr_context_funcs.get_tls_address implementation.
   Installed in contexts where thread-local storage cannot be
   resolved, so DW_OP_GNU_push_tls_address is rejected with an error.
   BATON and OFFSET are unused.  */

CORE_ADDR
ctx_no_get_tls_address (void *baton, CORE_ADDR offset)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_push_tls_address");
}
1536
/* Stub dwarf_expr_context_funcs.dwarf_call implementation.
   Installed in contexts that cannot evaluate a subexpression at
   another DIE, so DW_OP_call2/DW_OP_call4 are rejected with an
   error.  CTX and DIE_OFFSET are unused.  */

void
ctx_no_dwarf_call (struct dwarf_expr_context *ctx, cu_offset die_offset)
{
  error (_("%s is invalid in this context"), "DW_OP_call*");
}
1544
1545 /* Stub dwarf_expr_context_funcs.get_base_type implementation. */
1546
1547 struct type *
1548 ctx_no_get_base_type (struct dwarf_expr_context *ctx, cu_offset die)
1549 {
1550 error (_("Support for typed DWARF is not supported in this context"));
1551 }
1552
/* Stub dwarf_expr_context_funcs.push_dwarf_reg_entry_value
   implementation.  (The old comment named a nonexistent
   "push_dwarf_block_entry_value" callback.)  Installed in contexts
   that cannot evaluate DW_OP_GNU_entry_value; reaching it is a GDB
   bug rather than a user error, hence internal_error instead of
   error.  KIND, KIND_U and DEREF_SIZE are unused.  */

void
ctx_no_push_dwarf_reg_entry_value (struct dwarf_expr_context *ctx,
				   enum call_site_parameter_kind kind,
				   union call_site_parameter_u kind_u,
				   int deref_size)
{
  internal_error (__FILE__, __LINE__,
		  _("Support for DW_OP_GNU_entry_value is unimplemented"));
}
1565
/* Stub dwarf_expr_context_funcs.get_addr_index implementation.
   Installed in contexts that have no .debug_addr-style address table,
   so DW_OP_GNU_addr_index is rejected with an error.  BATON and INDEX
   are unused.  */

CORE_ADDR
ctx_no_get_addr_index (void *baton, unsigned int index)
{
  error (_("%s is invalid in this context"), "DW_OP_GNU_addr_index");
}
1573
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_dwarf2expr;

/* Module initializer: register the per-gdbarch data slot whose
   contents (struct dwarf_gdbarch_types, see dwarf_gdbarch_types_init
   above) cache the default types used by the expression
   evaluator.  */

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}