/* Source: mirror of thirdparty/binutils-gdb.git (git.ipfire.org),
   file gdb/dwarf2/expr.c, at commit "Add dwarf2_per_objfile to
   dwarf_expr_context and dwarf2_frame_cache".  */
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001-2020 Free Software Foundation, Inc.
4
5 Contributed by Daniel Berlin (dan@dberlin.org)
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "symtab.h"
24 #include "gdbtypes.h"
25 #include "value.h"
26 #include "gdbcore.h"
27 #include "dwarf2.h"
28 #include "dwarf2/expr.h"
29 #include "dwarf2/loc.h"
30 #include "dwarf2/read.h"
31 #include "gdbsupport/underlying.h"
32 #include "gdbarch.h"
33
/* Cookie for gdbarch data.  Registered once and used by
   dwarf_expr_context::address_type to find the per-arch type cache.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  /* Cached "<signed DWARF address type>" instances for the three
     supported address sizes (2, 4 and 8 bytes).  Entries start out
     NULL and are created lazily by dwarf_expr_context::address_type.  */
  struct type *dw_types[3];
};
45
46 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
47
48 static void *
49 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
50 {
51 struct dwarf_gdbarch_types *types
52 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
53
54 /* The types themselves are lazily initialized. */
55
56 return types;
57 }
58
59 /* Return the type used for DWARF operations where the type is
60 unspecified in the DWARF spec. Only certain sizes are
61 supported. */
62
63 struct type *
64 dwarf_expr_context::address_type () const
65 {
66 struct dwarf_gdbarch_types *types
67 = (struct dwarf_gdbarch_types *) gdbarch_data (this->gdbarch,
68 dwarf_arch_cookie);
69 int ndx;
70
71 if (this->addr_size == 2)
72 ndx = 0;
73 else if (this->addr_size == 4)
74 ndx = 1;
75 else if (this->addr_size == 8)
76 ndx = 2;
77 else
78 error (_("Unsupported address size in DWARF expressions: %d bits"),
79 8 * this->addr_size);
80
81 if (types->dw_types[ndx] == NULL)
82 types->dw_types[ndx]
83 = arch_integer_type (this->gdbarch,
84 8 * this->addr_size,
85 0, "<signed DWARF address type>");
86
87 return types->dw_types[ndx];
88 }
89
/* Create a new context for the expression evaluator.

   PER_OBJFILE is the owning objfile's DWARF data; it is stored for
   use when relocating addresses (see DW_OP_addr handling in
   execute_stack_op).  The gdbarch and address sizes are left unset
   here and must be filled in by the caller before evaluation.  */

dwarf_expr_context::dwarf_expr_context (dwarf2_per_objfile *per_objfile)
: gdbarch (NULL),
  addr_size (0),
  ref_addr_size (0),
  recursion_depth (0),
  max_recursion_depth (0x100),	/* Loop-detection limit used by
				   execute_stack_op.  */
  location (DWARF_VALUE_MEMORY),
  len (0),
  data (NULL),
  initialized (0),
  per_objfile (per_objfile)
{
}
105
106 /* Push VALUE onto the stack. */
107
108 void
109 dwarf_expr_context::push (struct value *value, bool in_stack_memory)
110 {
111 stack.emplace_back (value, in_stack_memory);
112 }
113
114 /* Push VALUE onto the stack. */
115
116 void
117 dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
118 {
119 push (value_from_ulongest (address_type (), value), in_stack_memory);
120 }
121
122 /* Pop the top item off of the stack. */
123
124 void
125 dwarf_expr_context::pop ()
126 {
127 if (stack.empty ())
128 error (_("dwarf expression stack underflow"));
129
130 stack.pop_back ();
131 }
132
133 /* Retrieve the N'th item on the stack. */
134
135 struct value *
136 dwarf_expr_context::fetch (int n)
137 {
138 if (stack.size () <= n)
139 error (_("Asked for position %d of stack, "
140 "stack only has %zu elements on it."),
141 n, stack.size ());
142 return stack[stack.size () - (1 + n)].value;
143 }
144
145 /* Require that TYPE be an integral type; throw an exception if not. */
146
147 static void
148 dwarf_require_integral (struct type *type)
149 {
150 if (type->code () != TYPE_CODE_INT
151 && type->code () != TYPE_CODE_CHAR
152 && type->code () != TYPE_CODE_BOOL)
153 error (_("integral type expected in DWARF expression"));
154 }
155
156 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
157 type. */
158
159 static struct type *
160 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
161 {
162 switch (TYPE_LENGTH (type))
163 {
164 case 1:
165 return builtin_type (gdbarch)->builtin_uint8;
166 case 2:
167 return builtin_type (gdbarch)->builtin_uint16;
168 case 4:
169 return builtin_type (gdbarch)->builtin_uint32;
170 case 8:
171 return builtin_type (gdbarch)->builtin_uint64;
172 default:
173 error (_("no unsigned variant found for type, while evaluating "
174 "DWARF expression"));
175 }
176 }
177
178 /* Return the signed form of TYPE. TYPE is necessarily an integral
179 type. */
180
181 static struct type *
182 get_signed_type (struct gdbarch *gdbarch, struct type *type)
183 {
184 switch (TYPE_LENGTH (type))
185 {
186 case 1:
187 return builtin_type (gdbarch)->builtin_int8;
188 case 2:
189 return builtin_type (gdbarch)->builtin_int16;
190 case 4:
191 return builtin_type (gdbarch)->builtin_int32;
192 case 8:
193 return builtin_type (gdbarch)->builtin_int64;
194 default:
195 error (_("no signed variant found for type, while evaluating "
196 "DWARF expression"));
197 }
198 }
199
/* Retrieve the N'th item on the stack, converted to an address.
   Throws if the item is not integral.  */

CORE_ADDR
dwarf_expr_context::fetch_address (int n)
{
  struct value *result_val = fetch (n);
  enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
  ULONGEST result;

  /* Only integral values can be turned into an address.  */
  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
				     TYPE_LENGTH (value_type (result_val)),
				     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (this->gdbarch))
    {
      gdb_byte *buf = (gdb_byte *) alloca (this->addr_size);
      struct type *int_type = get_unsigned_type (this->gdbarch,
						 value_type (result_val));

      /* Re-encode the value in target byte order so the gdbarch
	 method can reinterpret it as an address.  */
      store_unsigned_integer (buf, this->addr_size, byte_order, result);
      return gdbarch_integer_to_address (this->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}
232
233 /* Retrieve the in_stack_memory flag of the N'th item on the stack. */
234
235 bool
236 dwarf_expr_context::fetch_in_stack_memory (int n)
237 {
238 if (stack.size () <= n)
239 error (_("Asked for position %d of stack, "
240 "stack only has %zu elements on it."),
241 n, stack.size ());
242 return stack[stack.size () - (1 + n)].in_stack_memory;
243 }
244
245 /* Return true if the expression stack is empty. */
246
247 bool
248 dwarf_expr_context::stack_empty_p () const
249 {
250 return stack.empty ();
251 }
252
/* Add a new piece to the dwarf_expr_context's piece list.  The
   piece's location kind is taken from the context's current location;
   its source operand (address, register number, value, ...) is taken
   from the top of the stack as appropriate for that kind.  SIZE and
   OFFSET describe the piece's extent; the units depend on the
   operation that created it (DW_OP_piece vs DW_OP_bit_piece) --
   presumably bytes vs bits; confirm against the callers.  */
void
dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
{
  this->pieces.emplace_back ();
  dwarf_expr_piece &p = this->pieces.back ();

  p.location = this->location;
  p.size = size;
  p.offset = offset;

  if (p.location == DWARF_VALUE_LITERAL)
    {
      /* Literal pieces carry their bytes directly, from the
	 context's data/len fields.  */
      p.v.literal.data = this->data;
      p.v.literal.length = this->len;
    }
  else if (stack_empty_p ())
    {
      /* No operand on the stack: the piece was optimized out.  */
      p.location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
      this->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p.location == DWARF_VALUE_MEMORY)
    {
      p.v.mem.addr = fetch_address (0);
      p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
    }
  else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      /* The evaluator stashed the referenced DIE's section offset in
	 the context's LEN field; the stack top holds the byte offset
	 into the pointed-to value.  */
      p.v.ptr.die_sect_off = (sect_offset) this->len;
      p.v.ptr.offset = value_as_long (fetch (0));
    }
  else if (p.location == DWARF_VALUE_REGISTER)
    p.v.regno = value_as_long (fetch (0));
  else
    {
      /* DWARF_VALUE_STACK: keep the computed value itself.  */
      p.v.value = fetch (0);
    }
}
295
296 /* Evaluate the expression at ADDR (LEN bytes long). */
297
298 void
299 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
300 {
301 int old_recursion_depth = this->recursion_depth;
302
303 execute_stack_op (addr, addr + len);
304
305 /* RECURSION_DEPTH becomes invalid if an exception was thrown here. */
306
307 gdb_assert (this->recursion_depth == old_recursion_depth);
308 }
309
310 /* Helper to read a uleb128 value or throw an error. */
311
312 const gdb_byte *
313 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
314 uint64_t *r)
315 {
316 buf = gdb_read_uleb128 (buf, buf_end, r);
317 if (buf == NULL)
318 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
319 return buf;
320 }
321
322 /* Helper to read a sleb128 value or throw an error. */
323
324 const gdb_byte *
325 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
326 int64_t *r)
327 {
328 buf = gdb_read_sleb128 (buf, buf_end, r);
329 if (buf == NULL)
330 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
331 return buf;
332 }
333
334 const gdb_byte *
335 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
336 {
337 buf = gdb_skip_leb128 (buf, buf_end);
338 if (buf == NULL)
339 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
340 return buf;
341 }
342 \f
343
344 /* Check that the current operator is either at the end of an
345 expression, or that it is followed by a composition operator or by
346 DW_OP_GNU_uninit (which should terminate the expression). */
347
348 void
349 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
350 const char *op_name)
351 {
352 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
353 && *op_ptr != DW_OP_GNU_uninit)
354 error (_("DWARF-2 expression error: `%s' operations must be "
355 "used either alone or in conjunction with DW_OP_piece "
356 "or DW_OP_bit_piece."),
357 op_name);
358 }
359
360 /* Return true iff the types T1 and T2 are "the same". This only does
361 checks that might reasonably be needed to compare DWARF base
362 types. */
363
364 static int
365 base_types_equal_p (struct type *t1, struct type *t2)
366 {
367 if (t1->code () != t2->code ())
368 return 0;
369 if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
370 return 0;
371 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
372 }
373
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  Accepted forms are
   DW_OP_reg0..DW_OP_reg31, DW_OP_regx, and DW_OP_regval_type /
   DW_OP_GNU_regval_type; in every case the operation must consume the
   whole block.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      /* DW_OP_reg0..DW_OP_reg31 encode the register number in the
	 opcode itself and must be the block's only byte.  */
      if (buf_end - buf != 1)
	return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
    {
      /* Opcode, then a uleb128 register number, then a leb128 type
	 DIE offset (skipped here -- only the register matters).  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
	return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      /* Opcode followed by a uleb128 register number.  */
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return -1;
    }
  else
    return -1;
  /* The operation must consume the whole block and the register
     number must fit in an int (the return type).  */
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
414
415 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
416 DW_OP_deref* return the DWARF register number. Otherwise return -1.
417 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
418 size from DW_OP_deref_size. */
419
420 int
421 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
422 CORE_ADDR *deref_size_return)
423 {
424 uint64_t dwarf_reg;
425 int64_t offset;
426
427 if (buf_end <= buf)
428 return -1;
429
430 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
431 {
432 dwarf_reg = *buf - DW_OP_breg0;
433 buf++;
434 if (buf >= buf_end)
435 return -1;
436 }
437 else if (*buf == DW_OP_bregx)
438 {
439 buf++;
440 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
441 if (buf == NULL)
442 return -1;
443 if ((int) dwarf_reg != dwarf_reg)
444 return -1;
445 }
446 else
447 return -1;
448
449 buf = gdb_read_sleb128 (buf, buf_end, &offset);
450 if (buf == NULL)
451 return -1;
452 if (offset != 0)
453 return -1;
454
455 if (*buf == DW_OP_deref)
456 {
457 buf++;
458 *deref_size_return = -1;
459 }
460 else if (*buf == DW_OP_deref_size)
461 {
462 buf++;
463 if (buf >= buf_end)
464 return -1;
465 *deref_size_return = *buf++;
466 }
467 else
468 return -1;
469
470 if (buf != buf_end)
471 return -1;
472
473 return dwarf_reg;
474 }
475
476 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
477 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
478
479 int
480 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
481 CORE_ADDR *fb_offset_return)
482 {
483 int64_t fb_offset;
484
485 if (buf_end <= buf)
486 return 0;
487
488 if (*buf != DW_OP_fbreg)
489 return 0;
490 buf++;
491
492 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
493 if (buf == NULL)
494 return 0;
495 *fb_offset_return = fb_offset;
496 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
497 return 0;
498
499 return 1;
500 }
501
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
			  const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      /* Register number is encoded in the opcode itself.  */
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      /* Otherwise only DW_OP_bregx (opcode + uleb128 register
	 number) is accepted.  */
      if (*buf != DW_OP_bregx)
	return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
	return 0;
    }

  /* The register must map to GDBARCH's stack pointer.  */
  if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  /* The operation must consume the whole block, and the offset must
     survive the round trip through CORE_ADDR unchanged.  */
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
543
544 /* The engine for the expression evaluator. Using the context in this
545 object, evaluate the expression between OP_PTR and OP_END. */
546
547 void
548 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
549 const gdb_byte *op_end)
550 {
551 enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
552 /* Old-style "untyped" DWARF values need special treatment in a
553 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
554 a special type for these values so we can distinguish them from
555 values that have an explicit type, because explicitly-typed
556 values do not need special treatment. This special type must be
557 different (in the `==' sense) from any base type coming from the
558 CU. */
559 struct type *address_type = this->address_type ();
560
561 this->location = DWARF_VALUE_MEMORY;
562 this->initialized = 1; /* Default is initialized. */
563
564 if (this->recursion_depth > this->max_recursion_depth)
565 error (_("DWARF-2 expression error: Loop detected (%d)."),
566 this->recursion_depth);
567 this->recursion_depth++;
568
569 while (op_ptr < op_end)
570 {
571 enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
572 ULONGEST result;
573 /* Assume the value is not in stack memory.
574 Code that knows otherwise sets this to true.
575 Some arithmetic on stack addresses can probably be assumed to still
576 be a stack address, but we skip this complication for now.
577 This is just an optimization, so it's always ok to punt
578 and leave this as false. */
579 bool in_stack_memory = false;
580 uint64_t uoffset, reg;
581 int64_t offset;
582 struct value *result_val = NULL;
583
584 /* The DWARF expression might have a bug causing an infinite
585 loop. In that case, quitting is the only way out. */
586 QUIT;
587
588 switch (op)
589 {
590 case DW_OP_lit0:
591 case DW_OP_lit1:
592 case DW_OP_lit2:
593 case DW_OP_lit3:
594 case DW_OP_lit4:
595 case DW_OP_lit5:
596 case DW_OP_lit6:
597 case DW_OP_lit7:
598 case DW_OP_lit8:
599 case DW_OP_lit9:
600 case DW_OP_lit10:
601 case DW_OP_lit11:
602 case DW_OP_lit12:
603 case DW_OP_lit13:
604 case DW_OP_lit14:
605 case DW_OP_lit15:
606 case DW_OP_lit16:
607 case DW_OP_lit17:
608 case DW_OP_lit18:
609 case DW_OP_lit19:
610 case DW_OP_lit20:
611 case DW_OP_lit21:
612 case DW_OP_lit22:
613 case DW_OP_lit23:
614 case DW_OP_lit24:
615 case DW_OP_lit25:
616 case DW_OP_lit26:
617 case DW_OP_lit27:
618 case DW_OP_lit28:
619 case DW_OP_lit29:
620 case DW_OP_lit30:
621 case DW_OP_lit31:
622 result = op - DW_OP_lit0;
623 result_val = value_from_ulongest (address_type, result);
624 break;
625
626 case DW_OP_addr:
627 result = extract_unsigned_integer (op_ptr,
628 this->addr_size, byte_order);
629 op_ptr += this->addr_size;
630 /* Some versions of GCC emit DW_OP_addr before
631 DW_OP_GNU_push_tls_address. In this case the value is an
632 index, not an address. We don't support things like
633 branching between the address and the TLS op. */
634 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
635 result += this->per_objfile->objfile->text_section_offset ();
636 result_val = value_from_ulongest (address_type, result);
637 break;
638
639 case DW_OP_addrx:
640 case DW_OP_GNU_addr_index:
641 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
642 result = this->get_addr_index (uoffset);
643 result += this->per_objfile->objfile->text_section_offset ();
644 result_val = value_from_ulongest (address_type, result);
645 break;
646 case DW_OP_GNU_const_index:
647 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
648 result = this->get_addr_index (uoffset);
649 result_val = value_from_ulongest (address_type, result);
650 break;
651
652 case DW_OP_const1u:
653 result = extract_unsigned_integer (op_ptr, 1, byte_order);
654 result_val = value_from_ulongest (address_type, result);
655 op_ptr += 1;
656 break;
657 case DW_OP_const1s:
658 result = extract_signed_integer (op_ptr, 1, byte_order);
659 result_val = value_from_ulongest (address_type, result);
660 op_ptr += 1;
661 break;
662 case DW_OP_const2u:
663 result = extract_unsigned_integer (op_ptr, 2, byte_order);
664 result_val = value_from_ulongest (address_type, result);
665 op_ptr += 2;
666 break;
667 case DW_OP_const2s:
668 result = extract_signed_integer (op_ptr, 2, byte_order);
669 result_val = value_from_ulongest (address_type, result);
670 op_ptr += 2;
671 break;
672 case DW_OP_const4u:
673 result = extract_unsigned_integer (op_ptr, 4, byte_order);
674 result_val = value_from_ulongest (address_type, result);
675 op_ptr += 4;
676 break;
677 case DW_OP_const4s:
678 result = extract_signed_integer (op_ptr, 4, byte_order);
679 result_val = value_from_ulongest (address_type, result);
680 op_ptr += 4;
681 break;
682 case DW_OP_const8u:
683 result = extract_unsigned_integer (op_ptr, 8, byte_order);
684 result_val = value_from_ulongest (address_type, result);
685 op_ptr += 8;
686 break;
687 case DW_OP_const8s:
688 result = extract_signed_integer (op_ptr, 8, byte_order);
689 result_val = value_from_ulongest (address_type, result);
690 op_ptr += 8;
691 break;
692 case DW_OP_constu:
693 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
694 result = uoffset;
695 result_val = value_from_ulongest (address_type, result);
696 break;
697 case DW_OP_consts:
698 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
699 result = offset;
700 result_val = value_from_ulongest (address_type, result);
701 break;
702
703 /* The DW_OP_reg operations are required to occur alone in
704 location expressions. */
705 case DW_OP_reg0:
706 case DW_OP_reg1:
707 case DW_OP_reg2:
708 case DW_OP_reg3:
709 case DW_OP_reg4:
710 case DW_OP_reg5:
711 case DW_OP_reg6:
712 case DW_OP_reg7:
713 case DW_OP_reg8:
714 case DW_OP_reg9:
715 case DW_OP_reg10:
716 case DW_OP_reg11:
717 case DW_OP_reg12:
718 case DW_OP_reg13:
719 case DW_OP_reg14:
720 case DW_OP_reg15:
721 case DW_OP_reg16:
722 case DW_OP_reg17:
723 case DW_OP_reg18:
724 case DW_OP_reg19:
725 case DW_OP_reg20:
726 case DW_OP_reg21:
727 case DW_OP_reg22:
728 case DW_OP_reg23:
729 case DW_OP_reg24:
730 case DW_OP_reg25:
731 case DW_OP_reg26:
732 case DW_OP_reg27:
733 case DW_OP_reg28:
734 case DW_OP_reg29:
735 case DW_OP_reg30:
736 case DW_OP_reg31:
737 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
738
739 result = op - DW_OP_reg0;
740 result_val = value_from_ulongest (address_type, result);
741 this->location = DWARF_VALUE_REGISTER;
742 break;
743
744 case DW_OP_regx:
745 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
746 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
747
748 result = reg;
749 result_val = value_from_ulongest (address_type, result);
750 this->location = DWARF_VALUE_REGISTER;
751 break;
752
753 case DW_OP_implicit_value:
754 {
755 uint64_t len;
756
757 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
758 if (op_ptr + len > op_end)
759 error (_("DW_OP_implicit_value: too few bytes available."));
760 this->len = len;
761 this->data = op_ptr;
762 this->location = DWARF_VALUE_LITERAL;
763 op_ptr += len;
764 dwarf_expr_require_composition (op_ptr, op_end,
765 "DW_OP_implicit_value");
766 }
767 goto no_push;
768
769 case DW_OP_stack_value:
770 this->location = DWARF_VALUE_STACK;
771 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
772 goto no_push;
773
774 case DW_OP_implicit_pointer:
775 case DW_OP_GNU_implicit_pointer:
776 {
777 int64_t len;
778
779 if (this->ref_addr_size == -1)
780 error (_("DWARF-2 expression error: DW_OP_implicit_pointer "
781 "is not allowed in frame context"));
782
783 /* The referred-to DIE of sect_offset kind. */
784 this->len = extract_unsigned_integer (op_ptr, this->ref_addr_size,
785 byte_order);
786 op_ptr += this->ref_addr_size;
787
788 /* The byte offset into the data. */
789 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
790 result = (ULONGEST) len;
791 result_val = value_from_ulongest (address_type, result);
792
793 this->location = DWARF_VALUE_IMPLICIT_POINTER;
794 dwarf_expr_require_composition (op_ptr, op_end,
795 "DW_OP_implicit_pointer");
796 }
797 break;
798
799 case DW_OP_breg0:
800 case DW_OP_breg1:
801 case DW_OP_breg2:
802 case DW_OP_breg3:
803 case DW_OP_breg4:
804 case DW_OP_breg5:
805 case DW_OP_breg6:
806 case DW_OP_breg7:
807 case DW_OP_breg8:
808 case DW_OP_breg9:
809 case DW_OP_breg10:
810 case DW_OP_breg11:
811 case DW_OP_breg12:
812 case DW_OP_breg13:
813 case DW_OP_breg14:
814 case DW_OP_breg15:
815 case DW_OP_breg16:
816 case DW_OP_breg17:
817 case DW_OP_breg18:
818 case DW_OP_breg19:
819 case DW_OP_breg20:
820 case DW_OP_breg21:
821 case DW_OP_breg22:
822 case DW_OP_breg23:
823 case DW_OP_breg24:
824 case DW_OP_breg25:
825 case DW_OP_breg26:
826 case DW_OP_breg27:
827 case DW_OP_breg28:
828 case DW_OP_breg29:
829 case DW_OP_breg30:
830 case DW_OP_breg31:
831 {
832 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
833 result = this->read_addr_from_reg (op - DW_OP_breg0);
834 result += offset;
835 result_val = value_from_ulongest (address_type, result);
836 }
837 break;
838 case DW_OP_bregx:
839 {
840 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
841 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
842 result = this->read_addr_from_reg (reg);
843 result += offset;
844 result_val = value_from_ulongest (address_type, result);
845 }
846 break;
847 case DW_OP_fbreg:
848 {
849 const gdb_byte *datastart;
850 size_t datalen;
851
852 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
853
854 /* Rather than create a whole new context, we simply
855 backup the current stack locally and install a new empty stack,
856 then reset it afterwards, effectively erasing whatever the
857 recursive call put there. */
858 std::vector<dwarf_stack_value> saved_stack = std::move (stack);
859 stack.clear ();
860
861 /* FIXME: cagney/2003-03-26: This code should be using
862 get_frame_base_address(), and then implement a dwarf2
863 specific this_base method. */
864 this->get_frame_base (&datastart, &datalen);
865 eval (datastart, datalen);
866 if (this->location == DWARF_VALUE_MEMORY)
867 result = fetch_address (0);
868 else if (this->location == DWARF_VALUE_REGISTER)
869 result = this->read_addr_from_reg (value_as_long (fetch (0)));
870 else
871 error (_("Not implemented: computing frame "
872 "base using explicit value operator"));
873 result = result + offset;
874 result_val = value_from_ulongest (address_type, result);
875 in_stack_memory = true;
876
877 /* Restore the content of the original stack. */
878 stack = std::move (saved_stack);
879
880 this->location = DWARF_VALUE_MEMORY;
881 }
882 break;
883
884 case DW_OP_dup:
885 result_val = fetch (0);
886 in_stack_memory = fetch_in_stack_memory (0);
887 break;
888
889 case DW_OP_drop:
890 pop ();
891 goto no_push;
892
893 case DW_OP_pick:
894 offset = *op_ptr++;
895 result_val = fetch (offset);
896 in_stack_memory = fetch_in_stack_memory (offset);
897 break;
898
899 case DW_OP_swap:
900 {
901 if (stack.size () < 2)
902 error (_("Not enough elements for "
903 "DW_OP_swap. Need 2, have %zu."),
904 stack.size ());
905
906 dwarf_stack_value &t1 = stack[stack.size () - 1];
907 dwarf_stack_value &t2 = stack[stack.size () - 2];
908 std::swap (t1, t2);
909 goto no_push;
910 }
911
912 case DW_OP_over:
913 result_val = fetch (1);
914 in_stack_memory = fetch_in_stack_memory (1);
915 break;
916
917 case DW_OP_rot:
918 {
919 if (stack.size () < 3)
920 error (_("Not enough elements for "
921 "DW_OP_rot. Need 3, have %zu."),
922 stack.size ());
923
924 dwarf_stack_value temp = stack[stack.size () - 1];
925 stack[stack.size () - 1] = stack[stack.size () - 2];
926 stack[stack.size () - 2] = stack[stack.size () - 3];
927 stack[stack.size () - 3] = temp;
928 goto no_push;
929 }
930
931 case DW_OP_deref:
932 case DW_OP_deref_size:
933 case DW_OP_deref_type:
934 case DW_OP_GNU_deref_type:
935 {
936 int addr_size = (op == DW_OP_deref ? this->addr_size : *op_ptr++);
937 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
938 CORE_ADDR addr = fetch_address (0);
939 struct type *type;
940
941 pop ();
942
943 if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
944 {
945 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
946 cu_offset type_die_cu_off = (cu_offset) uoffset;
947 type = get_base_type (type_die_cu_off, 0);
948 }
949 else
950 type = address_type;
951
952 this->read_mem (buf, addr, addr_size);
953
954 /* If the size of the object read from memory is different
955 from the type length, we need to zero-extend it. */
956 if (TYPE_LENGTH (type) != addr_size)
957 {
958 ULONGEST datum =
959 extract_unsigned_integer (buf, addr_size, byte_order);
960
961 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
962 store_unsigned_integer (buf, TYPE_LENGTH (type),
963 byte_order, datum);
964 }
965
966 result_val = value_from_contents_and_address (type, buf, addr);
967 break;
968 }
969
970 case DW_OP_abs:
971 case DW_OP_neg:
972 case DW_OP_not:
973 case DW_OP_plus_uconst:
974 {
975 /* Unary operations. */
976 result_val = fetch (0);
977 pop ();
978
979 switch (op)
980 {
981 case DW_OP_abs:
982 if (value_less (result_val,
983 value_zero (value_type (result_val), not_lval)))
984 result_val = value_neg (result_val);
985 break;
986 case DW_OP_neg:
987 result_val = value_neg (result_val);
988 break;
989 case DW_OP_not:
990 dwarf_require_integral (value_type (result_val));
991 result_val = value_complement (result_val);
992 break;
993 case DW_OP_plus_uconst:
994 dwarf_require_integral (value_type (result_val));
995 result = value_as_long (result_val);
996 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
997 result += reg;
998 result_val = value_from_ulongest (address_type, result);
999 break;
1000 }
1001 }
1002 break;
1003
1004 case DW_OP_and:
1005 case DW_OP_div:
1006 case DW_OP_minus:
1007 case DW_OP_mod:
1008 case DW_OP_mul:
1009 case DW_OP_or:
1010 case DW_OP_plus:
1011 case DW_OP_shl:
1012 case DW_OP_shr:
1013 case DW_OP_shra:
1014 case DW_OP_xor:
1015 case DW_OP_le:
1016 case DW_OP_ge:
1017 case DW_OP_eq:
1018 case DW_OP_lt:
1019 case DW_OP_gt:
1020 case DW_OP_ne:
1021 {
1022 /* Binary operations. */
1023 struct value *first, *second;
1024
1025 second = fetch (0);
1026 pop ();
1027
1028 first = fetch (0);
1029 pop ();
1030
1031 if (! base_types_equal_p (value_type (first), value_type (second)))
1032 error (_("Incompatible types on DWARF stack"));
1033
1034 switch (op)
1035 {
1036 case DW_OP_and:
1037 dwarf_require_integral (value_type (first));
1038 dwarf_require_integral (value_type (second));
1039 result_val = value_binop (first, second, BINOP_BITWISE_AND);
1040 break;
1041 case DW_OP_div:
1042 result_val = value_binop (first, second, BINOP_DIV);
1043 break;
1044 case DW_OP_minus:
1045 result_val = value_binop (first, second, BINOP_SUB);
1046 break;
1047 case DW_OP_mod:
1048 {
1049 int cast_back = 0;
1050 struct type *orig_type = value_type (first);
1051
1052 /* We have to special-case "old-style" untyped values
1053 -- these must have mod computed using unsigned
1054 math. */
1055 if (orig_type == address_type)
1056 {
1057 struct type *utype
1058 = get_unsigned_type (this->gdbarch, orig_type);
1059
1060 cast_back = 1;
1061 first = value_cast (utype, first);
1062 second = value_cast (utype, second);
1063 }
1064 /* Note that value_binop doesn't handle float or
1065 decimal float here. This seems unimportant. */
1066 result_val = value_binop (first, second, BINOP_MOD);
1067 if (cast_back)
1068 result_val = value_cast (orig_type, result_val);
1069 }
1070 break;
1071 case DW_OP_mul:
1072 result_val = value_binop (first, second, BINOP_MUL);
1073 break;
1074 case DW_OP_or:
1075 dwarf_require_integral (value_type (first));
1076 dwarf_require_integral (value_type (second));
1077 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
1078 break;
1079 case DW_OP_plus:
1080 result_val = value_binop (first, second, BINOP_ADD);
1081 break;
1082 case DW_OP_shl:
1083 dwarf_require_integral (value_type (first));
1084 dwarf_require_integral (value_type (second));
1085 result_val = value_binop (first, second, BINOP_LSH);
1086 break;
1087 case DW_OP_shr:
1088 dwarf_require_integral (value_type (first));
1089 dwarf_require_integral (value_type (second));
1090 if (!TYPE_UNSIGNED (value_type (first)))
1091 {
1092 struct type *utype
1093 = get_unsigned_type (this->gdbarch, value_type (first));
1094
1095 first = value_cast (utype, first);
1096 }
1097
1098 result_val = value_binop (first, second, BINOP_RSH);
1099 /* Make sure we wind up with the same type we started
1100 with. */
1101 if (value_type (result_val) != value_type (second))
1102 result_val = value_cast (value_type (second), result_val);
1103 break;
1104 case DW_OP_shra:
1105 dwarf_require_integral (value_type (first));
1106 dwarf_require_integral (value_type (second));
1107 if (TYPE_UNSIGNED (value_type (first)))
1108 {
1109 struct type *stype
1110 = get_signed_type (this->gdbarch, value_type (first));
1111
1112 first = value_cast (stype, first);
1113 }
1114
1115 result_val = value_binop (first, second, BINOP_RSH);
1116 /* Make sure we wind up with the same type we started
1117 with. */
1118 if (value_type (result_val) != value_type (second))
1119 result_val = value_cast (value_type (second), result_val);
1120 break;
1121 case DW_OP_xor:
1122 dwarf_require_integral (value_type (first));
1123 dwarf_require_integral (value_type (second));
1124 result_val = value_binop (first, second, BINOP_BITWISE_XOR);
1125 break;
1126 case DW_OP_le:
1127 /* A <= B is !(B < A). */
1128 result = ! value_less (second, first);
1129 result_val = value_from_ulongest (address_type, result);
1130 break;
1131 case DW_OP_ge:
1132 /* A >= B is !(A < B). */
1133 result = ! value_less (first, second);
1134 result_val = value_from_ulongest (address_type, result);
1135 break;
1136 case DW_OP_eq:
1137 result = value_equal (first, second);
1138 result_val = value_from_ulongest (address_type, result);
1139 break;
1140 case DW_OP_lt:
1141 result = value_less (first, second);
1142 result_val = value_from_ulongest (address_type, result);
1143 break;
1144 case DW_OP_gt:
1145 /* A > B is B < A. */
1146 result = value_less (second, first);
1147 result_val = value_from_ulongest (address_type, result);
1148 break;
1149 case DW_OP_ne:
1150 result = ! value_equal (first, second);
1151 result_val = value_from_ulongest (address_type, result);
1152 break;
1153 default:
1154 internal_error (__FILE__, __LINE__,
1155 _("Can't be reached."));
1156 }
1157 }
1158 break;
1159
1160 case DW_OP_call_frame_cfa:
1161 result = this->get_frame_cfa ();
1162 result_val = value_from_ulongest (address_type, result);
1163 in_stack_memory = true;
1164 break;
1165
1166 case DW_OP_GNU_push_tls_address:
1167 case DW_OP_form_tls_address:
1168 /* Variable is at a constant offset in the thread-local
1169 storage block into the objfile for the current thread and
1170 the dynamic linker module containing this expression. Here
1171 we return the offset from that base.  The top of the
1172 stack has the offset from the beginning of the thread
1173 control block at which the variable is located. Nothing
1174 should follow this operator, so the top of stack would be
1175 returned. */
1176 result = value_as_long (fetch (0));
1177 pop ();
1178 result = this->get_tls_address (result);
1179 result_val = value_from_ulongest (address_type, result);
1180 break;
1181
1182 case DW_OP_skip:
1183 offset = extract_signed_integer (op_ptr, 2, byte_order);
1184 op_ptr += 2;
1185 op_ptr += offset;
1186 goto no_push;
1187
1188 case DW_OP_bra:
1189 {
1190 struct value *val;
1191
1192 offset = extract_signed_integer (op_ptr, 2, byte_order);
1193 op_ptr += 2;
1194 val = fetch (0);
1195 dwarf_require_integral (value_type (val));
1196 if (value_as_long (val) != 0)
1197 op_ptr += offset;
1198 pop ();
1199 }
1200 goto no_push;
1201
1202 case DW_OP_nop:
1203 goto no_push;
1204
1205 case DW_OP_piece:
1206 {
1207 uint64_t size;
1208
1209 /* Record the piece. */
1210 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1211 add_piece (8 * size, 0);
1212
1213 /* Pop off the address/regnum, and reset the location
1214 type. */
1215 if (this->location != DWARF_VALUE_LITERAL
1216 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1217 pop ();
1218 this->location = DWARF_VALUE_MEMORY;
1219 }
1220 goto no_push;
1221
1222 case DW_OP_bit_piece:
1223 {
1224 uint64_t size, uleb_offset;
1225
1226 /* Record the piece. */
1227 op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
1228 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
1229 add_piece (size, uleb_offset);
1230
1231 /* Pop off the address/regnum, and reset the location
1232 type. */
1233 if (this->location != DWARF_VALUE_LITERAL
1234 && this->location != DWARF_VALUE_OPTIMIZED_OUT)
1235 pop ();
1236 this->location = DWARF_VALUE_MEMORY;
1237 }
1238 goto no_push;
1239
1240 case DW_OP_GNU_uninit:
1241 if (op_ptr != op_end)
1242 error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
1243 "be the very last op."));
1244
1245 this->initialized = 0;
1246 goto no_push;
1247
1248 case DW_OP_call2:
1249 {
1250 cu_offset cu_off
1251 = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
1252 op_ptr += 2;
1253 this->dwarf_call (cu_off);
1254 }
1255 goto no_push;
1256
1257 case DW_OP_call4:
1258 {
1259 cu_offset cu_off
1260 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1261 op_ptr += 4;
1262 this->dwarf_call (cu_off);
1263 }
1264 goto no_push;
1265
1266 case DW_OP_GNU_variable_value:
1267 {
1268 sect_offset sect_off
1269 = (sect_offset) extract_unsigned_integer (op_ptr,
1270 this->ref_addr_size,
1271 byte_order);
1272 op_ptr += this->ref_addr_size;
1273 result_val = this->dwarf_variable_value (sect_off);
1274 }
1275 break;
1276
1277 case DW_OP_entry_value:
1278 case DW_OP_GNU_entry_value:
1279 {
1280 uint64_t len;
1281 CORE_ADDR deref_size;
1282 union call_site_parameter_u kind_u;
1283
1284 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1285 if (op_ptr + len > op_end)
1286 error (_("DW_OP_entry_value: too few bytes available."));
1287
1288 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
1289 if (kind_u.dwarf_reg != -1)
1290 {
1291 op_ptr += len;
1292 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1293 kind_u,
1294 -1 /* deref_size */);
1295 goto no_push;
1296 }
1297
1298 kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
1299 op_ptr + len,
1300 &deref_size);
1301 if (kind_u.dwarf_reg != -1)
1302 {
1303 if (deref_size == -1)
1304 deref_size = this->addr_size;
1305 op_ptr += len;
1306 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
1307 kind_u, deref_size);
1308 goto no_push;
1309 }
1310
1311 error (_("DWARF-2 expression error: DW_OP_entry_value is "
1312 "supported only for single DW_OP_reg* "
1313 "or for DW_OP_breg*(0)+DW_OP_deref*"));
1314 }
1315
1316 case DW_OP_GNU_parameter_ref:
1317 {
1318 union call_site_parameter_u kind_u;
1319
1320 kind_u.param_cu_off
1321 = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
1322 op_ptr += 4;
1323 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
1324 kind_u,
1325 -1 /* deref_size */);
1326 }
1327 goto no_push;
1328
1329 case DW_OP_const_type:
1330 case DW_OP_GNU_const_type:
1331 {
1332 int n;
1333 const gdb_byte *data;
1334 struct type *type;
1335
1336 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1337 cu_offset type_die_cu_off = (cu_offset) uoffset;
1338
1339 n = *op_ptr++;
1340 data = op_ptr;
1341 op_ptr += n;
1342
1343 type = get_base_type (type_die_cu_off, n);
1344 result_val = value_from_contents (type, data);
1345 }
1346 break;
1347
1348 case DW_OP_regval_type:
1349 case DW_OP_GNU_regval_type:
1350 {
1351 struct type *type;
1352
1353 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1354 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1355 cu_offset type_die_cu_off = (cu_offset) uoffset;
1356
1357 type = get_base_type (type_die_cu_off, 0);
1358 result_val = this->get_reg_value (type, reg);
1359 }
1360 break;
1361
1362 case DW_OP_convert:
1363 case DW_OP_GNU_convert:
1364 case DW_OP_reinterpret:
1365 case DW_OP_GNU_reinterpret:
1366 {
1367 struct type *type;
1368
1369 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1370 cu_offset type_die_cu_off = (cu_offset) uoffset;
1371
1372 if (to_underlying (type_die_cu_off) == 0)
1373 type = address_type;
1374 else
1375 type = get_base_type (type_die_cu_off, 0);
1376
1377 result_val = fetch (0);
1378 pop ();
1379
1380 if (op == DW_OP_convert || op == DW_OP_GNU_convert)
1381 result_val = value_cast (type, result_val);
1382 else if (type == value_type (result_val))
1383 {
1384 /* Nothing. */
1385 }
1386 else if (TYPE_LENGTH (type)
1387 != TYPE_LENGTH (value_type (result_val)))
1388 error (_("DW_OP_reinterpret has wrong size"));
1389 else
1390 result_val
1391 = value_from_contents (type,
1392 value_contents_all (result_val));
1393 }
1394 break;
1395
1396 case DW_OP_push_object_address:
1397 /* Return the address of the object we are currently observing. */
1398 result = this->get_object_address ();
1399 result_val = value_from_ulongest (address_type, result);
1400 break;
1401
1402 default:
1403 error (_("Unhandled dwarf expression opcode 0x%x"), op);
1404 }
1405
1406 /* Most things push a result value. */
1407 gdb_assert (result_val != NULL);
1408 push (result_val, in_stack_memory);
1409 no_push:
1410 ;
1411 }
1412
1413 /* To simplify our main caller, if the result is an implicit
1414 pointer, then make a pieced value. This is ok because we can't
1415 have implicit pointers in contexts where pieces are invalid. */
1416 if (this->location == DWARF_VALUE_IMPLICIT_POINTER)
1417 add_piece (8 * this->addr_size, 0);
1418
1419 this->recursion_depth--;
1420 gdb_assert (this->recursion_depth >= 0);
1421 }
1422
1423 void _initialize_dwarf2expr ();
1424 void
1425 _initialize_dwarf2expr ()
1426 {
1427 dwarf_arch_cookie
1428 = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
1429 }