]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/dwarf2expr.c
* defs.h (extract_signed_integer, extract_unsigned_integer,
[thirdparty/binutils-gdb.git] / gdb / dwarf2expr.c
1 /* DWARF 2 Expression Evaluator.
2
3 Copyright (C) 2001, 2002, 2003, 2005, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 Contributed by Daniel Berlin (dan@dberlin.org)
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "elf/dwarf2.h"
29 #include "dwarf2expr.h"
30 #include "gdb_assert.h"
31
32 /* Local prototypes. */
33
34 static void execute_stack_op (struct dwarf_expr_context *,
35 gdb_byte *, gdb_byte *);
36 static struct type *unsigned_address_type (struct gdbarch *, int);
37
38 /* Create a new context for the expression evaluator. */
39
40 struct dwarf_expr_context *
41 new_dwarf_expr_context (void)
42 {
43 struct dwarf_expr_context *retval;
44 retval = xcalloc (1, sizeof (struct dwarf_expr_context));
45 retval->stack_len = 0;
46 retval->stack_allocated = 10;
47 retval->stack = xmalloc (retval->stack_allocated * sizeof (CORE_ADDR));
48 retval->num_pieces = 0;
49 retval->pieces = 0;
50 retval->max_recursion_depth = 0x100;
51 return retval;
52 }
53
/* Release the memory allocated to CTX.  The stack and the piece list
   are owned by the context, so they are freed along with it.  xfree
   tolerates NULL, so a context that never recorded a piece is fine.  */

void
free_dwarf_expr_context (struct dwarf_expr_context *ctx)
{
  xfree (ctx->stack);
  xfree (ctx->pieces);
  xfree (ctx);
}
63
64 /* Expand the memory allocated to CTX's stack to contain at least
65 NEED more elements than are currently used. */
66
67 static void
68 dwarf_expr_grow_stack (struct dwarf_expr_context *ctx, size_t need)
69 {
70 if (ctx->stack_len + need > ctx->stack_allocated)
71 {
72 size_t newlen = ctx->stack_len + need + 10;
73 ctx->stack = xrealloc (ctx->stack,
74 newlen * sizeof (CORE_ADDR));
75 ctx->stack_allocated = newlen;
76 }
77 }
78
/* Push VALUE onto CTX's stack.  Grows the stack by one slot first,
   then stores VALUE at the new top.  */

void
dwarf_expr_push (struct dwarf_expr_context *ctx, CORE_ADDR value)
{
  dwarf_expr_grow_stack (ctx, 1);
  ctx->stack[ctx->stack_len++] = value;
}
87
/* Pop the top item off of CTX's stack.  Errors out on underflow.
   Only the length shrinks; the popped value is left in the buffer.  */

void
dwarf_expr_pop (struct dwarf_expr_context *ctx)
{
  if (ctx->stack_len <= 0)
    error (_("dwarf expression stack underflow"));
  ctx->stack_len--;
}
97
/* Retrieve the N'th item on CTX's stack.  N counts down from the top:
   N == 0 is the most recently pushed value.  Errors out if the stack
   does not have that many elements.  */

CORE_ADDR
dwarf_expr_fetch (struct dwarf_expr_context *ctx, int n)
{
  if (ctx->stack_len <= n)
    error (_("Asked for position %d of stack, stack only has %d elements on it."),
	   n, ctx->stack_len);
  return ctx->stack[ctx->stack_len - (1 + n)];

}
109
110 /* Add a new piece to CTX's piece list. */
111 static void
112 add_piece (struct dwarf_expr_context *ctx,
113 int in_reg, CORE_ADDR value, ULONGEST size)
114 {
115 struct dwarf_expr_piece *p;
116
117 ctx->num_pieces++;
118
119 if (ctx->pieces)
120 ctx->pieces = xrealloc (ctx->pieces,
121 (ctx->num_pieces
122 * sizeof (struct dwarf_expr_piece)));
123 else
124 ctx->pieces = xmalloc (ctx->num_pieces
125 * sizeof (struct dwarf_expr_piece));
126
127 p = &ctx->pieces[ctx->num_pieces - 1];
128 p->in_reg = in_reg;
129 p->value = value;
130 p->size = size;
131 }
132
/* Evaluate the expression at ADDR (LEN bytes long) using the context
   CTX.  execute_stack_op pairs each increment of recursion_depth with
   a decrement on normal return, so on a clean evaluation the depth
   must come back unchanged.  */

void
dwarf_expr_eval (struct dwarf_expr_context *ctx, gdb_byte *addr, size_t len)
{
  int old_recursion_depth = ctx->recursion_depth;

  execute_stack_op (ctx, addr, addr + len);

  /* CTX RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (ctx->recursion_depth == old_recursion_depth);
}
147
148 /* Decode the unsigned LEB128 constant at BUF into the variable pointed to
149 by R, and return the new value of BUF. Verify that it doesn't extend
150 past BUF_END. */
151
152 gdb_byte *
153 read_uleb128 (gdb_byte *buf, gdb_byte *buf_end, ULONGEST * r)
154 {
155 unsigned shift = 0;
156 ULONGEST result = 0;
157 gdb_byte byte;
158
159 while (1)
160 {
161 if (buf >= buf_end)
162 error (_("read_uleb128: Corrupted DWARF expression."));
163
164 byte = *buf++;
165 result |= (byte & 0x7f) << shift;
166 if ((byte & 0x80) == 0)
167 break;
168 shift += 7;
169 }
170 *r = result;
171 return buf;
172 }
173
174 /* Decode the signed LEB128 constant at BUF into the variable pointed to
175 by R, and return the new value of BUF. Verify that it doesn't extend
176 past BUF_END. */
177
178 gdb_byte *
179 read_sleb128 (gdb_byte *buf, gdb_byte *buf_end, LONGEST * r)
180 {
181 unsigned shift = 0;
182 LONGEST result = 0;
183 gdb_byte byte;
184
185 while (1)
186 {
187 if (buf >= buf_end)
188 error (_("read_sleb128: Corrupted DWARF expression."));
189
190 byte = *buf++;
191 result |= (byte & 0x7f) << shift;
192 shift += 7;
193 if ((byte & 0x80) == 0)
194 break;
195 }
196 if (shift < (sizeof (*r) * 8) && (byte & 0x40) != 0)
197 result |= -(1 << shift);
198
199 *r = result;
200 return buf;
201 }
202
/* Read an address of size ADDR_SIZE from BUF, and verify that it
   doesn't extend past BUF_END.  The bytes are interpreted in the
   target's byte order, taken from GDBARCH.  */

CORE_ADDR
dwarf2_read_address (struct gdbarch *gdbarch, gdb_byte *buf,
		     gdb_byte *buf_end, int addr_size)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR result;

  if (buf_end - buf < addr_size)
    error (_("dwarf2_read_address: Corrupted DWARF expression."));

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.

     The use of `unsigned_address_type' in the code below refers to
     the type of buf and has no bearing on the signedness of the
     address being returned.  */

  if (gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address
	     (gdbarch, unsigned_address_type (gdbarch, addr_size), buf);

  return extract_unsigned_integer (buf, addr_size, byte_order);
}
233
234 /* Return the type of an address of size ADDR_SIZE,
235 for unsigned arithmetic. */
236
237 static struct type *
238 unsigned_address_type (struct gdbarch *gdbarch, int addr_size)
239 {
240 switch (addr_size)
241 {
242 case 2:
243 return builtin_type (gdbarch)->builtin_uint16;
244 case 4:
245 return builtin_type (gdbarch)->builtin_uint32;
246 case 8:
247 return builtin_type (gdbarch)->builtin_uint64;
248 default:
249 internal_error (__FILE__, __LINE__,
250 _("Unsupported address size.\n"));
251 }
252 }
253
254 /* Return the type of an address of size ADDR_SIZE,
255 for signed arithmetic. */
256
257 static struct type *
258 signed_address_type (struct gdbarch *gdbarch, int addr_size)
259 {
260 switch (addr_size)
261 {
262 case 2:
263 return builtin_type (gdbarch)->builtin_int16;
264 case 4:
265 return builtin_type (gdbarch)->builtin_int32;
266 case 8:
267 return builtin_type (gdbarch)->builtin_int64;
268 default:
269 internal_error (__FILE__, __LINE__,
270 _("Unsupported address size.\n"));
271 }
272 }
273 \f
/* The engine for the expression evaluator.  Using the context in CTX,
   evaluate the expression between OP_PTR and OP_END.  Each opcode
   normally computes RESULT and falls through to the push at the bottom
   of the loop; opcodes that manipulate the stack directly jump to the
   no_push label instead.  Recursion (via dwarf_expr_eval from
   DW_OP_fbreg) is bounded by ctx->max_recursion_depth.  */

static void
execute_stack_op (struct dwarf_expr_context *ctx,
		  gdb_byte *op_ptr, gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (ctx->gdbarch);

  ctx->in_reg = 0;
  ctx->initialized = 1;  /* Default is initialized.  */

  if (ctx->recursion_depth > ctx->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
	   ctx->recursion_depth);
  ctx->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = *op_ptr++;
      CORE_ADDR result;
      ULONGEST uoffset, reg;
      LONGEST offset;

      switch (op)
	{
	/* Literals 0..31 push their own value.  */
	case DW_OP_lit0:
	case DW_OP_lit1:
	case DW_OP_lit2:
	case DW_OP_lit3:
	case DW_OP_lit4:
	case DW_OP_lit5:
	case DW_OP_lit6:
	case DW_OP_lit7:
	case DW_OP_lit8:
	case DW_OP_lit9:
	case DW_OP_lit10:
	case DW_OP_lit11:
	case DW_OP_lit12:
	case DW_OP_lit13:
	case DW_OP_lit14:
	case DW_OP_lit15:
	case DW_OP_lit16:
	case DW_OP_lit17:
	case DW_OP_lit18:
	case DW_OP_lit19:
	case DW_OP_lit20:
	case DW_OP_lit21:
	case DW_OP_lit22:
	case DW_OP_lit23:
	case DW_OP_lit24:
	case DW_OP_lit25:
	case DW_OP_lit26:
	case DW_OP_lit27:
	case DW_OP_lit28:
	case DW_OP_lit29:
	case DW_OP_lit30:
	case DW_OP_lit31:
	  result = op - DW_OP_lit0;
	  break;

	/* A machine address of the target's address size follows the
	   opcode inline.  */
	case DW_OP_addr:
	  result = dwarf2_read_address (ctx->gdbarch,
					op_ptr, op_end, ctx->addr_size);
	  op_ptr += ctx->addr_size;
	  break;

	/* Fixed-size inline constants, in the target byte order.  */
	case DW_OP_const1u:
	  result = extract_unsigned_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const1s:
	  result = extract_signed_integer (op_ptr, 1, byte_order);
	  op_ptr += 1;
	  break;
	case DW_OP_const2u:
	  result = extract_unsigned_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const2s:
	  result = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  break;
	case DW_OP_const4u:
	  result = extract_unsigned_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const4s:
	  result = extract_signed_integer (op_ptr, 4, byte_order);
	  op_ptr += 4;
	  break;
	case DW_OP_const8u:
	  result = extract_unsigned_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	case DW_OP_const8s:
	  result = extract_signed_integer (op_ptr, 8, byte_order);
	  op_ptr += 8;
	  break;
	case DW_OP_constu:
	  op_ptr = read_uleb128 (op_ptr, op_end, &uoffset);
	  result = uoffset;
	  break;
	case DW_OP_consts:
	  op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	  result = offset;
	  break;

	/* The DW_OP_reg operations are required to occur alone in
	   location expressions.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  /* DW_OP_piece and DW_OP_GNU_uninit are the only opcodes
	     permitted after a register location.  */
	  if (op_ptr != op_end
	      && *op_ptr != DW_OP_piece
	      && *op_ptr != DW_OP_GNU_uninit)
	    error (_("DWARF-2 expression error: DW_OP_reg operations must be "
		     "used either alone or in conjuction with DW_OP_piece."));

	  result = op - DW_OP_reg0;
	  ctx->in_reg = 1;

	  break;

	case DW_OP_regx:
	  op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	  if (op_ptr != op_end && *op_ptr != DW_OP_piece)
	    error (_("DWARF-2 expression error: DW_OP_reg operations must be "
		     "used either alone or in conjuction with DW_OP_piece."));

	  result = reg;
	  ctx->in_reg = 1;
	  break;

	/* Register contents plus a SLEB128 offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  {
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, op - DW_OP_breg0);
	    result += offset;
	  }
	  break;
	case DW_OP_bregx:
	  {
	    op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    result = (ctx->read_reg) (ctx->baton, reg);
	    result += offset;
	  }
	  break;
	case DW_OP_fbreg:
	  {
	    gdb_byte *datastart;
	    size_t datalen;
	    unsigned int before_stack_len;

	    op_ptr = read_sleb128 (op_ptr, op_end, &offset);
	    /* Rather than create a whole new context, we simply
	       record the stack length before execution, then reset it
	       afterwards, effectively erasing whatever the recursive
	       call put there.  */
	    before_stack_len = ctx->stack_len;
	    /* FIXME: cagney/2003-03-26: This code should be using
	       get_frame_base_address(), and then implement a dwarf2
	       specific this_base method.  */
	    (ctx->get_frame_base) (ctx->baton, &datastart, &datalen);
	    dwarf_expr_eval (ctx, datastart, datalen);
	    result = dwarf_expr_fetch (ctx, 0);
	    if (ctx->in_reg)
	      result = (ctx->read_reg) (ctx->baton, result);
	    result = result + offset;
	    ctx->stack_len = before_stack_len;
	    ctx->in_reg = 0;
	  }
	  break;

	/* Stack-manipulation opcodes; these bypass the push at the
	   bottom of the loop where appropriate.  */
	case DW_OP_dup:
	  result = dwarf_expr_fetch (ctx, 0);
	  break;

	case DW_OP_drop:
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_pick:
	  offset = *op_ptr++;
	  result = dwarf_expr_fetch (ctx, offset);
	  break;

	case DW_OP_swap:
	  {
	    CORE_ADDR t1, t2;

	    if (ctx->stack_len < 2)
	      error (_("Not enough elements for DW_OP_swap. Need 2, have %d."),
		     ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t1;
	    goto no_push;
	  }

	case DW_OP_over:
	  result = dwarf_expr_fetch (ctx, 1);
	  break;

	case DW_OP_rot:
	  {
	    CORE_ADDR t1, t2, t3;

	    if (ctx->stack_len < 3)
	      error (_("Not enough elements for DW_OP_rot. Need 3, have %d."),
		     ctx->stack_len);
	    t1 = ctx->stack[ctx->stack_len - 1];
	    t2 = ctx->stack[ctx->stack_len - 2];
	    t3 = ctx->stack[ctx->stack_len - 3];
	    ctx->stack[ctx->stack_len - 1] = t2;
	    ctx->stack[ctx->stack_len - 2] = t3;
	    ctx->stack[ctx->stack_len - 3] = t1;
	    goto no_push;
	  }

	case DW_OP_deref:
	case DW_OP_deref_size:
	case DW_OP_abs:
	case DW_OP_neg:
	case DW_OP_not:
	case DW_OP_plus_uconst:
	  /* Unary operations.  Pop the operand, then dispatch on the
	     opcode again; the shared push at the bottom of the loop
	     pushes the transformed RESULT.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);

	  switch (op)
	    {
	    case DW_OP_deref:
	      {
		gdb_byte *buf = alloca (ctx->addr_size);
		(ctx->read_mem) (ctx->baton, buf, result, ctx->addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + ctx->addr_size,
					      ctx->addr_size);
	      }
	      break;

	    case DW_OP_deref_size:
	      {
		/* Like DW_OP_deref, but the size to read follows the
		   opcode inline.  */
		int addr_size = *op_ptr++;
		gdb_byte *buf = alloca (addr_size);
		(ctx->read_mem) (ctx->baton, buf, result, addr_size);
		result = dwarf2_read_address (ctx->gdbarch,
					      buf, buf + addr_size,
					      addr_size);
	      }
	      break;

	    case DW_OP_abs:
	      /* NOTE(review): the cast truncates RESULT to 32 bits for
		 the sign test, so on a target with an 8-byte address
		 size a negative 64-bit value would not be detected
		 here -- confirm against ctx->addr_size.  */
	      if ((signed int) result < 0)
		result = -result;
	      break;
	    case DW_OP_neg:
	      result = -result;
	      break;
	    case DW_OP_not:
	      result = ~result;
	      break;
	    case DW_OP_plus_uconst:
	      op_ptr = read_uleb128 (op_ptr, op_end, &reg);
	      result += reg;
	      break;
	    }
	  break;

	case DW_OP_and:
	case DW_OP_div:
	case DW_OP_minus:
	case DW_OP_mod:
	case DW_OP_mul:
	case DW_OP_or:
	case DW_OP_plus:
	case DW_OP_shl:
	case DW_OP_shr:
	case DW_OP_shra:
	case DW_OP_xor:
	case DW_OP_le:
	case DW_OP_ge:
	case DW_OP_eq:
	case DW_OP_lt:
	case DW_OP_gt:
	case DW_OP_ne:
	  {
	    /* Binary operations.  Use the value engine to do computations in
	       the right width.  */
	    CORE_ADDR first, second;
	    enum exp_opcode binop;
	    struct value *val1, *val2;
	    struct type *stype, *utype;

	    /* SECOND is the top of the stack, FIRST the one below it,
	       so the computation is FIRST op SECOND.  */
	    second = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    first = dwarf_expr_fetch (ctx, 0);
	    dwarf_expr_pop (ctx);

	    /* Both operands default to unsigned address arithmetic;
	       DW_OP_shra below replaces VAL1 with a signed value.  */
	    utype = unsigned_address_type (ctx->gdbarch, ctx->addr_size);
	    stype = signed_address_type (ctx->gdbarch, ctx->addr_size);
	    val1 = value_from_longest (utype, first);
	    val2 = value_from_longest (utype, second);

	    switch (op)
	      {
	      case DW_OP_and:
		binop = BINOP_BITWISE_AND;
		break;
	      case DW_OP_div:
		binop = BINOP_DIV;
		break;
	      case DW_OP_minus:
		binop = BINOP_SUB;
		break;
	      case DW_OP_mod:
		binop = BINOP_MOD;
		break;
	      case DW_OP_mul:
		binop = BINOP_MUL;
		break;
	      case DW_OP_or:
		binop = BINOP_BITWISE_IOR;
		break;
	      case DW_OP_plus:
		binop = BINOP_ADD;
		break;
	      case DW_OP_shl:
		binop = BINOP_LSH;
		break;
	      case DW_OP_shr:
		binop = BINOP_RSH;
		break;
	      case DW_OP_shra:
		/* Arithmetic shift: same BINOP_RSH, but with a signed
		   left operand so the sign bit propagates.  */
		binop = BINOP_RSH;
		val1 = value_from_longest (stype, first);
		break;
	      case DW_OP_xor:
		binop = BINOP_BITWISE_XOR;
		break;
	      case DW_OP_le:
		binop = BINOP_LEQ;
		break;
	      case DW_OP_ge:
		binop = BINOP_GEQ;
		break;
	      case DW_OP_eq:
		binop = BINOP_EQUAL;
		break;
	      case DW_OP_lt:
		binop = BINOP_LESS;
		break;
	      case DW_OP_gt:
		binop = BINOP_GTR;
		break;
	      case DW_OP_ne:
		binop = BINOP_NOTEQUAL;
		break;
	      default:
		internal_error (__FILE__, __LINE__,
				_("Can't be reached."));
	      }
	    result = value_as_long (value_binop (val1, val2, binop));
	  }
	  break;

	case DW_OP_GNU_push_tls_address:
	  /* Variable is at a constant offset in the thread-local
	     storage block into the objfile for the current thread and
	     the dynamic linker module containing this expression.  Here
	     we return returns the offset from that base.  The top of the
	     stack has the offset from the beginning of the thread
	     control block at which the variable is located.  Nothing
	     should follow this operator, so the top of stack would be
	     returned.  */
	  result = dwarf_expr_fetch (ctx, 0);
	  dwarf_expr_pop (ctx);
	  result = (ctx->get_tls_address) (ctx->baton, result);
	  break;

	case DW_OP_skip:
	  /* Unconditional branch: a signed 2-byte offset relative to
	     the instruction following the offset itself.  */
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  op_ptr += offset;
	  goto no_push;

	case DW_OP_bra:
	  /* Conditional branch: taken when the popped top of stack is
	     non-zero.  */
	  offset = extract_signed_integer (op_ptr, 2, byte_order);
	  op_ptr += 2;
	  if (dwarf_expr_fetch (ctx, 0) != 0)
	    op_ptr += offset;
	  dwarf_expr_pop (ctx);
	  goto no_push;

	case DW_OP_nop:
	  goto no_push;

	case DW_OP_piece:
	  {
	    ULONGEST size;
	    CORE_ADDR addr_or_regnum;

	    /* Record the piece.  */
	    op_ptr = read_uleb128 (op_ptr, op_end, &size);
	    addr_or_regnum = dwarf_expr_fetch (ctx, 0);
	    add_piece (ctx, ctx->in_reg, addr_or_regnum, size);

	    /* Pop off the address/regnum, and clear the in_reg flag.  */
	    dwarf_expr_pop (ctx);
	    ctx->in_reg = 0;
	  }
	  goto no_push;

	case DW_OP_GNU_uninit:
	  if (op_ptr != op_end)
	    error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
		     "be the very last op."));

	  ctx->initialized = 0;
	  goto no_push;

	default:
	  error (_("Unhandled dwarf expression opcode 0x%x"), op);
	}

      /* Most things push a result value.  */
      dwarf_expr_push (ctx, result);
    no_push:;
    }

  /* Balance the increment done at entry; dwarf_expr_eval asserts
     that the depth returns to its pre-call value.  */
  ctx->recursion_depth--;
  gdb_assert (ctx->recursion_depth >= 0);
}