1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
4 1996, 1997, 1998, 1999, 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
5 2009, 2010, 2011 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdb_string.h"
25 #include "symtab.h"
26 #include "gdbtypes.h"
27 #include "value.h"
28 #include "gdbcore.h"
29 #include "command.h"
30 #include "gdbcmd.h"
31 #include "target.h"
32 #include "language.h"
33 #include "demangle.h"
34 #include "doublest.h"
35 #include "gdb_assert.h"
36 #include "regcache.h"
37 #include "block.h"
38 #include "dfp.h"
39 #include "objfiles.h"
40 #include "valprint.h"
41 #include "cli/cli-decode.h"
42
43 #include "python/python.h"
44
45 #include "tracepoint.h"
46
47 /* Prototypes for exported functions. */
48
49 void _initialize_values (void);
50
51 /* Definition of a user function. */
52 struct internal_function
53 {
54 /* The name of the function. It is a bit odd to have this in the
55 function itself -- the user might use a differently-named
56 convenience variable to hold the function. */
57 char *name;
58
59 /* The handler. */
60 internal_function_fn handler;
61
62 /* User data for the handler. */
63 void *cookie;
64 };
65
66 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
67
68 struct range
69 {
70 /* Lowest offset in the range. */
71 int offset;
72
73 /* Length of the range. */
74 int length;
75 };
76
77 typedef struct range range_s;
78
79 DEF_VEC_O(range_s);
80
81 /* Returns true if the ranges defined by [offset1, offset1+len1) and
82 [offset2, offset2+len2) overlap. */
83
84 static int
85 ranges_overlap (int offset1, int len1,
86 int offset2, int len2)
87 {
88 ULONGEST h, l;
89
90 l = max (offset1, offset2);
91 h = min (offset1 + len1, offset2 + len2);
92 return (l < h);
93 }
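/* Illustrative note (added commentary, not part of the original
   source): with the definition above, two ranges overlap only if they
   share at least one offset; merely touching does not count.  For
   example, ranges_overlap (0, 4, 2, 6) is true because [0, 4) and
   [2, 8) share [2, 4), while ranges_overlap (0, 4, 4, 2) is false
   because [0, 4) and [4, 6) only meet at offset 4.  */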
94
95 /* Returns true if the first argument is strictly less than the
96 second, useful for VEC_lower_bound. We keep ranges sorted by
97 offset and coalesce overlapping and contiguous ranges, so this just
98 compares the starting offset. */
99
100 static int
101 range_lessthan (const range_s *r1, const range_s *r2)
102 {
103 return r1->offset < r2->offset;
104 }
105
106 /* Returns true if RANGES contains any range that overlaps [OFFSET,
107 OFFSET+LENGTH). */
108
109 static int
110 ranges_contain (VEC(range_s) *ranges, int offset, int length)
111 {
112 range_s what;
113 int i;
114
115 what.offset = offset;
116 what.length = length;
117
118 /* We keep ranges sorted by offset and coalesce overlapping and
119 contiguous ranges, so to check if a range list contains a given
120 range, we can do a binary search for the position the given range
121 would be inserted if we only considered the starting OFFSET of
122 ranges. We call that position I. Since we also have LENGTH to
 123 care for (this is a range after all), we need to check if the
124 _previous_ range overlaps the I range. E.g.,
125
126 R
127 |---|
128 |---| |---| |------| ... |--|
129 0 1 2 N
130
131 I=1
132
133 In the case above, the binary search would return `I=1', meaning,
134 this OFFSET should be inserted at position 1, and the current
 135 position 1 should be pushed further (and become 2). But, `0'
136 overlaps with R.
137
 138 Then we need to check whether the range at position I itself
     overlaps the given range.
139 E.g.,
140
141 R
142 |---|
143 |---| |---| |-------| ... |--|
144 0 1 2 N
145
146 I=1
147 */
148
149 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
150
151 if (i > 0)
152 {
153 struct range *bef = VEC_index (range_s, ranges, i - 1);
154
155 if (ranges_overlap (bef->offset, bef->length, offset, length))
156 return 1;
157 }
158
159 if (i < VEC_length (range_s, ranges))
160 {
161 struct range *r = VEC_index (range_s, ranges, i);
162
163 if (ranges_overlap (r->offset, r->length, offset, length))
164 return 1;
165 }
166
167 return 0;
168 }
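/* Worked example (added commentary, not part of the original source):
   with RANGES holding [0, 4) and [8, 12), ranges_contain (ranges, 2, 1)
   binary-searches on the starting offset and gets I=1, then finds that
   the previous range [0, 4) overlaps [2, 3) and returns true.  A query
   of offset 5 and length 2 overlaps neither the previous range nor the
   range at I, so it returns false.  */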
169
170 static struct cmd_list_element *functionlist;
171
172 struct value
173 {
174 /* Type of value; either not an lval, or one of the various
175 different possible kinds of lval. */
176 enum lval_type lval;
177
178 /* Is it modifiable? Only relevant if lval != not_lval. */
179 int modifiable;
180
181 /* Location of value (if lval). */
182 union
183 {
184 /* If lval == lval_memory, this is the address in the inferior.
185 If lval == lval_register, this is the byte offset into the
186 registers structure. */
187 CORE_ADDR address;
188
189 /* Pointer to internal variable. */
190 struct internalvar *internalvar;
191
192 /* If lval == lval_computed, this is a set of function pointers
193 to use to access and describe the value, and a closure pointer
194 for them to use. */
195 struct
196 {
197 struct lval_funcs *funcs; /* Functions to call. */
198 void *closure; /* Closure for those functions to use. */
199 } computed;
200 } location;
201
202 /* Describes offset of a value within lval of a structure in bytes.
203 If lval == lval_memory, this is an offset to the address. If
204 lval == lval_register, this is a further offset from
205 location.address within the registers structure. Note also the
206 member embedded_offset below. */
207 int offset;
208
209 /* Only used for bitfields; number of bits contained in them. */
210 int bitsize;
211
212 /* Only used for bitfields; position of start of field. For
213 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
214 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
215 int bitpos;
216
217 /* Only used for bitfields; the containing value. This allows a
218 single read from the target when displaying multiple
219 bitfields. */
220 struct value *parent;
221
222 /* Frame register value is relative to. This will be described in
223 the lval enum above as "lval_register". */
224 struct frame_id frame_id;
225
226 /* Type of the value. */
227 struct type *type;
228
229 /* If a value represents a C++ object, then the `type' field gives
230 the object's compile-time type. If the object actually belongs
231 to some class derived from `type', perhaps with other base
232 classes and additional members, then `type' is just a subobject
233 of the real thing, and the full object is probably larger than
234 `type' would suggest.
235
236 If `type' is a dynamic class (i.e. one with a vtable), then GDB
237 can actually determine the object's run-time type by looking at
238 the run-time type information in the vtable. When this
239 information is available, we may elect to read in the entire
240 object, for several reasons:
241
242 - When printing the value, the user would probably rather see the
243 full object, not just the limited portion apparent from the
244 compile-time type.
245
246 - If `type' has virtual base classes, then even printing `type'
247 alone may require reaching outside the `type' portion of the
248 object to wherever the virtual base class has been stored.
249
250 When we store the entire object, `enclosing_type' is the run-time
251 type -- the complete object -- and `embedded_offset' is the
252 offset of `type' within that larger type, in bytes. The
253 value_contents() macro takes `embedded_offset' into account, so
254 most GDB code continues to see the `type' portion of the value,
255 just as the inferior would.
256
257 If `type' is a pointer to an object, then `enclosing_type' is a
258 pointer to the object's run-time type, and `pointed_to_offset' is
259 the offset in bytes from the full object to the pointed-to object
260 -- that is, the value `embedded_offset' would have if we followed
261 the pointer and fetched the complete object. (I don't really see
262 the point. Why not just determine the run-time type when you
263 indirect, and avoid the special case? The contents don't matter
264 until you indirect anyway.)
265
266 If we're not doing anything fancy, `enclosing_type' is equal to
267 `type', and `embedded_offset' is zero, so everything works
268 normally. */
269 struct type *enclosing_type;
270 int embedded_offset;
271 int pointed_to_offset;
272
273 /* Values are stored in a chain, so that they can be deleted easily
274 over calls to the inferior. Values assigned to internal
275 variables, put into the value history or exposed to Python are
276 taken off this list. */
277 struct value *next;
278
279 /* Register number if the value is from a register. */
280 short regnum;
281
282 /* If zero, contents of this value are in the contents field. If
283 nonzero, contents are in inferior. If the lval field is lval_memory,
284 the contents are in inferior memory at location.address plus offset.
285 The lval field may also be lval_register.
286
287 WARNING: This field is used by the code which handles watchpoints
288 (see breakpoint.c) to decide whether a particular value can be
289 watched by hardware watchpoints. If the lazy flag is set for
290 some member of a value chain, it is assumed that this member of
291 the chain doesn't need to be watched as part of watching the
292 value itself. This is how GDB avoids watching the entire struct
293 or array when the user wants to watch a single struct member or
294 array element. If you ever change the way lazy flag is set and
295 reset, be sure to consider this use as well! */
296 char lazy;
297
298 /* If nonzero, this is the value of a variable which does not
299 actually exist in the program. */
300 char optimized_out;
301
302 /* If value is a variable, is it initialized or not. */
303 int initialized;
304
305 /* If value is from the stack. If this is set, read_stack will be
306 used instead of read_memory to enable extra caching. */
307 int stack;
308
309 /* Actual contents of the value. Target byte-order. NULL or not
310 valid if lazy is nonzero. */
311 gdb_byte *contents;
312
313 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
314 rather than available, since the common and default case is for a
315 value to be available. This is filled in at value read time. */
316 VEC(range_s) *unavailable;
317
318 /* The number of references to this value. When a value is created,
319 the value chain holds a reference, so REFERENCE_COUNT is 1. If
320 release_value is called, this value is removed from the chain but
321 the caller of release_value now has a reference to this value.
322 The caller must arrange for a call to value_free later. */
323 int reference_count;
324 };
325
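/* Added summary (not part of the original source) of the function
   below: returns non-zero if no byte in [OFFSET, OFFSET + LENGTH) of
   VALUE's contents has been marked unavailable.  The value must
   already have been fetched (it may not be lazy).  */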
326 int
327 value_bytes_available (const struct value *value, int offset, int length)
328 {
329 gdb_assert (!value->lazy);
330
331 return !ranges_contain (value->unavailable, offset, length);
332 }
333
334 void
335 mark_value_bytes_unavailable (struct value *value, int offset, int length)
336 {
337 range_s newr;
338 int i;
339
340 /* Insert the range sorted. If there's overlap or the new range
341 would be contiguous with an existing range, merge. */
342
343 newr.offset = offset;
344 newr.length = length;
345
346 /* Do a binary search for the position the given range would be
347 inserted if we only considered the starting OFFSET of ranges.
348 Call that position I. Since we also have LENGTH to care for
 349 (this is a range after all), we need to check if the _previous_
350 range overlaps the I range. E.g., calling R the new range:
351
352 #1 - overlaps with previous
353
354 R
355 |-...-|
356 |---| |---| |------| ... |--|
357 0 1 2 N
358
359 I=1
360
361 In the case #1 above, the binary search would return `I=1',
362 meaning, this OFFSET should be inserted at position 1, and the
363 current position 1 should be pushed further (and become 2). But,
364 note that `0' overlaps with R, so we want to merge them.
365
366 A similar consideration needs to be taken if the new range would
367 be contiguous with the previous range:
368
369 #2 - contiguous with previous
370
371 R
372 |-...-|
373 |--| |---| |------| ... |--|
374 0 1 2 N
375
376 I=1
377
378 If there's no overlap with the previous range, as in:
379
380 #3 - not overlapping and not contiguous
381
382 R
383 |-...-|
384 |--| |---| |------| ... |--|
385 0 1 2 N
386
387 I=1
388
389 or if I is 0:
390
391 #4 - R is the range with lowest offset
392
393 R
394 |-...-|
395 |--| |---| |------| ... |--|
396 0 1 2 N
397
398 I=0
399
400 ... we just push the new range to I.
401
402 All the 4 cases above need to consider that the new range may
403 also overlap several of the ranges that follow, or that R may be
404 contiguous with the following range, and merge. E.g.,
405
406 #5 - overlapping following ranges
407
408 R
409 |------------------------|
410 |--| |---| |------| ... |--|
411 0 1 2 N
412
413 I=0
414
415 or:
416
417 R
418 |-------|
419 |--| |---| |------| ... |--|
420 0 1 2 N
421
422 I=1
423
424 */
425
426 i = VEC_lower_bound (range_s, value->unavailable, &newr, range_lessthan);
427 if (i > 0)
428 {
 429 struct range *bef = VEC_index (range_s, value->unavailable, i - 1);
430
431 if (ranges_overlap (bef->offset, bef->length, offset, length))
432 {
433 /* #1 */
434 ULONGEST l = min (bef->offset, offset);
435 ULONGEST h = max (bef->offset + bef->length, offset + length);
436
437 bef->offset = l;
438 bef->length = h - l;
439 i--;
440 }
441 else if (offset == bef->offset + bef->length)
442 {
443 /* #2 */
444 bef->length += length;
445 i--;
446 }
447 else
448 {
449 /* #3 */
450 VEC_safe_insert (range_s, value->unavailable, i, &newr);
451 }
452 }
453 else
454 {
455 /* #4 */
456 VEC_safe_insert (range_s, value->unavailable, i, &newr);
457 }
458
459 /* Check whether the ranges following the one we've just added or
460 touched can be folded in (#5 above). */
461 if (i + 1 < VEC_length (range_s, value->unavailable))
462 {
463 struct range *t;
464 struct range *r;
465 int removed = 0;
466 int next = i + 1;
467
468 /* Get the range we just touched. */
469 t = VEC_index (range_s, value->unavailable, i);
470 removed = 0;
471
472 i = next;
473 for (; VEC_iterate (range_s, value->unavailable, i, r); i++)
474 if (r->offset <= t->offset + t->length)
475 {
476 ULONGEST l, h;
477
478 l = min (t->offset, r->offset);
479 h = max (t->offset + t->length, r->offset + r->length);
480
481 t->offset = l;
482 t->length = h - l;
483
484 removed++;
485 }
486 else
487 {
488 /* If we couldn't merge this one, we won't be able to
489 merge following ones either, since the ranges are
490 always sorted by OFFSET. */
491 break;
492 }
493
494 if (removed != 0)
495 VEC_block_remove (range_s, value->unavailable, next, removed);
496 }
497 }
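/* Usage sketch (added commentary, not part of the original source):
   because of the merging above, marking bytes piecewise still yields a
   minimal, sorted range list.  On a freshly allocated value,

     mark_value_bytes_unavailable (val, 0, 4);
     mark_value_bytes_unavailable (val, 4, 4);

   leaves a single [0, 8) entry in VAL->unavailable (case #2,
   contiguous with the previous range), and a further call with offset
   2 and length 10 widens that entry to [0, 12) (case #1, overlap).  */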
498
499 /* Find the first range in RANGES that overlaps the range defined by
 500 OFFSET and LENGTH, starting at element POS in the RANGES vector.
 501 Returns the index into RANGES where such an overlapping range was
502 found, or -1 if none was found. */
503
504 static int
505 find_first_range_overlap (VEC(range_s) *ranges, int pos,
506 int offset, int length)
507 {
508 range_s *r;
509 int i;
510
511 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
512 if (ranges_overlap (r->offset, r->length, offset, length))
513 return i;
514
515 return -1;
516 }
517
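/* Added summary (not part of the original source) of the function
   below: compare LENGTH bytes of VAL1 starting at OFFSET1 with LENGTH
   bytes of VAL2 starting at OFFSET2.  The result is true only if the
   bytes available in both values compare equal and the unavailable
   sub-ranges fall at the same relative positions; a byte available in
   one value but not in the other makes the result false.  */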
518 int
519 value_available_contents_eq (const struct value *val1, int offset1,
520 const struct value *val2, int offset2,
521 int length)
522 {
523 int org_len = length;
524 int org_offset1 = offset1;
525 int org_offset2 = offset2;
526 int idx1 = 0, idx2 = 0;
527 int prev_avail;
528
529 /* This routine is used by printing routines, where we should
530 already have read the value. Note that we only know whether a
531 value chunk is available if we've tried to read it. */
532 gdb_assert (!val1->lazy && !val2->lazy);
533
534 /* The offset from either ORG_OFFSET1 or ORG_OFFSET2 where the
535 available contents we haven't compared yet start. */
536 prev_avail = 0;
537
538 while (length > 0)
539 {
540 range_s *r1, *r2;
541 ULONGEST l1, h1;
542 ULONGEST l2, h2;
543
544 idx1 = find_first_range_overlap (val1->unavailable, idx1,
545 offset1, length);
546 idx2 = find_first_range_overlap (val2->unavailable, idx2,
547 offset2, length);
548
549 /* The usual case is for both values to be completely available. */
550 if (idx1 == -1 && idx2 == -1)
551 return (memcmp (val1->contents + org_offset1 + prev_avail,
552 val2->contents + org_offset2 + prev_avail,
553 org_len - prev_avail) == 0);
 554 /* The contents only compare equal if the available sets match as
555 well. */
556 else if (idx1 == -1 || idx2 == -1)
557 return 0;
558
559 gdb_assert (idx1 != -1 && idx2 != -1);
560
561 r1 = VEC_index (range_s, val1->unavailable, idx1);
562 r2 = VEC_index (range_s, val2->unavailable, idx2);
563
564 /* Get the unavailable windows intersected by the incoming
565 ranges. The first and last ranges that overlap the argument
 566 range may be wider than the incoming argument ranges. */
567 l1 = max (offset1, r1->offset);
568 h1 = min (offset1 + length, r1->offset + r1->length);
569
570 l2 = max (offset2, r2->offset);
571 h2 = min (offset2 + length, r2->offset + r2->length);
572
573 /* Make them relative to the respective start offsets, so we can
574 compare them for equality. */
575 l1 -= offset1;
576 h1 -= offset1;
577
578 l2 -= offset2;
579 h2 -= offset2;
580
581 /* Different availability, no match. */
582 if (l1 != l2 || h1 != h2)
583 return 0;
584
585 /* Compare the _available_ contents. */
586 if (memcmp (val1->contents + org_offset1 + prev_avail,
587 val2->contents + org_offset2 + prev_avail,
588 l2 - prev_avail) != 0)
589 return 0;
590
591 prev_avail += h1;
592 length -= h1;
593 offset1 += h1;
594 offset2 += h1;
595 }
596
597 return 1;
598 }
599
600 /* Prototypes for local functions. */
601
602 static void show_values (char *, int);
603
604 static void show_convenience (char *, int);
605
606
607 /* The value-history records all the values printed
608 by print commands during this session. Each chunk
609 records 60 consecutive values. The first chunk on
610 the chain records the most recent values.
611 The total number of values is in value_history_count. */
612
613 #define VALUE_HISTORY_CHUNK 60
614
615 struct value_history_chunk
616 {
617 struct value_history_chunk *next;
618 struct value *values[VALUE_HISTORY_CHUNK];
619 };
620
621 /* Chain of chunks now in use. */
622
623 static struct value_history_chunk *value_history_chain;
624
625 static int value_history_count; /* Abs number of last entry stored. */
626
627 \f
628 /* List of all value objects currently allocated
629 (except for those released by calls to release_value)
630 This is so they can be freed after each command. */
631
632 static struct value *all_values;
633
634 /* Allocate a lazy value for type TYPE. Its actual content is
635 "lazily" allocated too: the content field of the return value is
636 NULL; it will be allocated when it is fetched from the target. */
637
638 struct value *
639 allocate_value_lazy (struct type *type)
640 {
641 struct value *val;
642
643 /* Call check_typedef on our type to make sure that, if TYPE
644 is a TYPE_CODE_TYPEDEF, its length is set to the length
645 of the target type instead of zero. However, we do not
646 replace the typedef type by the target type, because we want
647 to keep the typedef in order to be able to set the VAL's type
648 description correctly. */
649 check_typedef (type);
650
651 val = (struct value *) xzalloc (sizeof (struct value));
652 val->contents = NULL;
653 val->next = all_values;
654 all_values = val;
655 val->type = type;
656 val->enclosing_type = type;
657 VALUE_LVAL (val) = not_lval;
658 val->location.address = 0;
659 VALUE_FRAME_ID (val) = null_frame_id;
660 val->offset = 0;
661 val->bitpos = 0;
662 val->bitsize = 0;
663 VALUE_REGNUM (val) = -1;
664 val->lazy = 1;
665 val->optimized_out = 0;
666 val->embedded_offset = 0;
667 val->pointed_to_offset = 0;
668 val->modifiable = 1;
669 val->initialized = 1; /* Default to initialized. */
670
671 /* Values start out on the all_values chain. */
672 val->reference_count = 1;
673
674 return val;
675 }
676
677 /* Allocate the contents of VAL if it has not been allocated yet. */
678
679 void
680 allocate_value_contents (struct value *val)
681 {
682 if (!val->contents)
683 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
684 }
685
686 /* Allocate a value and its contents for type TYPE. */
687
688 struct value *
689 allocate_value (struct type *type)
690 {
691 struct value *val = allocate_value_lazy (type);
692
693 allocate_value_contents (val);
694 val->lazy = 0;
695 return val;
696 }
697
698 /* Allocate a value that has the correct length
699 for COUNT repetitions of type TYPE. */
700
701 struct value *
702 allocate_repeat_value (struct type *type, int count)
703 {
704 int low_bound = current_language->string_lower_bound; /* ??? */
705 /* FIXME-type-allocation: need a way to free this type when we are
706 done with it. */
707 struct type *array_type
708 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
709
710 return allocate_value (array_type);
711 }
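/* Illustrative note (added commentary, not part of the original
   source): allocate_repeat_value (t, 3) builds an array type running
   from the language's string lower bound to that bound plus 2 -- for
   C, effectively "t[3]" -- and allocates a value of that array type.  */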
712
713 struct value *
714 allocate_computed_value (struct type *type,
715 struct lval_funcs *funcs,
716 void *closure)
717 {
718 struct value *v = allocate_value_lazy (type);
719
720 VALUE_LVAL (v) = lval_computed;
721 v->location.computed.funcs = funcs;
722 v->location.computed.closure = closure;
723
724 return v;
725 }
726
727 /* Accessor methods. */
728
729 struct value *
730 value_next (struct value *value)
731 {
732 return value->next;
733 }
734
735 struct type *
736 value_type (const struct value *value)
737 {
738 return value->type;
739 }
740 void
741 deprecated_set_value_type (struct value *value, struct type *type)
742 {
743 value->type = type;
744 }
745
746 int
747 value_offset (const struct value *value)
748 {
749 return value->offset;
750 }
751 void
752 set_value_offset (struct value *value, int offset)
753 {
754 value->offset = offset;
755 }
756
757 int
758 value_bitpos (const struct value *value)
759 {
760 return value->bitpos;
761 }
762 void
763 set_value_bitpos (struct value *value, int bit)
764 {
765 value->bitpos = bit;
766 }
767
768 int
769 value_bitsize (const struct value *value)
770 {
771 return value->bitsize;
772 }
773 void
774 set_value_bitsize (struct value *value, int bit)
775 {
776 value->bitsize = bit;
777 }
778
779 struct value *
780 value_parent (struct value *value)
781 {
782 return value->parent;
783 }
784
785 gdb_byte *
786 value_contents_raw (struct value *value)
787 {
788 allocate_value_contents (value);
789 return value->contents + value->embedded_offset;
790 }
791
792 gdb_byte *
793 value_contents_all_raw (struct value *value)
794 {
795 allocate_value_contents (value);
796 return value->contents;
797 }
798
799 struct type *
800 value_enclosing_type (struct value *value)
801 {
802 return value->enclosing_type;
803 }
804
805 static void
806 require_not_optimized_out (const struct value *value)
807 {
808 if (value->optimized_out)
809 error (_("value has been optimized out"));
810 }
811
812 static void
813 require_available (const struct value *value)
814 {
815 if (!VEC_empty (range_s, value->unavailable))
816 error (_("value is not available"));
817 }
818
819 const gdb_byte *
820 value_contents_for_printing (struct value *value)
821 {
822 if (value->lazy)
823 value_fetch_lazy (value);
824 return value->contents;
825 }
826
827 const gdb_byte *
828 value_contents_for_printing_const (const struct value *value)
829 {
830 gdb_assert (!value->lazy);
831 return value->contents;
832 }
833
834 const gdb_byte *
835 value_contents_all (struct value *value)
836 {
837 const gdb_byte *result = value_contents_for_printing (value);
838 require_not_optimized_out (value);
839 require_available (value);
840 return result;
841 }
842
843 int
844 value_lazy (struct value *value)
845 {
846 return value->lazy;
847 }
848
849 void
850 set_value_lazy (struct value *value, int val)
851 {
852 value->lazy = val;
853 }
854
855 int
856 value_stack (struct value *value)
857 {
858 return value->stack;
859 }
860
861 void
862 set_value_stack (struct value *value, int val)
863 {
864 value->stack = val;
865 }
866
867 const gdb_byte *
868 value_contents (struct value *value)
869 {
870 const gdb_byte *result = value_contents_writeable (value);
871 require_not_optimized_out (value);
872 require_available (value);
873 return result;
874 }
875
876 gdb_byte *
877 value_contents_writeable (struct value *value)
878 {
879 if (value->lazy)
880 value_fetch_lazy (value);
881 return value_contents_raw (value);
882 }
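/* Added summary (not part of the original source) of the contents
   accessors above: the "_raw" variants return the internal buffer
   without fetching a lazy value (value_contents_raw points at
   embedded_offset, value_contents_all_raw at the whole enclosing
   object), while value_contents, value_contents_writeable and
   value_contents_all fetch lazy values first; value_contents and
   value_contents_all additionally error out if the value is optimized
   out or partly unavailable.  */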
883
884 /* Return non-zero if VAL1 and VAL2 have the same contents. Note that
885 this function is different from value_equal; in C the operator ==
886 can return 0 even if the two values being compared are equal. */
887
888 int
889 value_contents_equal (struct value *val1, struct value *val2)
890 {
891 struct type *type1;
892 struct type *type2;
893 int len;
894
895 type1 = check_typedef (value_type (val1));
896 type2 = check_typedef (value_type (val2));
897 len = TYPE_LENGTH (type1);
898 if (len != TYPE_LENGTH (type2))
899 return 0;
900
901 return (memcmp (value_contents (val1), value_contents (val2), len) == 0);
902 }
903
904 int
905 value_optimized_out (struct value *value)
906 {
907 return value->optimized_out;
908 }
909
910 void
911 set_value_optimized_out (struct value *value, int val)
912 {
913 value->optimized_out = val;
914 }
915
916 int
917 value_entirely_optimized_out (const struct value *value)
918 {
919 if (!value->optimized_out)
920 return 0;
921 if (value->lval != lval_computed
922 || !value->location.computed.funcs->check_any_valid)
923 return 1;
924 return !value->location.computed.funcs->check_any_valid (value);
925 }
926
927 int
928 value_bits_valid (const struct value *value, int offset, int length)
929 {
930 if (value == NULL || !value->optimized_out)
931 return 1;
932 if (value->lval != lval_computed
933 || !value->location.computed.funcs->check_validity)
934 return 0;
935 return value->location.computed.funcs->check_validity (value, offset,
936 length);
937 }
938
939 int
940 value_bits_synthetic_pointer (const struct value *value,
941 int offset, int length)
942 {
943 if (value == NULL || value->lval != lval_computed
944 || !value->location.computed.funcs->check_synthetic_pointer)
945 return 0;
946 return value->location.computed.funcs->check_synthetic_pointer (value,
947 offset,
948 length);
949 }
950
951 int
952 value_embedded_offset (struct value *value)
953 {
954 return value->embedded_offset;
955 }
956
957 void
958 set_value_embedded_offset (struct value *value, int val)
959 {
960 value->embedded_offset = val;
961 }
962
963 int
964 value_pointed_to_offset (struct value *value)
965 {
966 return value->pointed_to_offset;
967 }
968
969 void
970 set_value_pointed_to_offset (struct value *value, int val)
971 {
972 value->pointed_to_offset = val;
973 }
974
975 struct lval_funcs *
976 value_computed_funcs (struct value *v)
977 {
978 gdb_assert (VALUE_LVAL (v) == lval_computed);
979
980 return v->location.computed.funcs;
981 }
982
983 void *
984 value_computed_closure (const struct value *v)
985 {
986 gdb_assert (v->lval == lval_computed);
987
988 return v->location.computed.closure;
989 }
990
991 enum lval_type *
992 deprecated_value_lval_hack (struct value *value)
993 {
994 return &value->lval;
995 }
996
997 CORE_ADDR
998 value_address (const struct value *value)
999 {
1000 if (value->lval == lval_internalvar
1001 || value->lval == lval_internalvar_component)
1002 return 0;
1003 return value->location.address + value->offset;
1004 }
1005
1006 CORE_ADDR
1007 value_raw_address (struct value *value)
1008 {
1009 if (value->lval == lval_internalvar
1010 || value->lval == lval_internalvar_component)
1011 return 0;
1012 return value->location.address;
1013 }
1014
1015 void
1016 set_value_address (struct value *value, CORE_ADDR addr)
1017 {
1018 gdb_assert (value->lval != lval_internalvar
1019 && value->lval != lval_internalvar_component);
1020 value->location.address = addr;
1021 }
1022
1023 struct internalvar **
1024 deprecated_value_internalvar_hack (struct value *value)
1025 {
1026 return &value->location.internalvar;
1027 }
1028
1029 struct frame_id *
1030 deprecated_value_frame_id_hack (struct value *value)
1031 {
1032 return &value->frame_id;
1033 }
1034
1035 short *
1036 deprecated_value_regnum_hack (struct value *value)
1037 {
1038 return &value->regnum;
1039 }
1040
1041 int
1042 deprecated_value_modifiable (struct value *value)
1043 {
1044 return value->modifiable;
1045 }
1046 void
1047 deprecated_set_value_modifiable (struct value *value, int modifiable)
1048 {
1049 value->modifiable = modifiable;
1050 }
1051 \f
1052 /* Return a mark in the value chain. All values allocated after the
1053 mark is obtained (except for those released) are subject to being freed
1054 if a subsequent value_free_to_mark is passed the mark. */
1055 struct value *
1056 value_mark (void)
1057 {
1058 return all_values;
1059 }
1060
1061 /* Take a reference to VAL. VAL will not be deallocated until all
1062 references are released. */
1063
1064 void
1065 value_incref (struct value *val)
1066 {
1067 val->reference_count++;
1068 }
1069
1070 /* Release a reference to VAL, which was acquired with value_incref.
1071 This function is also called to deallocate values from the value
1072 chain. */
1073
1074 void
1075 value_free (struct value *val)
1076 {
1077 if (val)
1078 {
1079 gdb_assert (val->reference_count > 0);
1080 val->reference_count--;
1081 if (val->reference_count > 0)
1082 return;
1083
1084 /* If there's an associated parent value, drop our reference to
1085 it. */
1086 if (val->parent != NULL)
1087 value_free (val->parent);
1088
1089 if (VALUE_LVAL (val) == lval_computed)
1090 {
1091 struct lval_funcs *funcs = val->location.computed.funcs;
1092
1093 if (funcs->free_closure)
1094 funcs->free_closure (val);
1095 }
1096
1097 xfree (val->contents);
1098 VEC_free (range_s, val->unavailable);
1099 }
1100 xfree (val);
1101 }
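/* Added usage note (not part of the original source): a value starts
   out on the all_values chain with a reference count of 1 (see
   allocate_value_lazy).  Code that wants to keep a value beyond the
   next free_all_values typically calls release_value to take it off
   the chain and later value_free to drop that reference; any extra
   long-lived reference is bracketed by value_incref and value_free.  */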
1102
1103 /* Free all values allocated since MARK was obtained by value_mark
1104 (except for those released). */
1105 void
1106 value_free_to_mark (struct value *mark)
1107 {
1108 struct value *val;
1109 struct value *next;
1110
1111 for (val = all_values; val && val != mark; val = next)
1112 {
1113 next = val->next;
1114 value_free (val);
1115 }
1116 all_values = val;
1117 }
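/* Usage sketch (added commentary, not part of the original source):
   code that creates temporary values can bracket the work with

     struct value *mark = value_mark ();
     ... allocate and use temporary values ...
     value_free_to_mark (mark);

   which frees everything allocated after the mark, except for values
   that were explicitly released in the meantime.  */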
1118
1119 /* Free all the values that have been allocated (except for those released).
1120 Call after each command, successful or not.
1121 In practice this is called before each command, which is sufficient. */
1122
1123 void
1124 free_all_values (void)
1125 {
1126 struct value *val;
1127 struct value *next;
1128
1129 for (val = all_values; val; val = next)
1130 {
1131 next = val->next;
1132 value_free (val);
1133 }
1134
1135 all_values = 0;
1136 }
1137
1138 /* Frees all the elements in a chain of values. */
1139
1140 void
1141 free_value_chain (struct value *v)
1142 {
1143 struct value *next;
1144
1145 for (; v; v = next)
1146 {
1147 next = value_next (v);
1148 value_free (v);
1149 }
1150 }
1151
1152 /* Remove VAL from the chain all_values
1153 so it will not be freed automatically. */
1154
1155 void
1156 release_value (struct value *val)
1157 {
1158 struct value *v;
1159
1160 if (all_values == val)
1161 {
1162 all_values = val->next;
1163 val->next = NULL;
1164 return;
1165 }
1166
1167 for (v = all_values; v; v = v->next)
1168 {
1169 if (v->next == val)
1170 {
1171 v->next = val->next;
1172 val->next = NULL;
1173 break;
1174 }
1175 }
1176 }
1177
 1178 /* Release all values up to MARK. */
1179 struct value *
1180 value_release_to_mark (struct value *mark)
1181 {
1182 struct value *val;
1183 struct value *next;
1184
1185 for (val = next = all_values; next; next = next->next)
1186 if (next->next == mark)
1187 {
1188 all_values = next->next;
1189 next->next = NULL;
1190 return val;
1191 }
1192 all_values = 0;
1193 return val;
1194 }
1195
1196 /* Return a copy of the value ARG.
1197 It contains the same contents, for same memory address,
1198 but it's a different block of storage. */
1199
1200 struct value *
1201 value_copy (struct value *arg)
1202 {
1203 struct type *encl_type = value_enclosing_type (arg);
1204 struct value *val;
1205
1206 if (value_lazy (arg))
1207 val = allocate_value_lazy (encl_type);
1208 else
1209 val = allocate_value (encl_type);
1210 val->type = arg->type;
1211 VALUE_LVAL (val) = VALUE_LVAL (arg);
1212 val->location = arg->location;
1213 val->offset = arg->offset;
1214 val->bitpos = arg->bitpos;
1215 val->bitsize = arg->bitsize;
1216 VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
1217 VALUE_REGNUM (val) = VALUE_REGNUM (arg);
1218 val->lazy = arg->lazy;
1219 val->optimized_out = arg->optimized_out;
1220 val->embedded_offset = value_embedded_offset (arg);
1221 val->pointed_to_offset = arg->pointed_to_offset;
1222 val->modifiable = arg->modifiable;
1223 if (!value_lazy (val))
1224 {
1225 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1226 TYPE_LENGTH (value_enclosing_type (arg)));
1227
1228 }
1229 val->unavailable = VEC_copy (range_s, arg->unavailable);
1230 val->parent = arg->parent;
1231 if (val->parent)
1232 value_incref (val->parent);
1233 if (VALUE_LVAL (val) == lval_computed)
1234 {
1235 struct lval_funcs *funcs = val->location.computed.funcs;
1236
1237 if (funcs->copy_closure)
1238 val->location.computed.closure = funcs->copy_closure (val);
1239 }
1240 return val;
1241 }
1242
1243 /* Return a version of ARG that is non-lvalue. */
1244
1245 struct value *
1246 value_non_lval (struct value *arg)
1247 {
1248 if (VALUE_LVAL (arg) != not_lval)
1249 {
1250 struct type *enc_type = value_enclosing_type (arg);
1251 struct value *val = allocate_value (enc_type);
1252
1253 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1254 TYPE_LENGTH (enc_type));
1255 val->type = arg->type;
1256 set_value_embedded_offset (val, value_embedded_offset (arg));
1257 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1258 return val;
1259 }
1260 return arg;
1261 }
1262
1263 void
1264 set_value_component_location (struct value *component,
1265 const struct value *whole)
1266 {
1267 if (whole->lval == lval_internalvar)
1268 VALUE_LVAL (component) = lval_internalvar_component;
1269 else
1270 VALUE_LVAL (component) = whole->lval;
1271
1272 component->location = whole->location;
1273 if (whole->lval == lval_computed)
1274 {
1275 struct lval_funcs *funcs = whole->location.computed.funcs;
1276
1277 if (funcs->copy_closure)
1278 component->location.computed.closure = funcs->copy_closure (whole);
1279 }
1280 }
1281
1282 \f
1283 /* Access to the value history. */
1284
1285 /* Record a new value in the value history.
1286 Returns the absolute history index of the entry.
1287 Result of -1 indicates the value was not saved; otherwise it is the
1288 value history index of this new item. */
1289
1290 int
1291 record_latest_value (struct value *val)
1292 {
1293 int i;
1294
1295 /* We don't want this value to have anything to do with the inferior anymore.
1296 In particular, "set $1 = 50" should not affect the variable from which
1297 the value was taken, and fast watchpoints should be able to assume that
1298 a value on the value history never changes. */
1299 if (value_lazy (val))
1300 value_fetch_lazy (val);
1301 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1302 from. This is a bit dubious, because then *&$1 does not just return $1
1303 but the current contents of that location. c'est la vie... */
1304 val->modifiable = 0;
1305 release_value (val);
1306
1307 /* Here we treat value_history_count as origin-zero
1308 and applying to the value being stored now. */
1309
1310 i = value_history_count % VALUE_HISTORY_CHUNK;
1311 if (i == 0)
1312 {
1313 struct value_history_chunk *new
1314 = (struct value_history_chunk *)
1315
1316 xmalloc (sizeof (struct value_history_chunk));
1317 memset (new->values, 0, sizeof new->values);
1318 new->next = value_history_chain;
1319 value_history_chain = new;
1320 }
1321
1322 value_history_chain->values[i] = val;
1323
1324 /* Now we regard value_history_count as origin-one
1325 and applying to the value just stored. */
1326
1327 return ++value_history_count;
1328 }
1329
1330 /* Return a copy of the value in the history with sequence number NUM. */
1331
1332 struct value *
1333 access_value_history (int num)
1334 {
1335 struct value_history_chunk *chunk;
1336 int i;
1337 int absnum = num;
1338
1339 if (absnum <= 0)
1340 absnum += value_history_count;
1341
1342 if (absnum <= 0)
1343 {
1344 if (num == 0)
1345 error (_("The history is empty."));
1346 else if (num == 1)
1347 error (_("There is only one value in the history."));
1348 else
1349 error (_("History does not go back to $$%d."), -num);
1350 }
1351 if (absnum > value_history_count)
1352 error (_("History has not yet reached $%d."), absnum);
1353
1354 absnum--;
1355
1356 /* Now absnum is always absolute and origin zero. */
1357
1358 chunk = value_history_chain;
1359 for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
1360 - absnum / VALUE_HISTORY_CHUNK;
1361 i > 0; i--)
1362 chunk = chunk->next;
1363
1364 return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
1365 }
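/* Worked example (added commentary, not part of the original source):
   with VALUE_HISTORY_CHUNK == 60 and value_history_count == 130, a
   request for $5 gives absnum == 4 after the decrement above; the loop
   then walks (129 / 60) - (4 / 60) == 2 chunks down the chain (the
   head chunk holds the most recent values), and the result is a copy
   of values[4] in the oldest chunk.  */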
1366
1367 static void
1368 show_values (char *num_exp, int from_tty)
1369 {
1370 int i;
1371 struct value *val;
1372 static int num = 1;
1373
1374 if (num_exp)
1375 {
1376 /* "show values +" should print from the stored position.
1377 "show values <exp>" should print around value number <exp>. */
1378 if (num_exp[0] != '+' || num_exp[1] != '\0')
1379 num = parse_and_eval_long (num_exp) - 5;
1380 }
1381 else
1382 {
1383 /* "show values" means print the last 10 values. */
1384 num = value_history_count - 9;
1385 }
1386
1387 if (num <= 0)
1388 num = 1;
1389
1390 for (i = num; i < num + 10 && i <= value_history_count; i++)
1391 {
1392 struct value_print_options opts;
1393
1394 val = access_value_history (i);
1395 printf_filtered (("$%d = "), i);
1396 get_user_print_options (&opts);
1397 value_print (val, gdb_stdout, &opts);
1398 printf_filtered (("\n"));
1399 }
1400
1401 /* The next "show values +" should start after what we just printed. */
1402 num += 10;
1403
1404 /* Hitting just return after this command should do the same thing as
1405 "show values +". If num_exp is null, this is unnecessary, since
1406 "show values +" is not useful after "show values". */
1407 if (from_tty && num_exp)
1408 {
1409 num_exp[0] = '+';
1410 num_exp[1] = '\0';
1411 }
1412 }
1413 \f
1414 /* Internal variables. These are variables within the debugger
1415 that hold values assigned by debugger commands.
1416 The user refers to them with a '$' prefix
1417 that does not appear in the variable names stored internally. */
1418
1419 struct internalvar
1420 {
1421 struct internalvar *next;
1422 char *name;
1423
1424 /* We support various different kinds of content of an internal variable.
1425 enum internalvar_kind specifies the kind, and union internalvar_data
1426 provides the data associated with this particular kind. */
1427
1428 enum internalvar_kind
1429 {
1430 /* The internal variable is empty. */
1431 INTERNALVAR_VOID,
1432
1433 /* The value of the internal variable is provided directly as
1434 a GDB value object. */
1435 INTERNALVAR_VALUE,
1436
1437 /* A fresh value is computed via a call-back routine on every
1438 access to the internal variable. */
1439 INTERNALVAR_MAKE_VALUE,
1440
1441 /* The internal variable holds a GDB internal convenience function. */
1442 INTERNALVAR_FUNCTION,
1443
1444 /* The variable holds an integer value. */
1445 INTERNALVAR_INTEGER,
1446
1447 /* The variable holds a GDB-provided string. */
1448 INTERNALVAR_STRING,
1449
1450 } kind;
1451
1452 union internalvar_data
1453 {
1454 /* A value object used with INTERNALVAR_VALUE. */
1455 struct value *value;
1456
1457 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1458 internalvar_make_value make_value;
1459
1460 /* The internal function used with INTERNALVAR_FUNCTION. */
1461 struct
1462 {
1463 struct internal_function *function;
1464 /* True if this is the canonical name for the function. */
1465 int canonical;
1466 } fn;
1467
1468 /* An integer value used with INTERNALVAR_INTEGER. */
1469 struct
1470 {
1471 /* If type is non-NULL, it will be used as the type to generate
1472 a value for this internal variable. If type is NULL, a default
1473 integer type for the architecture is used. */
1474 struct type *type;
1475 LONGEST val;
1476 } integer;
1477
1478 /* A string value used with INTERNALVAR_STRING. */
1479 char *string;
1480 } u;
1481 };
1482
1483 static struct internalvar *internalvars;
1484
1485 /* If the variable does not already exist create it and give it the
1486 value given. If no value is given then the default is zero. */
1487 static void
1488 init_if_undefined_command (char* args, int from_tty)
1489 {
1490 struct internalvar* intvar;
1491
1492 /* Parse the expression - this is taken from set_command(). */
1493 struct expression *expr = parse_expression (args);
1494 register struct cleanup *old_chain =
1495 make_cleanup (free_current_contents, &expr);
1496
1497 /* Validate the expression.
1498 Was the expression an assignment?
1499 Or even an expression at all? */
1500 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1501 error (_("Init-if-undefined requires an assignment expression."));
1502
1503 /* Extract the variable from the parsed expression.
1504 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1505 if (expr->elts[1].opcode != OP_INTERNALVAR)
1506 error (_("The first parameter to init-if-undefined "
1507 "should be a GDB variable."));
1508 intvar = expr->elts[2].internalvar;
1509
1510 /* Only evaluate the expression if the lvalue is void.
 1511 This may still fail if the expression is invalid. */
1512 if (intvar->kind == INTERNALVAR_VOID)
1513 evaluate_expression (expr);
1514
1515 do_cleanups (old_chain);
1516 }
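/* Usage note (added commentary, not part of the original source; the
   variable name is just an example): this implements the
   "init-if-undefined" command, so

     (gdb) init-if-undefined $breakcount = 0

   assigns 0 to $breakcount only while the convenience variable is
   still void; if it already holds a value, the expression is parsed
   but not evaluated.  */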
1517
1518
1519 /* Look up an internal variable with name NAME. NAME should not
1520 normally include a dollar sign.
1521
1522 If the specified internal variable does not exist,
1523 the return value is NULL. */
1524
1525 struct internalvar *
1526 lookup_only_internalvar (const char *name)
1527 {
1528 struct internalvar *var;
1529
1530 for (var = internalvars; var; var = var->next)
1531 if (strcmp (var->name, name) == 0)
1532 return var;
1533
1534 return NULL;
1535 }
1536
1537
1538 /* Create an internal variable with name NAME and with a void value.
1539 NAME should not normally include a dollar sign. */
1540
1541 struct internalvar *
1542 create_internalvar (const char *name)
1543 {
1544 struct internalvar *var;
1545
1546 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
1547 var->name = concat (name, (char *)NULL);
1548 var->kind = INTERNALVAR_VOID;
1549 var->next = internalvars;
1550 internalvars = var;
1551 return var;
1552 }
1553
1554 /* Create an internal variable with name NAME and register FUN as the
1555 function that value_of_internalvar uses to create a value whenever
1556 this variable is referenced. NAME should not normally include a
1557 dollar sign. */
1558
1559 struct internalvar *
1560 create_internalvar_type_lazy (char *name, internalvar_make_value fun)
1561 {
1562 struct internalvar *var = create_internalvar (name);
1563
1564 var->kind = INTERNALVAR_MAKE_VALUE;
1565 var->u.make_value = fun;
1566 return var;
1567 }
1568
1569 /* Look up an internal variable with name NAME. NAME should not
1570 normally include a dollar sign.
1571
1572 If the specified internal variable does not exist,
1573 one is created, with a void value. */
1574
1575 struct internalvar *
1576 lookup_internalvar (const char *name)
1577 {
1578 struct internalvar *var;
1579
1580 var = lookup_only_internalvar (name);
1581 if (var)
1582 return var;
1583
1584 return create_internalvar (name);
1585 }
1586
1587 /* Return current value of internal variable VAR. For variables that
1588 are not inherently typed, use a value type appropriate for GDBARCH. */
1589
1590 struct value *
1591 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
1592 {
1593 struct value *val;
1594 struct trace_state_variable *tsv;
1595
1596 /* If there is a trace state variable of the same name, assume that
1597 is what we really want to see. */
1598 tsv = find_trace_state_variable (var->name);
1599 if (tsv)
1600 {
1601 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
1602 &(tsv->value));
1603 if (tsv->value_known)
1604 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
1605 tsv->value);
1606 else
1607 val = allocate_value (builtin_type (gdbarch)->builtin_void);
1608 return val;
1609 }
1610
1611 switch (var->kind)
1612 {
1613 case INTERNALVAR_VOID:
1614 val = allocate_value (builtin_type (gdbarch)->builtin_void);
1615 break;
1616
1617 case INTERNALVAR_FUNCTION:
1618 val = allocate_value (builtin_type (gdbarch)->internal_fn);
1619 break;
1620
1621 case INTERNALVAR_INTEGER:
1622 if (!var->u.integer.type)
1623 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
1624 var->u.integer.val);
1625 else
1626 val = value_from_longest (var->u.integer.type, var->u.integer.val);
1627 break;
1628
1629 case INTERNALVAR_STRING:
1630 val = value_cstring (var->u.string, strlen (var->u.string),
1631 builtin_type (gdbarch)->builtin_char);
1632 break;
1633
1634 case INTERNALVAR_VALUE:
1635 val = value_copy (var->u.value);
1636 if (value_lazy (val))
1637 value_fetch_lazy (val);
1638 break;
1639
1640 case INTERNALVAR_MAKE_VALUE:
1641 val = (*var->u.make_value) (gdbarch, var);
1642 break;
1643
1644 default:
1645 internal_error (__FILE__, __LINE__, _("bad kind"));
1646 }
1647
1648 /* Change the VALUE_LVAL to lval_internalvar so that future operations
1649 on this value go back to affect the original internal variable.
1650
1651 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
 1652 no underlying modifiable state in the internal variable.
1653
1654 Likewise, if the variable's value is a computed lvalue, we want
1655 references to it to produce another computed lvalue, where
1656 references and assignments actually operate through the
1657 computed value's functions.
1658
1659 This means that internal variables with computed values
1660 behave a little differently from other internal variables:
1661 assignments to them don't just replace the previous value
1662 altogether. At the moment, this seems like the behavior we
1663 want. */
1664
1665 if (var->kind != INTERNALVAR_MAKE_VALUE
1666 && val->lval != lval_computed)
1667 {
1668 VALUE_LVAL (val) = lval_internalvar;
1669 VALUE_INTERNALVAR (val) = var;
1670 }
1671
1672 return val;
1673 }
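/* Added note (not part of the original source): as the code above
   shows, a trace state variable with the same name takes precedence
   over the convenience variable itself, and variables backed by
   INTERNALVAR_MAKE_VALUE or by computed lvalues are not given
   lval_internalvar, so assignments to them do not simply replace the
   stored value.  */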
1674
1675 int
1676 get_internalvar_integer (struct internalvar *var, LONGEST *result)
1677 {
1678 switch (var->kind)
1679 {
1680 case INTERNALVAR_INTEGER:
1681 *result = var->u.integer.val;
1682 return 1;
1683
1684 default:
1685 return 0;
1686 }
1687 }
1688
1689 static int
1690 get_internalvar_function (struct internalvar *var,
1691 struct internal_function **result)
1692 {
1693 switch (var->kind)
1694 {
1695 case INTERNALVAR_FUNCTION:
1696 *result = var->u.fn.function;
1697 return 1;
1698
1699 default:
1700 return 0;
1701 }
1702 }
1703
1704 void
1705 set_internalvar_component (struct internalvar *var, int offset, int bitpos,
1706 int bitsize, struct value *newval)
1707 {
1708 gdb_byte *addr;
1709
1710 switch (var->kind)
1711 {
1712 case INTERNALVAR_VALUE:
1713 addr = value_contents_writeable (var->u.value);
1714
1715 if (bitsize)
1716 modify_field (value_type (var->u.value), addr + offset,
1717 value_as_long (newval), bitpos, bitsize);
1718 else
1719 memcpy (addr + offset, value_contents (newval),
1720 TYPE_LENGTH (value_type (newval)));
1721 break;
1722
1723 default:
1724 /* We can never get a component of any other kind. */
1725 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
1726 }
1727 }
1728
1729 void
1730 set_internalvar (struct internalvar *var, struct value *val)
1731 {
1732 enum internalvar_kind new_kind;
1733 union internalvar_data new_data = { 0 };
1734
1735 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
1736 error (_("Cannot overwrite convenience function %s"), var->name);
1737
1738 /* Prepare new contents. */
1739 switch (TYPE_CODE (check_typedef (value_type (val))))
1740 {
1741 case TYPE_CODE_VOID:
1742 new_kind = INTERNALVAR_VOID;
1743 break;
1744
1745 case TYPE_CODE_INTERNAL_FUNCTION:
1746 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
1747 new_kind = INTERNALVAR_FUNCTION;
1748 get_internalvar_function (VALUE_INTERNALVAR (val),
1749 &new_data.fn.function);
1750 /* Copies created here are never canonical. */
1751 break;
1752
1753 case TYPE_CODE_INT:
1754 new_kind = INTERNALVAR_INTEGER;
1755 new_data.integer.type = value_type (val);
1756 new_data.integer.val = value_as_long (val);
1757 break;
1758
1759 default:
1760 new_kind = INTERNALVAR_VALUE;
1761 new_data.value = value_copy (val);
1762 new_data.value->modifiable = 1;
1763
1764 /* Force the value to be fetched from the target now, to avoid problems
1765 later when this internalvar is referenced and the target is gone or
1766 has changed. */
1767 if (value_lazy (new_data.value))
1768 value_fetch_lazy (new_data.value);
1769
1770 /* Release the value from the value chain to prevent it from being
1771 deleted by free_all_values. From here on this function should not
1772 call error () until new_data is installed into the var->u to avoid
1773 leaking memory. */
1774 release_value (new_data.value);
1775 break;
1776 }
1777
1778 /* Clean up old contents. */
1779 clear_internalvar (var);
1780
1781 /* Switch over. */
1782 var->kind = new_kind;
1783 var->u = new_data;
1784 /* End code which must not call error(). */
1785 }
1786
1787 void
1788 set_internalvar_integer (struct internalvar *var, LONGEST l)
1789 {
1790 /* Clean up old contents. */
1791 clear_internalvar (var);
1792
1793 var->kind = INTERNALVAR_INTEGER;
1794 var->u.integer.type = NULL;
1795 var->u.integer.val = l;
1796 }
1797
1798 void
1799 set_internalvar_string (struct internalvar *var, const char *string)
1800 {
1801 /* Clean up old contents. */
1802 clear_internalvar (var);
1803
1804 var->kind = INTERNALVAR_STRING;
1805 var->u.string = xstrdup (string);
1806 }
1807
1808 static void
1809 set_internalvar_function (struct internalvar *var, struct internal_function *f)
1810 {
1811 /* Clean up old contents. */
1812 clear_internalvar (var);
1813
1814 var->kind = INTERNALVAR_FUNCTION;
1815 var->u.fn.function = f;
1816 var->u.fn.canonical = 1;
1817 /* Variables installed here are always the canonical version. */
1818 }
1819
1820 void
1821 clear_internalvar (struct internalvar *var)
1822 {
1823 /* Clean up old contents. */
1824 switch (var->kind)
1825 {
1826 case INTERNALVAR_VALUE:
1827 value_free (var->u.value);
1828 break;
1829
1830 case INTERNALVAR_STRING:
1831 xfree (var->u.string);
1832 break;
1833
1834 default:
1835 break;
1836 }
1837
1838 /* Reset to void kind. */
1839 var->kind = INTERNALVAR_VOID;
1840 }
1841
1842 char *
1843 internalvar_name (struct internalvar *var)
1844 {
1845 return var->name;
1846 }
1847
1848 static struct internal_function *
1849 create_internal_function (const char *name,
1850 internal_function_fn handler, void *cookie)
1851 {
1852 struct internal_function *ifn = XNEW (struct internal_function);
1853
1854 ifn->name = xstrdup (name);
1855 ifn->handler = handler;
1856 ifn->cookie = cookie;
1857 return ifn;
1858 }
1859
1860 char *
1861 value_internal_function_name (struct value *val)
1862 {
1863 struct internal_function *ifn;
1864 int result;
1865
1866 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
1867 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
1868 gdb_assert (result);
1869
1870 return ifn->name;
1871 }
1872
1873 struct value *
1874 call_internal_function (struct gdbarch *gdbarch,
1875 const struct language_defn *language,
1876 struct value *func, int argc, struct value **argv)
1877 {
1878 struct internal_function *ifn;
1879 int result;
1880
1881 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
1882 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
1883 gdb_assert (result);
1884
1885 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
1886 }
1887
1888 /* The 'function' command. This does nothing -- it is just a
1889 placeholder to let "help function NAME" work. This is also used as
1890 the implementation of the sub-command that is created when
1891 registering an internal function. */
1892 static void
1893 function_command (char *command, int from_tty)
1894 {
1895 /* Do nothing. */
1896 }
1897
1898 /* Clean up if an internal function's command is destroyed. */
1899 static void
1900 function_destroyer (struct cmd_list_element *self, void *ignore)
1901 {
1902 xfree (self->name);
1903 xfree (self->doc);
1904 }
1905
1906 /* Add a new internal function. NAME is the name of the function; DOC
1907 is a documentation string describing the function. HANDLER is
1908 called when the function is invoked. COOKIE is an arbitrary
1909 pointer which is passed to HANDLER and is intended for "user
1910 data". */
1911 void
1912 add_internal_function (const char *name, const char *doc,
1913 internal_function_fn handler, void *cookie)
1914 {
1915 struct cmd_list_element *cmd;
1916 struct internal_function *ifn;
1917 struct internalvar *var = lookup_internalvar (name);
1918
1919 ifn = create_internal_function (name, handler, cookie);
1920 set_internalvar_function (var, ifn);
1921
1922 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
1923 &functionlist);
1924 cmd->destroyer = function_destroyer;
1925 }
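/* Illustrative sketch (added commentary, not part of the original
   source; the handler and function names are hypothetical): a caller
   registers a convenience function roughly like this:

     static struct value *
     argcount_handler (struct gdbarch *gdbarch,
                       const struct language_defn *language,
                       void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  argc);
     }

     add_internal_function ("_argcount", _("Return the argument count."),
                            argcount_handler, NULL);

   after which $_argcount (...) can be used in expressions and
   "help function _argcount" shows the documentation string.  */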
1926
1927 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
1928 prevent cycles / duplicates. */
1929
1930 void
1931 preserve_one_value (struct value *value, struct objfile *objfile,
1932 htab_t copied_types)
1933 {
1934 if (TYPE_OBJFILE (value->type) == objfile)
1935 value->type = copy_type_recursive (objfile, value->type, copied_types);
1936
1937 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
1938 value->enclosing_type = copy_type_recursive (objfile,
1939 value->enclosing_type,
1940 copied_types);
1941 }
1942
1943 /* Likewise for internal variable VAR. */
1944
1945 static void
1946 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
1947 htab_t copied_types)
1948 {
1949 switch (var->kind)
1950 {
1951 case INTERNALVAR_INTEGER:
1952 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
1953 var->u.integer.type
1954 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
1955 break;
1956
1957 case INTERNALVAR_VALUE:
1958 preserve_one_value (var->u.value, objfile, copied_types);
1959 break;
1960 }
1961 }
1962
1963 /* Update the internal variables and value history when OBJFILE is
1964 discarded; we must copy the types out of the objfile. New global types
1965 will be created for every convenience variable which currently points to
1966 this objfile's types, and the convenience variables will be adjusted to
1967 use the new global types. */
1968
1969 void
1970 preserve_values (struct objfile *objfile)
1971 {
1972 htab_t copied_types;
1973 struct value_history_chunk *cur;
1974 struct internalvar *var;
1975 int i;
1976
1977 /* Create the hash table. We allocate on the objfile's obstack, since
1978 it is soon to be deleted. */
1979 copied_types = create_copied_types_hash (objfile);
1980
1981 for (cur = value_history_chain; cur; cur = cur->next)
1982 for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
1983 if (cur->values[i])
1984 preserve_one_value (cur->values[i], objfile, copied_types);
1985
1986 for (var = internalvars; var; var = var->next)
1987 preserve_one_internalvar (var, objfile, copied_types);
1988
1989 preserve_python_values (objfile, copied_types);
1990
1991 htab_delete (copied_types);
1992 }
1993
1994 static void
1995 show_convenience (char *ignore, int from_tty)
1996 {
1997 struct gdbarch *gdbarch = get_current_arch ();
1998 struct internalvar *var;
1999 int varseen = 0;
2000 struct value_print_options opts;
2001
2002 get_user_print_options (&opts);
2003 for (var = internalvars; var; var = var->next)
2004 {
2005 if (!varseen)
2006 {
2007 varseen = 1;
2008 }
2009 printf_filtered (("$%s = "), var->name);
2010 value_print (value_of_internalvar (gdbarch, var), gdb_stdout,
2011 &opts);
2012 printf_filtered (("\n"));
2013 }
2014 if (!varseen)
2015 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2016 "Convenience variables have "
2017 "names starting with \"$\";\n"
2018 "use \"set\" as in \"set "
2019 "$foo = 5\" to define them.\n"));
2020 }
2021 \f
2022 /* Extract a value as a C number (either long or double).
2023 Knows how to convert fixed values to double, or
2024 floating values to long.
2025 Does not deallocate the value. */
2026
2027 LONGEST
2028 value_as_long (struct value *val)
2029 {
2030 /* This coerces arrays and functions, which is necessary (e.g.
2031 in disassemble_command). It also dereferences references, which
2032 I suspect is the most logical thing to do. */
2033 val = coerce_array (val);
2034 return unpack_long (value_type (val), value_contents (val));
2035 }
2036
2037 DOUBLEST
2038 value_as_double (struct value *val)
2039 {
2040 DOUBLEST foo;
2041 int inv;
2042
2043 foo = unpack_double (value_type (val), value_contents (val), &inv);
2044 if (inv)
2045 error (_("Invalid floating value found in program."));
2046 return foo;
2047 }
2048
2049 /* Extract a value as a C pointer. Does not deallocate the value.
2050 Note that val's type may not actually be a pointer; value_as_long
2051 handles all the cases. */
2052 CORE_ADDR
2053 value_as_address (struct value *val)
2054 {
2055 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2056
2057 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2058 whether we want this to be true eventually. */
2059 #if 0
2060 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2061 non-address (e.g. argument to "signal", "info break", etc.), or
2062 for pointers to char, in which the low bits *are* significant. */
2063 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2064 #else
2065
2066 /* There are several targets (IA-64, PowerPC, and others) which
2067 don't represent pointers to functions as simply the address of
2068 the function's entry point. For example, on the IA-64, a
2069 function pointer points to a two-word descriptor, generated by
2070 the linker, which contains the function's entry point, and the
2071 value the IA-64 "global pointer" register should have --- to
2072 support position-independent code. The linker generates
2073 descriptors only for those functions whose addresses are taken.
2074
2075 On such targets, it's difficult for GDB to convert an arbitrary
2076 function address into a function pointer; it has to either find
2077 an existing descriptor for that function, or call malloc and
2078 build its own. On some targets, it is impossible for GDB to
2079 build a descriptor at all: the descriptor must contain a jump
2080 instruction; data memory cannot be executed; and code memory
2081 cannot be modified.
2082
2083 Upon entry to this function, if VAL is a value of type `function'
2084 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2085 value_address (val) is the address of the function. This is what
2086 you'll get if you evaluate an expression like `main'. The call
2087 to COERCE_ARRAY below actually does all the usual unary
2088 conversions, which includes converting values of type `function'
2089 to `pointer to function'. This is the challenging conversion
2090 discussed above. Then, `unpack_long' will convert that pointer
2091 back into an address.
2092
2093 So, suppose the user types `disassemble foo' on an architecture
2094 with a strange function pointer representation, on which GDB
2095 cannot build its own descriptors, and suppose further that `foo'
2096 has no linker-built descriptor. The address->pointer conversion
2097 will signal an error and prevent the command from running, even
2098 though the next step would have been to convert the pointer
2099 directly back into the same address.
2100
2101 The following shortcut avoids this whole mess. If VAL is a
2102 function, just return its address directly. */
2103 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2104 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2105 return value_address (val);
2106
2107 val = coerce_array (val);
2108
2109 /* Some architectures (e.g. Harvard) map instruction and data
2110 addresses onto a single large unified address space. For
2111 instance, an architecture may consider a large integer in the
2112 range 0x10000000 .. 0x1000ffff to already represent a data
2113 address (hence not need a pointer to address conversion) while
2114 a small integer would still need to be converted from integer to
2115 pointer to address. Just assume such architectures handle all
2116 integer conversions in a single function. */
2117
2118 /* JimB writes:
2119
2120 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2121 must admonish GDB hackers to make sure its behavior matches the
2122 compiler's, whenever possible.
2123
2124 In general, I think GDB should evaluate expressions the same way
2125 the compiler does. When the user copies an expression out of
2126 their source code and hands it to a `print' command, they should
2127 get the same value the compiler would have computed. Any
2128 deviation from this rule can cause major confusion and annoyance,
2129 and needs to be justified carefully. In other words, GDB doesn't
2130 really have the freedom to do these conversions in clever and
2131 useful ways.
2132
2133 AndrewC pointed out that users aren't complaining about how GDB
2134 casts integers to pointers; they are complaining that they can't
2135 take an address from a disassembly listing and give it to `x/i'.
2136 This is certainly important.
2137
2138 Adding an architecture method like integer_to_address() certainly
2139 makes it possible for GDB to "get it right" in all circumstances
2140 --- the target has complete control over how things get done, so
2141 people can Do The Right Thing for their target without breaking
2142 anyone else. The standard doesn't specify how integers get
2143 converted to pointers; usually, the ABI doesn't either, but
2144 ABI-specific code is a more reasonable place to handle it. */
2145
2146 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2147 && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
2148 && gdbarch_integer_to_address_p (gdbarch))
2149 return gdbarch_integer_to_address (gdbarch, value_type (val),
2150 value_contents (val));
2151
2152 return unpack_long (value_type (val), value_contents (val));
2153 #endif
2154 }
2155 \f
2156 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2157 as a long, or as a double, assuming the raw data is described
2158 by type TYPE. Knows how to convert different sizes of values
2159 and can convert between fixed and floating point. We don't assume
2160 any alignment for the raw data. Return value is in host byte order.
2161
2162 If you want functions and arrays to be coerced to pointers, and
2163 references to be dereferenced, call value_as_long() instead.
2164
2165 C++: It is assumed that the front-end has taken care of
2166 all matters concerning pointers to members. A pointer
2167 to member which reaches here is considered to be equivalent
2168 to an INT (or some size). After all, it is only an offset. */
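/* Editor's illustrative example (not part of the original source):
   for a 2-byte unsigned integer type whose raw target bytes at
   VALADDR are { 0x12, 0x34 }, extract_unsigned_integer yields 0x1234
   on a big-endian target and 0x3412 on a little-endian one; pointer
   and reference types go through extract_typed_address instead.  */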
2169
2170 LONGEST
2171 unpack_long (struct type *type, const gdb_byte *valaddr)
2172 {
2173 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2174 enum type_code code = TYPE_CODE (type);
2175 int len = TYPE_LENGTH (type);
2176 int nosign = TYPE_UNSIGNED (type);
2177
2178 switch (code)
2179 {
2180 case TYPE_CODE_TYPEDEF:
2181 return unpack_long (check_typedef (type), valaddr);
2182 case TYPE_CODE_ENUM:
2183 case TYPE_CODE_FLAGS:
2184 case TYPE_CODE_BOOL:
2185 case TYPE_CODE_INT:
2186 case TYPE_CODE_CHAR:
2187 case TYPE_CODE_RANGE:
2188 case TYPE_CODE_MEMBERPTR:
2189 if (nosign)
2190 return extract_unsigned_integer (valaddr, len, byte_order);
2191 else
2192 return extract_signed_integer (valaddr, len, byte_order);
2193
2194 case TYPE_CODE_FLT:
2195 return extract_typed_floating (valaddr, type);
2196
2197 case TYPE_CODE_DECFLOAT:
2198 /* libdecnumber has a function to convert from decimal to integer, but
2199 it doesn't work when the decimal number has a fractional part. */
2200 return decimal_to_doublest (valaddr, len, byte_order);
2201
2202 case TYPE_CODE_PTR:
2203 case TYPE_CODE_REF:
2204 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2205 whether we want this to be true eventually. */
2206 return extract_typed_address (valaddr, type);
2207
2208 default:
2209 error (_("Value can't be converted to integer."));
2210 }
2211 return 0; /* Placate lint. */
2212 }
2213
2214 /* Return a double value from the specified type and address.
2215 INVP points to an int which is set to 0 for valid value,
2216 1 for invalid value (bad float format). In either case,
2217 the returned double is OK to use. Argument is in target
2218 format, result is in host format. */
2219
2220 DOUBLEST
2221 unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
2222 {
2223 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2224 enum type_code code;
2225 int len;
2226 int nosign;
2227
2228 *invp = 0; /* Assume valid. */
2229 CHECK_TYPEDEF (type);
2230 code = TYPE_CODE (type);
2231 len = TYPE_LENGTH (type);
2232 nosign = TYPE_UNSIGNED (type);
2233 if (code == TYPE_CODE_FLT)
2234 {
2235 /* NOTE: cagney/2002-02-19: There was a test here to see if the
2236 floating-point value was valid (using the macro
2237 INVALID_FLOAT). That test/macro have been removed.
2238
2239 It turns out that only the VAX defined this macro and then
2240 only in a non-portable way. Fixing the portability problem
2241 wouldn't help since the VAX floating-point code is also badly
2242 bit-rotten. The target needs to add definitions for the
2243 methods gdbarch_float_format and gdbarch_double_format - these
2244 exactly describe the target floating-point format. The
2245 problem here is that the corresponding floatformat_vax_f and
2246 floatformat_vax_d values these methods should be set to are
2247 also not defined. Oops!
2248
2249 Hopefully someone will add both the missing floatformat
2250 definitions and the new cases for floatformat_is_valid (). */
2251
2252 if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
2253 {
2254 *invp = 1;
2255 return 0.0;
2256 }
2257
2258 return extract_typed_floating (valaddr, type);
2259 }
2260 else if (code == TYPE_CODE_DECFLOAT)
2261 return decimal_to_doublest (valaddr, len, byte_order);
2262 else if (nosign)
2263 {
2264 /* Unsigned -- be sure we compensate for signed LONGEST. */
2265 return (ULONGEST) unpack_long (type, valaddr);
2266 }
2267 else
2268 {
2269 /* Signed -- we are OK with unpack_long. */
2270 return unpack_long (type, valaddr);
2271 }
2272 }
2273
2274 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2275 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2276 We don't assume any alignment for the raw data. Return value is in
2277 host byte order.
2278
2279 If you want functions and arrays to be coerced to pointers, and
2280 references to be dereferenced, call value_as_address() instead.
2281
2282 C++: It is assumed that the front-end has taken care of
2283 all matters concerning pointers to members. A pointer
2284 to member which reaches here is considered to be equivalent
2285 to an INT (or some size). After all, it is only an offset. */
2286
2287 CORE_ADDR
2288 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2289 {
2290 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2291 whether we want this to be true eventually. */
2292 return unpack_long (type, valaddr);
2293 }
2294
2295 \f
2296 /* Get the value of the FIELDNO'th field (which must be static) of
2297 TYPE. Return NULL if the field doesn't exist or has been
2298 optimized out. */
2299
2300 struct value *
2301 value_static_field (struct type *type, int fieldno)
2302 {
2303 struct value *retval;
2304
2305 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2306 {
2307 case FIELD_LOC_KIND_PHYSADDR:
2308 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2309 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2310 break;
2311 case FIELD_LOC_KIND_PHYSNAME:
2312 {
2313 char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2314 /* TYPE_FIELD_NAME (type, fieldno); */
2315 struct symbol *sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2316
2317 if (sym == NULL)
2318 {
2319 /* With some compilers, e.g. HP aCC, static data members are
2320 reported as non-debuggable symbols. */
2321 struct minimal_symbol *msym = lookup_minimal_symbol (phys_name,
2322 NULL, NULL);
2323
2324 if (!msym)
2325 return NULL;
2326 else
2327 {
2328 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2329 SYMBOL_VALUE_ADDRESS (msym));
2330 }
2331 }
2332 else
2333 retval = value_of_variable (sym, NULL);
2334 break;
2335 }
2336 default:
2337 gdb_assert_not_reached ("unexpected field location kind");
2338 }
2339
2340 return retval;
2341 }
2342
2343 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2344 You have to be careful here, since the size of the data area for the value
2345 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2346 than the old enclosing type, you have to allocate more space for the
2347 data. */
2348
2349 void
2350 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2351 {
2352 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2353 val->contents =
2354 (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));
2355
2356 val->enclosing_type = new_encl_type;
2357 }
2358
2359 /* Given a value ARG1 (offset by OFFSET bytes)
2360 of a struct or union type ARG_TYPE,
2361 extract and return the value of one of its (non-static) fields.
2362 FIELDNO says which field. */
2363
2364 struct value *
2365 value_primitive_field (struct value *arg1, int offset,
2366 int fieldno, struct type *arg_type)
2367 {
2368 struct value *v;
2369 struct type *type;
2370
2371 CHECK_TYPEDEF (arg_type);
2372 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2373
2374 /* Call check_typedef on our type to make sure that, if TYPE
2375 is a TYPE_CODE_TYPEDEF, its length is set to the length
2376 of the target type instead of zero. However, we do not
2377 replace the typedef type by the target type, because we want
2378 to keep the typedef in order to be able to print the type
2379 description correctly. */
2380 check_typedef (type);
2381
2382 /* Handle packed fields.  */
2383
2384 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2385 {
2386 /* Create a new value for the bitfield, with bitpos and bitsize
2387 set. If possible, arrange offset and bitpos so that we can
2388 do a single aligned read of the size of the containing type.
2389 Otherwise, adjust offset to the byte containing the first
2390 bit. Assume that the address, offset, and embedded offset
2391 are sufficiently aligned. */
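/* Editor's worked example (illustrative numbers only): for a 3-bit
   field at BITPOS 44 whose declared type is a 32-bit int,
   container_bitsize is 32; 44 % 32 = 12 and 12 + 3 <= 32, so
   v->bitpos becomes 12 and the offset below advances by
   (44 - 12) / 8 = 4 bytes, so a single aligned 4-byte read covers
   the field. If the aligned read were not possible (the field
   straddling its container, or a type wider than a LONGEST),
   v->bitpos would fall back to 44 % 8 = 4 and the offset would
   advance by (44 - 4) / 8 = 5 bytes instead.  */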
2392 int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2393 int container_bitsize = TYPE_LENGTH (type) * 8;
2394
2395 v = allocate_value_lazy (type);
2396 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2397 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2398 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2399 v->bitpos = bitpos % container_bitsize;
2400 else
2401 v->bitpos = bitpos % 8;
2402 v->offset = (value_embedded_offset (arg1)
2403 + offset
2404 + (bitpos - v->bitpos) / 8);
2405 v->parent = arg1;
2406 value_incref (v->parent);
2407 if (!value_lazy (arg1))
2408 value_fetch_lazy (v);
2409 }
2410 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2411 {
2412 /* This field is actually a base subobject, so preserve the
2413 entire object's contents for later references to virtual
2414 bases, etc. */
2415
2416 /* Lazy register values with offsets are not supported. */
2417 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2418 value_fetch_lazy (arg1);
2419
2420 if (value_lazy (arg1))
2421 v = allocate_value_lazy (value_enclosing_type (arg1));
2422 else
2423 {
2424 v = allocate_value (value_enclosing_type (arg1));
2425 memcpy (value_contents_all_raw (v), value_contents_all_raw (arg1),
2426 TYPE_LENGTH (value_enclosing_type (arg1)));
2427 }
2428 v->type = type;
2429 v->offset = value_offset (arg1);
2430 v->embedded_offset = (offset + value_embedded_offset (arg1)
2431 + TYPE_FIELD_BITPOS (arg_type, fieldno) / 8);
2432 }
2433 else
2434 {
2435 /* Plain old data member.  */
2436 offset += TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2437
2438 /* Lazy register values with offsets are not supported. */
2439 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2440 value_fetch_lazy (arg1);
2441
2442 if (value_lazy (arg1))
2443 v = allocate_value_lazy (type);
2444 else
2445 {
2446 v = allocate_value (type);
2447 memcpy (value_contents_raw (v),
2448 value_contents_raw (arg1) + offset,
2449 TYPE_LENGTH (type));
2450 }
2451 v->offset = (value_offset (arg1) + offset
2452 + value_embedded_offset (arg1));
2453 }
2454 set_value_component_location (v, arg1);
2455 VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
2456 VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
2457 return v;
2458 }
2459
2460 /* Given a value ARG1 of a struct or union type,
2461 extract and return the value of one of its (non-static) fields.
2462 FIELDNO says which field. */
2463
2464 struct value *
2465 value_field (struct value *arg1, int fieldno)
2466 {
2467 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
2468 }
2469
2470 /* Return a non-virtual function as a value.
2471 F is the list of member functions which contains the desired method.
2472 J is an index into F which provides the desired method.
2473
2474 We only use the symbol for its address, so be happy with either a
2475 full symbol or a minimal symbol. */
2476
2477 struct value *
2478 value_fn_field (struct value **arg1p, struct fn_field *f,
2479 int j, struct type *type,
2480 int offset)
2481 {
2482 struct value *v;
2483 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
2484 char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
2485 struct symbol *sym;
2486 struct minimal_symbol *msym;
2487
2488 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0);
2489 if (sym != NULL)
2490 {
2491 msym = NULL;
2492 }
2493 else
2494 {
2495 gdb_assert (sym == NULL);
2496 msym = lookup_minimal_symbol (physname, NULL, NULL);
2497 if (msym == NULL)
2498 return NULL;
2499 }
2500
2501 v = allocate_value (ftype);
2502 if (sym)
2503 {
2504 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
2505 }
2506 else
2507 {
2508 /* The minimal symbol might point to a function descriptor;
2509 resolve it to the actual code address instead. */
2510 struct objfile *objfile = msymbol_objfile (msym);
2511 struct gdbarch *gdbarch = get_objfile_arch (objfile);
2512
2513 set_value_address (v,
2514 gdbarch_convert_from_func_ptr_addr
2515 (gdbarch, SYMBOL_VALUE_ADDRESS (msym), &current_target));
2516 }
2517
2518 if (arg1p)
2519 {
2520 if (type != value_type (*arg1p))
2521 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
2522 value_addr (*arg1p)));
2523
2524 /* Move the `this' pointer according to the offset.
2525 VALUE_OFFSET (*arg1p) += offset; */
2526 }
2527
2528 return v;
2529 }
2530
2531 \f
2532 /* Unpack a bitfield of the specified FIELD_TYPE, from the anonymous
2533 object at VALADDR. The bitfield starts at BITPOS bits and contains
2534 BITSIZE bits.
2535
2536 Extracting bits depends on endianness of the machine. Compute the
2537 number of least significant bits to discard. For big endian machines,
2538 we compute the total number of bits in the anonymous object, subtract
2539 off the bit count from the MSB of the object to the MSB of the
2540 bitfield, then the size of the bitfield, which leaves the LSB discard
2541 count. For little endian machines, the discard count is simply the
2542 number of bits from the LSB of the anonymous object to the LSB of the
2543 bitfield.
2544
2545 If the field is signed, we also do sign extension. */
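/* Editor's worked example (illustrative numbers only): with
   BITPOS = 10 and BITSIZE = 5, bytes_read = ((10 % 8) + 5 + 7) / 8 = 1
   and that byte is read from VALADDR + 10 / 8 = VALADDR + 1. With
   little-endian bit numbering lsbcount = 10 % 8 = 2; with big-endian
   bit numbering lsbcount = 8 * 1 - 2 - 5 = 1. After shifting, the
   value is masked with (1 << 5) - 1 = 0x1f, and for a signed field
   type whose sign bit (0x10) is set the upper bits are filled with
   ones.  */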
2546
2547 LONGEST
2548 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
2549 int bitpos, int bitsize)
2550 {
2551 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
2552 ULONGEST val;
2553 ULONGEST valmask;
2554 int lsbcount;
2555 int bytes_read;
2556
2557 /* Read the minimum number of bytes required; there may not be
2558 enough bytes to read an entire ULONGEST. */
2559 CHECK_TYPEDEF (field_type);
2560 if (bitsize)
2561 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
2562 else
2563 bytes_read = TYPE_LENGTH (field_type);
2564
2565 val = extract_unsigned_integer (valaddr + bitpos / 8,
2566 bytes_read, byte_order);
2567
2568 /* Extract bits. See comment above. */
2569
2570 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
2571 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
2572 else
2573 lsbcount = (bitpos % 8);
2574 val >>= lsbcount;
2575
2576 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
2577 If the field is signed, and is negative, then sign extend. */
2578
2579 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
2580 {
2581 valmask = (((ULONGEST) 1) << bitsize) - 1;
2582 val &= valmask;
2583 if (!TYPE_UNSIGNED (field_type))
2584 {
2585 if (val & (valmask ^ (valmask >> 1)))
2586 {
2587 val |= ~valmask;
2588 }
2589 }
2590 }
2591 return (val);
2592 }
2593
2594 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous object at
2595 VALADDR. See unpack_bits_as_long for more details. */
2596
2597 LONGEST
2598 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
2599 {
2600 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
2601 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
2602 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2603
2604 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
2605 }
2606
2607 /* Modify the value of a bitfield. ADDR points to a block of memory in
2608 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
2609 is the desired value of the field, in host byte order. BITPOS and BITSIZE
2610 indicate which bits (in target bit order) comprise the bitfield.
2611 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
2612 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
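/* Editor's worked example (illustrative numbers only): with
   BITPOS = 11, BITSIZE = 4 and FIELDVAL = 5, ADDR advances by
   11 / 8 = 1 byte and BITPOS becomes 3; mask = 0xf and
   bytesize = (3 + 4 + 7) / 8 = 1, so exactly one byte is read,
   modified and written back. With little-endian bit numbering the
   field occupies bits 3..6 of that byte; with big-endian bit
   numbering BITPOS is recomputed as 8 * 1 - 3 - 4 = 1, i.e. bits
   1..4. A FIELDVAL of -3 fits in 4 bits once its sign-extension
   bits are chopped, and is stored as 0xd.  */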
2613
2614 void
2615 modify_field (struct type *type, gdb_byte *addr,
2616 LONGEST fieldval, int bitpos, int bitsize)
2617 {
2618 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2619 ULONGEST oword;
2620 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
2621 int bytesize;
2622
2623 /* Normalize BITPOS. */
2624 addr += bitpos / 8;
2625 bitpos %= 8;
2626
2627 /* If a negative fieldval fits in the field in question, chop
2628 off the sign extension bits. */
2629 if ((~fieldval & ~(mask >> 1)) == 0)
2630 fieldval &= mask;
2631
2632 /* Warn if value is too big to fit in the field in question. */
2633 if (0 != (fieldval & ~mask))
2634 {
2635 /* FIXME: would like to include fieldval in the message, but
2636 we don't have a sprintf_longest. */
2637 warning (_("Value does not fit in %d bits."), bitsize);
2638
2639 /* Truncate it, otherwise adjoining fields may be corrupted. */
2640 fieldval &= mask;
2641 }
2642
2643 /* Ensure no bytes outside of the modified ones get accessed as it may cause
2644 false valgrind reports. */
2645
2646 bytesize = (bitpos + bitsize + 7) / 8;
2647 oword = extract_unsigned_integer (addr, bytesize, byte_order);
2648
2649 /* Shifting for bit field depends on endianness of the target machine. */
2650 if (gdbarch_bits_big_endian (get_type_arch (type)))
2651 bitpos = bytesize * 8 - bitpos - bitsize;
2652
2653 oword &= ~(mask << bitpos);
2654 oword |= fieldval << bitpos;
2655
2656 store_unsigned_integer (addr, bytesize, byte_order, oword);
2657 }
2658 \f
2659 /* Pack NUM into BUF using a target format of TYPE. */
2660
2661 void
2662 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
2663 {
2664 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2665 int len;
2666
2667 type = check_typedef (type);
2668 len = TYPE_LENGTH (type);
2669
2670 switch (TYPE_CODE (type))
2671 {
2672 case TYPE_CODE_INT:
2673 case TYPE_CODE_CHAR:
2674 case TYPE_CODE_ENUM:
2675 case TYPE_CODE_FLAGS:
2676 case TYPE_CODE_BOOL:
2677 case TYPE_CODE_RANGE:
2678 case TYPE_CODE_MEMBERPTR:
2679 store_signed_integer (buf, len, byte_order, num);
2680 break;
2681
2682 case TYPE_CODE_REF:
2683 case TYPE_CODE_PTR:
2684 store_typed_address (buf, type, (CORE_ADDR) num);
2685 break;
2686
2687 default:
2688 error (_("Unexpected type (%d) encountered for integer constant."),
2689 TYPE_CODE (type));
2690 }
2691 }
2692
2693
2694 /* Pack unsigned NUM into BUF using a target format of TYPE. */
2695
2696 void
2697 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
2698 {
2699 int len;
2700 enum bfd_endian byte_order;
2701
2702 type = check_typedef (type);
2703 len = TYPE_LENGTH (type);
2704 byte_order = gdbarch_byte_order (get_type_arch (type));
2705
2706 switch (TYPE_CODE (type))
2707 {
2708 case TYPE_CODE_INT:
2709 case TYPE_CODE_CHAR:
2710 case TYPE_CODE_ENUM:
2711 case TYPE_CODE_FLAGS:
2712 case TYPE_CODE_BOOL:
2713 case TYPE_CODE_RANGE:
2714 case TYPE_CODE_MEMBERPTR:
2715 store_unsigned_integer (buf, len, byte_order, num);
2716 break;
2717
2718 case TYPE_CODE_REF:
2719 case TYPE_CODE_PTR:
2720 store_typed_address (buf, type, (CORE_ADDR) num);
2721 break;
2722
2723 default:
2724 error (_("Unexpected type (%d) encountered "
2725 "for unsigned integer constant."),
2726 TYPE_CODE (type));
2727 }
2728 }
2729
2730
2731 /* Convert C numbers into newly allocated values. */
2732
2733 struct value *
2734 value_from_longest (struct type *type, LONGEST num)
2735 {
2736 struct value *val = allocate_value (type);
2737
2738 pack_long (value_contents_raw (val), type, num);
2739 return val;
2740 }
2741
2742
2743 /* Convert C unsigned numbers into newly allocated values. */
2744
2745 struct value *
2746 value_from_ulongest (struct type *type, ULONGEST num)
2747 {
2748 struct value *val = allocate_value (type);
2749
2750 pack_unsigned_long (value_contents_raw (val), type, num);
2751
2752 return val;
2753 }
2754
2755
2756 /* Create a value representing a pointer of type TYPE to the address
2757 ADDR. */
2758 struct value *
2759 value_from_pointer (struct type *type, CORE_ADDR addr)
2760 {
2761 struct value *val = allocate_value (type);
2762
2763 store_typed_address (value_contents_raw (val), check_typedef (type), addr);
2764 return val;
2765 }
2766
2767
2768 /* Create a value of type TYPE whose contents come from VALADDR, if it
2769 is non-null, and whose memory address (in the inferior) is
2770 ADDRESS. */
2771
2772 struct value *
2773 value_from_contents_and_address (struct type *type,
2774 const gdb_byte *valaddr,
2775 CORE_ADDR address)
2776 {
2777 struct value *v;
2778
2779 if (valaddr == NULL)
2780 v = allocate_value_lazy (type);
2781 else
2782 {
2783 v = allocate_value (type);
2784 memcpy (value_contents_raw (v), valaddr, TYPE_LENGTH (type));
2785 }
2786 set_value_address (v, address);
2787 VALUE_LVAL (v) = lval_memory;
2788 return v;
2789 }
2790
2791 struct value *
2792 value_from_double (struct type *type, DOUBLEST num)
2793 {
2794 struct value *val = allocate_value (type);
2795 struct type *base_type = check_typedef (type);
2796 enum type_code code = TYPE_CODE (base_type);
2797
2798 if (code == TYPE_CODE_FLT)
2799 {
2800 store_typed_floating (value_contents_raw (val), base_type, num);
2801 }
2802 else
2803 error (_("Unexpected type encountered for floating constant."));
2804
2805 return val;
2806 }
2807
2808 struct value *
2809 value_from_decfloat (struct type *type, const gdb_byte *dec)
2810 {
2811 struct value *val = allocate_value (type);
2812
2813 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
2814 return val;
2815 }
2816
2817 struct value *
2818 coerce_ref (struct value *arg)
2819 {
2820 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
2821
2822 if (TYPE_CODE (value_type_arg_tmp) == TYPE_CODE_REF)
2823 arg = value_at_lazy (TYPE_TARGET_TYPE (value_type_arg_tmp),
2824 unpack_pointer (value_type (arg),
2825 value_contents (arg)));
2826 return arg;
2827 }
2828
2829 struct value *
2830 coerce_array (struct value *arg)
2831 {
2832 struct type *type;
2833
2834 arg = coerce_ref (arg);
2835 type = check_typedef (value_type (arg));
2836
2837 switch (TYPE_CODE (type))
2838 {
2839 case TYPE_CODE_ARRAY:
2840 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
2841 arg = value_coerce_array (arg);
2842 break;
2843 case TYPE_CODE_FUNC:
2844 arg = value_coerce_function (arg);
2845 break;
2846 }
2847 return arg;
2848 }
2849 \f
2850
2851 /* Return true if the function returning the specified type is using
2852 the convention of returning structures in memory (passing in the
2853 address as a hidden first parameter). */
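/* Editor's illustrative note, not part of the original source: under
   this convention a call to `struct big f (void);' is effectively
   lowered by the compiler as if it were `void f (struct big *result);',
   with the caller supplying the address of storage for the return
   value. For such types gdbarch_return_value reports something other
   than RETURN_VALUE_REGISTER_CONVENTION (for instance
   RETURN_VALUE_STRUCT_CONVENTION), which is exactly what the test
   below checks.  */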
2854
2855 int
2856 using_struct_return (struct gdbarch *gdbarch,
2857 struct type *func_type, struct type *value_type)
2858 {
2859 enum type_code code = TYPE_CODE (value_type);
2860
2861 if (code == TYPE_CODE_ERROR)
2862 error (_("Function return type unknown."));
2863
2864 if (code == TYPE_CODE_VOID)
2865 /* A void return value is never in memory. See also corresponding
2866 code in "print_return_value". */
2867 return 0;
2868
2869 /* Probe the architecture for the return-value convention. */
2870 return (gdbarch_return_value (gdbarch, func_type, value_type,
2871 NULL, NULL, NULL)
2872 != RETURN_VALUE_REGISTER_CONVENTION);
2873 }
2874
2875 /* Set the initialized field in a value struct. */
2876
2877 void
2878 set_value_initialized (struct value *val, int status)
2879 {
2880 val->initialized = status;
2881 }
2882
2883 /* Return the initialized field in a value struct. */
2884
2885 int
2886 value_initialized (struct value *val)
2887 {
2888 return val->initialized;
2889 }
2890
2891 void
2892 _initialize_values (void)
2893 {
2894 add_cmd ("convenience", no_class, show_convenience, _("\
2895 Debugger convenience (\"$foo\") variables.\n\
2896 These variables are created when you assign them values;\n\
2897 thus, \"print $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
2898 \n\
2899 A few convenience variables are given values automatically:\n\
2900 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
2901 \"$__\" holds the contents of the last address examined with \"x\"."),
2902 &showlist);
2903
2904 add_cmd ("values", no_class, show_values, _("\
2905 Elements of value history around item number IDX (or last ten)."),
2906 &showlist);
2907
2908 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
2909 Initialize a convenience variable if necessary.\n\
2910 init-if-undefined VARIABLE = EXPRESSION\n\
2911 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
2912 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
2913 VARIABLE is already initialized."));
2914
2915 add_prefix_cmd ("function", no_class, function_command, _("\
2916 Placeholder command for showing help on convenience functions."),
2917 &functionlist, "function ", 0, &cmdlist);
2918 }