/* gdb/value.c -- from the binutils-gdb repository.  */
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "doublest.h"
32 #include "regcache.h"
33 #include "block.h"
34 #include "dfp.h"
35 #include "objfiles.h"
36 #include "valprint.h"
37 #include "cli/cli-decode.h"
38 #include "extension.h"
39 #include <ctype.h>
40 #include "tracepoint.h"
41 #include "cp-abi.h"
42 #include "user-regs.h"
43
44 /* Prototypes for exported functions. */
45
46 void _initialize_values (void);
47
/* Definition of a user function (a GDB "convenience function"
   implemented in an extension language or in GDB itself).  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler, invoked when the function is called from an
     expression.  */
  internal_function_fn handler;

  /* User data passed back to the handler on each call.  */
  void *cookie;
};
62
/* Defines an [OFFSET, OFFSET + LENGTH) range.  */

struct range
{
  /* Lowest offset in the range.  */
  int offset;

  /* Length of the range.  */
  int length;
};

typedef struct range range_s;

/* Vector of ranges; used below to track the unavailable and
   optimized-out chunks of a value's contents.  */
DEF_VEC_O(range_s);
77
78 /* Returns true if the ranges defined by [offset1, offset1+len1) and
79 [offset2, offset2+len2) overlap. */
80
81 static int
82 ranges_overlap (int offset1, int len1,
83 int offset2, int len2)
84 {
85 ULONGEST h, l;
86
87 l = max (offset1, offset2);
88 h = min (offset1 + len1, offset2 + len2);
89 return (l < h);
90 }
91
92 /* Returns true if the first argument is strictly less than the
93 second, useful for VEC_lower_bound. We keep ranges sorted by
94 offset and coalesce overlapping and contiguous ranges, so this just
95 compares the starting offset. */
96
97 static int
98 range_lessthan (const range_s *r1, const range_s *r2)
99 {
100 return r1->offset < r2->offset;
101 }
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (VEC(range_s) *ranges, int offset, int length)
108 {
109 range_s what;
110 int i;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range afterall), we need to check if the
121 _previous_ range overlaps the I range. E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and before 2). But, `0'
133 overlaps with R.
134
135 Then we need to check if the I range overlaps the I range itself.
136 E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146 i = VEC_lower_bound (range_s, ranges, &what, range_lessthan);
147
148 if (i > 0)
149 {
150 struct range *bef = VEC_index (range_s, ranges, i - 1);
151
152 if (ranges_overlap (bef->offset, bef->length, offset, length))
153 return 1;
154 }
155
156 if (i < VEC_length (range_s, ranges))
157 {
158 struct range *r = VEC_index (range_s, ranges, i);
159
160 if (ranges_overlap (r->offset, r->length, offset, length))
161 return 1;
162 }
163
164 return 0;
165 }
166
167 static struct cmd_list_element *functionlist;
168
/* Note that the fields in this structure are arranged to save a bit
   of memory.  */

struct value
{
  /* Type of value; either not an lval, or one of the various
     different possible kinds of lval.  */
  enum lval_type lval;

  /* Is it modifiable?  Only relevant if lval != not_lval.  */
  unsigned int modifiable : 1;

  /* If zero, contents of this value are in the contents field.  If
     nonzero, contents are in inferior.  If the lval field is lval_memory,
     the contents are in inferior memory at location.address plus offset.
     The lval field may also be lval_register.

     WARNING: This field is used by the code which handles watchpoints
     (see breakpoint.c) to decide whether a particular value can be
     watched by hardware watchpoints.  If the lazy flag is set for
     some member of a value chain, it is assumed that this member of
     the chain doesn't need to be watched as part of watching the
     value itself.  This is how GDB avoids watching the entire struct
     or array when the user wants to watch a single struct member or
     array element.  If you ever change the way lazy flag is set and
     reset, be sure to consider this use as well!  */
  unsigned int lazy : 1;

  /* If value is a variable, is it initialized or not.  */
  unsigned int initialized : 1;

  /* If value is from the stack.  If this is set, read_stack will be
     used instead of read_memory to enable extra caching.  */
  unsigned int stack : 1;

  /* If the value has been released from the all_values chain (see
     release_value).  */
  unsigned int released : 1;

  /* Register number if the value is from a register; -1 otherwise
     (see allocate_value_lazy).  */
  short regnum;

  /* Location of value (if lval).  */
  union
  {
    /* If lval == lval_memory, this is the address in the inferior.
       If lval == lval_register, this is the byte offset into the
       registers structure.  */
    CORE_ADDR address;

    /* Pointer to internal variable.  */
    struct internalvar *internalvar;

    /* Pointer to xmethod worker.  */
    struct xmethod_worker *xm_worker;

    /* If lval == lval_computed, this is a set of function pointers
       to use to access and describe the value, and a closure pointer
       for them to use.  */
    struct
    {
      /* Functions to call.  */
      const struct lval_funcs *funcs;

      /* Closure for those functions to use.  */
      void *closure;
    } computed;
  } location;

  /* Describes offset of a value within lval of a structure in target
     addressable memory units.  If lval == lval_memory, this is an offset to
     the address.  If lval == lval_register, this is a further offset from
     location.address within the registers structure.  Note also the member
     embedded_offset below.  */
  int offset;

  /* Only used for bitfields; number of bits contained in them.  */
  int bitsize;

  /* Only used for bitfields; position of start of field.  For
     gdbarch_bits_big_endian=0 targets, it is the position of the LSB.  For
     gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
  int bitpos;

  /* The number of references to this value.  When a value is created,
     the value chain holds a reference, so REFERENCE_COUNT is 1.  If
     release_value is called, this value is removed from the chain but
     the caller of release_value now has a reference to this value.
     The caller must arrange for a call to value_free later.  */
  int reference_count;

  /* Only used for bitfields; the containing value.  This allows a
     single read from the target when displaying multiple
     bitfields.  */
  struct value *parent;

  /* Frame register value is relative to.  This will be described in
     the lval enum above as "lval_register".  */
  struct frame_id frame_id;

  /* Type of the value.  */
  struct type *type;

  /* If a value represents a C++ object, then the `type' field gives
     the object's compile-time type.  If the object actually belongs
     to some class derived from `type', perhaps with other base
     classes and additional members, then `type' is just a subobject
     of the real thing, and the full object is probably larger than
     `type' would suggest.

     If `type' is a dynamic class (i.e. one with a vtable), then GDB
     can actually determine the object's run-time type by looking at
     the run-time type information in the vtable.  When this
     information is available, we may elect to read in the entire
     object, for several reasons:

     - When printing the value, the user would probably rather see the
     full object, not just the limited portion apparent from the
     compile-time type.

     - If `type' has virtual base classes, then even printing `type'
     alone may require reaching outside the `type' portion of the
     object to wherever the virtual base class has been stored.

     When we store the entire object, `enclosing_type' is the run-time
     type -- the complete object -- and `embedded_offset' is the
     offset of `type' within that larger type, in target addressable memory
     units.  The value_contents() macro takes `embedded_offset' into account,
     so most GDB code continues to see the `type' portion of the value, just
     as the inferior would.

     If `type' is a pointer to an object, then `enclosing_type' is a
     pointer to the object's run-time type, and `pointed_to_offset' is
     the offset in target addressable memory units from the full object
     to the pointed-to object -- that is, the value `embedded_offset' would
     have if we followed the pointer and fetched the complete object.
     (I don't really see the point.  Why not just determine the
     run-time type when you indirect, and avoid the special case?  The
     contents don't matter until you indirect anyway.)

     If we're not doing anything fancy, `enclosing_type' is equal to
     `type', and `embedded_offset' is zero, so everything works
     normally.  */
  struct type *enclosing_type;
  int embedded_offset;
  int pointed_to_offset;

  /* Values are stored in a chain, so that they can be deleted easily
     over calls to the inferior.  Values assigned to internal
     variables, put into the value history or exposed to Python are
     taken off this list.  */
  struct value *next;

  /* Actual contents of the value.  Target byte-order.  NULL or not
     valid if lazy is nonzero.  */
  gdb_byte *contents;

  /* Unavailable ranges in CONTENTS.  We mark unavailable ranges,
     rather than available, since the common and default case is for a
     value to be available.  This is filled in at value read time.
     The unavailable ranges are tracked in bits.  Note that a contents
     bit that has been optimized out doesn't really exist in the
     program, so it can't be marked unavailable either.  */
  VEC(range_s) *unavailable;

  /* Likewise, but for optimized out contents (a chunk of the value of
     a variable that does not actually exist in the program).  If LVAL
     is lval_register, this is a register ($pc, $sp, etc., never a
     program variable) that has not been saved in the frame.  Not
     saved registers and optimized-out program variables values are
     treated pretty much the same, except not-saved registers have a
     different string representation and related error strings.  */
  VEC(range_s) *optimized_out;
};
342
/* See value.h.  */

struct gdbarch *
get_value_arch (const struct value *value)
{
  /* The architecture of a value is the architecture of its type.  */
  struct type *type = value_type (value);

  return get_type_arch (type);
}
350
351 int
352 value_bits_available (const struct value *value, int offset, int length)
353 {
354 gdb_assert (!value->lazy);
355
356 return !ranges_contain (value->unavailable, offset, length);
357 }
358
359 int
360 value_bytes_available (const struct value *value, int offset, int length)
361 {
362 return value_bits_available (value,
363 offset * TARGET_CHAR_BIT,
364 length * TARGET_CHAR_BIT);
365 }
366
367 int
368 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
369 {
370 gdb_assert (!value->lazy);
371
372 return ranges_contain (value->optimized_out, bit_offset, bit_length);
373 }
374
375 int
376 value_entirely_available (struct value *value)
377 {
378 /* We can only tell whether the whole value is available when we try
379 to read it. */
380 if (value->lazy)
381 value_fetch_lazy (value);
382
383 if (VEC_empty (range_s, value->unavailable))
384 return 1;
385 return 0;
386 }
387
388 /* Returns true if VALUE is entirely covered by RANGES. If the value
389 is lazy, it'll be read now. Note that RANGE is a pointer to
390 pointer because reading the value might change *RANGE. */
391
392 static int
393 value_entirely_covered_by_range_vector (struct value *value,
394 VEC(range_s) **ranges)
395 {
396 /* We can only tell whether the whole value is optimized out /
397 unavailable when we try to read it. */
398 if (value->lazy)
399 value_fetch_lazy (value);
400
401 if (VEC_length (range_s, *ranges) == 1)
402 {
403 struct range *t = VEC_index (range_s, *ranges, 0);
404
405 if (t->offset == 0
406 && t->length == (TARGET_CHAR_BIT
407 * TYPE_LENGTH (value_enclosing_type (value))))
408 return 1;
409 }
410
411 return 0;
412 }
413
414 int
415 value_entirely_unavailable (struct value *value)
416 {
417 return value_entirely_covered_by_range_vector (value, &value->unavailable);
418 }
419
420 int
421 value_entirely_optimized_out (struct value *value)
422 {
423 return value_entirely_covered_by_range_vector (value, &value->optimized_out);
424 }
425
/* Insert into the vector pointed to by VECTORP the bit range starting of
   OFFSET bits, and extending for the next LENGTH bits.  */

static void
insert_into_bit_range_vector (VEC(range_s) **vectorp, int offset, int length)
{
  range_s newr;
  int i;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range afterall), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

	   R
	   |-...-|
	 |---|     |---|  |------| ... |--|
	   0         1       2            N

	 I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

	     R
	     |-...-|
	 |--|       |---|  |------| ... |--|
	   0          1       2            N

	 I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

	       R
	       |-...-|
	 |--|         |---|  |------| ... |--|
	   0            1       2            N

	 I=1

     or if I is 0:

       #4 - R is the range with lowest offset

	 R
	 |-...-|
	       |--|  |---|  |------| ... |--|
	         0     1       2            N

	 I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

	   R
	   |------------------------|
	 |--|       |---|  |------| ... |--|
	   0          1       2            N

	 I=0

       or:

	   R
	   |-------|
	 |--|       |---|  |------| ... |--|
	   0          1       2            N

	 I=1

  */

  i = VEC_lower_bound (range_s, *vectorp, &newr, range_lessthan);
  if (i > 0)
    {
      struct range *bef = VEC_index (range_s, *vectorp, i - 1);

      if (ranges_overlap (bef->offset, bef->length, offset, length))
	{
	  /* #1: overlaps the previous range; widen that range to the
	     union of the two, and continue working on it below.  */
	  ULONGEST l = min (bef->offset, offset);
	  ULONGEST h = max (bef->offset + bef->length, offset + length);

	  bef->offset = l;
	  bef->length = h - l;
	  i--;
	}
      else if (offset == bef->offset + bef->length)
	{
	  /* #2: exactly contiguous with the previous range; just
	     extend it.  */
	  bef->length += length;
	  i--;
	}
      else
	{
	  /* #3: disjoint from the previous range; insert as a new
	     entry at position I.  */
	  VEC_safe_insert (range_s, *vectorp, i, &newr);
	}
    }
  else
    {
      /* #4: no previous range; insert at the front.  */
      VEC_safe_insert (range_s, *vectorp, i, &newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i + 1 < VEC_length (range_s, *vectorp))
    {
      struct range *t;
      struct range *r;
      int removed = 0;
      int next = i + 1;

      /* Get the range we just touched.  */
      t = VEC_index (range_s, *vectorp, i);
      removed = 0;

      i = next;
      for (; VEC_iterate (range_s, *vectorp, i, r); i++)
	if (r->offset <= t->offset + t->length)
	  {
	    /* R overlaps or is contiguous with T; grow T to absorb
	       it, and remember to delete R afterwards.  */
	    ULONGEST l, h;

	    l = min (t->offset, r->offset);
	    h = max (t->offset + t->length, r->offset + r->length);

	    t->offset = l;
	    t->length = h - l;

	    removed++;
	  }
	else
	  {
	    /* If we couldn't merge this one, we won't be able to
	       merge following ones either, since the ranges are
	       always sorted by OFFSET.  */
	    break;
	  }

      if (removed != 0)
	VEC_block_remove (range_s, *vectorp, next, removed);
    }
}
593
594 void
595 mark_value_bits_unavailable (struct value *value, int offset, int length)
596 {
597 insert_into_bit_range_vector (&value->unavailable, offset, length);
598 }
599
600 void
601 mark_value_bytes_unavailable (struct value *value, int offset, int length)
602 {
603 mark_value_bits_unavailable (value,
604 offset * TARGET_CHAR_BIT,
605 length * TARGET_CHAR_BIT);
606 }
607
608 /* Find the first range in RANGES that overlaps the range defined by
609 OFFSET and LENGTH, starting at element POS in the RANGES vector,
610 Returns the index into RANGES where such overlapping range was
611 found, or -1 if none was found. */
612
613 static int
614 find_first_range_overlap (VEC(range_s) *ranges, int pos,
615 int offset, int length)
616 {
617 range_s *r;
618 int i;
619
620 for (i = pos; VEC_iterate (range_s, ranges, i, r); i++)
621 if (ranges_overlap (r->offset, r->length, offset, length))
622 return i;
623
624 return -1;
625 }
626
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
727
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  VEC(range_s) *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here, instead of scanning from the
     beginning each time.  */
  int idx;
};
741
742 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
743 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
744 ranges starting at OFFSET2 bits. Return true if the ranges match
745 and fill in *L and *H with the overlapping window relative to
746 (both) OFFSET1 or OFFSET2. */
747
748 static int
749 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
750 struct ranges_and_idx *rp2,
751 int offset1, int offset2,
752 int length, ULONGEST *l, ULONGEST *h)
753 {
754 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
755 offset1, length);
756 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
757 offset2, length);
758
759 if (rp1->idx == -1 && rp2->idx == -1)
760 {
761 *l = length;
762 *h = length;
763 return 1;
764 }
765 else if (rp1->idx == -1 || rp2->idx == -1)
766 return 0;
767 else
768 {
769 range_s *r1, *r2;
770 ULONGEST l1, h1;
771 ULONGEST l2, h2;
772
773 r1 = VEC_index (range_s, rp1->ranges, rp1->idx);
774 r2 = VEC_index (range_s, rp2->ranges, rp2->idx);
775
776 /* Get the unavailable windows intersected by the incoming
777 ranges. The first and last ranges that overlap the argument
778 range may be wider than said incoming arguments ranges. */
779 l1 = max (offset1, r1->offset);
780 h1 = min (offset1 + length, r1->offset + r1->length);
781
782 l2 = max (offset2, r2->offset);
783 h2 = min (offset2 + length, offset2 + r2->length);
784
785 /* Make them relative to the respective start offsets, so we can
786 compare them for equality. */
787 l1 -= offset1;
788 h1 -= offset1;
789
790 l2 -= offset2;
791 h2 -= offset2;
792
793 /* Different ranges, no match. */
794 if (l1 != l2 || h1 != h2)
795 return 0;
796
797 *h = h1;
798 *l = l1;
799 return 1;
800 }
801 }
802
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

static int
value_contents_bits_eq (const struct value *val1, int offset1,
			const struct value *val2, int offset2,
			int length)
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!val1->lazy && !val2->lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = val1->unavailable;
  rp2[0].ranges = val2->unavailable;
  rp1[1].ranges = val1->optimized_out;
  rp2[1].ranges = val2->optimized_out;

  /* Walk the region window by window: each iteration compares the
     valid contents up to the next invalid chunk, then skips past that
     chunk (both values must have matching invalid chunks for the
     comparison to succeed).  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return 0;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (val1->contents, offset1,
				   val2->contents, offset2, l) != 0)
	return 0;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return 1;
}
871
872 int
873 value_contents_eq (const struct value *val1, int offset1,
874 const struct value *val2, int offset2,
875 int length)
876 {
877 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
878 val2, offset2 * TARGET_CHAR_BIT,
879 length * TARGET_CHAR_BIT);
880 }
881
882 /* Prototypes for local functions. */
883
884 static void show_values (char *, int);
885
886 static void show_convenience (char *, int);
887
888
/* The value-history records all the values printed
   by print commands during this session.  Each chunk
   records 60 consecutive values.  The first chunk on
   the chain records the most recent values.
   The total number of values is in value_history_count.  */

#define VALUE_HISTORY_CHUNK 60

struct value_history_chunk
{
  /* The next (older) chunk on the chain.  */
  struct value_history_chunk *next;

  /* The values recorded in this chunk.  */
  struct value *values[VALUE_HISTORY_CHUNK];
};
902
903 /* Chain of chunks now in use. */
904
905 static struct value_history_chunk *value_history_chain;
906
907 static int value_history_count; /* Abs number of last entry stored. */
908
909 \f
910 /* List of all value objects currently allocated
911 (except for those released by calls to release_value)
912 This is so they can be freed after each command. */
913
914 static struct value *all_values;
915
916 /* Allocate a lazy value for type TYPE. Its actual content is
917 "lazily" allocated too: the content field of the return value is
918 NULL; it will be allocated when it is fetched from the target. */
919
920 struct value *
921 allocate_value_lazy (struct type *type)
922 {
923 struct value *val;
924
925 /* Call check_typedef on our type to make sure that, if TYPE
926 is a TYPE_CODE_TYPEDEF, its length is set to the length
927 of the target type instead of zero. However, we do not
928 replace the typedef type by the target type, because we want
929 to keep the typedef in order to be able to set the VAL's type
930 description correctly. */
931 check_typedef (type);
932
933 val = (struct value *) xzalloc (sizeof (struct value));
934 val->contents = NULL;
935 val->next = all_values;
936 all_values = val;
937 val->type = type;
938 val->enclosing_type = type;
939 VALUE_LVAL (val) = not_lval;
940 val->location.address = 0;
941 VALUE_FRAME_ID (val) = null_frame_id;
942 val->offset = 0;
943 val->bitpos = 0;
944 val->bitsize = 0;
945 VALUE_REGNUM (val) = -1;
946 val->lazy = 1;
947 val->embedded_offset = 0;
948 val->pointed_to_offset = 0;
949 val->modifiable = 1;
950 val->initialized = 1; /* Default to initialized. */
951
952 /* Values start out on the all_values chain. */
953 val->reference_count = 1;
954
955 return val;
956 }
957
958 /* Allocate the contents of VAL if it has not been allocated yet. */
959
960 static void
961 allocate_value_contents (struct value *val)
962 {
963 if (!val->contents)
964 val->contents = (gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type));
965 }
966
967 /* Allocate a value and its contents for type TYPE. */
968
969 struct value *
970 allocate_value (struct type *type)
971 {
972 struct value *val = allocate_value_lazy (type);
973
974 allocate_value_contents (val);
975 val->lazy = 0;
976 return val;
977 }
978
979 /* Allocate a value that has the correct length
980 for COUNT repetitions of type TYPE. */
981
982 struct value *
983 allocate_repeat_value (struct type *type, int count)
984 {
985 int low_bound = current_language->string_lower_bound; /* ??? */
986 /* FIXME-type-allocation: need a way to free this type when we are
987 done with it. */
988 struct type *array_type
989 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
990
991 return allocate_value (array_type);
992 }
993
994 struct value *
995 allocate_computed_value (struct type *type,
996 const struct lval_funcs *funcs,
997 void *closure)
998 {
999 struct value *v = allocate_value_lazy (type);
1000
1001 VALUE_LVAL (v) = lval_computed;
1002 v->location.computed.funcs = funcs;
1003 v->location.computed.closure = closure;
1004
1005 return v;
1006 }
1007
/* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT.  */

struct value *
allocate_optimized_out_value (struct type *type)
{
  struct value *retval = allocate_value_lazy (type);

  /* Mark every byte optimized out, then clear the lazy flag: there is
     nothing to fetch for a fully optimized-out value.  */
  mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
  set_value_lazy (retval, 0);

  return retval;
}
1019
/* Accessor methods.  */

struct value *
value_next (struct value *value)
{
  /* Return the next value on the chain of all allocated values.  */
  return value->next;
}
1027
struct type *
value_type (const struct value *value)
{
  /* Static (compile-time) type of the value; see also
     value_enclosing_type.  */
  return value->type;
}
void
deprecated_set_value_type (struct value *value, struct type *type)
{
  /* Deprecated: mutate VALUE's type in place.  */
  value->type = type;
}
1038
int
value_offset (const struct value *value)
{
  /* Offset of this value within its lval, in target addressable
     memory units (see struct value).  */
  return value->offset;
}
void
set_value_offset (struct value *value, int offset)
{
  /* Set the offset of this value within its lval, in target
     addressable memory units.  */
  value->offset = offset;
}
1049
int
value_bitpos (const struct value *value)
{
  /* Bitfields only: position of the start of the field (LSB or MSB
     depending on gdbarch_bits_big_endian; see struct value).  */
  return value->bitpos;
}
void
set_value_bitpos (struct value *value, int bit)
{
  /* Bitfields only: set the position of the start of the field.  */
  value->bitpos = bit;
}
1060
int
value_bitsize (const struct value *value)
{
  /* Bitfields only: number of bits contained in the field.  */
  return value->bitsize;
}
void
set_value_bitsize (struct value *value, int bit)
{
  /* Bitfields only: set the number of bits contained in the field.  */
  value->bitsize = bit;
}
1071
struct value *
value_parent (struct value *value)
{
  /* Bitfields only: the containing value (may be NULL).  */
  return value->parent;
}
1077
/* See value.h.  */

void
set_value_parent (struct value *value, struct value *parent)
{
  struct value *old = value->parent;

  /* Take the reference on the new parent before releasing the old
     one, so that setting the same parent again doesn't transiently
     drop its refcount to zero.  */
  value->parent = parent;
  if (parent != NULL)
    value_incref (parent);
  value_free (old);
}
1090
/* Return a writable pointer to VALUE's contents buffer, displaced by
   the embedded offset (scaled to addressable memory units).
   Allocates the buffer on first use.  */

gdb_byte *
value_contents_raw (struct value *value)
{
  struct gdbarch *arch = get_value_arch (value);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  allocate_value_contents (value);
  return value->contents + value->embedded_offset * unit_size;
}

/* Like value_contents_raw, but for the whole enclosing-type buffer,
   without the embedded-offset displacement.  */

gdb_byte *
value_contents_all_raw (struct value *value)
{
  allocate_value_contents (value);
  return value->contents;
}

/* Return VALUE's enclosing type (the type covering the whole contents
   buffer, of which the declared type may be only a part).  */

struct type *
value_enclosing_type (struct value *value)
{
  return value->enclosing_type;
}
1113
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  /* Default answer: the declared type, with no dynamic-type lookup.  */
  result = value_type (value);
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if ((TYPE_CODE (result) == TYPE_CODE_PTR
	   || TYPE_CODE (result) == TYPE_CODE_REF)
	  && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
	     == TYPE_CODE_STRUCT)
	{
	  struct type *real_type;

	  /* May fail; only override RESULT when RTTI was found.  */
	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value_enclosing_type (value);
	}
    }

  return result;
}
1157
/* Report the generic optimized-out error.  */

void
error_value_optimized_out (void)
{
  error (_("value has been optimized out"));
}

/* Error out if any part of VALUE is optimized out; unsaved registers
   get a more specific message.  */

static void
require_not_optimized_out (const struct value *value)
{
  if (!VEC_empty (range_s, value->optimized_out))
    {
      if (value->lval == lval_register)
	error (_("register has not been saved in frame"));
      else
	error_value_optimized_out ();
    }
}

/* Throw NOT_AVAILABLE_ERROR if any part of VALUE is unavailable.  */

static void
require_available (const struct value *value)
{
  if (!VEC_empty (range_s, value->unavailable))
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}

/* Return VALUE's full contents, fetching them if still lazy, and
   without erroring out on optimized-out/unavailable ranges (printing
   callers handle those ranges themselves).  */

const gdb_byte *
value_contents_for_printing (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value->contents;
}

/* Const variant of the above; VALUE must already be non-lazy.  */

const gdb_byte *
value_contents_for_printing_const (const struct value *value)
{
  gdb_assert (!value->lazy);
  return value->contents;
}

/* Return VALUE's full contents, erroring out if any part is
   optimized out or unavailable.  */

const gdb_byte *
value_contents_all (struct value *value)
{
  const gdb_byte *result = value_contents_for_printing (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}
1206
/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (VEC (range_s) **dst_range, int dst_bit_offset,
		      VEC (range_s) *src_range, int src_bit_offset,
		      int bit_length)
{
  range_s *r;
  int i;

  for (i = 0; VEC_iterate (range_s, src_range, i, r); i++)
    {
      ULONGEST h, l;

      /* Clip R to the source window; L/H are the clipped bounds.  */
      l = max (r->offset, src_bit_offset);
      h = min (r->offset + r->length, src_bit_offset + bit_length);

      /* Empty intersection when L >= H; otherwise re-base the clipped
	 range onto the destination's bit offset.  */
      if (l < h)
	insert_into_bit_range_vector (dst_range,
				      dst_bit_offset + (l - src_bit_offset),
				      h - l);
    }
}
1231
/* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted.  Both the
   unavailable and the optimized-out range vectors are carried over.  */

static void
value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			    const struct value *src, int src_bit_offset,
			    int bit_length)
{
  ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
			src->unavailable, src_bit_offset,
			bit_length);
  ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
			src->optimized_out, src_bit_offset,
			bit_length);
}
1247
/* Copy LENGTH target addressable memory units of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
   contents, starting at DST_OFFSET.  If unavailable contents are
   being copied from SRC, the corresponding DST contents are marked
   unavailable accordingly.  Neither DST nor SRC may be lazy
   values.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy_raw (struct value *dst, int dst_offset,
			 struct value *src, int src_offset, int length)
{
  range_s *r;			/* NOTE(review): R is unused here.  */
  int src_bit_offset, dst_bit_offset, bit_length;
  struct gdbarch *arch = get_value_arch (src);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->lazy && !src->lazy);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (value_bytes_available (dst, dst_offset, length));
  gdb_assert (!value_bits_any_optimized_out (dst,
					     TARGET_CHAR_BIT * dst_offset,
					     TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
	  value_contents_all_raw (src) + src_offset * unit_size,
	  length * unit_size);

  /* Copy the meta-data, adjusted.  */
  /* NOTE(review): these use HOST_CHAR_BIT while the asserts above use
     TARGET_CHAR_BIT; they agree only because both are 8 on supported
     configurations -- confirm before changing either.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  value_ranges_copy_adjusted (dst, dst_bit_offset,
			      src, src_bit_offset,
			      bit_length);
}
1295
/* Copy LENGTH bytes of SRC value's (all) contents
   (value_contents_all) starting at SRC_OFFSET byte, into DST value's
   (all) contents, starting at DST_OFFSET.  If unavailable contents
   are being copied from SRC, the corresponding DST contents are
   marked unavailable accordingly.  DST must not be lazy.  If SRC is
   lazy, it will be fetched now.

   It is assumed the contents of DST in the [DST_OFFSET,
   DST_OFFSET+LENGTH) range are wholly available.  */

void
value_contents_copy (struct value *dst, int dst_offset,
		     struct value *src, int src_offset, int length)
{
  /* Un-lazy SRC first; value_contents_copy_raw asserts both sides are
     non-lazy.  */
  if (src->lazy)
    value_fetch_lazy (src);

  value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
}
1315
/* Return non-zero if VALUE's contents have not been fetched yet.  */

int
value_lazy (struct value *value)
{
  return value->lazy;
}

/* Set or clear VALUE's lazy flag.  */

void
set_value_lazy (struct value *value, int val)
{
  value->lazy = val;
}

/* Return VALUE's stack flag; presumably non-zero when the value was
   read off the inferior's stack -- see value.h to confirm.  */

int
value_stack (struct value *value)
{
  return value->stack;
}

/* Set VALUE's stack flag to VAL.  */

void
set_value_stack (struct value *value, int val)
{
  value->stack = val;
}

/* Return VALUE's contents (fetching them if lazy), erroring out if
   any part is optimized out or unavailable.  */

const gdb_byte *
value_contents (struct value *value)
{
  const gdb_byte *result = value_contents_writeable (value);
  require_not_optimized_out (value);
  require_available (value);
  return result;
}

/* Writable variant of value_contents; un-lazies VALUE first.  */

gdb_byte *
value_contents_writeable (struct value *value)
{
  if (value->lazy)
    value_fetch_lazy (value);
  return value_contents_raw (value);
}

/* Return non-zero if any part of VALUE is optimized out.  */

int
value_optimized_out (struct value *value)
{
  /* We can only know if a value is optimized out once we have tried to
     fetch it.  */
  if (VEC_empty (range_s, value->optimized_out) && value->lazy)
    value_fetch_lazy (value);

  return !VEC_empty (range_s, value->optimized_out);
}
1367
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  */

void
mark_value_bytes_optimized_out (struct value *value, int offset, int length)
{
  /* Byte-granular wrapper around the bit-granular primitive.  */
  mark_value_bits_optimized_out (value,
				 offset * TARGET_CHAR_BIT,
				 length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
mark_value_bits_optimized_out (struct value *value, int offset, int length)
{
  insert_into_bit_range_vector (&value->optimized_out, offset, length);
}

/* Return non-zero if bits [OFFSET, OFFSET+LENGTH) of VALUE are a
   synthetic pointer; only computed lvalues can answer yes, via their
   check_synthetic_pointer hook.  */

int
value_bits_synthetic_pointer (const struct value *value,
			      int offset, int length)
{
  if (value->lval != lval_computed
      || !value->location.computed.funcs->check_synthetic_pointer)
    return 0;
  return value->location.computed.funcs->check_synthetic_pointer (value,
								  offset,
								  length);
}
1398
/* Return the offset of the declared type within the enclosing type's
   contents buffer (used to displace value_contents_raw).  */

int
value_embedded_offset (struct value *value)
{
  return value->embedded_offset;
}

/* Set VALUE's embedded offset to VAL.  */

void
set_value_embedded_offset (struct value *value, int val)
{
  value->embedded_offset = val;
}

/* Return VALUE's pointed-to offset; see value.h for the contract.  */

int
value_pointed_to_offset (struct value *value)
{
  return value->pointed_to_offset;
}

/* Set VALUE's pointed-to offset to VAL.  */

void
set_value_pointed_to_offset (struct value *value, int val)
{
  value->pointed_to_offset = val;
}

/* Return the function table of computed lvalue V; V must be
   lval_computed.  */

const struct lval_funcs *
value_computed_funcs (const struct value *v)
{
  gdb_assert (value_lval_const (v) == lval_computed);

  return v->location.computed.funcs;
}

/* Return the closure of computed lvalue V; V must be lval_computed.  */

void *
value_computed_closure (const struct value *v)
{
  gdb_assert (v->lval == lval_computed);

  return v->location.computed.closure;
}

/* Return a mutable pointer to VALUE's lval kind.  Deprecated: exposes
   the field for direct assignment (the VALUE_LVAL macro).  */

enum lval_type *
deprecated_value_lval_hack (struct value *value)
{
  return &value->lval;
}

/* Const-safe read of VALUE's lval kind.  */

enum lval_type
value_lval_const (const struct value *value)
{
  return value->lval;
}
1450
/* Return the address VALUE describes: zero for internalvar-backed and
   xmethod values, the parent's address plus our offset for component
   values, else the stored location address plus the offset.  */

CORE_ADDR
value_address (const struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  if (value->parent != NULL)
    return value_address (value->parent) + value->offset;
  else
    return value->location.address + value->offset;
}

/* Like value_address but without applying the offset or chasing the
   parent chain.  */

CORE_ADDR
value_raw_address (struct value *value)
{
  if (value->lval == lval_internalvar
      || value->lval == lval_internalvar_component
      || value->lval == lval_xcallable)
    return 0;
  return value->location.address;
}

/* Store ADDR as VALUE's location; invalid for the lval kinds whose
   location union member is not an address.  */

void
set_value_address (struct value *value, CORE_ADDR addr)
{
  gdb_assert (value->lval != lval_internalvar
	      && value->lval != lval_internalvar_component
	      && value->lval != lval_xcallable);
  value->location.address = addr;
}

/* Mutable access to the internalvar location member (for the
   VALUE_INTERNALVAR macro).  */

struct internalvar **
deprecated_value_internalvar_hack (struct value *value)
{
  return &value->location.internalvar;
}

/* Mutable access to the frame id (for the VALUE_FRAME_ID macro).  */

struct frame_id *
deprecated_value_frame_id_hack (struct value *value)
{
  return &value->frame_id;
}

/* Mutable access to the register number (for the VALUE_REGNUM
   macro).  */

short *
deprecated_value_regnum_hack (struct value *value)
{
  return &value->regnum;
}

/* Return non-zero if VALUE may be modified (cleared e.g. when a value
   enters the value history).  */

int
deprecated_value_modifiable (struct value *value)
{
  return value->modifiable;
}
1506 \f
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  */
struct value *
value_mark (void)
{
  /* The mark is simply the current head of the chain.  */
  return all_values;
}

/* Take a reference to VAL.  VAL will not be deallocated until all
   references are released.  */

void
value_incref (struct value *val)
{
  val->reference_count++;
}
1524
1525 /* Release a reference to VAL, which was acquired with value_incref.
1526 This function is also called to deallocate values from the value
1527 chain. */
1528
1529 void
1530 value_free (struct value *val)
1531 {
1532 if (val)
1533 {
1534 gdb_assert (val->reference_count > 0);
1535 val->reference_count--;
1536 if (val->reference_count > 0)
1537 return;
1538
1539 /* If there's an associated parent value, drop our reference to
1540 it. */
1541 if (val->parent != NULL)
1542 value_free (val->parent);
1543
1544 if (VALUE_LVAL (val) == lval_computed)
1545 {
1546 const struct lval_funcs *funcs = val->location.computed.funcs;
1547
1548 if (funcs->free_closure)
1549 funcs->free_closure (val);
1550 }
1551 else if (VALUE_LVAL (val) == lval_xcallable)
1552 free_xmethod_worker (val->location.xm_worker);
1553
1554 xfree (val->contents);
1555 VEC_free (range_s, val->unavailable);
1556 }
1557 xfree (val);
1558 }
1559
/* Free all values allocated since MARK was obtained by value_mark
   (except for those released).  */
void
value_free_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  /* Walk from the head down to (but not including) MARK, dropping the
     chain's reference to each value.  */
  for (val = all_values; val && val != mark; val = next)
    {
      next = val->next;
      /* Flag as off-chain so release_value_or_incref does the right
	 thing for survivors.  */
      val->released = 1;
      value_free (val);
    }
  all_values = val;
}

/* Free all the values that have been allocated (except for those released).
   Call after each command, successful or not.
   In practice this is called before each command, which is sufficient.  */

void
free_all_values (void)
{
  struct value *val;
  struct value *next;

  for (val = all_values; val; val = next)
    {
      next = val->next;
      val->released = 1;
      value_free (val);
    }

  all_values = 0;
}
1596
1597 /* Frees all the elements in a chain of values. */
1598
1599 void
1600 free_value_chain (struct value *v)
1601 {
1602 struct value *next;
1603
1604 for (; v; v = next)
1605 {
1606 next = value_next (v);
1607 value_free (v);
1608 }
1609 }
1610
1611 /* Remove VAL from the chain all_values
1612 so it will not be freed automatically. */
1613
1614 void
1615 release_value (struct value *val)
1616 {
1617 struct value *v;
1618
1619 if (all_values == val)
1620 {
1621 all_values = val->next;
1622 val->next = NULL;
1623 val->released = 1;
1624 return;
1625 }
1626
1627 for (v = all_values; v; v = v->next)
1628 {
1629 if (v->next == val)
1630 {
1631 v->next = val->next;
1632 val->next = NULL;
1633 val->released = 1;
1634 break;
1635 }
1636 }
1637 }
1638
/* If the value is not already released, release it.
   If the value is already released, increment its reference count.
   That is, this function ensures that the value is released from the
   value chain and that the caller owns a reference to it.  */

void
release_value_or_incref (struct value *val)
{
  /* A released value is no longer owned by the chain, so the caller's
     new ownership must come from a fresh reference instead.  */
  if (val->released)
    value_incref (val);
  else
    release_value (val);
}
1652
/* Release all values up to mark.  Returns the head of the released
   sub-chain; the chain is cut just before MARK, which stays on
   all_values.  */
struct value *
value_release_to_mark (struct value *mark)
{
  struct value *val;
  struct value *next;

  for (val = next = all_values; next; next = next->next)
    {
      if (next->next == mark)
	{
	  /* Cut here: MARK becomes the new chain head.
	     NOTE(review): this value (NEXT) is returned to the caller
	     without its released flag being set, unlike the ones
	     before it -- looks inconsistent; confirm whether callers
	     compensate.  */
	  all_values = next->next;
	  next->next = NULL;
	  return val;
	}
      next->released = 1;
    }
  /* MARK not found (or chain empty): everything is released.  */
  all_values = 0;
  return val;
}
1673
/* Return a copy of the value ARG.
   It contains the same contents, for same memory address,
   but it's a different block of storage.  */

struct value *
value_copy (struct value *arg)
{
  struct type *encl_type = value_enclosing_type (arg);
  struct value *val;

  /* Allocate lazily when ARG is lazy, so no contents buffer is
     created for a value that has none yet.  */
  if (value_lazy (arg))
    val = allocate_value_lazy (encl_type);
  else
    val = allocate_value (encl_type);
  val->type = arg->type;
  VALUE_LVAL (val) = VALUE_LVAL (arg);
  val->location = arg->location;
  val->offset = arg->offset;
  val->bitpos = arg->bitpos;
  val->bitsize = arg->bitsize;
  VALUE_FRAME_ID (val) = VALUE_FRAME_ID (arg);
  VALUE_REGNUM (val) = VALUE_REGNUM (arg);
  val->lazy = arg->lazy;
  val->embedded_offset = value_embedded_offset (arg);
  val->pointed_to_offset = arg->pointed_to_offset;
  val->modifiable = arg->modifiable;
  if (!value_lazy (val))
    {
      memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
	      TYPE_LENGTH (value_enclosing_type (arg)));

    }
  /* Deep-copy the metadata vectors; the copies are owned by VAL.  */
  val->unavailable = VEC_copy (range_s, arg->unavailable);
  val->optimized_out = VEC_copy (range_s, arg->optimized_out);
  /* Shares ARG's parent; set_value_parent takes its own reference.  */
  set_value_parent (val, arg->parent);
  if (VALUE_LVAL (val) == lval_computed)
    {
      const struct lval_funcs *funcs = val->location.computed.funcs;

      /* Give computed lvalues a chance to clone their closure rather
	 than alias ARG's.  */
      if (funcs->copy_closure)
	val->location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1718
1719 /* Return a "const" and/or "volatile" qualified version of the value V.
1720 If CNST is true, then the returned value will be qualified with
1721 "const".
1722 if VOLTL is true, then the returned value will be qualified with
1723 "volatile". */
1724
1725 struct value *
1726 make_cv_value (int cnst, int voltl, struct value *v)
1727 {
1728 struct type *val_type = value_type (v);
1729 struct type *enclosing_type = value_enclosing_type (v);
1730 struct value *cv_val = value_copy (v);
1731
1732 deprecated_set_value_type (cv_val,
1733 make_cv_type (cnst, voltl, val_type, NULL));
1734 set_value_enclosing_type (cv_val,
1735 make_cv_type (cnst, voltl, enclosing_type, NULL));
1736
1737 return cv_val;
1738 }
1739
/* Return a version of ARG that is non-lvalue.  */

struct value *
value_non_lval (struct value *arg)
{
  if (VALUE_LVAL (arg) != not_lval)
    {
      /* Build a detached not_lval value holding a snapshot of ARG's
	 full contents.  */
      struct type *enc_type = value_enclosing_type (arg);
      struct value *val = allocate_value (enc_type);

      memcpy (value_contents_all_raw (val), value_contents_all (arg),
	      TYPE_LENGTH (enc_type));
      val->type = arg->type;
      set_value_embedded_offset (val, value_embedded_offset (arg));
      set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
      return val;
    }
  /* Already not an lvalue; return it unchanged.  */
  return arg;
}
1759
/* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY.  */

void
value_force_lval (struct value *v, CORE_ADDR addr)
{
  /* Only a not_lval value may be given a memory location this way.  */
  gdb_assert (VALUE_LVAL (v) == not_lval);

  write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
  v->lval = lval_memory;
  v->location.address = addr;
}
1771
/* Make COMPONENT's location the same as WHOLE's, with the lval kind
   adjusted: components of an internalvar become
   lval_internalvar_component, and computed lvalues get a chance to
   clone their closure.  */

void
set_value_component_location (struct value *component,
			      const struct value *whole)
{
  gdb_assert (whole->lval != lval_xcallable);

  if (whole->lval == lval_internalvar)
    VALUE_LVAL (component) = lval_internalvar_component;
  else
    VALUE_LVAL (component) = whole->lval;

  component->location = whole->location;
  if (whole->lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->location.computed.funcs;

      /* Avoid aliasing WHOLE's closure when a copy hook exists.  */
      if (funcs->copy_closure)
	component->location.computed.closure = funcs->copy_closure (whole);
    }
}
1792
1793 \f
1794 /* Access to the value history. */
1795
/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
record_latest_value (struct value *val)
{
  int i;

  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (value_lazy (val))
    value_fetch_lazy (val);
  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  val->modifiable = 0;

  /* The value may have already been released, in which case we're adding a
     new reference for its entry in the history.  That is why we call
     release_value_or_incref here instead of release_value.  */
  release_value_or_incref (val);

  /* Here we treat value_history_count as origin-zero
     and applying to the value being stored now.  */

  i = value_history_count % VALUE_HISTORY_CHUNK;
  if (i == 0)
    {
      /* Current chunk is full; prepend a fresh, zeroed chunk.  */
      struct value_history_chunk *newobj
	= (struct value_history_chunk *)

	xmalloc (sizeof (struct value_history_chunk));
      memset (newobj->values, 0, sizeof newobj->values);
      newobj->next = value_history_chain;
      value_history_chain = newobj;
    }

  value_history_chain->values[i] = val;

  /* Now we regard value_history_count as origin-one
     and applying to the value just stored.  */

  return ++value_history_count;
}
1842
/* Return a copy of the value in the history with sequence number NUM.
   NUM <= 0 is relative to the most recent entry ($$0, $$1, ...).  */

struct value *
access_value_history (int num)
{
  struct value_history_chunk *chunk;
  int i;
  int absnum = num;

  /* Convert a relative (non-positive) request to an absolute index.  */
  if (absnum <= 0)
    absnum += value_history_count;

  if (absnum <= 0)
    {
      if (num == 0)
	error (_("The history is empty."));
      else if (num == 1)
	error (_("There is only one value in the history."));
      else
	error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history_count)
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  /* Now absnum is always absolute and origin zero.  */

  /* Chunks are chained newest-first, so skip forward from the newest
     chunk down to the one holding ABSNUM.  */
  chunk = value_history_chain;
  for (i = (value_history_count - 1) / VALUE_HISTORY_CHUNK
	 - absnum / VALUE_HISTORY_CHUNK;
       i > 0; i--)
    chunk = chunk->next;

  /* Hand back a copy so history entries stay immutable.  */
  return value_copy (chunk->values[absnum % VALUE_HISTORY_CHUNK]);
}
1879
/* Implement the "show values" command: print ten history entries,
   either the last ten, ten around a given index, or (with "+") the
   ten after the previous invocation.  */

static void
show_values (char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  /* Persists across calls so that "show values +" continues from the
     last printed window.  */
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	/* Center the ten-entry window on <exp> (start five before).  */
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history_count - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history_count; i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      printf_filtered (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      printf_filtered (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    {
      num_exp[0] = '+';
      num_exp[1] = '\0';
    }
}
1926 \f
/* The possible kinds of content an internal variable can hold; selects
   the active member of union internalvar_data below.  */
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};

/* The data behind an internal variable, discriminated by the
   internalvar_kind stored alongside it.  */
union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};

/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  struct internalvar *next;
  char *name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind;

  union internalvar_data u;
};

/* Singly-linked list of all internal variables, newest first (see
   create_internalvar, which pushes at the head).  */
static struct internalvar *internalvars;
2007
2008 /* If the variable does not already exist create it and give it the
2009 value given. If no value is given then the default is zero. */
2010 static void
2011 init_if_undefined_command (char* args, int from_tty)
2012 {
2013 struct internalvar* intvar;
2014
2015 /* Parse the expression - this is taken from set_command(). */
2016 struct expression *expr = parse_expression (args);
2017 register struct cleanup *old_chain =
2018 make_cleanup (free_current_contents, &expr);
2019
2020 /* Validate the expression.
2021 Was the expression an assignment?
2022 Or even an expression at all? */
2023 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
2024 error (_("Init-if-undefined requires an assignment expression."));
2025
2026 /* Extract the variable from the parsed expression.
2027 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
2028 if (expr->elts[1].opcode != OP_INTERNALVAR)
2029 error (_("The first parameter to init-if-undefined "
2030 "should be a GDB variable."));
2031 intvar = expr->elts[2].internalvar;
2032
2033 /* Only evaluate the expression if the lvalue is void.
2034 This may still fail if the expresssion is invalid. */
2035 if (intvar->kind == INTERNALVAR_VOID)
2036 evaluate_expression (expr);
2037
2038 do_cleanups (old_chain);
2039 }
2040
2041
2042 /* Look up an internal variable with name NAME. NAME should not
2043 normally include a dollar sign.
2044
2045 If the specified internal variable does not exist,
2046 the return value is NULL. */
2047
2048 struct internalvar *
2049 lookup_only_internalvar (const char *name)
2050 {
2051 struct internalvar *var;
2052
2053 for (var = internalvars; var; var = var->next)
2054 if (strcmp (var->name, name) == 0)
2055 return var;
2056
2057 return NULL;
2058 }
2059
2060 /* Complete NAME by comparing it to the names of internal variables.
2061 Returns a vector of newly allocated strings, or NULL if no matches
2062 were found. */
2063
2064 VEC (char_ptr) *
2065 complete_internalvar (const char *name)
2066 {
2067 VEC (char_ptr) *result = NULL;
2068 struct internalvar *var;
2069 int len;
2070
2071 len = strlen (name);
2072
2073 for (var = internalvars; var; var = var->next)
2074 if (strncmp (var->name, name, len) == 0)
2075 {
2076 char *r = xstrdup (var->name);
2077
2078 VEC_safe_push (char_ptr, result, r);
2079 }
2080
2081 return result;
2082 }
2083
2084 /* Create an internal variable with name NAME and with a void value.
2085 NAME should not normally include a dollar sign. */
2086
2087 struct internalvar *
2088 create_internalvar (const char *name)
2089 {
2090 struct internalvar *var;
2091
2092 var = (struct internalvar *) xmalloc (sizeof (struct internalvar));
2093 var->name = concat (name, (char *)NULL);
2094 var->kind = INTERNALVAR_VOID;
2095 var->next = internalvars;
2096 internalvars = var;
2097 return var;
2098 }
2099
/* Create an internal variable with name NAME and register FUNCS as
   the callback vector that value_of_internalvar uses to create a
   value whenever this variable is referenced.  NAME should not
   normally include a dollar sign.  DATA is passed uninterpreted to
   the FUNCS callbacks when they are invoked.

   NOTE(review): the previous comment documented a CLEANUP parameter
   that is not in the signature; presumably destruction hooks now live
   in FUNCS -- confirm against struct internalvar_funcs in value.h.  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2119
/* See documentation in value.h.  */

int
compile_internalvar_to_ax (struct internalvar *var,
			   struct agent_expr *expr,
			   struct axs_value *value)
{
  /* Only lazily-computed variables whose callbacks provide a
     compile_to_ax hook can be compiled to an agent expression.  */
  if (var->kind != INTERNALVAR_MAKE_VALUE
      || var->u.make_value.functions->compile_to_ax == NULL)
    return 0;

  var->u.make_value.functions->compile_to_ax (var, expr, value,
					      var->u.make_value.data);
  return 1;
}
2135
2136 /* Look up an internal variable with name NAME. NAME should not
2137 normally include a dollar sign.
2138
2139 If the specified internal variable does not exist,
2140 one is created, with a void value. */
2141
2142 struct internalvar *
2143 lookup_internalvar (const char *name)
2144 {
2145 struct internalvar *var;
2146
2147 var = lookup_only_internalvar (name);
2148 if (var)
2149 return var;
2150
2151 return create_internalvar (name);
2152 }
2153
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name);
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	/* Unknown trace state variable value: report void rather than
	   a stale or garbage number.  */
	val = allocate_value (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Materialize a value from whichever representation VAR holds.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = allocate_value (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = allocate_value (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      /* Fall back to the architecture's int type when the variable
	 carries no type of its own.  */
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = value_cstring (var->u.string, strlen (var->u.string),
			   builtin_type (gdbarch)->builtin_char);
      break;

    case INTERNALVAR_VALUE:
      /* Copy so callers cannot mutate the stored value behind the
	 variable's back; un-lazy the copy up front.  */
      val = value_copy (var->u.value);
      if (value_lazy (val))
	value_fetch_lazy (val);
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifyable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval != lval_computed)
    {
      VALUE_LVAL (val) = lval_internalvar;
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2242
2243 int
2244 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2245 {
2246 if (var->kind == INTERNALVAR_INTEGER)
2247 {
2248 *result = var->u.integer.val;
2249 return 1;
2250 }
2251
2252 if (var->kind == INTERNALVAR_VALUE)
2253 {
2254 struct type *type = check_typedef (value_type (var->u.value));
2255
2256 if (TYPE_CODE (type) == TYPE_CODE_INT)
2257 {
2258 *result = value_as_long (var->u.value);
2259 return 1;
2260 }
2261 }
2262
2263 return 0;
2264 }
2265
2266 static int
2267 get_internalvar_function (struct internalvar *var,
2268 struct internal_function **result)
2269 {
2270 switch (var->kind)
2271 {
2272 case INTERNALVAR_FUNCTION:
2273 *result = var->u.fn.function;
2274 return 1;
2275
2276 default:
2277 return 0;
2278 }
2279 }
2280
/* Modify a sub-component of the value stored in convenience variable
   VAR in place.  When BITSIZE is nonzero, the BITPOS/BITSIZE bitfield
   is set from NEWVAL; otherwise NEWVAL's whole contents are copied in
   at OFFSET.  VAR must currently hold a value (INTERNALVAR_VALUE);
   any other kind is an internal error.  */
void
set_internalvar_component (struct internalvar *var, int offset, int bitpos,
			   int bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *arch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = value_contents_writeable (var->u.value);
      arch = get_value_arch (var->u.value);
      unit_size = gdbarch_addressable_memory_unit_size (arch);

      if (bitsize)
	/* NOTE(review): unlike the memcpy path below, OFFSET is not
	   scaled by UNIT_SIZE here -- confirm whether bitfield
	   callers always pass a byte offset.  */
	modify_field (value_type (var->u.value), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, value_contents (newval),
		TYPE_LENGTH (value_type (newval)));
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
    }
}
2309
/* Set convenience variable VAR from the value VAL, replacing whatever
   VAR previously held.  Errors out (before modifying VAR) if VAR is a
   canonical convenience function, which may not be overwritten.  */
void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name);

  /* Prepare new contents.  */
  switch (TYPE_CODE (check_typedef (value_type (val))))
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (VALUE_LVAL (val) == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      new_data.value = value_copy (val);
      new_data.value->modifiable = 1;

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (value_lazy (new_data.value))
	value_fetch_lazy (new_data.value);

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      release_value (new_data.value);
      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
2361
2362 void
2363 set_internalvar_integer (struct internalvar *var, LONGEST l)
2364 {
2365 /* Clean up old contents. */
2366 clear_internalvar (var);
2367
2368 var->kind = INTERNALVAR_INTEGER;
2369 var->u.integer.type = NULL;
2370 var->u.integer.val = l;
2371 }
2372
2373 void
2374 set_internalvar_string (struct internalvar *var, const char *string)
2375 {
2376 /* Clean up old contents. */
2377 clear_internalvar (var);
2378
2379 var->kind = INTERNALVAR_STRING;
2380 var->u.string = xstrdup (string);
2381 }
2382
/* Install convenience function F as the definition of VAR.  Bindings
   created here are marked canonical, and set_internalvar refuses to
   overwrite a canonical function binding.  */
static void
set_internalvar_function (struct internalvar *var, struct internal_function *f)
{
  /* Clean up old contents.  */
  clear_internalvar (var);

  var->kind = INTERNALVAR_FUNCTION;
  var->u.fn.function = f;
  var->u.fn.canonical = 1;
  /* Variables installed here are always the canonical version.  */
}
2394
2395 void
2396 clear_internalvar (struct internalvar *var)
2397 {
2398 /* Clean up old contents. */
2399 switch (var->kind)
2400 {
2401 case INTERNALVAR_VALUE:
2402 value_free (var->u.value);
2403 break;
2404
2405 case INTERNALVAR_STRING:
2406 xfree (var->u.string);
2407 break;
2408
2409 case INTERNALVAR_MAKE_VALUE:
2410 if (var->u.make_value.functions->destroy != NULL)
2411 var->u.make_value.functions->destroy (var->u.make_value.data);
2412 break;
2413
2414 default:
2415 break;
2416 }
2417
2418 /* Reset to void kind. */
2419 var->kind = INTERNALVAR_VOID;
2420 }
2421
/* Return the name of convenience variable VAR (without the leading
   '$').  The string is owned by VAR; the caller must not free it.  */
char *
internalvar_name (struct internalvar *var)
{
  return var->name;
}
2427
2428 static struct internal_function *
2429 create_internal_function (const char *name,
2430 internal_function_fn handler, void *cookie)
2431 {
2432 struct internal_function *ifn = XNEW (struct internal_function);
2433
2434 ifn->name = xstrdup (name);
2435 ifn->handler = handler;
2436 ifn->cookie = cookie;
2437 return ifn;
2438 }
2439
2440 char *
2441 value_internal_function_name (struct value *val)
2442 {
2443 struct internal_function *ifn;
2444 int result;
2445
2446 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2447 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2448 gdb_assert (result);
2449
2450 return ifn->name;
2451 }
2452
2453 struct value *
2454 call_internal_function (struct gdbarch *gdbarch,
2455 const struct language_defn *language,
2456 struct value *func, int argc, struct value **argv)
2457 {
2458 struct internal_function *ifn;
2459 int result;
2460
2461 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2462 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2463 gdb_assert (result);
2464
2465 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2466 }
2467
2468 /* The 'function' command. This does nothing -- it is just a
2469 placeholder to let "help function NAME" work. This is also used as
2470 the implementation of the sub-command that is created when
2471 registering an internal function. */
static void
function_command (char *command, int from_tty)
{
  /* Do nothing; COMMAND and FROM_TTY are deliberately ignored.  The
     command exists only so "help function NAME" has a target.  */
}
2477
2478 /* Clean up if an internal function's command is destroyed. */
2479 static void
2480 function_destroyer (struct cmd_list_element *self, void *ignore)
2481 {
2482 xfree ((char *) self->name);
2483 xfree ((char *) self->doc);
2484 }
2485
2486 /* Add a new internal function. NAME is the name of the function; DOC
2487 is a documentation string describing the function. HANDLER is
2488 called when the function is invoked. COOKIE is an arbitrary
2489 pointer which is passed to HANDLER and is intended for "user
2490 data". */
void
add_internal_function (const char *name, const char *doc,
		       internal_function_fn handler, void *cookie)
{
  struct cmd_list_element *cmd;
  struct internal_function *ifn;
  struct internalvar *var = lookup_internalvar (name);

  /* Install the function as the canonical binding of $NAME.  */
  ifn = create_internal_function (name, handler, cookie);
  set_internalvar_function (var, ifn);

  /* Register a same-named command so "help function NAME" works.
     NAME is copied here, and both that copy and DOC are later freed
     by function_destroyer -- so ownership of DOC passes to the
     command.  */
  cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
		 &functionlist);
  cmd->destroyer = function_destroyer;
}
2506
2507 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2508 prevent cycles / duplicates. */
2509
void
preserve_one_value (struct value *value, struct objfile *objfile,
		    htab_t copied_types)
{
  /* Both the value's type and its enclosing type may belong to
     OBJFILE; replace each with a copy that outlives OBJFILE.
     COPIED_TYPES ensures shared subtypes are copied only once.  */
  if (TYPE_OBJFILE (value->type) == objfile)
    value->type = copy_type_recursive (objfile, value->type, copied_types);

  if (TYPE_OBJFILE (value->enclosing_type) == objfile)
    value->enclosing_type = copy_type_recursive (objfile,
						 value->enclosing_type,
						 copied_types);
}
2522
2523 /* Likewise for internal variable VAR. */
2524
2525 static void
2526 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2527 htab_t copied_types)
2528 {
2529 switch (var->kind)
2530 {
2531 case INTERNALVAR_INTEGER:
2532 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2533 var->u.integer.type
2534 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2535 break;
2536
2537 case INTERNALVAR_VALUE:
2538 preserve_one_value (var->u.value, objfile, copied_types);
2539 break;
2540 }
2541 }
2542
2543 /* Update the internal variables and value history when OBJFILE is
2544 discarded; we must copy the types out of the objfile. New global types
2545 will be created for every convenience variable which currently points to
2546 this objfile's types, and the convenience variables will be adjusted to
2547 use the new global types. */
2548
void
preserve_values (struct objfile *objfile)
{
  htab_t copied_types;
  struct value_history_chunk *cur;
  struct internalvar *var;
  int i;

  /* Create the hash table.  We allocate on the objfile's obstack, since
     it is soon to be deleted.  */
  copied_types = create_copied_types_hash (objfile);

  /* Walk every occupied slot of every value-history chunk.  */
  for (cur = value_history_chain; cur; cur = cur->next)
    for (i = 0; i < VALUE_HISTORY_CHUNK; i++)
      if (cur->values[i])
	preserve_one_value (cur->values[i], objfile, copied_types);

  for (var = internalvars; var; var = var->next)
    preserve_one_internalvar (var, objfile, copied_types);

  /* Give extension languages the same chance for any values they
     hold.  */
  preserve_ext_lang_values (objfile, copied_types);

  htab_delete (copied_types);
}
2573
2574 static void
2575 show_convenience (char *ignore, int from_tty)
2576 {
2577 struct gdbarch *gdbarch = get_current_arch ();
2578 struct internalvar *var;
2579 int varseen = 0;
2580 struct value_print_options opts;
2581
2582 get_user_print_options (&opts);
2583 for (var = internalvars; var; var = var->next)
2584 {
2585
2586 if (!varseen)
2587 {
2588 varseen = 1;
2589 }
2590 printf_filtered (("$%s = "), var->name);
2591
2592 TRY
2593 {
2594 struct value *val;
2595
2596 val = value_of_internalvar (gdbarch, var);
2597 value_print (val, gdb_stdout, &opts);
2598 }
2599 CATCH (ex, RETURN_MASK_ERROR)
2600 {
2601 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.message);
2602 }
2603 END_CATCH
2604
2605 printf_filtered (("\n"));
2606 }
2607 if (!varseen)
2608 {
2609 /* This text does not mention convenience functions on purpose.
2610 The user can't create them except via Python, and if Python support
2611 is installed this message will never be printed ($_streq will
2612 exist). */
2613 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2614 "Convenience variables have "
2615 "names starting with \"$\";\n"
2616 "use \"set\" as in \"set "
2617 "$foo = 5\" to define them.\n"));
2618 }
2619 }
2620 \f
2621 /* Return the TYPE_CODE_XMETHOD value corresponding to WORKER. */
2622
2623 struct value *
2624 value_of_xmethod (struct xmethod_worker *worker)
2625 {
2626 if (worker->value == NULL)
2627 {
2628 struct value *v;
2629
2630 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2631 v->lval = lval_xcallable;
2632 v->location.xm_worker = worker;
2633 v->modifiable = 0;
2634 worker->value = v;
2635 }
2636
2637 return worker->value;
2638 }
2639
2640 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2641
struct type *
result_type_of_xmethod (struct value *method, int argc, struct value **argv)
{
  /* ARGV[0] is handed to the worker separately from the remaining
     ARGC - 1 arguments (presumably the object the method is invoked
     on -- confirm against callers).  */
  gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
	      && method->lval == lval_xcallable && argc > 0);

  return get_xmethod_result_type (method->location.xm_worker,
				  argv[0], argv + 1, argc - 1);
}
2651
2652 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2653
struct value *
call_xmethod (struct value *method, int argc, struct value **argv)
{
  /* METHOD must be an xmethod value and at least one argument is
     required; ARGV[0] is handed to the worker separately from the
     remaining ARGC - 1 arguments.  */
  gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
	      && method->lval == lval_xcallable && argc > 0);

  return invoke_xmethod (method->location.xm_worker,
			 argv[0], argv + 1, argc - 1);
}
2663 \f
2664 /* Extract a value as a C number (either long or double).
2665 Knows how to convert fixed values to double, or
2666 floating values to long.
2667 Does not deallocate the value. */
2668
LONGEST
value_as_long (struct value *val)
{
  /* This coerces arrays and functions, which is necessary (e.g.
     in disassemble_command).  It also dereferences references, which
     I suspect is the most logical thing to do.  */
  val = coerce_array (val);
  /* unpack_long errors out for types it cannot convert.  */
  return unpack_long (value_type (val), value_contents (val));
}
2678
2679 DOUBLEST
2680 value_as_double (struct value *val)
2681 {
2682 DOUBLEST foo;
2683 int inv;
2684
2685 foo = unpack_double (value_type (val), value_contents (val), &inv);
2686 if (inv)
2687 error (_("Invalid floating value found in program."));
2688 return foo;
2689 }
2690
2691 /* Extract a value as a C pointer. Does not deallocate the value.
2692 Note that val's type may not actually be a pointer; value_as_long
2693 handles all the cases. */
CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = get_type_arch (value_type (val));

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
     value_address (val) is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_long' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
      || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
    return value_address (val);

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Give the architecture a chance to convert non-pointer values
     (e.g. plain integers) to an address its own way.  */
  if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
      && TYPE_CODE (value_type (val)) != TYPE_CODE_REF
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, value_type (val),
				       value_contents (val));

  /* Plain pointers and references: just extract the address bits.  */
  return unpack_long (value_type (val), value_contents (val));
#endif
}
2797 \f
2798 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2799 as a long, or as a double, assuming the raw data is described
2800 by type TYPE. Knows how to convert different sizes of values
2801 and can convert between fixed and floating point. We don't assume
2802 any alignment for the raw data. Return value is in host byte order.
2803
2804 If you want functions and arrays to be coerced to pointers, and
2805 references to be dereferenced, call value_as_long() instead.
2806
2807 C++: It is assumed that the front-end has taken care of
2808 all matters concerning pointers to members. A pointer
2809 to member which reaches here is considered to be equivalent
2810 to an INT (or some size). After all, it is only an offset. */
2811
LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);
  int nosign = TYPE_UNSIGNED (type);

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Strip the typedef and retry with the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      if (nosign)
	return extract_unsigned_integer (valaddr, len, byte_order);
      else
	return extract_signed_integer (valaddr, len, byte_order);

    case TYPE_CODE_FLT:
      /* The DOUBLEST result is implicitly converted (truncated) to
	 LONGEST by the return.  */
      return extract_typed_floating (valaddr, type);

    case TYPE_CODE_DECFLOAT:
      /* libdecnumber has a function to convert from decimal to integer, but
	 it doesn't work when the decimal number has a fractional part.  */
      return decimal_to_doublest (valaddr, len, byte_order);

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
  return 0;			/* Placate lint.  */
}
2855
2856 /* Return a double value from the specified type and address.
2857 INVP points to an int which is set to 0 for valid value,
2858 1 for invalid value (bad float format). In either case,
2859 the returned double is OK to use. Argument is in target
2860 format, result is in host format. */
2861
DOUBLEST
unpack_double (struct type *type, const gdb_byte *valaddr, int *invp)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  enum type_code code;
  int len;
  int nosign;

  *invp = 0;			/* Assume valid.  */
  /* Work with the underlying type throughout.  */
  type = check_typedef (type);
  code = TYPE_CODE (type);
  len = TYPE_LENGTH (type);
  nosign = TYPE_UNSIGNED (type);
  if (code == TYPE_CODE_FLT)
    {
      /* NOTE: cagney/2002-02-19: There was a test here to see if the
	 floating-point value was valid (using the macro
	 INVALID_FLOAT).  That test/macro have been removed.

	 It turns out that only the VAX defined this macro and then
	 only in a non-portable way.  Fixing the portability problem
	 wouldn't help since the VAX floating-point code is also badly
	 bit-rotten.  The target needs to add definitions for the
	 methods gdbarch_float_format and gdbarch_double_format - these
	 exactly describe the target floating-point format.  The
	 problem here is that the corresponding floatformat_vax_f and
	 floatformat_vax_d values these methods should be set to are
	 also not defined either.  Oops!

	 Hopefully someone will add both the missing floatformat
	 definitions and the new cases for floatformat_is_valid ().  */

      if (!floatformat_is_valid (floatformat_from_type (type), valaddr))
	{
	  /* Tell the caller the bits are bogus; 0.0 is still safe to
	     use.  */
	  *invp = 1;
	  return 0.0;
	}

      return extract_typed_floating (valaddr, type);
    }
  else if (code == TYPE_CODE_DECFLOAT)
    return decimal_to_doublest (valaddr, len, byte_order);
  else if (nosign)
    {
      /* Unsigned -- be sure we compensate for signed LONGEST.  */
      return (ULONGEST) unpack_long (type, valaddr);
    }
  else
    {
      /* Signed -- we are OK with unpack_long.  */
      return unpack_long (type, valaddr);
    }
}
2915
2916 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2917 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2918 We don't assume any alignment for the raw data. Return value is in
2919 host byte order.
2920
2921 If you want functions and arrays to be coerced to pointers, and
2922 references to be dereferenced, call value_as_address() instead.
2923
2924 C++: It is assumed that the front-end has taken care of
2925 all matters concerning pointers to members. A pointer
2926 to member which reaches here is considered to be equivalent
2927 to an INT (or some size). After all, it is only an offset. */
2928
CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  Errors out (via
     unpack_long) if TYPE cannot be converted.  */
  return unpack_long (type, valaddr);
}
2936
2937 \f
2938 /* Get the value of the FIELDNO'th field (which must be static) of
2939 TYPE. */
2940
struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (TYPE_FIELD_LOC_KIND (type, fieldno))
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info recorded the member's address directly.  */
      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
			      TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      /* Only the mangled (physical) name was recorded; resolve it to
	 an address via symbol lookup.  */
      const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
      /* TYPE_FIELD_NAME (type, fieldno); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);

	  if (!msym.minsym)
	    /* No symbol at all -- report the member as optimized
	       out.  */
	    return allocate_optimized_out_value (type);
	  else
	    {
	      retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
				      BMSYMBOL_VALUE_ADDRESS (msym));
	    }
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
2983
2984 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2985 You have to be careful here, since the size of the data area for the value
2986 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2987 than the old enclosing type, you have to allocate more space for the
2988 data. */
2989
void
set_value_enclosing_type (struct value *val, struct type *new_encl_type)
{
  /* Grow the contents buffer first if the new enclosing type needs
     more room; the buffer is never shrunk.  */
  if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
    val->contents =
      (gdb_byte *) xrealloc (val->contents, TYPE_LENGTH (new_encl_type));

  val->enclosing_type = new_encl_type;
}
2999
3000 /* Given a value ARG1 (offset by OFFSET bytes)
3001 of a struct or union type ARG_TYPE,
3002 extract and return the value of one of its (non-static) fields.
3003 FIELDNO says which field. */
3004
struct value *
value_primitive_field (struct value *arg1, int offset,
		       int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  struct gdbarch *arch = get_value_arch (arg1);
  int unit_size = gdbarch_addressable_memory_unit_size (arch);

  arg_type = check_typedef (arg_type);
  type = TYPE_FIELD_TYPE (arg_type, fieldno);

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
      int container_bitsize = TYPE_LENGTH (type) * 8;

      v = allocate_value_lazy (type);
      v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
	v->bitpos = bitpos % container_bitsize;
      else
	v->bitpos = bitpos % 8;
      v->offset = (value_embedded_offset (arg1)
		   + offset
		   + (bitpos - v->bitpos) / 8);
      set_value_parent (v, arg1);
      /* A bitfield value stays lazy and reads through its parent; only
	 fetch now if the parent's contents are already available.  */
      if (!value_lazy (arg1))
	value_fetch_lazy (v);
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      int boffset;

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    value_contents (arg1),
				    value_embedded_offset (arg1),
				    value_address (arg1),
				    arg1);
      else
	boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;

      if (value_lazy (arg1))
	v = allocate_value_lazy (value_enclosing_type (arg1));
      else
	{
	  v = allocate_value (value_enclosing_type (arg1));
	  value_contents_copy_raw (v, 0, arg1, 0,
				   TYPE_LENGTH (value_enclosing_type (arg1)));
	}
      v->type = type;
      v->offset = value_offset (arg1);
      v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
    }
  else
    {
      /* Plain old data member */
      offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
	value_fetch_lazy (arg1);

      if (value_lazy (arg1))
	v = allocate_value_lazy (type);
      else
	{
	  v = allocate_value (type);
	  value_contents_copy_raw (v, value_embedded_offset (v),
				   arg1, value_embedded_offset (arg1) + offset,
				   type_length_units (type));
	}
      v->offset = (value_offset (arg1) + offset
		   + value_embedded_offset (arg1));
    }
  /* The new value inherits ARG1's location information.  */
  set_value_component_location (v, arg1);
  VALUE_REGNUM (v) = VALUE_REGNUM (arg1);
  VALUE_FRAME_ID (v) = VALUE_FRAME_ID (arg1);
  return v;
}
3115
3116 /* Given a value ARG1 of a struct or union type,
3117 extract and return the value of one of its (non-static) fields.
3118 FIELDNO says which field. */
3119
struct value *
value_field (struct value *arg1, int fieldno)
{
  /* Convenience wrapper: field at offset 0, using ARG1's own type.  */
  return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
}
3125
3126 /* Return a non-virtual function as a value.
3127 F is the list of member functions which contains the desired method.
3128 J is an index into F which provides the desired method.
3129
3130 We only use the symbol for its address, so be happy with either a
3131 full symbol or a minimal symbol. */
3132
3133 struct value *
3134 value_fn_field (struct value **arg1p, struct fn_field *f,
3135 int j, struct type *type,
3136 int offset)
3137 {
3138 struct value *v;
3139 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3140 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3141 struct symbol *sym;
3142 struct bound_minimal_symbol msym;
3143
3144 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3145 if (sym != NULL)
3146 {
3147 memset (&msym, 0, sizeof (msym));
3148 }
3149 else
3150 {
3151 gdb_assert (sym == NULL);
3152 msym = lookup_bound_minimal_symbol (physname);
3153 if (msym.minsym == NULL)
3154 return NULL;
3155 }
3156
3157 v = allocate_value (ftype);
3158 if (sym)
3159 {
3160 set_value_address (v, BLOCK_START (SYMBOL_BLOCK_VALUE (sym)));
3161 }
3162 else
3163 {
3164 /* The minimal symbol might point to a function descriptor;
3165 resolve it to the actual code address instead. */
3166 struct objfile *objfile = msym.objfile;
3167 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3168
3169 set_value_address (v,
3170 gdbarch_convert_from_func_ptr_addr
3171 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), &current_target));
3172 }
3173
3174 if (arg1p)
3175 {
3176 if (type != value_type (*arg1p))
3177 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3178 value_addr (*arg1p)));
3179
3180 /* Move the `this' pointer according to the offset.
3181 VALUE_OFFSET (*arg1p) += offset; */
3182 }
3183
3184 return v;
3185 }
3186
3187 \f
3188
3189 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3190 VALADDR, and store the result in *RESULT.
3191 The bitfield starts at BITPOS bits and contains BITSIZE bits.
3192
3193 Extracting bits depends on endianness of the machine. Compute the
3194 number of least significant bits to discard. For big endian machines,
3195 we compute the total number of bits in the anonymous object, subtract
3196 off the bit count from the MSB of the object to the MSB of the
3197 bitfield, then the size of the bitfield, which leaves the LSB discard
3198 count. For little endian machines, the discard count is simply the
3199 number of bits from the LSB of the anonymous object to the LSB of the
3200 bitfield.
3201
3202 If the field is signed, we also do sign extension. */
3203
3204 static LONGEST
3205 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3206 int bitpos, int bitsize)
3207 {
3208 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3209 ULONGEST val;
3210 ULONGEST valmask;
3211 int lsbcount;
3212 int bytes_read;
3213 int read_offset;
3214
3215 /* Read the minimum number of bytes required; there may not be
3216 enough bytes to read an entire ULONGEST. */
3217 field_type = check_typedef (field_type);
3218 if (bitsize)
3219 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3220 else
3221 bytes_read = TYPE_LENGTH (field_type);
3222
3223 read_offset = bitpos / 8;
3224
3225 val = extract_unsigned_integer (valaddr + read_offset,
3226 bytes_read, byte_order);
3227
3228 /* Extract bits. See comment above. */
3229
3230 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3231 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3232 else
3233 lsbcount = (bitpos % 8);
3234 val >>= lsbcount;
3235
3236 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3237 If the field is signed, and is negative, then sign extend. */
3238
3239 if ((bitsize > 0) && (bitsize < 8 * (int) sizeof (val)))
3240 {
3241 valmask = (((ULONGEST) 1) << bitsize) - 1;
3242 val &= valmask;
3243 if (!TYPE_UNSIGNED (field_type))
3244 {
3245 if (val & (valmask ^ (valmask >> 1)))
3246 {
3247 val |= ~valmask;
3248 }
3249 }
3250 }
3251
3252 return val;
3253 }
3254
3255 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3256 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3257 ORIGINAL_VALUE, which must not be NULL. See
3258 unpack_value_bits_as_long for more details. */
3259
3260 int
3261 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3262 int embedded_offset, int fieldno,
3263 const struct value *val, LONGEST *result)
3264 {
3265 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3266 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3267 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3268 int bit_offset;
3269
3270 gdb_assert (val != NULL);
3271
3272 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3273 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3274 || !value_bits_available (val, bit_offset, bitsize))
3275 return 0;
3276
3277 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3278 bitpos, bitsize);
3279 return 1;
3280 }
3281
3282 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3283 object at VALADDR. See unpack_bits_as_long for more details. */
3284
3285 LONGEST
3286 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3287 {
3288 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3289 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3290 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3291
3292 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3293 }
3294
3295 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3296 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3297 the contents in DEST_VAL, zero or sign extending if the type of
3298 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3299 VAL. If the VAL's contents required to extract the bitfield from
3300 are unavailable/optimized out, DEST_VAL is correspondingly
3301 marked unavailable/optimized out. */
3302
3303 void
3304 unpack_value_bitfield (struct value *dest_val,
3305 int bitpos, int bitsize,
3306 const gdb_byte *valaddr, int embedded_offset,
3307 const struct value *val)
3308 {
3309 enum bfd_endian byte_order;
3310 int src_bit_offset;
3311 int dst_bit_offset;
3312 LONGEST num;
3313 struct type *field_type = value_type (dest_val);
3314
3315 /* First, unpack and sign extend the bitfield as if it was wholly
3316 available. Invalid/unavailable bits are read as zero, but that's
3317 OK, as they'll end up marked below. */
3318 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3319 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3320 bitpos, bitsize);
3321 store_signed_integer (value_contents_raw (dest_val),
3322 TYPE_LENGTH (field_type), byte_order, num);
3323
3324 /* Now copy the optimized out / unavailability ranges to the right
3325 bits. */
3326 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3327 if (byte_order == BFD_ENDIAN_BIG)
3328 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3329 else
3330 dst_bit_offset = 0;
3331 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3332 val, src_bit_offset, bitsize);
3333 }
3334
3335 /* Return a new value with type TYPE, which is FIELDNO field of the
3336 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3337 of VAL. If the VAL's contents required to extract the bitfield
3338 from are unavailable/optimized out, the new value is
3339 correspondingly marked unavailable/optimized out. */
3340
3341 struct value *
3342 value_field_bitfield (struct type *type, int fieldno,
3343 const gdb_byte *valaddr,
3344 int embedded_offset, const struct value *val)
3345 {
3346 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3347 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3348 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3349
3350 unpack_value_bitfield (res_val, bitpos, bitsize,
3351 valaddr, embedded_offset, val);
3352
3353 return res_val;
3354 }
3355
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, int bitpos, int bitsize)
{
  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
  ULONGEST oword;
  /* MASK has the low BITSIZE bits set.  The shift count is valid
     because the contract above requires 0 < BITSIZE <= lbits.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  int bytesize;

  /* Normalize BITPOS so that 0 <= BITPOS < 8, advancing ADDR to the
     byte the field starts in.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %d bits."), bitsize);

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  /* Read-modify-write only the BYTESIZE bytes the field occupies.  */
  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (gdbarch_bits_big_endian (get_type_arch (type)))
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then OR in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3407 \f
3408 /* Pack NUM into BUF using a target format of TYPE. */
3409
3410 void
3411 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3412 {
3413 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3414 int len;
3415
3416 type = check_typedef (type);
3417 len = TYPE_LENGTH (type);
3418
3419 switch (TYPE_CODE (type))
3420 {
3421 case TYPE_CODE_INT:
3422 case TYPE_CODE_CHAR:
3423 case TYPE_CODE_ENUM:
3424 case TYPE_CODE_FLAGS:
3425 case TYPE_CODE_BOOL:
3426 case TYPE_CODE_RANGE:
3427 case TYPE_CODE_MEMBERPTR:
3428 store_signed_integer (buf, len, byte_order, num);
3429 break;
3430
3431 case TYPE_CODE_REF:
3432 case TYPE_CODE_PTR:
3433 store_typed_address (buf, type, (CORE_ADDR) num);
3434 break;
3435
3436 default:
3437 error (_("Unexpected type (%d) encountered for integer constant."),
3438 TYPE_CODE (type));
3439 }
3440 }
3441
3442
3443 /* Pack NUM into BUF using a target format of TYPE. */
3444
3445 static void
3446 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3447 {
3448 int len;
3449 enum bfd_endian byte_order;
3450
3451 type = check_typedef (type);
3452 len = TYPE_LENGTH (type);
3453 byte_order = gdbarch_byte_order (get_type_arch (type));
3454
3455 switch (TYPE_CODE (type))
3456 {
3457 case TYPE_CODE_INT:
3458 case TYPE_CODE_CHAR:
3459 case TYPE_CODE_ENUM:
3460 case TYPE_CODE_FLAGS:
3461 case TYPE_CODE_BOOL:
3462 case TYPE_CODE_RANGE:
3463 case TYPE_CODE_MEMBERPTR:
3464 store_unsigned_integer (buf, len, byte_order, num);
3465 break;
3466
3467 case TYPE_CODE_REF:
3468 case TYPE_CODE_PTR:
3469 store_typed_address (buf, type, (CORE_ADDR) num);
3470 break;
3471
3472 default:
3473 error (_("Unexpected type (%d) encountered "
3474 "for unsigned integer constant."),
3475 TYPE_CODE (type));
3476 }
3477 }
3478
3479
3480 /* Convert C numbers into newly allocated values. */
3481
3482 struct value *
3483 value_from_longest (struct type *type, LONGEST num)
3484 {
3485 struct value *val = allocate_value (type);
3486
3487 pack_long (value_contents_raw (val), type, num);
3488 return val;
3489 }
3490
3491
3492 /* Convert C unsigned numbers into newly allocated values. */
3493
3494 struct value *
3495 value_from_ulongest (struct type *type, ULONGEST num)
3496 {
3497 struct value *val = allocate_value (type);
3498
3499 pack_unsigned_long (value_contents_raw (val), type, num);
3500
3501 return val;
3502 }
3503
3504
3505 /* Create a value representing a pointer of type TYPE to the address
3506 ADDR. */
3507
3508 struct value *
3509 value_from_pointer (struct type *type, CORE_ADDR addr)
3510 {
3511 struct value *val = allocate_value (type);
3512
3513 store_typed_address (value_contents_raw (val),
3514 check_typedef (type), addr);
3515 return val;
3516 }
3517
3518
3519 /* Create a value of type TYPE whose contents come from VALADDR, if it
3520 is non-null, and whose memory address (in the inferior) is
3521 ADDRESS. The type of the created value may differ from the passed
3522 type TYPE. Make sure to retrieve values new type after this call.
3523 Note that TYPE is not passed through resolve_dynamic_type; this is
3524 a special API intended for use only by Ada. */
3525
3526 struct value *
3527 value_from_contents_and_address_unresolved (struct type *type,
3528 const gdb_byte *valaddr,
3529 CORE_ADDR address)
3530 {
3531 struct value *v;
3532
3533 if (valaddr == NULL)
3534 v = allocate_value_lazy (type);
3535 else
3536 v = value_from_contents (type, valaddr);
3537 set_value_address (v, address);
3538 VALUE_LVAL (v) = lval_memory;
3539 return v;
3540 }
3541
3542 /* Create a value of type TYPE whose contents come from VALADDR, if it
3543 is non-null, and whose memory address (in the inferior) is
3544 ADDRESS. The type of the created value may differ from the passed
3545 type TYPE. Make sure to retrieve values new type after this call. */
3546
3547 struct value *
3548 value_from_contents_and_address (struct type *type,
3549 const gdb_byte *valaddr,
3550 CORE_ADDR address)
3551 {
3552 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3553 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3554 struct value *v;
3555
3556 if (valaddr == NULL)
3557 v = allocate_value_lazy (resolved_type);
3558 else
3559 v = value_from_contents (resolved_type, valaddr);
3560 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3561 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3562 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3563 set_value_address (v, address);
3564 VALUE_LVAL (v) = lval_memory;
3565 return v;
3566 }
3567
3568 /* Create a value of type TYPE holding the contents CONTENTS.
3569 The new value is `not_lval'. */
3570
3571 struct value *
3572 value_from_contents (struct type *type, const gdb_byte *contents)
3573 {
3574 struct value *result;
3575
3576 result = allocate_value (type);
3577 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3578 return result;
3579 }
3580
3581 struct value *
3582 value_from_double (struct type *type, DOUBLEST num)
3583 {
3584 struct value *val = allocate_value (type);
3585 struct type *base_type = check_typedef (type);
3586 enum type_code code = TYPE_CODE (base_type);
3587
3588 if (code == TYPE_CODE_FLT)
3589 {
3590 store_typed_floating (value_contents_raw (val), base_type, num);
3591 }
3592 else
3593 error (_("Unexpected type encountered for floating constant."));
3594
3595 return val;
3596 }
3597
3598 struct value *
3599 value_from_decfloat (struct type *type, const gdb_byte *dec)
3600 {
3601 struct value *val = allocate_value (type);
3602
3603 memcpy (value_contents_raw (val), dec, TYPE_LENGTH (type));
3604 return val;
3605 }
3606
/* Extract a value from the history.  Input will be of the form
   $digits or $$digits.  See the block comment above
   'write_dollar_variable' for details.  Returns NULL if H is not a
   history reference; otherwise advances *ENDP past the reference and
   returns the corresponding history value.  */

struct value *
value_from_history_ref (const char *h, const char **endp)
{
  int index;
  int len;

  if (h[0] != '$')
    return NULL;
  len = (h[1] == '$') ? 2 : 1;

  /* Skip over the numeral string.  */
  while (isdigit (h[len]))
    len++;

  /* Reject if the numeral string is really part of an identifier.  */
  if (h[len] == '_' || isalpha (h[len]))
    return NULL;

  /* Collect the index value.  */
  if (h[1] == '$')
    {
      if (len == 2)
	{
	  /* For some bizarre reason, "$$" is equivalent to "$$1",
	     rather than to "$$0" as it ought to be!  */
	  index = -1;
	  *endp += len;
	}
      else
	{
	  char *numeral_end;

	  index = -strtol (&h[2], &numeral_end, 10);
	  *endp = numeral_end;
	}
    }
  else
    {
      if (len == 1)
	{
	  /* "$" is equivalent to "$0".  */
	  index = 0;
	  *endp += len;
	}
      else
	{
	  char *numeral_end;

	  index = strtol (&h[1], &numeral_end, 10);
	  *endp = numeral_end;
	}
    }

  return access_value_history (index);
}
3669
3670 struct value *
3671 coerce_ref_if_computed (const struct value *arg)
3672 {
3673 const struct lval_funcs *funcs;
3674
3675 if (TYPE_CODE (check_typedef (value_type (arg))) != TYPE_CODE_REF)
3676 return NULL;
3677
3678 if (value_lval_const (arg) != lval_computed)
3679 return NULL;
3680
3681 funcs = value_computed_funcs (arg);
3682 if (funcs->coerce_ref == NULL)
3683 return NULL;
3684
3685 return funcs->coerce_ref (arg);
3686 }
3687
3688 /* Look at value.h for description. */
3689
3690 struct value *
3691 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3692 struct type *original_type,
3693 struct value *original_value)
3694 {
3695 /* Re-adjust type. */
3696 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3697
3698 /* Add embedding info. */
3699 set_value_enclosing_type (value, enc_type);
3700 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3701
3702 /* We may be pointing to an object of some derived type. */
3703 return value_full_object (value, NULL, 0, 0, 0);
3704 }
3705
3706 struct value *
3707 coerce_ref (struct value *arg)
3708 {
3709 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3710 struct value *retval;
3711 struct type *enc_type;
3712
3713 retval = coerce_ref_if_computed (arg);
3714 if (retval)
3715 return retval;
3716
3717 if (TYPE_CODE (value_type_arg_tmp) != TYPE_CODE_REF)
3718 return arg;
3719
3720 enc_type = check_typedef (value_enclosing_type (arg));
3721 enc_type = TYPE_TARGET_TYPE (enc_type);
3722
3723 retval = value_at_lazy (enc_type,
3724 unpack_pointer (value_type (arg),
3725 value_contents (arg)));
3726 enc_type = value_type (retval);
3727 return readjust_indirect_value_type (retval, enc_type,
3728 value_type_arg_tmp, arg);
3729 }
3730
3731 struct value *
3732 coerce_array (struct value *arg)
3733 {
3734 struct type *type;
3735
3736 arg = coerce_ref (arg);
3737 type = check_typedef (value_type (arg));
3738
3739 switch (TYPE_CODE (type))
3740 {
3741 case TYPE_CODE_ARRAY:
3742 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3743 arg = value_coerce_array (arg);
3744 break;
3745 case TYPE_CODE_FUNC:
3746 arg = value_coerce_function (arg);
3747 break;
3748 }
3749 return arg;
3750 }
3751 \f
3752
3753 /* Return the return value convention that will be used for the
3754 specified type. */
3755
3756 enum return_value_convention
3757 struct_return_convention (struct gdbarch *gdbarch,
3758 struct value *function, struct type *value_type)
3759 {
3760 enum type_code code = TYPE_CODE (value_type);
3761
3762 if (code == TYPE_CODE_ERROR)
3763 error (_("Function return type unknown."));
3764
3765 /* Probe the architecture for the return-value convention. */
3766 return gdbarch_return_value (gdbarch, function, value_type,
3767 NULL, NULL, NULL);
3768 }
3769
3770 /* Return true if the function returning the specified type is using
3771 the convention of returning structures in memory (passing in the
3772 address as a hidden first parameter). */
3773
3774 int
3775 using_struct_return (struct gdbarch *gdbarch,
3776 struct value *function, struct type *value_type)
3777 {
3778 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3779 /* A void return value is never in memory. See also corresponding
3780 code in "print_return_value". */
3781 return 0;
3782
3783 return (struct_return_convention (gdbarch, function, value_type)
3784 != RETURN_VALUE_REGISTER_CONVENTION);
3785 }
3786
/* Set the initialized field in a value struct to STATUS.  Used to
   record whether VAL's contents have been meaningfully assigned.  */

void
set_value_initialized (struct value *val, int status)
{
  val->initialized = status;
}
3794
/* Return the initialized field in a value struct; see
   set_value_initialized.  */

int
value_initialized (struct value *val)
{
  return val->initialized;
}
3802
3803 /* Load the actual content of a lazy value. Fetch the data from the
3804 user's process and clear the lazy flag to indicate that the data in
3805 the buffer is valid.
3806
3807 If the value is zero-length, we avoid calling read_memory, which
3808 would abort. We mark the value as fetched anyway -- all 0 bytes of
3809 it. */
3810
3811 void
3812 value_fetch_lazy (struct value *val)
3813 {
3814 gdb_assert (value_lazy (val));
3815 allocate_value_contents (val);
3816 /* A value is either lazy, or fully fetched. The
3817 availability/validity is only established as we try to fetch a
3818 value. */
3819 gdb_assert (VEC_empty (range_s, val->optimized_out));
3820 gdb_assert (VEC_empty (range_s, val->unavailable));
3821 if (value_bitsize (val))
3822 {
3823 /* To read a lazy bitfield, read the entire enclosing value. This
3824 prevents reading the same block of (possibly volatile) memory once
3825 per bitfield. It would be even better to read only the containing
3826 word, but we have no way to record that just specific bits of a
3827 value have been fetched. */
3828 struct type *type = check_typedef (value_type (val));
3829 struct value *parent = value_parent (val);
3830
3831 if (value_lazy (parent))
3832 value_fetch_lazy (parent);
3833
3834 unpack_value_bitfield (val,
3835 value_bitpos (val), value_bitsize (val),
3836 value_contents_for_printing (parent),
3837 value_offset (val), parent);
3838 }
3839 else if (VALUE_LVAL (val) == lval_memory)
3840 {
3841 CORE_ADDR addr = value_address (val);
3842 struct type *type = check_typedef (value_enclosing_type (val));
3843
3844 if (TYPE_LENGTH (type))
3845 read_value_memory (val, 0, value_stack (val),
3846 addr, value_contents_all_raw (val),
3847 type_length_units (type));
3848 }
3849 else if (VALUE_LVAL (val) == lval_register)
3850 {
3851 struct frame_info *frame;
3852 int regnum;
3853 struct type *type = check_typedef (value_type (val));
3854 struct value *new_val = val, *mark = value_mark ();
3855
3856 /* Offsets are not supported here; lazy register values must
3857 refer to the entire register. */
3858 gdb_assert (value_offset (val) == 0);
3859
3860 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3861 {
3862 struct frame_id frame_id = VALUE_FRAME_ID (new_val);
3863
3864 frame = frame_find_by_id (frame_id);
3865 regnum = VALUE_REGNUM (new_val);
3866
3867 gdb_assert (frame != NULL);
3868
3869 /* Convertible register routines are used for multi-register
3870 values and for interpretation in different types
3871 (e.g. float or int from a double register). Lazy
3872 register values should have the register's natural type,
3873 so they do not apply. */
3874 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (frame),
3875 regnum, type));
3876
3877 new_val = get_frame_register_value (frame, regnum);
3878
3879 /* If we get another lazy lval_register value, it means the
3880 register is found by reading it from the next frame.
3881 get_frame_register_value should never return a value with
3882 the frame id pointing to FRAME. If it does, it means we
3883 either have two consecutive frames with the same frame id
3884 in the frame chain, or some code is trying to unwind
3885 behind get_prev_frame's back (e.g., a frame unwind
3886 sniffer trying to unwind), bypassing its validations. In
3887 any case, it should always be an internal error to end up
3888 in this situation. */
3889 if (VALUE_LVAL (new_val) == lval_register
3890 && value_lazy (new_val)
3891 && frame_id_eq (VALUE_FRAME_ID (new_val), frame_id))
3892 internal_error (__FILE__, __LINE__,
3893 _("infinite loop while fetching a register"));
3894 }
3895
3896 /* If it's still lazy (for instance, a saved register on the
3897 stack), fetch it. */
3898 if (value_lazy (new_val))
3899 value_fetch_lazy (new_val);
3900
3901 /* Copy the contents and the unavailability/optimized-out
3902 meta-data from NEW_VAL to VAL. */
3903 set_value_lazy (val, 0);
3904 value_contents_copy (val, value_embedded_offset (val),
3905 new_val, value_embedded_offset (new_val),
3906 type_length_units (type));
3907
3908 if (frame_debug)
3909 {
3910 struct gdbarch *gdbarch;
3911 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3912 regnum = VALUE_REGNUM (val);
3913 gdbarch = get_frame_arch (frame);
3914
3915 fprintf_unfiltered (gdb_stdlog,
3916 "{ value_fetch_lazy "
3917 "(frame=%d,regnum=%d(%s),...) ",
3918 frame_relative_level (frame), regnum,
3919 user_reg_map_regnum_to_name (gdbarch, regnum));
3920
3921 fprintf_unfiltered (gdb_stdlog, "->");
3922 if (value_optimized_out (new_val))
3923 {
3924 fprintf_unfiltered (gdb_stdlog, " ");
3925 val_print_optimized_out (new_val, gdb_stdlog);
3926 }
3927 else
3928 {
3929 int i;
3930 const gdb_byte *buf = value_contents (new_val);
3931
3932 if (VALUE_LVAL (new_val) == lval_register)
3933 fprintf_unfiltered (gdb_stdlog, " register=%d",
3934 VALUE_REGNUM (new_val));
3935 else if (VALUE_LVAL (new_val) == lval_memory)
3936 fprintf_unfiltered (gdb_stdlog, " address=%s",
3937 paddress (gdbarch,
3938 value_address (new_val)));
3939 else
3940 fprintf_unfiltered (gdb_stdlog, " computed");
3941
3942 fprintf_unfiltered (gdb_stdlog, " bytes=");
3943 fprintf_unfiltered (gdb_stdlog, "[");
3944 for (i = 0; i < register_size (gdbarch, regnum); i++)
3945 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3946 fprintf_unfiltered (gdb_stdlog, "]");
3947 }
3948
3949 fprintf_unfiltered (gdb_stdlog, " }\n");
3950 }
3951
3952 /* Dispose of the intermediate values. This prevents
3953 watchpoints from trying to watch the saved frame pointer. */
3954 value_free_to_mark (mark);
3955 }
3956 else if (VALUE_LVAL (val) == lval_computed
3957 && value_computed_funcs (val)->read != NULL)
3958 value_computed_funcs (val)->read (val);
3959 else
3960 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3961
3962 set_value_lazy (val, 0);
3963 }
3964
3965 /* Implementation of the convenience function $_isvoid. */
3966
3967 static struct value *
3968 isvoid_internal_fn (struct gdbarch *gdbarch,
3969 const struct language_defn *language,
3970 void *cookie, int argc, struct value **argv)
3971 {
3972 int ret;
3973
3974 if (argc != 1)
3975 error (_("You must provide one argument for $_isvoid."));
3976
3977 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3978
3979 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3980 }
3981
3982 void
3983 _initialize_values (void)
3984 {
3985 add_cmd ("convenience", no_class, show_convenience, _("\
3986 Debugger convenience (\"$foo\") variables and functions.\n\
3987 Convenience variables are created when you assign them values;\n\
3988 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
3989 \n\
3990 A few convenience variables are given values automatically:\n\
3991 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
3992 \"$__\" holds the contents of the last address examined with \"x\"."
3993 #ifdef HAVE_PYTHON
3994 "\n\n\
3995 Convenience functions are defined via the Python API."
3996 #endif
3997 ), &showlist);
3998 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
3999
4000 add_cmd ("values", no_set_class, show_values, _("\
4001 Elements of value history around item number IDX (or last ten)."),
4002 &showlist);
4003
4004 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4005 Initialize a convenience variable if necessary.\n\
4006 init-if-undefined VARIABLE = EXPRESSION\n\
4007 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4008 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4009 VARIABLE is already initialized."));
4010
4011 add_prefix_cmd ("function", no_class, function_command, _("\
4012 Placeholder command for showing help on convenience functions."),
4013 &functionlist, "function ", 0, &cmdlist);
4014
4015 add_internal_function ("_isvoid", _("\
4016 Check whether an expression is void.\n\
4017 Usage: $_isvoid (expression)\n\
4018 Return 1 if the expression is void, zero otherwise."),
4019 isvoid_internal_fn, NULL);
4020 }