/* Low level packing and unpacking of values for GDB, the GNU Debugger.

   Copyright (C) 1986-2025 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "arch-utils.h"
#include "extract-store-integer.h"
#include "cli/cli-cmds.h"
#include "target-float.h"
#include "cli/cli-decode.h"
#include "extension.h"
#include "tracepoint.h"
#include "user-regs.h"
#include "completer.h"
#include "gdbsupport/selftest.h"
#include "gdbsupport/array-view.h"
#include "cli/cli-style.h"
/* Definition of a user function.  */
struct internal_function
{
  internal_function (std::string name, internal_function_fn_noside handler,
                     void *cookie)
    : name (std::move (name)),
      handler (handler),
      cookie (cookie)
  {}

  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  std::string name;

  /* The handler.  */
  internal_function_fn_noside handler;

  /* User data for the handler.  */
  void *cookie;
};

using internal_function_up = std::unique_ptr<internal_function>;
/* Returns true if the ranges defined by [offset1, offset1+len1) and
   [offset2, offset2+len2) overlap.  */

static bool
ranges_overlap (LONGEST offset1, ULONGEST len1,
                LONGEST offset2, ULONGEST len2)
{
  LONGEST h, l;

  l = std::max (offset1, offset2);
  h = std::min (offset1 + len1, offset2 + len2);
  return (l < h);
}
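
/* Example (editorial addition, not from the original source):
   ranges_overlap (0, 4, 2, 6) yields l = 2, h = 4, so [0, 4) and
   [2, 8) overlap; ranges_overlap (0, 2, 2, 2) yields l = 2, h = 2,
   so [0, 2) and [2, 4) do not overlap, the intervals being
   half-open.  */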
/* Returns true if RANGES contains any range that overlaps [OFFSET,
   OFFSET+LENGTH).  */

static bool
ranges_contain (const std::vector<range> &ranges, LONGEST offset,
                ULONGEST length)
{
  range what;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

         R
         |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and become 2).  But, `0'
     overlaps with R.

     Then we need to check if the I range overlaps the I range itself.
     E.g.,

              R
              |-------|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1
  */

  auto i = std::lower_bound (ranges.begin (), ranges.end (), what);

  if (i > ranges.begin ())
    {
      const struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
        return true;
    }

  if (i < ranges.end ())
    {
      const struct range &r = *i;

      if (ranges_overlap (r.offset, r.length, offset, length))
        return true;
    }

  return false;
}
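
/* Example (editorial addition, not from the original source): with
   RANGES holding the coalesced ranges { {0, 4}, {10, 2} },
   ranges_contain (ranges, 3, 5) returns true because [3, 8) overlaps
   [0, 4), while ranges_contain (ranges, 4, 6) returns false because
   [4, 10) touches neither [0, 4) nor [10, 12).  */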
static struct cmd_list_element *functionlist;

value::~value ()
{
  if (this->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = m_location.computed.funcs;

      if (funcs->free_closure)
        funcs->free_closure (this);
    }
  else if (this->lval () == lval_xcallable)
    delete m_location.xm_worker;
}

struct gdbarch *
value::arch () const
{
  return type ()->arch ();
}

bool
value::bits_available (LONGEST offset, ULONGEST length) const
{
  gdb_assert (!m_lazy);

  /* Don't pretend we have anything available there in the history beyond
     the boundaries of the value recorded.  It's not like inferior memory
     where there is actual stuff underneath.  */
  ULONGEST val_len = TARGET_CHAR_BIT * enclosing_type ()->length ();
  return !((m_in_history
            && (offset < 0 || offset + length > val_len))
           || ranges_contain (m_unavailable, offset, length));
}
bool
value::bytes_available (LONGEST offset, ULONGEST length) const
{
  ULONGEST sign = (1ULL << (sizeof (ULONGEST) * 8 - 1)) / TARGET_CHAR_BIT;
  ULONGEST mask = (sign << 1) - 1;

  if (offset != ((offset & mask) ^ sign) - sign
      || length != ((length & mask) ^ sign) - sign
      || (length > 0 && (~offset & (offset + length - 1) & sign) != 0))
    error (_("Integer overflow in data location calculation"));

  return bits_available (offset * TARGET_CHAR_BIT, length * TARGET_CHAR_BIT);
}

bool
value::bits_any_optimized_out (int bit_offset, int bit_length) const
{
  gdb_assert (!m_lazy);

  return ranges_contain (m_optimized_out, bit_offset, bit_length);
}
bool
value::entirely_available ()
{
  /* We can only tell whether the whole value is available when we try
     to read it.  */
  if (m_lazy)
    fetch_lazy ();

  if (m_unavailable.empty ())
    return true;
  return false;
}

bool
value::entirely_covered_by_range_vector (const std::vector<range> &ranges)
{
  /* We can only tell whether the whole value is optimized out /
     unavailable when we try to read it.  */
  if (m_lazy)
    fetch_lazy ();

  if (ranges.size () == 1)
    {
      const struct range &t = ranges[0];

      if (t.offset == 0
          && t.length == TARGET_CHAR_BIT * enclosing_type ()->length ())
        return true;
    }

  return false;
}
/* Insert into the vector pointed to by VECTORP the bit range starting of
   OFFSET bits, and extending for the next LENGTH bits.  */

static void
insert_into_bit_range_vector (std::vector<range> *vectorp,
                              LONGEST offset, ULONGEST length)
{
  range newr;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

           R
           |-...-|
         |---|  |---|  |------| ... |--|
         0      1      2            N

         I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

             R
             |-...-|
         |--|  |---|  |------| ... |--|
         0     1      2            N

         I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

               R
               |-...-|
         |--|  |---|  |------| ... |--|
         0     1      2            N

         I=1

     or if I is 0:

       #4 - R is the range with lowest offset

         R
         |-...-|
            |--|  |---|  |------| ... |--|
            0     1      2            N

         I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

         R
         |------------------------|
         |--|  |---|  |------| ... |--|
         0     1      2            N

         I=0

       or:

            R
            |-------|
         |--|  |---|  |------| ... |--|
         0     1      2            N

         I=1
  */

  auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
  if (i > vectorp->begin ())
    {
      struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
        {
          /* #1 */
          LONGEST l = std::min (bef.offset, offset);
          LONGEST h = std::max (bef.offset + bef.length, offset + length);

          bef.offset = l;
          bef.length = h - l;
          i--;
        }
      else if (offset == bef.offset + bef.length)
        {
          /* #2 */
          bef.length += length;
          i--;
        }
      else
        {
          /* #3 */
          i = vectorp->insert (i, newr);
        }
    }
  else
    {
      /* #4 */
      i = vectorp->insert (i, newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i != vectorp->end () && i + 1 < vectorp->end ())
    {
      int removed = 0;
      auto next = i + 1;

      /* Get the range we just touched.  */
      struct range &t = *i;

      i = next;
      for (; i < vectorp->end (); i++)
        {
          struct range &r = *i;
          if (r.offset <= t.offset + t.length)
            {
              LONGEST l, h;

              l = std::min (t.offset, r.offset);
              h = std::max (t.offset + t.length, r.offset + r.length);

              t.offset = l;
              t.length = h - l;

              removed++;
            }
          else
            {
              /* If we couldn't merge this one, we won't be able to
                 merge following ones either, since the ranges are
                 always sorted by OFFSET.  */
              break;
            }
        }

      if (removed != 0)
        vectorp->erase (next, next + removed);
    }
}
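
/* Example (editorial addition, not from the original source): starting
   from an empty vector, inserting { offset 4, length 8 } and then
   { offset 12, length 4 } coalesces into the single range
   { offset 4, length 12 }, since the second insertion is contiguous
   with the first (#2 above); a further { offset 32, length 4 } is kept
   as a separate entry (#3 above).  */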
void
value::mark_bits_unavailable (LONGEST offset, ULONGEST length)
{
  insert_into_bit_range_vector (&m_unavailable, offset, length);
}

void
value::mark_bytes_unavailable (LONGEST offset, ULONGEST length)
{
  mark_bits_unavailable (offset * TARGET_CHAR_BIT,
                         length * TARGET_CHAR_BIT);
}

/* Find the first range in RANGES that overlaps the range defined by
   OFFSET and LENGTH, starting at element POS in the RANGES vector.
   Returns the index into RANGES where such overlapping range was
   found, or -1 if none was found.  */

static int
find_first_range_overlap (const std::vector<range> *ranges, int pos,
                          LONGEST offset, LONGEST length)
{
  int i;

  for (i = pos; i < ranges->size (); i++)
    {
      const range &r = (*ranges)[i];
      if (ranges_overlap (r.offset, r.length, offset, length))
        return i;
    }

  return -1;
}
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
            / TARGET_CHAR_BIT)  */

static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
                         const gdb_byte *ptr2, size_t offset2_bits,
                         size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
              == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
         number of bytes.  A number of bits up to either the next exact
         byte boundary, or LENGTH_BITS (which ever is sooner) will be
         compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
        {
          mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
          bits = length_bits;
        }

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      /* Now update the length and offsets to take account of the bits
         we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
         IF.. block then the offsets are byte aligned, or the
         length is zero (in which case this code is not reached).  Compare
         a number of bits at the end of the region, starting from an exact
         byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
         the region to compare, the remainder can be covered with a simple
         memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
                     ptr2 + offset2_bits / TARGET_CHAR_BIT,
                     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
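
/* Example (editorial addition, not from the original source): with
   OFFSET1_BITS = OFFSET2_BITS = 4 and LENGTH_BITS = 12, the first block
   above compares the leading partial byte under the mask
   (1 << 4) - 1 = 0x0f, after which both offsets are byte aligned and
   the remaining 8 bits are handled by the final memcmp of one byte.  */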
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};

/* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
   RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
   ranges starting at OFFSET2 bits.  Return true if the ranges match
   and fill in *L and *H with the overlapping window relative to
   (both) OFFSET1 or OFFSET2.  */

static bool
find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
                                    struct ranges_and_idx *rp2,
                                    LONGEST offset1, LONGEST offset2,
                                    ULONGEST length, ULONGEST *l, ULONGEST *h)
{
  rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
                                       offset1, length);
  rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
                                       offset2, length);

  if (rp1->idx == -1 && rp2->idx == -1)
    {
      *l = length;
      *h = length;
      return true;
    }
  else if (rp1->idx == -1 || rp2->idx == -1)
    return false;
  else
    {
      const range *r1, *r2;
      ULONGEST l1, h1;
      ULONGEST l2, h2;

      r1 = &(*rp1->ranges)[rp1->idx];
      r2 = &(*rp2->ranges)[rp2->idx];

      /* Get the unavailable windows intersected by the incoming
         ranges.  The first and last ranges that overlap the argument
         range may be wider than said incoming arguments ranges.  */
      l1 = std::max (offset1, r1->offset);
      h1 = std::min (offset1 + length, r1->offset + r1->length);

      l2 = std::max (offset2, r2->offset);
      h2 = std::min (offset2 + length, offset2 + r2->length);

      /* Make them relative to the respective start offsets, so we can
         compare them for equality.  */
      l1 -= offset1;
      h1 -= offset1;

      l2 -= offset2;
      h2 -= offset2;

      /* Different ranges, no match.  */
      if (l1 != l2 || h1 != h2)
        return false;

      *h = h1;
      *l = l1;
      return true;
    }
}
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

bool
value::contents_bits_eq (int offset1, const struct value *val2, int offset2,
                         int length) const
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!m_lazy && !val2->m_lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
              <= m_enclosing_type->length () * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
              <= val2->m_enclosing_type->length () * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &m_unavailable;
  rp2[0].ranges = &val2->m_unavailable;
  rp1[1].ranges = &m_optimized_out;
  rp2[1].ranges = &val2->m_optimized_out;

  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
        {
          ULONGEST l_tmp, h_tmp;

          /* The contents only match equal if the invalid/unavailable
             contents ranges match as well.  */
          if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
                                                   offset1, offset2, length,
                                                   &l_tmp, &h_tmp))
            return false;

          /* We're interested in the lowest/first range found.  */
          if (i == 0 || l_tmp < l)
            {
              l = l_tmp;
              h = h_tmp;
            }
        }

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (m_contents.get (), offset1,
                                   val2->m_contents.get (), offset2, l) != 0)
        return false;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}
bool
value::contents_eq (LONGEST offset1,
                    const struct value *val2, LONGEST offset2,
                    LONGEST length) const
{
  return contents_bits_eq (offset1 * TARGET_CHAR_BIT,
                           val2, offset2 * TARGET_CHAR_BIT,
                           length * TARGET_CHAR_BIT);
}

bool
value::contents_eq (const struct value *val2) const
{
  ULONGEST len1 = check_typedef (enclosing_type ())->length ();
  ULONGEST len2 = check_typedef (val2->enclosing_type ())->length ();
  if (len1 != len2)
    return false;

  return contents_eq (0, val2, 0, len1);
}
/* The value-history records all the values printed by print commands
   during this session.  */

static std::vector<value_ref_ptr> value_history;


/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static std::vector<value_ref_ptr> all_values;
struct value *
value::allocate_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  val = new struct value (type);

  /* Values start out on the all_values chain.  */
  all_values.emplace_back (val);

  return val;
}
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);

/* Implement the "set max-value-size" command.  */

static void
set_max_value_size (const char *args, int from_tty,
                    struct cmd_list_element *c)
{
  gdb_assert (max_value_size == -1 || max_value_size >= 0);

  if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
    {
      max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
      error (_("max-value-size set too low, increasing to %d bytes"),
             max_value_size);
    }
}
/* Implement the "show max-value-size" command.  */

static void
show_max_value_size (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  if (max_value_size == -1)
    gdb_printf (file, _("Maximum value size is unlimited.\n"));
  else
    gdb_printf (file, _("Maximum value size is %d bytes.\n"),
                max_value_size);
}

/* Called before we attempt to allocate or reallocate a buffer for the
   contents of a value.  TYPE is the type of the value for which we are
   allocating the buffer.  If the buffer is too large (based on the user
   controllable setting) then throw an error.  If this function returns
   then we should attempt to allocate the buffer.  */

static void
check_type_length_before_alloc (const struct type *type)
{
  ULONGEST length = type->length ();

  if (exceeds_max_value_size (length))
    {
      if (type->name () != NULL)
        error (_("value of type `%s' requires %s bytes, which is more "
                 "than max-value-size"), type->name (), pulongest (length));
      else
        error (_("value requires %s bytes, which is more than "
                 "max-value-size"), pulongest (length));
    }
}
bool
exceeds_max_value_size (ULONGEST length)
{
  return max_value_size > -1 && length > max_value_size;
}

/* When this has a value, it is used to limit the number of array elements
   of an array that are loaded into memory when an array value is made
   non-lazy.  */
static std::optional<int> array_length_limiting_element_count;

scoped_array_length_limiting::scoped_array_length_limiting (int elements)
{
  m_old_value = array_length_limiting_element_count;
  array_length_limiting_element_count.emplace (elements);
}

scoped_array_length_limiting::~scoped_array_length_limiting ()
{
  array_length_limiting_element_count = m_old_value;
}
/* Find the inner element type for ARRAY_TYPE.  */

static struct type *
find_array_element_type (struct type *array_type)
{
  array_type = check_typedef (array_type);
  gdb_assert (array_type->code () == TYPE_CODE_ARRAY);

  if (current_language->la_language == language_fortran)
    while (array_type->code () == TYPE_CODE_ARRAY)
      {
        array_type = array_type->target_type ();
        array_type = check_typedef (array_type);
      }
  else
    {
      array_type = array_type->target_type ();
      array_type = check_typedef (array_type);
    }

  return array_type;
}
/* Return the limited length of ARRAY_TYPE, which must be of
   TYPE_CODE_ARRAY.  This function can only be called when the global
   ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.

   The limited length of an array is the smallest of either (1) the total
   size of the array type, or (2) the array target type multiplied by the
   array_length_limiting_element_count.  */

static ULONGEST
calculate_limited_array_length (struct type *array_type)
{
  gdb_assert (array_length_limiting_element_count.has_value ());

  array_type = check_typedef (array_type);
  gdb_assert (array_type->code () == TYPE_CODE_ARRAY);

  struct type *elm_type = find_array_element_type (array_type);
  ULONGEST len = (elm_type->length ()
                  * (*array_length_limiting_element_count));
  len = std::min (len, array_type->length ());

  return len;
}
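
/* Example (editorial addition, not from the original source): with
   array_length_limiting_element_count set to 100, an array of 1000
   4-byte integers has a limited length of
   std::min ((ULONGEST) 4 * 100, (ULONGEST) 4000) = 400 bytes.  */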
bool
value::set_limited_array_length ()
{
  ULONGEST limit = m_limited_length;
  ULONGEST len = type ()->length ();

  if (array_length_limiting_element_count.has_value ())
    len = calculate_limited_array_length (type ());

  if (limit != 0 && len > limit)
    len = limit;
  if (len > max_value_size)
    return false;

  m_limited_length = max_value_size;
  return true;
}

void
value::allocate_contents (bool check_size)
{
  if (!m_contents)
    {
      struct type *enc_type = enclosing_type ();
      ULONGEST len = enc_type->length ();

      if (check_size)
        {
          /* If we are allocating the contents of an array, which
             is greater in size than max_value_size, and there is
             an element limit in effect, then we can possibly try
             to load only a sub-set of the array contents into
             GDB's memory.  */
          if (type () == enc_type
              && type ()->code () == TYPE_CODE_ARRAY
              && len > max_value_size
              && set_limited_array_length ())
            len = m_limited_length;
          else
            check_type_length_before_alloc (enc_type);
        }

      m_contents.reset ((gdb_byte *) xzalloc (len));
    }
}
/* Allocate a value and its contents for type TYPE.  If CHECK_SIZE is true,
   then apply the usual max-value-size checks.  */

struct value *
value::allocate (struct type *type, bool check_size)
{
  struct value *val = value::allocate_lazy (type);

  val->allocate_contents (check_size);
  val->m_lazy = false;
  return val;
}

/* Allocate a value and its contents for type TYPE.  */

struct value *
value::allocate (struct type *type)
{
  return allocate (type, true);
}
struct value *
value::allocate_register_lazy (const frame_info_ptr &initial_next_frame,
                               int regnum, struct type *type)
{
  if (type == nullptr)
    type = register_type (frame_unwind_arch (initial_next_frame), regnum);

  value *result = value::allocate_lazy (type);

  result->set_lval (lval_register);
  result->m_location.reg.regnum = regnum;

  /* If this register value is created during unwind (while computing a frame
     id), and NEXT_FRAME is a frame inlined in the frame being unwound, then
     NEXT_FRAME will not have a valid frame id yet.  Find the next non-inline
     frame (possibly the sentinel frame).  This is where registers are unwound
     from.  */
  frame_info_ptr next_frame = initial_next_frame;
  while (get_frame_type (next_frame) == INLINE_FRAME)
    next_frame = get_next_frame_sentinel_okay (next_frame);

  result->m_location.reg.next_frame_id = get_frame_id (next_frame);

  /* We should have a next frame with a valid id.  */
  gdb_assert (frame_id_p (result->m_location.reg.next_frame_id));

  return result;
}

struct value *
value::allocate_register (const frame_info_ptr &next_frame, int regnum,
                          struct type *type)
{
  value *result = value::allocate_register_lazy (next_frame, regnum, type);
  result->set_lazy (false);
  return result;
}
/* Allocate a value that has the correct length
   for COUNT repetitions of type TYPE.  */

struct value *
allocate_repeat_value (struct type *type, int count)
{
  /* Despite the fact that we are really creating an array of TYPE here, we
     use the string lower bound as the array lower bound.  This seems to
     work fine for now.  */
  int low_bound = current_language->string_lower_bound ();
  /* FIXME-type-allocation: need a way to free this type when we are
     done with it.  */
  struct type *array_type
    = lookup_array_range_type (type, low_bound, count + low_bound - 1);

  return value::allocate (array_type);
}
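
/* Example (editorial addition, not from the original source):
   allocate_repeat_value (int_type, 10) builds an array type of ten
   INT_TYPE elements, with the lower bound taken from the current
   language, and returns a freshly allocated value of that array
   type.  */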
struct value *
value::allocate_computed (struct type *type,
                          const struct lval_funcs *funcs,
                          void *closure)
{
  struct value *v = value::allocate_lazy (type);

  v->set_lval (lval_computed);
  v->m_location.computed.funcs = funcs;
  v->m_location.computed.closure = closure;

  return v;
}

struct value *
value::allocate_optimized_out (struct type *type)
{
  struct value *retval = value::allocate_lazy (type);

  retval->mark_bytes_optimized_out (0, type->length ());
  retval->set_lazy (false);
  return retval;
}
/* Accessor methods.  */

gdb::array_view<gdb_byte>
value::contents_raw ()
{
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  allocate_contents (true);

  ULONGEST length = type ()->length ();
  return gdb::make_array_view
    (m_contents.get () + m_embedded_offset * unit_size, length);
}

gdb::array_view<gdb_byte>
value::contents_all_raw ()
{
  allocate_contents (true);

  ULONGEST length = enclosing_type ()->length ();
  return gdb::make_array_view (m_contents.get (), length);
}
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
                   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  result = value->type ();
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
         fetch its rtti type.  */
      if (result->is_pointer_or_reference ()
          && (check_typedef (result->target_type ())->code ()
              == TYPE_CODE_STRUCT)
          && !value->optimized_out ())
        {
          struct type *real_type;

          real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
          if (real_type)
            {
              if (real_type_found)
                *real_type_found = 1;
              result = real_type;
            }
        }
      else if (resolve_simple_types)
        {
          if (real_type_found)
            *real_type_found = 1;
          result = value->enclosing_type ();
        }
    }

  return result;
}
void
error_value_optimized_out (void)
{
  throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
}

void
value::require_not_optimized_out () const
{
  if (!m_optimized_out.empty ())
    {
      if (m_lval == lval_register)
        throw_error (OPTIMIZED_OUT_ERROR,
                     _("register has not been saved in frame"));
      else
        error_value_optimized_out ();
    }
}

void
value::require_available () const
{
  if (!m_unavailable.empty ())
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
gdb::array_view<const gdb_byte>
value::contents_for_printing ()
{
  if (m_lazy)
    fetch_lazy ();

  ULONGEST length = enclosing_type ()->length ();
  return gdb::make_array_view (m_contents.get (), length);
}

gdb::array_view<const gdb_byte>
value::contents_for_printing () const
{
  gdb_assert (!m_lazy);

  ULONGEST length = enclosing_type ()->length ();
  return gdb::make_array_view (m_contents.get (), length);
}

gdb::array_view<const gdb_byte>
value::contents_all ()
{
  gdb::array_view<const gdb_byte> result = contents_for_printing ();
  require_not_optimized_out ();
  require_available ();
  return result;
}
/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
                      const std::vector<range> &src_range, int src_bit_offset,
                      unsigned int bit_length)
{
  for (const range &r : src_range)
    {
      LONGEST h, l;

      l = std::max (r.offset, (LONGEST) src_bit_offset);
      h = std::min ((LONGEST) (r.offset + r.length),
                    (LONGEST) src_bit_offset + bit_length);

      if (l < h)
        insert_into_bit_range_vector (dst_range,
                                      dst_bit_offset + (l - src_bit_offset),
                                      h - l);
    }
}
void
value::ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
                             int src_bit_offset, int bit_length) const
{
  ::ranges_copy_adjusted (&dst->m_unavailable, dst_bit_offset,
                          m_unavailable, src_bit_offset,
                          bit_length);
  ::ranges_copy_adjusted (&dst->m_optimized_out, dst_bit_offset,
                          m_optimized_out, src_bit_offset,
                          bit_length);
}
void
value::contents_copy_raw (struct value *dst, LONGEST dst_offset,
                          LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !m_lazy);

  ULONGEST copy_length = length;
  ULONGEST limit = m_limited_length;
  if (limit > 0 && src_offset + length > limit)
    copy_length = src_offset > limit ? 0 : limit - src_offset;

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (dst->bytes_available (dst_offset, length));
  gdb_assert (!dst->bits_any_optimized_out (TARGET_CHAR_BIT * dst_offset,
                                            TARGET_CHAR_BIT * length));

  if ((src_offset + copy_length) * unit_size > enclosing_type ()->length ())
    error (_("access outside bounds of object"));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents
    = dst->contents_all_raw ().slice (dst_offset * unit_size,
                                      copy_length * unit_size);
  gdb::array_view<const gdb_byte> src_contents
    = contents_all_raw ().slice (src_offset * unit_size,
                                 copy_length * unit_size);
  gdb::copy (src_contents, dst_contents);

  /* Copy the meta-data, adjusted.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  ranges_copy_adjusted (dst, dst_bit_offset,
                        src_bit_offset, bit_length);
}
void
value::contents_copy_raw_bitwise (struct value *dst, LONGEST dst_bit_offset,
                                  LONGEST src_bit_offset,
                                  LONGEST bit_length)
{
  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !m_lazy);

  ULONGEST copy_bit_length = bit_length;
  ULONGEST bit_limit = m_limited_length * TARGET_CHAR_BIT;
  if (bit_limit > 0 && src_bit_offset + bit_length > bit_limit)
    copy_bit_length = (src_bit_offset > bit_limit ? 0
                       : bit_limit - src_bit_offset);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  LONGEST dst_offset = dst_bit_offset / TARGET_CHAR_BIT;
  LONGEST length = bit_length / TARGET_CHAR_BIT;
  gdb_assert (dst->bytes_available (dst_offset, length));
  gdb_assert (!dst->bits_any_optimized_out (dst_bit_offset,
                                            bit_length));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents = dst->contents_all_raw ();
  gdb::array_view<const gdb_byte> src_contents = contents_all_raw ();
  copy_bitwise (dst_contents.data (), dst_bit_offset,
                src_contents.data (), src_bit_offset,
                copy_bit_length,
                type_byte_order (type ()) == BFD_ENDIAN_BIG);

  /* Copy the meta-data.  */
  ranges_copy_adjusted (dst, dst_bit_offset, src_bit_offset, bit_length);
}
void
value::contents_copy (struct value *dst, LONGEST dst_offset,
                      LONGEST src_offset, LONGEST length)
{
  if (m_lazy)
    fetch_lazy ();

  contents_copy_raw (dst, dst_offset, src_offset, length);
}

gdb::array_view<const gdb_byte>
value::contents ()
{
  gdb::array_view<const gdb_byte> result = contents_writeable ();
  require_not_optimized_out ();
  require_available ();
  return result;
}

gdb::array_view<gdb_byte>
value::contents_writeable ()
{
  if (m_lazy)
    fetch_lazy ();

  return contents_raw ();
}
bool
value::optimized_out ()
{
  if (m_lazy)
    {
      /* See if we can compute the result without fetching the
         value.  */
      if (this->lval () == lval_memory)
        return false;
      else if (this->lval () == lval_computed)
        {
          const struct lval_funcs *funcs = m_location.computed.funcs;

          if (funcs->is_optimized_out != nullptr)
            return funcs->is_optimized_out (this);
        }

      /* Fall back to fetching.  */
      try
        {
          fetch_lazy ();
        }
      catch (const gdb_exception_error &ex)
        {
          switch (ex.error)
            {
            case OPTIMIZED_OUT_ERROR:
            case NOT_AVAILABLE_ERROR:
              /* These can normally happen when we try to access an
                 optimized out or unavailable register, either in a
                 physical register or spilled to memory.  */
              break;
            default:
              throw;
            }
        }
    }

  return !m_optimized_out.empty ();
}
/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  */

void
value::mark_bytes_optimized_out (int offset, int length)
{
  mark_bits_optimized_out (offset * TARGET_CHAR_BIT,
                           length * TARGET_CHAR_BIT);
}

void
value::mark_bits_optimized_out (LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&m_optimized_out, offset, length);
}

bool
value::bits_synthetic_pointer (LONGEST offset, LONGEST length) const
{
  if (m_lval != lval_computed
      || !m_location.computed.funcs->check_synthetic_pointer)
    return false;

  return m_location.computed.funcs->check_synthetic_pointer (this, offset,
                                                             length);
}
const struct lval_funcs *
value::computed_funcs () const
{
  gdb_assert (m_lval == lval_computed);

  return m_location.computed.funcs;
}

void *
value::computed_closure () const
{
  gdb_assert (m_lval == lval_computed);

  return m_location.computed.closure;
}

CORE_ADDR
value::address () const
{
  if (m_lval != lval_memory)
    return 0;
  if (m_parent != NULL)
    return m_parent->address () + m_offset;
  if (NULL != TYPE_DATA_LOCATION (type ()))
    {
      gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
      return TYPE_DATA_LOCATION_ADDR (type ());
    }

  return m_location.address + m_offset;
}

CORE_ADDR
value::raw_address () const
{
  if (m_lval != lval_memory)
    return 0;
  return m_location.address;
}

void
value::set_address (CORE_ADDR addr)
{
  gdb_assert (m_lval == lval_memory);
  m_location.address = addr;
}
/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  */

struct value *
value_mark (void)
{
  if (all_values.empty ())
    return nullptr;
  return all_values.back ().get ();
}

/* Release a reference to VAL, which was acquired with value_incref.
   This function is also called to deallocate values from the value
   chain.  */

void
value::decref ()
{
  gdb_assert (m_reference_count > 0);
  m_reference_count--;
  if (m_reference_count == 0)
    delete this;
}

/* Free all values allocated since MARK was obtained by value_mark
   (except for those released).  */

void
value_free_to_mark (const struct value *mark)
{
  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    all_values.clear ();
  else
    all_values.erase (iter + 1, all_values.end ());
}
/* Remove VAL from the chain all_values
   so it will not be freed automatically.  */

value_ref_ptr
release_value (struct value *val)
{
  if (val == nullptr)
    return value_ref_ptr ();

  std::vector<value_ref_ptr>::reverse_iterator iter;
  for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
    {
      if (*iter == val)
        {
          value_ref_ptr result = *iter;
          all_values.erase (iter.base () - 1);
          return result;
        }
    }

  /* We must always return an owned reference.  Normally this happens
     because we transfer the reference from the value chain, but in
     this case the value was not on the chain.  */
  return value_ref_ptr::new_reference (val);
}

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  std::reverse (result.begin (), result.end ());
  return result;
}
struct value *
value::copy () const
{
  struct type *encl_type = enclosing_type ();
  struct value *val;

  val = value::allocate_lazy (encl_type);
  val->m_type = m_type;
  val->set_lval (m_lval);
  val->m_location = m_location;
  val->m_offset = m_offset;
  val->m_bitpos = m_bitpos;
  val->m_bitsize = m_bitsize;
  val->m_lazy = m_lazy;
  val->m_embedded_offset = embedded_offset ();
  val->m_pointed_to_offset = m_pointed_to_offset;
  val->m_modifiable = m_modifiable;
  val->m_stack = m_stack;
  val->m_is_zero = m_is_zero;
  val->m_in_history = m_in_history;
  val->m_initialized = m_initialized;
  val->m_unavailable = m_unavailable;
  val->m_optimized_out = m_optimized_out;
  val->m_parent = m_parent;
  val->m_limited_length = m_limited_length;

  if (!val->lazy ()
      && !(val->entirely_optimized_out ()
           || val->entirely_unavailable ()))
    {
      ULONGEST length = val->m_limited_length;
      if (length == 0)
        length = val->enclosing_type ()->length ();

      gdb_assert (m_contents != nullptr);
      const auto &arg_view
        = gdb::make_array_view (m_contents.get (), length);

      val->allocate_contents (false);
      gdb::array_view<gdb_byte> val_contents
        = val->contents_all_raw ().slice (0, length);

      gdb::copy (arg_view, val_contents);
    }

  if (val->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = val->m_location.computed.funcs;

      if (funcs->copy_closure)
        val->m_location.computed.closure = funcs->copy_closure (val);
    }

  return val;
}
/* Return a "const" and/or "volatile" qualified version of the value V.
   If CNST is true, then the returned value will be qualified with
   "const".
   If VOLTL is true, then the returned value will be qualified with
   "volatile".  */

struct value *
make_cv_value (int cnst, int voltl, struct value *v)
{
  struct type *val_type = v->type ();
  struct type *m_enclosing_type = v->enclosing_type ();
  struct value *cv_val = v->copy ();

  cv_val->deprecated_set_type (make_cv_type (cnst, voltl, val_type, NULL));
  cv_val->set_enclosing_type (make_cv_type (cnst, voltl, m_enclosing_type, NULL));

  return cv_val;
}
struct value *
value::non_lval ()
{
  if (this->lval () != not_lval)
    {
      struct type *enc_type = enclosing_type ();
      struct value *val = value::allocate (enc_type);

      gdb::copy (contents_all (), val->contents_all_raw ());
      val->m_type = m_type;
      val->set_embedded_offset (embedded_offset ());
      val->set_pointed_to_offset (pointed_to_offset ());
      return val;
    }
  return this;
}

void
value::force_lval (CORE_ADDR addr)
{
  gdb_assert (this->lval () == not_lval);

  write_memory (addr, contents_raw ().data (), type ()->length ());
  m_lval = lval_memory;
  m_location.address = addr;
}
void
value::set_component_location (const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->m_lval != lval_xcallable);

  if (whole->m_lval == lval_internalvar)
    m_lval = lval_internalvar_component;
  else
    m_lval = whole->m_lval;

  m_location = whole->m_location;
  if (whole->m_lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->m_location.computed.funcs;

      if (funcs->copy_closure)
        m_location.computed.closure = funcs->copy_closure (whole);
    }

  /* If the WHOLE value has a dynamically resolved location property then
     update the address of the COMPONENT.  */
  type = whole->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION (type)->is_constant ())
    set_address (TYPE_DATA_LOCATION_ADDR (type));

  /* Similarly, if the COMPONENT value has a dynamically resolved location
     property then update its address.  */
  type = this->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION (type)->is_constant ())
    {
      /* If the COMPONENT has a dynamic location, and is an
         lval_internalvar_component, then we change it to a lval_memory.

         Usually a component of an internalvar is created non-lazy, and has
         its content immediately copied from the parent internalvar.
         However, for components with a dynamic location, the content of
         the component is not contained within the parent, but is instead
         accessed indirectly.  Further, the component will be created as a
         lazy value.

         By changing the type of the component to lval_memory we ensure
         that value_fetch_lazy can successfully load the component.

         This solution isn't ideal, but a real fix would require values to
         carry around both the parent value contents, and the contents of
         any dynamic fields within the parent.  This is a substantial
         change to how values work in GDB.  */
      if (this->lval () == lval_internalvar_component)
        {
          gdb_assert (lazy ());
          m_lval = lval_memory;
        }
      else
        gdb_assert (this->lval () == lval_memory);
      set_address (TYPE_DATA_LOCATION_ADDR (type));
    }
}
/* Access to the value history.  */

/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
value::record_latest ()
{
  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (lazy ())
    fetch_lazy ();

  /* Mark the value as recorded in the history for the availability check.  */
  m_in_history = true;

  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  set_modifiable (false);

  value_history.push_back (release_value (this));

  return value_history.size ();
}
/* Return a copy of the value in the history with sequence number NUM.  */

struct value *
access_value_history (int num)
{
  int absnum;

  absnum = num;

  if (absnum <= 0)
    absnum += value_history.size ();

  if (absnum <= 0)
    {
      if (num == 0)
        error (_("The history is empty."));
      else if (num == 1)
        error (_("There is only one value in the history."));
      else
        error (_("History does not go back to $$%d."), -num);
    }
  if (absnum > value_history.size ())
    error (_("History has not yet reached $%d."), absnum);

  absnum--;

  return value_history[absnum]->copy ();
}

int
value_history_count ()
{
  return value_history.size ();
}
static void
show_values (const char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
         "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
        num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history.size () - 9;
    }

  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history.size (); i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      gdb_printf (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      gdb_printf (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    set_repeat_arguments ("+");
}
enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};

union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};

/* Internal variables.  These are variables within the debugger
   that hold values assigned by debugger commands.
   The user refers to them with a '$' prefix
   that does not appear in the variable names stored internally.  */

struct internalvar
{
  internalvar (std::string name)
    : name (std::move (name))
  {}

  internalvar (internalvar &&other)
    : name (std::move (other.name)),
      kind (other.kind),
      u (other.u)
  {
    other.kind = INTERNALVAR_VOID;
  }

  ~internalvar ()
  {
    clear_internalvar (this);
  }

  std::string name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind = INTERNALVAR_VOID;

  union internalvar_data u {};
};

/* Use std::map, a sorted container, to make the order of iteration (and
   therefore the output of "show convenience") stable.  */

static std::map<std::string, internalvar> internalvars;
/* If the variable does not already exist create it and give it the
   value given.  If no value is given then the default is zero.  */
static void
init_if_undefined_command (const char* args, int from_tty)
{
  struct internalvar *intvar = nullptr;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->first_opcode () != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.  */
  expr::assign_operation *assign
    = dynamic_cast<expr::assign_operation *> (expr->op.get ());
  if (assign != nullptr)
    {
      expr::operation *lhs = assign->get_lhs ();
      expr::internalvar_operation *ivarop
        = dynamic_cast<expr::internalvar_operation *> (lhs);
      if (ivarop != nullptr)
        intvar = ivarop->get_internalvar ();
    }

  if (intvar == nullptr)
    error (_("The first parameter to init-if-undefined "
             "should be a GDB variable."));

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    expr->evaluate ();
}
/* Look up an internal variable with name NAME.  NAME should not
   normally include a dollar sign.

   If the specified internal variable does not exist,
   the return value is NULL.  */

struct internalvar *
lookup_only_internalvar (const char *name)
{
  auto it = internalvars.find (name);
  if (it == internalvars.end ())
    return NULL;

  return &it->second;
}

/* Complete NAME by comparing it to the names of internal
   variables.  */

void
complete_internalvar (completion_tracker &tracker, const char *name)
{
  int len = strlen (name);

  for (auto &pair : internalvars)
    {
      const internalvar &var = pair.second;

      if (var.name.compare (0, len, name) == 0)
        tracker.add_completion (make_unique_xstrdup (var.name.c_str ()));
    }
}

/* Create an internal variable with name NAME and with a void value.
   NAME should not normally include a dollar sign.

   An internal variable with that name must not exist already.  */

struct internalvar *
create_internalvar (const char *name)
{
  auto pair = internalvars.emplace (std::make_pair (name, internalvar (name)));
  gdb_assert (pair.second);

  return &pair.first->second;
}
/* Create an internal variable with name NAME and register FUN as the
   function that value_of_internalvar uses to create a value whenever
   this variable is referenced.  NAME should not normally include a
   dollar sign.  DATA is passed uninterpreted to FUN when it is
   called.  CLEANUP, if not NULL, is called when the internal variable
   is destroyed.  It is passed DATA as its only argument.  */

struct internalvar *
create_internalvar_type_lazy (const char *name,
                              const struct internalvar_funcs *funcs,
                              void *data)
{
  struct internalvar *var = create_internalvar (name);

  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}

/* See documentation in value.h.  */

int
compile_internalvar_to_ax (struct internalvar *var,
                           struct agent_expr *expr,
                           struct axs_value *value)
{
  if (var->kind != INTERNALVAR_MAKE_VALUE
      || var->u.make_value.functions->compile_to_ax == NULL)
    return 0;

  var->u.make_value.functions->compile_to_ax (var, expr, value,
                                              var->u.make_value.data);
  return 1;
}
/* Look up an internal variable with name NAME.  NAME should not
   normally include a dollar sign.

   If the specified internal variable does not exist,
   one is created, with a void value.  */

struct internalvar *
lookup_internalvar (const char *name)
{
  struct internalvar *var;

  var = lookup_only_internalvar (name);
  if (var)
    return var;

  return create_internalvar (name);
}
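
/* Example (editorial addition, not from the original source):
   lookup_internalvar ("foo") returns the existing $foo if the user has
   already created it, and otherwise creates a new void-valued $foo, so
   callers never receive NULL; use lookup_only_internalvar to query
   without creating.  */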
/* Return current value of internal variable VAR.  For variables that
   are not inherently typed, use a value type appropriate for GDBARCH.  */

struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name.c_str ());
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
                                                                 &(tsv->value));
      if (tsv->value_known)
        val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
                                  tsv->value);
      else
        val = value::allocate (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = value::allocate (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = value::allocate (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      if (!var->u.integer.type)
        val = value_from_longest (builtin_type (gdbarch)->builtin_int,
                                  var->u.integer.val);
      else
        val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = current_language->value_string (gdbarch,
                                            var->u.string,
                                            strlen (var->u.string));
      break;

    case INTERNALVAR_VALUE:
      val = var->u.value->copy ();
      if (val->lazy ())
        val->fetch_lazy ();
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
                                                        var->u.make_value.data);
      break;

    default:
      internal_error (_("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval () != lval_computed)
    {
      val->set_lval (lval_internalvar);
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
int
get_internalvar_integer (struct internalvar *var, LONGEST *result)
{
  if (var->kind == INTERNALVAR_INTEGER)
    {
      *result = var->u.integer.val;
      return 1;
    }

  if (var->kind == INTERNALVAR_VALUE)
    {
      struct type *type = check_typedef (var->u.value->type ());

      if (type->code () == TYPE_CODE_INT)
        {
          *result = value_as_long (var->u.value);
          return 1;
        }
    }

  if (var->kind == INTERNALVAR_MAKE_VALUE)
    {
      struct gdbarch *gdbarch = get_current_arch ();
      struct value *val
        = (*var->u.make_value.functions->make_value) (gdbarch, var,
                                                      var->u.make_value.data);
      struct type *type = check_typedef (val->type ());

      if (type->code () == TYPE_CODE_INT)
        {
          *result = value_as_long (val);
          return 1;
        }
    }

  return 0;
}

static int
get_internalvar_function (struct internalvar *var,
                          struct internal_function **result)
{
  switch (var->kind)
    {
    case INTERNALVAR_FUNCTION:
      *result = var->u.fn.function;
      return 1;

    default:
      return 0;
    }
}
void
set_internalvar_component (struct internalvar *var,
                           LONGEST offset, LONGEST bitpos,
                           LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *gdbarch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = var->u.value->contents_writeable ().data ();
      gdbarch = var->u.value->arch ();
      unit_size = gdbarch_addressable_memory_unit_size (gdbarch);

      if (bitsize)
        modify_field (var->u.value->type (), addr + offset,
                      value_as_long (newval), bitpos, bitsize);
      else
        memcpy (addr + offset * unit_size, newval->contents ().data (),
                newval->type ()->length ());
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (_("set_internalvar_component"));
    }
}
void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name.c_str ());

  /* Prepare new contents.  */
  switch (check_typedef (val->type ())->code ())
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (val->lval () == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
                                &new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      struct value *copy = val->copy ();
      copy->set_modifiable (true);

      /* Force the value to be fetched from the target now, to avoid problems
         later when this internalvar is referenced and the target is gone or
         has changed.  */
      if (copy->lazy ())
        copy->fetch_lazy ();

      /* Release the value from the value chain to prevent it from being
         deleted by free_all_values.  From here on this function should not
         call error () until new_data is installed into the var->u to avoid
         leaking memory.  */
      new_data.value = release_value (copy).release ();

      /* Internal variables which are created from values with a dynamic
         location don't need the location property of the origin anymore.
         The resolved dynamic location is used prior to any other address
         when accessing the value.
         If we keep it, we would still refer to the origin value.
         Remove the location property in case it exist.  */
      new_data.value->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION);

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Set new contents.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error().  */
}
void
set_internalvar_integer (struct internalvar *var, LONGEST l)
{
  /* Clean up old contents.  */
  clear_internalvar (var);

  var->kind = INTERNALVAR_INTEGER;
  var->u.integer.type = NULL;
  var->u.integer.val = l;
}

void
set_internalvar_string (struct internalvar *var, const char *string)
{
  /* Clean up old contents.  */
  clear_internalvar (var);

  var->kind = INTERNALVAR_STRING;
  var->u.string = xstrdup (string);
}

static void
set_internalvar_function (internalvar *var, internal_function_up f)
{
  /* Clean up old contents.  */
  clear_internalvar (var);

  var->kind = INTERNALVAR_FUNCTION;
  var->u.fn.function = f.release ();
  var->u.fn.canonical = 1;
  /* Variables installed here are always the canonical version.  */
}
static void
clear_internalvar (struct internalvar *var)
{
  /* Clean up old contents.  */
  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      var->u.value->decref ();
      break;

    case INTERNALVAR_STRING:
      xfree (var->u.string);
      break;

    case INTERNALVAR_FUNCTION:
      delete var->u.fn.function;
      break;

    default:
      break;
    }

  /* Reset to void kind.  */
  var->kind = INTERNALVAR_VOID;
}

const char *
internalvar_name (const struct internalvar *var)
{
  return var->name.c_str ();
}

const char *
value_internal_function_name (struct value *val)
{
  struct internal_function *ifn;
  int result;

  gdb_assert (val->lval () == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
  gdb_assert (result);

  return ifn->name.c_str ();
}
struct value *
call_internal_function (struct gdbarch *gdbarch,
                        const struct language_defn *language,
                        struct value *func, int argc, struct value **argv,
                        enum noside noside)
{
  struct internal_function *ifn;
  bool result;

  gdb_assert (func->lval () == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
  gdb_assert (result);

  return ifn->handler (gdbarch, language, ifn->cookie, argc, argv, noside);
}

/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (const char *command, int from_tty)
{
}
/* Helper function that does the work for add_internal_function.  */

static struct cmd_list_element *
do_add_internal_function (const char *name, const char *doc,
                          internal_function_fn_noside handler, void *cookie)
{
  set_internalvar_function (lookup_internalvar (name),
                            std::make_unique<internal_function> (name, handler,
                                                                 cookie));

  return add_cmd (name, no_class, function_command, doc, &functionlist);
}

void
add_internal_function (const char *name, const char *doc,
                       internal_function_fn_noside handler, void *cookie)
{
  do_add_internal_function (name, doc, handler, cookie);
}
/* By default, internal functions are assumed to return int.  Return a value
   with that type to reflect this.  If this is not correct for a specific
   internal function, it should use an internal_function_fn_noside handler to
   bypass this default.  */

static struct value *
internal_function_default_return_type (struct gdbarch *gdbarch)
{
  return value::zero (builtin_type (gdbarch)->builtin_int, not_lval);
}

void
add_internal_function (const char *name, const char *doc,
                       internal_function_fn handler, void *cookie)
{
  internal_function_fn_noside fn
    = [=] (struct gdbarch *gdbarch,
           const struct language_defn *language,
           void *_cookie,
           int argc,
           struct value **argv,
           enum noside noside)
      {
        if (noside == EVAL_AVOID_SIDE_EFFECTS)
          return internal_function_default_return_type (gdbarch);
        return handler (gdbarch, language, _cookie, argc, argv);
      };

  do_add_internal_function (name, doc, fn, cookie);
}
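
/* Illustrative sketch (not compiled in): registering a hypothetical
   convenience function $_example_double through the internal_function_fn
   overload above.  When an expression is evaluated with
   EVAL_AVOID_SIDE_EFFECTS, the wrapper lambda returns the default
   int-typed value instead of calling the handler.  */
#if 0
static struct value *
example_double_internal_fn (struct gdbarch *gdbarch,
                            const struct language_defn *language,
                            void *cookie, int argc, struct value **argv)
{
  if (argc != 1)
    error (_("You must provide one argument for $_example_double."));

  LONGEST n = value_as_long (argv[0]);
  return value_from_longest (builtin_type (gdbarch)->builtin_int, 2 * n);
}

/* Typically done from an INIT_GDB_FILE function:  */
static void
example_register_function ()
{
  add_internal_function ("_example_double",
                         _("Return twice the integer argument."),
                         example_double_internal_fn, NULL);
}
#endif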
void
add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
                       gdb::unique_xmalloc_ptr<char> &&doc,
                       internal_function_fn_noside handler, void *cookie)
{
  struct cmd_list_element *cmd
    = do_add_internal_function (name.get (), doc.get (), handler, cookie);

  /* Manually transfer the ownership of the doc and name strings to CMD by
     setting the appropriate flags.  */
  (void) doc.release ();
  cmd->doc_allocated = 1;
  (void) name.release ();
  cmd->name_allocated = 1;
}

void
add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
                       gdb::unique_xmalloc_ptr<char> &&doc,
                       internal_function_fn handler, void *cookie)
{
  internal_function_fn_noside fn
    = [=] (struct gdbarch *gdbarch,
           const struct language_defn *language,
           void *_cookie,
           int argc,
           struct value **argv,
           enum noside noside)
      {
        if (noside == EVAL_AVOID_SIDE_EFFECTS)
          return internal_function_default_return_type (gdbarch);
        return handler (gdbarch, language, _cookie, argc, argv);
      };

  add_internal_function (std::forward<gdb::unique_xmalloc_ptr<char>>(name),
                         std::forward<gdb::unique_xmalloc_ptr<char>>(doc),
                         fn, cookie);
}
2483 value::preserve (struct objfile
*objfile
, copied_types_hash_t
&copied_types
)
2485 if (m_type
->objfile_owner () == objfile
)
2486 m_type
= copy_type_recursive (m_type
, copied_types
);
2488 if (m_enclosing_type
->objfile_owner () == objfile
)
2489 m_enclosing_type
= copy_type_recursive (m_enclosing_type
, copied_types
);
2492 /* Likewise for internal variable VAR. */
2495 preserve_one_internalvar (struct internalvar
*var
, struct objfile
*objfile
,
2496 copied_types_hash_t
&copied_types
)
2500 case INTERNALVAR_INTEGER
:
2501 if (var
->u
.integer
.type
2502 && var
->u
.integer
.type
->objfile_owner () == objfile
)
2504 = copy_type_recursive (var
->u
.integer
.type
, copied_types
);
2507 case INTERNALVAR_VALUE
:
2508 var
->u
.value
->preserve (objfile
, copied_types
);
2513 /* Make sure that all types and values referenced by VAROBJ are updated before
2514 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2518 preserve_one_varobj (struct varobj
*varobj
, struct objfile
*objfile
,
2519 copied_types_hash_t
&copied_types
)
2521 if (varobj
->type
->is_objfile_owned ()
2522 && varobj
->type
->objfile_owner () == objfile
)
2525 = copy_type_recursive (varobj
->type
, copied_types
);
2528 if (varobj
->value
!= nullptr)
2529 varobj
->value
->preserve (objfile
, copied_types
);
2532 /* Update the internal variables and value history when OBJFILE is
2533 discarded; we must copy the types out of the objfile. New global types
2534 will be created for every convenience variable which currently points to
2535 this objfile's types, and the convenience variables will be adjusted to
2536 use the new global types. */
2539 preserve_values (struct objfile
*objfile
)
2541 /* Create the hash table. We allocate on the objfile's obstack, since
2542 it is soon to be deleted. */
2543 copied_types_hash_t copied_types
;
2545 for (const value_ref_ptr
&item
: value_history
)
2546 item
->preserve (objfile
, copied_types
);
2548 for (auto &pair
: internalvars
)
2549 preserve_one_internalvar (&pair
.second
, objfile
, copied_types
);
2551 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2552 all_root_varobjs ([&copied_types
, objfile
] (struct varobj
*varobj
)
2554 preserve_one_varobj (varobj
, objfile
, copied_types
);
2557 preserve_ext_lang_values (objfile
, copied_types
);
2561 show_convenience (const char *ignore
, int from_tty
)
2563 struct gdbarch
*gdbarch
= get_current_arch ();
2565 struct value_print_options opts
;
2567 get_user_print_options (&opts
);
2568 for (auto &pair
: internalvars
)
2570 internalvar
&var
= pair
.second
;
2576 gdb_printf (("$%s = "), var
.name
.c_str ());
2582 val
= value_of_internalvar (gdbarch
, &var
);
2583 value_print (val
, gdb_stdout
, &opts
);
2585 catch (const gdb_exception_error
&ex
)
2587 fprintf_styled (gdb_stdout
, metadata_style
.style (),
2588 _("<error: %s>"), ex
.what ());
2591 gdb_printf (("\n"));
2595 /* This text does not mention convenience functions on purpose.
2596 The user can't create them except via Python, and if Python support
2597 is installed this message will never be printed ($_streq will
2599 gdb_printf (_("No debugger convenience variables now defined.\n"
2600 "Convenience variables have "
2601 "names starting with \"$\";\n"
2602 "use \"%ps\" as in \"%ps\" to define them.\n"),
2603 styled_string (command_style
.style (), "set"),
2604 styled_string (command_style
.style (), "set $foo = 5"));
2612 value::from_xmethod (xmethod_worker_up
&&worker
)
2616 v
= value::allocate (builtin_type (current_inferior ()->arch ())->xmethod
);
2617 v
->m_lval
= lval_xcallable
;
2618 v
->m_location
.xm_worker
= worker
.release ();
2619 v
->m_modifiable
= false;
2627 value::result_type_of_xmethod (gdb::array_view
<value
*> argv
)
2629 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2630 && m_lval
== lval_xcallable
&& !argv
.empty ());
2632 return m_location
.xm_worker
->get_result_type (argv
[0], argv
.slice (1));
2638 value::call_xmethod (gdb::array_view
<value
*> argv
)
2640 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2641 && m_lval
== lval_xcallable
&& !argv
.empty ());
2643 return m_location
.xm_worker
->invoke (argv
[0], argv
.slice (1));
/* Extract a value as a C number (either long or double).
   Knows how to convert fixed values to double, or
   floating values to long.
   Does not deallocate the value.  */

LONGEST
value_as_long (struct value *val)
{
  /* This coerces arrays and functions, which is necessary (e.g.
     in disassemble_command).  It also dereferences references, which
     I suspect is the most logical thing to do.  */
  val = coerce_array (val);
  return unpack_long (val->type (), val->contents ().data ());
}
2664 value_as_mpz (struct value
*val
)
2666 val
= coerce_array (val
);
2667 struct type
*type
= check_typedef (val
->type ());
2669 switch (type
->code ())
2671 case TYPE_CODE_ENUM
:
2672 case TYPE_CODE_BOOL
:
2674 case TYPE_CODE_CHAR
:
2675 case TYPE_CODE_RANGE
:
2679 return gdb_mpz (value_as_long (val
));
2684 gdb::array_view
<const gdb_byte
> valbytes
= val
->contents ();
2685 enum bfd_endian byte_order
= type_byte_order (type
);
2687 /* Handle integers that are either not a multiple of the word size,
2688 or that are stored at some bit offset. */
2689 unsigned bit_off
= 0, bit_size
= 0;
2690 if (type
->bit_size_differs_p ())
2692 bit_size
= type
->bit_size ();
2695 /* We can just handle this immediately. */
2699 bit_off
= type
->bit_offset ();
2701 unsigned n_bytes
= ((bit_off
% 8) + bit_size
+ 7) / 8;
2702 valbytes
= valbytes
.slice (bit_off
/ 8, n_bytes
);
2704 if (byte_order
== BFD_ENDIAN_BIG
)
2705 bit_off
= (n_bytes
* 8 - bit_off
% 8 - bit_size
);
2710 result
.read (val
->contents (), byte_order
, type
->is_unsigned ());
2712 /* Shift off any low bits, if needed. */
2716 /* Mask off any high bits, if needed. */
2718 result
.mask (bit_size
);
2720 /* Now handle any range bias. */
2721 if (type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
2723 /* Unfortunately we have to box here, because LONGEST is
2724 probably wider than long. */
2725 result
+= gdb_mpz (type
->bounds ()->bias
);
2731 /* Extract a value as a C pointer. */
2734 value_as_address (struct value
*val
)
2736 struct gdbarch
*gdbarch
= val
->type ()->arch ();
2738 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2739 whether we want this to be true eventually. */
2741 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2742 non-address (e.g. argument to "signal", "info break", etc.), or
2743 for pointers to char, in which the low bits *are* significant. */
2744 return gdbarch_addr_bits_remove (gdbarch
, value_as_long (val
));
2747 /* There are several targets (IA-64, PowerPC, and others) which
2748 don't represent pointers to functions as simply the address of
2749 the function's entry point. For example, on the IA-64, a
2750 function pointer points to a two-word descriptor, generated by
2751 the linker, which contains the function's entry point, and the
2752 value the IA-64 "global pointer" register should have --- to
2753 support position-independent code. The linker generates
2754 descriptors only for those functions whose addresses are taken.
2756 On such targets, it's difficult for GDB to convert an arbitrary
2757 function address into a function pointer; it has to either find
2758 an existing descriptor for that function, or call malloc and
2759 build its own. On some targets, it is impossible for GDB to
2760 build a descriptor at all: the descriptor must contain a jump
2761 instruction; data memory cannot be executed; and code memory
2764 Upon entry to this function, if VAL is a value of type `function'
2765 (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
2766 val->address () is the address of the function. This is what
2767 you'll get if you evaluate an expression like `main'. The call
2768 to COERCE_ARRAY below actually does all the usual unary
2769 conversions, which includes converting values of type `function'
2770 to `pointer to function'. This is the challenging conversion
2771 discussed above. Then, `unpack_pointer' will convert that pointer
2772 back into an address.
2774 So, suppose the user types `disassemble foo' on an architecture
2775 with a strange function pointer representation, on which GDB
2776 cannot build its own descriptors, and suppose further that `foo'
2777 has no linker-built descriptor. The address->pointer conversion
2778 will signal an error and prevent the command from running, even
2779 though the next step would have been to convert the pointer
2780 directly back into the same address.
2782 The following shortcut avoids this whole mess. If VAL is a
2783 function, just return its address directly. */
2784 if (val
->type ()->code () == TYPE_CODE_FUNC
2785 || val
->type ()->code () == TYPE_CODE_METHOD
)
2786 return val
->address ();
2788 val
= coerce_array (val
);
2790 /* Some architectures (e.g. Harvard), map instruction and data
2791 addresses onto a single large unified address space. For
2792 instance: An architecture may consider a large integer in the
2793 range 0x10000000 .. 0x1000ffff to already represent a data
2794 addresses (hence not need a pointer to address conversion) while
2795 a small integer would still need to be converted integer to
2796 pointer to address. Just assume such architectures handle all
2797 integer conversions in a single function. */
2801 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2802 must admonish GDB hackers to make sure its behavior matches the
2803 compiler's, whenever possible.
2805 In general, I think GDB should evaluate expressions the same way
2806 the compiler does. When the user copies an expression out of
2807 their source code and hands it to a `print' command, they should
2808 get the same value the compiler would have computed. Any
2809 deviation from this rule can cause major confusion and annoyance,
2810 and needs to be justified carefully. In other words, GDB doesn't
2811 really have the freedom to do these conversions in clever and
2814 AndrewC pointed out that users aren't complaining about how GDB
2815 casts integers to pointers; they are complaining that they can't
2816 take an address from a disassembly listing and give it to `x/i'.
2817 This is certainly important.
2819 Adding an architecture method like integer_to_address() certainly
2820 makes it possible for GDB to "get it right" in all circumstances
2821 --- the target has complete control over how things get done, so
2822 people can Do The Right Thing for their target without breaking
2823 anyone else. The standard doesn't specify how integers get
2824 converted to pointers; usually, the ABI doesn't either, but
2825 ABI-specific code is a more reasonable place to handle it. */
2827 if (!val
->type ()->is_pointer_or_reference ()
2828 && gdbarch_integer_to_address_p (gdbarch
))
2829 return gdbarch_integer_to_address (gdbarch
, val
->type (),
2830 val
->contents ().data ());
2832 return unpack_pointer (val
->type (), val
->contents ().data ());
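
/* Illustrative sketch (not compiled in): the paths through
   value_as_address above.  FUNC_VAL and INT_VAL are hypothetical values
   assumed to already exist.  */
#if 0
static void
example_value_to_address (struct value *func_val, struct value *int_val)
{
  /* A TYPE_CODE_FUNC value short-circuits to its own address, avoiding
     the address -> pointer -> address round trip described above.  */
  CORE_ADDR entry = value_as_address (func_val);

  /* A plain integer normally goes through unpack_pointer, unless the
     architecture supplies gdbarch_integer_to_address to map integers
     into its address space directly.  */
  CORE_ADDR addr = value_as_address (int_val);

  gdb_printf ("entry=%s addr=%s\n", hex_string (entry), hex_string (addr));
}
#endif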
2836 /* Unpack raw data (copied from debuggee, target byte order) at VALADDR
2837 as a long, or as a double, assuming the raw data is described
2838 by type TYPE. Knows how to convert different sizes of values
2839 and can convert between fixed and floating point. We don't assume
2840 any alignment for the raw data. Return value is in host byte order.
2842 If you want functions and arrays to be coerced to pointers, and
2843 references to be dereferenced, call value_as_long() instead.
2845 C++: It is assumed that the front-end has taken care of
2846 all matters concerning pointers to members. A pointer
2847 to member which reaches here is considered to be equivalent
2848 to an INT (or some size). After all, it is only an offset. */
2851 unpack_long (struct type
*type
, const gdb_byte
*valaddr
)
2853 if (is_fixed_point_type (type
))
2854 type
= type
->fixed_point_type_base_type ();
2856 enum bfd_endian byte_order
= type_byte_order (type
);
2857 enum type_code code
= type
->code ();
2858 int len
= type
->length ();
2859 int nosign
= type
->is_unsigned ();
2863 case TYPE_CODE_TYPEDEF
:
2864 return unpack_long (check_typedef (type
), valaddr
);
2865 case TYPE_CODE_ENUM
:
2866 case TYPE_CODE_FLAGS
:
2867 case TYPE_CODE_BOOL
:
2869 case TYPE_CODE_CHAR
:
2870 case TYPE_CODE_RANGE
:
2871 case TYPE_CODE_MEMBERPTR
:
2875 if (type
->bit_size_differs_p ())
2877 unsigned bit_off
= type
->bit_offset ();
2878 unsigned bit_size
= type
->bit_size ();
2881 /* unpack_bits_as_long doesn't handle this case the
2882 way we'd like, so handle it here. */
2886 result
= unpack_bits_as_long (type
, valaddr
, bit_off
, bit_size
);
2891 result
= extract_unsigned_integer (valaddr
, len
, byte_order
);
2893 result
= extract_signed_integer (valaddr
, len
, byte_order
);
2895 if (code
== TYPE_CODE_RANGE
)
2896 result
+= type
->bounds ()->bias
;
2901 case TYPE_CODE_DECFLOAT
:
2902 return target_float_to_longest (valaddr
, type
);
2904 case TYPE_CODE_FIXED_POINT
:
2907 vq
.read_fixed_point (gdb::make_array_view (valaddr
, len
),
2909 type
->fixed_point_scaling_factor ());
2911 gdb_mpz vz
= vq
.as_integer ();
2912 return vz
.as_integer
<LONGEST
> ();
2917 case TYPE_CODE_RVALUE_REF
:
2918 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2919 whether we want this to be true eventually. */
2920 return extract_typed_address (valaddr
, type
);
2923 error (_("Value can't be converted to integer."));
/* Unpack raw data (copied from debuggee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
  return unpack_long (type, valaddr);
}

bool
is_floating_value (struct value *val)
{
  struct type *type = check_typedef (val->type ());

  if (is_floating_type (type))
    {
      if (!target_float_is_valid (val->contents ().data (), type))
        error (_("Invalid floating value found in program."));

      return true;
    }

  return false;
}
2964 /* Get the value of the FIELDNO'th field (which must be static) of
2968 value_static_field (struct type
*type
, int fieldno
)
2970 struct value
*retval
;
2972 switch (type
->field (fieldno
).loc_kind ())
2974 case FIELD_LOC_KIND_PHYSADDR
:
2975 retval
= value_at_lazy (type
->field (fieldno
).type (),
2976 type
->field (fieldno
).loc_physaddr ());
2978 case FIELD_LOC_KIND_PHYSNAME
:
2980 const char *phys_name
= type
->field (fieldno
).loc_physname ();
2981 /* type->field (fieldno).name (); */
2982 struct block_symbol sym
= lookup_symbol (phys_name
, nullptr,
2983 SEARCH_VAR_DOMAIN
, nullptr);
2985 if (sym
.symbol
== NULL
)
2987 /* With some compilers, e.g. HP aCC, static data members are
2988 reported as non-debuggable symbols. */
2989 bound_minimal_symbol msym
2990 = lookup_minimal_symbol (current_program_space
, phys_name
);
2991 struct type
*field_type
= type
->field (fieldno
).type ();
2994 retval
= value::allocate_optimized_out (field_type
);
2996 retval
= value_at_lazy (field_type
, msym
.value_address ());
2999 retval
= value_of_variable (sym
.symbol
, sym
.block
);
3003 gdb_assert_not_reached ("unexpected field location kind");
3009 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
3010 You have to be careful here, since the size of the data area for the value
3011 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3012 than the old enclosing type, you have to allocate more space for the
3016 value::set_enclosing_type (struct type
*new_encl_type
)
3018 if (new_encl_type
->length () > enclosing_type ()->length ())
3020 check_type_length_before_alloc (new_encl_type
);
3021 m_contents
.reset ((gdb_byte
*) xrealloc (m_contents
.release (),
3022 new_encl_type
->length ()));
3025 m_enclosing_type
= new_encl_type
;
3031 value::primitive_field (LONGEST offset
, int fieldno
, struct type
*arg_type
)
3035 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
3037 arg_type
= check_typedef (arg_type
);
3038 type
= arg_type
->field (fieldno
).type ();
3040 /* Call check_typedef on our type to make sure that, if TYPE
3041 is a TYPE_CODE_TYPEDEF, its length is set to the length
3042 of the target type instead of zero. However, we do not
3043 replace the typedef type by the target type, because we want
3044 to keep the typedef in order to be able to print the type
3045 description correctly. */
3046 check_typedef (type
);
3048 if (arg_type
->field (fieldno
).bitsize ())
3050 /* Handle packed fields.
3052 Create a new value for the bitfield, with bitpos and bitsize
3053 set. If possible, arrange offset and bitpos so that we can
3054 do a single aligned read of the size of the containing type.
3055 Otherwise, adjust offset to the byte containing the first
3056 bit. Assume that the address, offset, and embedded offset
3057 are sufficiently aligned. */
3059 LONGEST bitpos
= arg_type
->field (fieldno
).loc_bitpos ();
3060 LONGEST container_bitsize
= type
->length () * 8;
3062 v
= value::allocate_lazy (type
);
3063 v
->set_bitsize (arg_type
->field (fieldno
).bitsize ());
3064 if ((bitpos
% container_bitsize
) + v
->bitsize () <= container_bitsize
3065 && type
->length () <= (int) sizeof (LONGEST
))
3066 v
->set_bitpos (bitpos
% container_bitsize
);
3068 v
->set_bitpos (bitpos
% 8);
3069 v
->set_offset ((embedded_offset ()
3071 + (bitpos
- v
->bitpos ()) / 8));
3072 v
->set_parent (this);
3076 else if (fieldno
< TYPE_N_BASECLASSES (arg_type
))
3078 /* This field is actually a base subobject, so preserve the
3079 entire object's contents for later references to virtual
3083 /* Lazy register values with offsets are not supported. */
3084 if (this->lval () == lval_register
&& lazy ())
3087 /* We special case virtual inheritance here because this
3088 requires access to the contents, which we would rather avoid
3089 for references to ordinary fields of unavailable values. */
3090 if (BASETYPE_VIA_VIRTUAL (arg_type
, fieldno
))
3091 boffset
= baseclass_offset (arg_type
, fieldno
,
3092 contents ().data (),
3097 boffset
= arg_type
->field (fieldno
).loc_bitpos () / 8;
3100 v
= value::allocate_lazy (enclosing_type ());
3103 v
= value::allocate (enclosing_type ());
3104 contents_copy_raw (v
, 0, 0, enclosing_type ()->length ());
3106 v
->deprecated_set_type (type
);
3107 v
->set_offset (this->offset ());
3108 v
->set_embedded_offset (offset
+ embedded_offset () + boffset
);
3110 else if (NULL
!= TYPE_DATA_LOCATION (type
))
3112 /* Field is a dynamic data member. */
3114 gdb_assert (0 == offset
);
3115 /* We expect an already resolved data location. */
3116 gdb_assert (TYPE_DATA_LOCATION (type
)->is_constant ());
3117 /* For dynamic data types defer memory allocation
3118 until we actual access the value. */
3119 v
= value::allocate_lazy (type
);
3123 /* Plain old data member */
3124 offset
+= (arg_type
->field (fieldno
).loc_bitpos ()
3125 / (HOST_CHAR_BIT
* unit_size
));
3127 /* Lazy register values with offsets are not supported. */
3128 if (this->lval () == lval_register
&& lazy ())
3132 v
= value::allocate_lazy (type
);
3135 v
= value::allocate (type
);
3136 contents_copy_raw (v
, v
->embedded_offset (),
3137 embedded_offset () + offset
,
3138 type_length_units (type
));
3140 v
->set_offset (this->offset () + offset
+ embedded_offset ());
3142 v
->set_component_location (this);
/* Given a value ARG1 of a struct or union type,
   extract and return the value of one of its (non-static) fields.
   FIELDNO says which field.  */

struct value *
value_field (struct value *arg1, int fieldno)
{
  return arg1->primitive_field (0, fieldno, arg1->type ());
}
3156 /* Return a non-virtual function as a value.
3157 F is the list of member functions which contains the desired method.
3158 J is an index into F which provides the desired method.
3160 We only use the symbol for its address, so be happy with either a
3161 full symbol or a minimal symbol. */
3164 value_fn_field (struct value
**arg1p
, struct fn_field
*f
,
3165 int j
, struct type
*type
,
3169 struct type
*ftype
= TYPE_FN_FIELD_TYPE (f
, j
);
3170 const char *physname
= TYPE_FN_FIELD_PHYSNAME (f
, j
);
3172 bound_minimal_symbol msym
;
3174 sym
= lookup_symbol (physname
, nullptr, SEARCH_FUNCTION_DOMAIN
,
3178 msym
= lookup_minimal_symbol (current_program_space
, physname
);
3179 if (msym
.minsym
== NULL
)
3183 v
= value::allocate (ftype
);
3184 v
->set_lval (lval_memory
);
3187 v
->set_address (sym
->value_block ()->entry_pc ());
3191 /* The minimal symbol might point to a function descriptor;
3192 resolve it to the actual code address instead. */
3193 struct objfile
*objfile
= msym
.objfile
;
3194 struct gdbarch
*gdbarch
= objfile
->arch ();
3196 v
->set_address (gdbarch_convert_from_func_ptr_addr
3197 (gdbarch
, msym
.value_address (),
3198 current_inferior ()->top_target ()));
3203 if (type
!= (*arg1p
)->type ())
3204 *arg1p
= value_ind (value_cast (lookup_pointer_type (type
),
3205 value_addr (*arg1p
)));
3207 /* Move the `this' pointer according to the offset.
3208 (*arg1p)->offset () += offset; */
3219 unpack_bits_as_long (struct type
*field_type
, const gdb_byte
*valaddr
,
3220 LONGEST bitpos
, LONGEST bitsize
)
3222 enum bfd_endian byte_order
= type_byte_order (field_type
);
3227 LONGEST read_offset
;
3229 /* Read the minimum number of bytes required; there may not be
3230 enough bytes to read an entire ULONGEST. */
3231 field_type
= check_typedef (field_type
);
3233 bytes_read
= ((bitpos
% 8) + bitsize
+ 7) / 8;
3236 bytes_read
= field_type
->length ();
3237 bitsize
= 8 * bytes_read
;
3240 read_offset
= bitpos
/ 8;
3242 val
= extract_unsigned_integer (valaddr
+ read_offset
,
3243 bytes_read
, byte_order
);
3245 /* Extract bits. See comment above. */
3247 if (byte_order
== BFD_ENDIAN_BIG
)
3248 lsbcount
= (bytes_read
* 8 - bitpos
% 8 - bitsize
);
3250 lsbcount
= (bitpos
% 8);
3253 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3254 If the field is signed, and is negative, then sign extend. */
3256 if (bitsize
< 8 * (int) sizeof (val
))
3258 valmask
= (((ULONGEST
) 1) << bitsize
) - 1;
3260 if (!field_type
->is_unsigned ())
3262 if (val
& (valmask
^ (valmask
>> 1)))
3269 if (field_type
->code () == TYPE_CODE_RANGE
)
3270 val
+= field_type
->bounds ()->bias
;
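
/* Illustrative worked example (not compiled in): extracting a 5-bit
   little-endian field that starts at bit 11 of a buffer, following the
   arithmetic used by unpack_bits_as_long above.  The byte values are
   made up.  */
#if 0
static LONGEST
example_unpack_bits ()
{
  /* bitpos = 11, bitsize = 5:
       bytes_read  = ((11 % 8) + 5 + 7) / 8 = 2
       read_offset = 11 / 8 = 1, so bytes 1 and 2 are read;
       lsbcount    = bitpos % 8 = 3 (little-endian), so the word is
                     shifted right by 3 and masked with (1 << 5) - 1.  */
  const gdb_byte buf[] = { 0x00, 0xa8, 0x01 };  /* Bits 11..15 hold 0b10101.  */
  ULONGEST word = extract_unsigned_integer (buf + 1, 2, BFD_ENDIAN_LITTLE);

  return (word >> 3) & 0x1f;    /* == 0x15 (21).  */
}
#endif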
/* Unpack a field FIELDNO of the specified TYPE, from the object at
   VALADDR + EMBEDDED_OFFSET.  VALADDR points to the contents of
   ORIGINAL_VALUE, which must not be NULL.  See
   unpack_value_bits_as_long for more details.  */

int
unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
                            LONGEST embedded_offset, int fieldno,
                            const struct value *val, LONGEST *result)
{
  int bitpos = type->field (fieldno).loc_bitpos ();
  int bitsize = type->field (fieldno).bitsize ();
  struct type *field_type = type->field (fieldno).type ();
  int bit_offset;

  gdb_assert (val != NULL);

  bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (val->bits_any_optimized_out (bit_offset, bitsize)
      || !val->bits_available (bit_offset, bitsize))
    return 0;

  *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
                                 bitpos, bitsize);
  return 1;
}

LONGEST
unpack_field_as_long (const gdb_byte *valaddr, struct field *field)
{
  int bitpos = field->loc_bitpos ();
  int bitsize = field->bitsize ();
  struct type *field_type = field->type ();

  return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
}

LONGEST
unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
{
  return unpack_field_as_long (valaddr, &type->field (fieldno));
}
3325 value::unpack_bitfield (struct value
*dest_val
,
3326 LONGEST bitpos
, LONGEST bitsize
,
3327 const gdb_byte
*valaddr
, LONGEST embedded_offset
)
3330 enum bfd_endian byte_order
;
3333 struct type
*field_type
= dest_val
->type ();
3335 byte_order
= type_byte_order (field_type
);
3337 /* First, unpack and sign extend the bitfield as if it was wholly
3338 valid. Optimized out/unavailable bits are read as zero, but
3339 that's OK, as they'll end up marked below. If the VAL is
3340 wholly-invalid we may have skipped allocating its contents,
3341 though. See value::allocate_optimized_out. */
3342 if (valaddr
!= NULL
)
3346 num
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3348 store_signed_integer (dest_val
->contents_raw ().data (),
3349 field_type
->length (), byte_order
, num
);
3352 /* Now copy the optimized out / unavailability ranges to the right
3354 src_bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3355 if (byte_order
== BFD_ENDIAN_BIG
)
3356 dst_bit_offset
= field_type
->length () * TARGET_CHAR_BIT
- bitsize
;
3359 ranges_copy_adjusted (dest_val
, dst_bit_offset
, src_bit_offset
, bitsize
);
/* Return a new value with type TYPE, which is FIELDNO field of the
   object at VALADDR + EMBEDDEDOFFSET.  VALADDR points to the contents
   of VAL.  If the VAL's contents required to extract the bitfield
   from are unavailable/optimized out, the new value is
   correspondingly marked unavailable/optimized out.  */

struct value *
value_field_bitfield (struct type *type, int fieldno,
                      const gdb_byte *valaddr,
                      LONGEST embedded_offset, const struct value *val)
{
  int bitpos = type->field (fieldno).loc_bitpos ();
  int bitsize = type->field (fieldno).bitsize ();
  struct value *res_val = value::allocate (type->field (fieldno).type ());

  val->unpack_bitfield (res_val, bitpos, bitsize, valaddr, embedded_offset);

  return res_val;
}
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
              LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
         we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
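
/* Illustrative worked example (not compiled in): writing the value 5 into
   a 3-bit field at bit position 4 of a single byte, mirroring the
   mask/shift sequence of modify_field above (assuming a 64-bit ULONGEST;
   with a one-byte word the byte order does not matter).  */
#if 0
static void
example_modify_field ()
{
  gdb_byte byte = 0xff;

  /* mask = (ULONGEST) -1 >> (64 - 3) = 0x7; bytesize = (4 + 3 + 7) / 8 = 1.  */
  ULONGEST mask = 0x7;
  ULONGEST oword = byte;

  oword &= ~(mask << 4);        /* 0xff & ~0x70 == 0x8f */
  oword |= (ULONGEST) 5 << 4;   /* 0x8f |  0x50 == 0xdf */
  byte = (gdb_byte) oword;

  /* Equivalent to: modify_field (some_int_type, &byte, 5, 4, 3);
     where some_int_type is a hypothetical one-byte integer type.  */
  gdb_assert (byte == 0xdf);
}
#endif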
3434 /* Pack NUM into BUF using a target format of TYPE. */
3437 pack_long (gdb_byte
*buf
, struct type
*type
, LONGEST num
)
3439 enum bfd_endian byte_order
= type_byte_order (type
);
3442 type
= check_typedef (type
);
3443 len
= type
->length ();
3445 switch (type
->code ())
3447 case TYPE_CODE_RANGE
:
3448 num
-= type
->bounds ()->bias
;
3451 case TYPE_CODE_CHAR
:
3452 case TYPE_CODE_ENUM
:
3453 case TYPE_CODE_FLAGS
:
3454 case TYPE_CODE_BOOL
:
3455 case TYPE_CODE_MEMBERPTR
:
3456 if (type
->bit_size_differs_p ())
3458 unsigned bit_off
= type
->bit_offset ();
3459 unsigned bit_size
= type
->bit_size ();
3460 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3463 store_signed_integer (buf
, len
, byte_order
, num
);
3467 case TYPE_CODE_RVALUE_REF
:
3469 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3473 case TYPE_CODE_DECFLOAT
:
3474 target_float_from_longest (buf
, type
, num
);
3478 error (_("Unexpected type (%d) encountered for integer constant."),
3484 /* Pack NUM into BUF using a target format of TYPE. */
3487 pack_unsigned_long (gdb_byte
*buf
, struct type
*type
, ULONGEST num
)
3490 enum bfd_endian byte_order
;
3492 type
= check_typedef (type
);
3493 len
= type
->length ();
3494 byte_order
= type_byte_order (type
);
3496 switch (type
->code ())
3499 case TYPE_CODE_CHAR
:
3500 case TYPE_CODE_ENUM
:
3501 case TYPE_CODE_FLAGS
:
3502 case TYPE_CODE_BOOL
:
3503 case TYPE_CODE_RANGE
:
3504 case TYPE_CODE_MEMBERPTR
:
3505 if (type
->bit_size_differs_p ())
3507 unsigned bit_off
= type
->bit_offset ();
3508 unsigned bit_size
= type
->bit_size ();
3509 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3512 store_unsigned_integer (buf
, len
, byte_order
, num
);
3516 case TYPE_CODE_RVALUE_REF
:
3518 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3522 case TYPE_CODE_DECFLOAT
:
3523 target_float_from_ulongest (buf
, type
, num
);
3527 error (_("Unexpected type (%d) encountered "
3528 "for unsigned integer constant."),
struct value *
value::zero (struct type *type, enum lval_type lv)
{
  struct value *val = value::allocate_lazy (type);

  val->set_lval (lv == lval_computed ? not_lval : lv);
  val->m_is_zero = true;

  return val;
}

/* Convert C numbers into newly allocated values.  */

struct value *
value_from_longest (struct type *type, LONGEST num)
{
  struct value *val = value::allocate (type);

  pack_long (val->contents_raw ().data (), type, num);

  return val;
}

/* Convert C unsigned numbers into newly allocated values.  */

struct value *
value_from_ulongest (struct type *type, ULONGEST num)
{
  struct value *val = value::allocate (type);

  pack_unsigned_long (val->contents_raw ().data (), type, num);

  return val;
}
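
/* Illustrative sketch (not compiled in): building host-side constants as
   GDB values with the constructors above, using the per-architecture
   builtin types.  */
#if 0
static struct value *
example_make_constants ()
{
  struct gdbarch *gdbarch = current_inferior ()->arch ();

  /* A signed int holding -1; pack_long stores it in target format.  */
  struct value *minus_one
    = value_from_longest (builtin_type (gdbarch)->builtin_int, -1);
  gdb_assert (value_as_long (minus_one) == -1);

  /* An unsigned long with all bits set; pack_unsigned_long applies no
     sign extension.  */
  return value_from_ulongest (builtin_type (gdbarch)->builtin_unsigned_long,
                              ~(ULONGEST) 0);
}
#endif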
3572 value_from_mpz (struct type
*type
, const gdb_mpz
&v
)
3574 struct type
*real_type
= check_typedef (type
);
3576 const gdb_mpz
*val
= &v
;
3578 if (real_type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
3582 storage
-= type
->bounds ()->bias
;
3585 if (type
->bit_size_differs_p ())
3587 unsigned bit_off
= type
->bit_offset ();
3588 unsigned bit_size
= type
->bit_size ();
3590 if (val
!= &storage
)
3596 storage
.mask (bit_size
);
3597 storage
<<= bit_off
;
3600 struct value
*result
= value::allocate (type
);
3601 val
->truncate (result
->contents_raw (), type_byte_order (type
),
3602 type
->is_unsigned ());
/* Create a value representing a pointer of type TYPE to the address
   ADDR.  */

struct value *
value_from_pointer (struct type *type, CORE_ADDR addr)
{
  struct value *val = value::allocate (type);

  store_typed_address (val->contents_raw ().data (),
                       check_typedef (type), addr);
  return val;
}

/* Create and return a value object of TYPE containing the value D.  The
   TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
   it is converted to target format.  */

struct value *
value_from_host_double (struct type *type, double d)
{
  struct value *value = value::allocate (type);
  gdb_assert (type->code () == TYPE_CODE_FLT);
  target_float_from_host_double (value->contents_raw ().data (),
                                 value->type (), d);
  return value;
}
3633 /* Create a value of type TYPE whose contents come from VALADDR, if it
3634 is non-null, and whose memory address (in the inferior) is
3635 ADDRESS. The type of the created value may differ from the passed
3636 type TYPE. Make sure to retrieve values new type after this call.
3637 Note that TYPE is not passed through resolve_dynamic_type; this is
3638 a special API intended for use only by Ada. */
3641 value_from_contents_and_address_unresolved (struct type
*type
,
3642 const gdb_byte
*valaddr
,
3647 if (valaddr
== NULL
)
3648 v
= value::allocate_lazy (type
);
3650 v
= value_from_contents (type
, valaddr
);
3651 v
->set_lval (lval_memory
);
3652 v
->set_address (address
);
3656 /* Create a value of type TYPE whose contents come from VALADDR, if it
3657 is non-null, and whose memory address (in the inferior) is
3658 ADDRESS. The type of the created value may differ from the passed
3659 type TYPE. Make sure to retrieve values new type after this call. */
3662 value_from_contents_and_address (struct type
*type
,
3663 const gdb_byte
*valaddr
,
3665 const frame_info_ptr
&frame
)
3667 gdb::array_view
<const gdb_byte
> view
;
3668 if (valaddr
!= nullptr)
3669 view
= gdb::make_array_view (valaddr
, type
->length ());
3670 struct type
*resolved_type
= resolve_dynamic_type (type
, view
, address
,
3672 struct type
*resolved_type_no_typedef
= check_typedef (resolved_type
);
3675 if (resolved_type_no_typedef
->code () == TYPE_CODE_ARRAY
3676 && resolved_type_no_typedef
->bound_optimized_out ())
3678 /* Resolution found that the bounds are optimized out. In this
3679 case, mark the array itself as optimized-out. */
3680 v
= value::allocate_optimized_out (resolved_type
);
3682 else if (valaddr
== nullptr)
3683 v
= value::allocate_lazy (resolved_type
);
3685 v
= value_from_contents (resolved_type
, valaddr
);
3686 if (TYPE_DATA_LOCATION (resolved_type_no_typedef
) != NULL
3687 && TYPE_DATA_LOCATION (resolved_type_no_typedef
)->is_constant ())
3688 address
= TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef
);
3689 v
->set_lval (lval_memory
);
3690 v
->set_address (address
);
/* Create a value of type TYPE holding the contents CONTENTS.
   The new value is `not_lval'.  */

struct value *
value_from_contents (struct type *type, const gdb_byte *contents)
{
  struct value *result;

  result = value::allocate (type);
  memcpy (result->contents_raw ().data (), contents, type->length ());
  return result;
}
3707 /* Extract a value from the history file. Input will be of the form
3708 $digits or $$digits. See block comment above 'write_dollar_variable'
3712 value_from_history_ref (const char *h
, const char **endp
)
3724 /* Find length of numeral string. */
3725 for (; isdigit (h
[len
]); len
++)
3728 /* Make sure numeral string is not part of an identifier. */
3729 if (h
[len
] == '_' || isalpha (h
[len
]))
3732 /* Now collect the index value. */
3737 /* For some bizarre reason, "$$" is equivalent to "$$1",
3738 rather than to "$$0" as it ought to be! */
3746 index
= -strtol (&h
[2], &local_end
, 10);
3754 /* "$" is equivalent to "$0". */
3762 index
= strtol (&h
[1], &local_end
, 10);
3767 return access_value_history (index
);
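
/* Illustrative sketch (not compiled in): the history reference forms
   accepted by value_from_history_ref above.  ENDP is advanced past the
   parsed text; the input strings here are hypothetical.  */
#if 0
static void
example_history_refs ()
{
  const char *end;

  value_from_history_ref ("$", &end);   /* Last history value ("$0").  */
  value_from_history_ref ("$7", &end);  /* History item number 7.  */
  value_from_history_ref ("$$", &end);  /* Next-to-last value ("$$1").  */
  value_from_history_ref ("$$3", &end); /* Three items before the last.  */
}
#endif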
/* Get the component value (offset by OFFSET bytes) of a struct or
   union WHOLE.  Component's type is TYPE.  */

struct value *
value_from_component (struct value *whole, struct type *type, LONGEST offset)
{
  struct value *v;

  if (whole->lval () == lval_memory && whole->lazy ())
    v = value::allocate_lazy (type);
  else
    {
      v = value::allocate (type);
      whole->contents_copy (v, v->embedded_offset (),
                            whole->embedded_offset () + offset,
                            type_length_units (type));
    }
  v->set_offset (whole->offset () + offset + whole->embedded_offset ());
  v->set_component_location (whole);

  return v;
}
3796 value::from_component_bitsize (struct type
*type
,
3797 LONGEST bit_offset
, LONGEST bit_length
)
3799 gdb_assert (!lazy ());
3801 /* Preserve lvalue-ness if possible. This is needed to avoid
3802 array-printing failures (including crashes) when printing Ada
3803 arrays in programs compiled with -fgnat-encodings=all. */
3804 if ((bit_offset
% TARGET_CHAR_BIT
) == 0
3805 && (bit_length
% TARGET_CHAR_BIT
) == 0
3806 && bit_length
== TARGET_CHAR_BIT
* type
->length ())
3807 return value_from_component (this, type
, bit_offset
/ TARGET_CHAR_BIT
);
3809 struct value
*v
= value::allocate (type
);
3811 LONGEST dst_offset
= TARGET_CHAR_BIT
* v
->embedded_offset ();
3812 if (is_scalar_type (type
) && type_byte_order (type
) == BFD_ENDIAN_BIG
)
3813 dst_offset
+= TARGET_CHAR_BIT
* type
->length () - bit_length
;
3815 contents_copy_raw_bitwise (v
, dst_offset
,
3817 * embedded_offset ()
struct value *
coerce_ref_if_computed (const struct value *arg)
{
  const struct lval_funcs *funcs;

  if (!TYPE_IS_REFERENCE (check_typedef (arg->type ())))
    return NULL;

  if (arg->lval () != lval_computed)
    return NULL;

  funcs = arg->computed_funcs ();
  if (funcs->coerce_ref == NULL)
    return NULL;

  return funcs->coerce_ref (arg);
}
3841 /* Look at value.h for description. */
3844 readjust_indirect_value_type (struct value
*value
, struct type
*enc_type
,
3845 const struct type
*original_type
,
3846 struct value
*original_value
,
3847 CORE_ADDR original_value_address
)
3849 gdb_assert (original_type
->is_pointer_or_reference ());
3851 struct type
*original_target_type
= original_type
->target_type ();
3852 gdb::array_view
<const gdb_byte
> view
;
3853 struct type
*resolved_original_target_type
3854 = resolve_dynamic_type (original_target_type
, view
,
3855 original_value_address
);
3857 /* Re-adjust type. */
3858 value
->deprecated_set_type (resolved_original_target_type
);
3860 /* Add embedding info. */
3861 value
->set_enclosing_type (enc_type
);
3862 value
->set_embedded_offset (original_value
->pointed_to_offset ());
3864 /* We may be pointing to an object of some derived type. */
3865 return value_full_object (value
, NULL
, 0, 0, 0);
3869 coerce_ref (struct value
*arg
)
3871 struct type
*value_type_arg_tmp
= check_typedef (arg
->type ());
3872 struct value
*retval
;
3873 struct type
*enc_type
;
3875 retval
= coerce_ref_if_computed (arg
);
3879 if (!TYPE_IS_REFERENCE (value_type_arg_tmp
))
3882 enc_type
= check_typedef (arg
->enclosing_type ());
3883 enc_type
= enc_type
->target_type ();
3885 CORE_ADDR addr
= unpack_pointer (arg
->type (), arg
->contents ().data ());
3886 retval
= value_at_lazy (enc_type
, addr
);
3887 enc_type
= retval
->type ();
3888 return readjust_indirect_value_type (retval
, enc_type
, value_type_arg_tmp
,
struct value *
coerce_array (struct value *arg)
{
  struct type *type;

  arg = coerce_ref (arg);
  type = check_typedef (arg->type ());

  switch (type->code ())
    {
    case TYPE_CODE_ARRAY:
      if (!type->is_vector () && current_language->c_style_arrays_p ())
        arg = value_coerce_array (arg);
      break;
    case TYPE_CODE_FUNC:
      arg = value_coerce_function (arg);
      break;
    }
  return arg;
}
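
/* Illustrative sketch (not compiled in): the usual coercion applied to an
   operand before arithmetic or a call.  ARRAY_VAL is a hypothetical value
   of a C array type; after coerce_array it has pointer-to-element type,
   matching the decay the compiler would perform.  References are also
   dereferenced, because coerce_array calls coerce_ref first.  */
#if 0
static struct value *
example_coerce (struct value *array_val)
{
  struct value *decayed = coerce_array (array_val);
  return decayed;
}
#endif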
/* Return the return value convention that will be used for the
   specified type.  */

enum return_value_convention
struct_return_convention (struct gdbarch *gdbarch,
                          struct value *function, struct type *value_type)
{
  enum type_code code = value_type->code ();

  if (code == TYPE_CODE_ERROR)
    error (_("Function return type unknown."));

  /* Probe the architecture for the return-value convention.  */
  return gdbarch_return_value_as_value (gdbarch, function, value_type,
                                        NULL, NULL, NULL);
}

/* Return true if the function returning the specified type is using
   the convention of returning structures in memory (passing in the
   address as a hidden first parameter).  */

int
using_struct_return (struct gdbarch *gdbarch,
                     struct value *function, struct type *value_type)
{
  if (value_type->code () == TYPE_CODE_VOID)
    /* A void return value is never in memory.  See also corresponding
       code in "print_return_value".  */
    return 0;

  return (struct_return_convention (gdbarch, function, value_type)
          != RETURN_VALUE_REGISTER_CONVENTION);
}
void
value::fetch_lazy_bitfield ()
{
  gdb_assert (bitsize () != 0);

  /* To read a lazy bitfield, read the entire enclosing value.  This
     prevents reading the same block of (possibly volatile) memory once
     per bitfield.  It would be even better to read only the containing
     word, but we have no way to record that just specific bits of a
     value have been fetched.  */
  struct value *parent = this->parent ();

  if (parent->lazy ())
    parent->fetch_lazy ();

  parent->unpack_bitfield (this, bitpos (), bitsize (),
                           parent->contents_for_printing ().data (),
                           offset ());
}
3973 value::fetch_lazy_memory ()
3975 gdb_assert (m_lval
== lval_memory
);
3977 CORE_ADDR addr
= address ();
3978 struct type
*type
= check_typedef (enclosing_type ());
3980 /* Figure out how much we should copy from memory. Usually, this is just
3981 the size of the type, but, for arrays, we might only be loading a
3982 small part of the array (this is only done for very large arrays). */
3984 if (m_limited_length
> 0)
3986 gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY
);
3987 len
= m_limited_length
;
3989 else if (type
->length () > 0)
3990 len
= type_length_units (type
);
3992 gdb_assert (len
>= 0);
3995 read_value_memory (this, 0, stack (), addr
,
3996 contents_all_raw ().data (), len
);
3998 /* If only part of an array was loaded, mark the rest as unavailable. */
3999 if (m_limited_length
> 0)
4000 mark_bytes_unavailable (m_limited_length
,
4001 m_enclosing_type
->length () - m_limited_length
);
4007 value::fetch_lazy_register ()
4009 struct type
*type
= check_typedef (this->type ());
4010 struct value
*new_val
= this;
4012 scoped_value_mark mark
;
4014 /* Offsets are not supported here; lazy register values must
4015 refer to the entire register. */
4016 gdb_assert (offset () == 0);
4018 while (new_val
->lval () == lval_register
&& new_val
->lazy ())
4020 frame_id next_frame_id
= new_val
->next_frame_id ();
4021 frame_info_ptr next_frame
= frame_find_by_id (next_frame_id
);
4022 gdb_assert (next_frame
!= NULL
);
4024 int regnum
= new_val
->regnum ();
4026 /* Convertible register routines are used for multi-register
4027 values and for interpretation in different types
4028 (e.g. float or int from a double register). Lazy
4029 register values should have the register's natural type,
4030 so they do not apply. */
4031 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame
),
4034 new_val
= frame_unwind_register_value (next_frame
, regnum
);
4036 /* If we get another lazy lval_register value, it means the
4037 register is found by reading it from NEXT_FRAME's next frame.
4038 frame_unwind_register_value should never return a value with
4039 the frame id pointing to NEXT_FRAME. If it does, it means we
4040 either have two consecutive frames with the same frame id
4041 in the frame chain, or some code is trying to unwind
4042 behind get_prev_frame's back (e.g., a frame unwind
4043 sniffer trying to unwind), bypassing its validations. In
4044 any case, it should always be an internal error to end up
4045 in this situation. */
4046 if (new_val
->lval () == lval_register
4048 && new_val
->next_frame_id () == next_frame_id
)
4049 internal_error (_("infinite loop while fetching a register"));
4052 /* If it's still lazy (for instance, a saved register on the
4053 stack), fetch it. */
4054 if (new_val
->lazy ())
4055 new_val
->fetch_lazy ();
4057 /* Copy the contents and the unavailability/optimized-out
4058 meta-data from NEW_VAL to VAL. */
4060 new_val
->contents_copy (this, embedded_offset (),
4061 new_val
->embedded_offset (),
4062 type_length_units (type
));
4066 frame_info_ptr frame
= frame_find_by_id (this->next_frame_id ());
4067 frame
= get_prev_frame_always (frame
);
4068 int regnum
= this->regnum ();
4069 gdbarch
*gdbarch
= get_frame_arch (frame
);
4071 string_file debug_file
;
4072 gdb_printf (&debug_file
,
4073 "(frame=%d, regnum=%d(%s), ...) ",
4074 frame_relative_level (frame
), regnum
,
4075 user_reg_map_regnum_to_name (gdbarch
, regnum
));
4077 gdb_printf (&debug_file
, "->");
4078 if (new_val
->optimized_out ())
4080 gdb_printf (&debug_file
, " ");
4081 val_print_optimized_out (new_val
, &debug_file
);
4085 if (new_val
->lval () == lval_register
)
4086 gdb_printf (&debug_file
, " register=%d", new_val
->regnum ());
4087 else if (new_val
->lval () == lval_memory
)
4088 gdb_printf (&debug_file
, " address=%s",
4090 new_val
->address ()));
4092 gdb_printf (&debug_file
, " computed");
4094 if (new_val
->entirely_available ())
4097 gdb::array_view
<const gdb_byte
> buf
= new_val
->contents ();
4099 gdb_printf (&debug_file
, " bytes=");
4100 gdb_printf (&debug_file
, "[");
4101 for (i
= 0; i
< register_size (gdbarch
, regnum
); i
++)
4102 gdb_printf (&debug_file
, "%02x", buf
[i
]);
4103 gdb_printf (&debug_file
, "]");
4105 else if (new_val
->entirely_unavailable ())
4106 gdb_printf (&debug_file
, " unavailable");
4108 gdb_printf (&debug_file
, " partly unavailable");
4111 frame_debug_printf ("%s", debug_file
.c_str ());
void
value::fetch_lazy ()
{
  gdb_assert (lazy ());
  allocate_contents (true);
  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (m_optimized_out.empty ());
  gdb_assert (m_unavailable.empty ());
  if (m_is_zero)
    {
      /* Nothing.  */
    }
  else if (bitsize ())
    fetch_lazy_bitfield ();
  else if (this->lval () == lval_memory)
    fetch_lazy_memory ();
  else if (this->lval () == lval_register)
    fetch_lazy_register ();
  else if (this->lval () == lval_computed
           && computed_funcs ()->read != NULL)
    computed_funcs ()->read (this);
  else
    internal_error (_("Unexpected lazy value type."));

  set_lazy (false);
}
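
/* Illustrative sketch (not compiled in): how lazy values are usually
   created and later materialized by fetch_lazy above.  TYPE and ADDR are
   hypothetical; value_at_lazy only records the location, and the target
   is not read until the contents are needed.  */
#if 0
static LONGEST
example_lazy_read (struct type *type, CORE_ADDR addr)
{
  struct value *v = value_at_lazy (type, addr); /* No target access yet.  */

  if (v->lazy ())
    v->fetch_lazy ();           /* Dispatches to fetch_lazy_memory here.  */

  return value_as_long (v);     /* Contents are now available.  */
}
#endif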
4149 pseudo_from_raw_part (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4150 int raw_reg_num
, int raw_offset
)
4152 value
*pseudo_reg_val
4153 = value::allocate_register (next_frame
, pseudo_reg_num
);
4154 value
*raw_reg_val
= value_of_register (raw_reg_num
, next_frame
);
4155 raw_reg_val
->contents_copy (pseudo_reg_val
, 0, raw_offset
,
4156 pseudo_reg_val
->type ()->length ());
4157 return pseudo_reg_val
;
4163 pseudo_to_raw_part (const frame_info_ptr
&next_frame
,
4164 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4165 int raw_reg_num
, int raw_offset
)
4168 = register_size (frame_unwind_arch (next_frame
), raw_reg_num
);
4170 /* When overflowing a register, put_frame_register_bytes writes to the
4171 subsequent registers. We don't want that behavior here, so make sure
4172 the write is wholly within register RAW_REG_NUM. */
4173 gdb_assert (raw_offset
+ pseudo_buf
.size () <= raw_reg_size
);
4174 put_frame_register_bytes (next_frame
, raw_reg_num
, raw_offset
, pseudo_buf
);
4180 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4181 int raw_reg_1_num
, int raw_reg_2_num
)
4183 value
*pseudo_reg_val
4184 = value::allocate_register (next_frame
, pseudo_reg_num
);
4187 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4188 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4189 raw_reg_1_val
->type ()->length ());
4190 dst_offset
+= raw_reg_1_val
->type ()->length ();
4192 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4193 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4194 raw_reg_2_val
->type ()->length ());
4195 dst_offset
+= raw_reg_2_val
->type ()->length ();
4197 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4199 return pseudo_reg_val
;
4205 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4206 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4207 int raw_reg_1_num
, int raw_reg_2_num
)
4210 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4212 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4213 put_frame_register (next_frame
, raw_reg_1_num
,
4214 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4215 src_offset
+= raw_reg_1_size
;
4217 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4218 put_frame_register (next_frame
, raw_reg_2_num
,
4219 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4220 src_offset
+= raw_reg_2_size
;
4222 gdb_assert (src_offset
== pseudo_buf
.size ());
4228 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4229 int raw_reg_1_num
, int raw_reg_2_num
,
4232 value
*pseudo_reg_val
4233 = value::allocate_register (next_frame
, pseudo_reg_num
);
4236 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4237 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4238 raw_reg_1_val
->type ()->length ());
4239 dst_offset
+= raw_reg_1_val
->type ()->length ();
4241 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4242 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4243 raw_reg_2_val
->type ()->length ());
4244 dst_offset
+= raw_reg_2_val
->type ()->length ();
4246 value
*raw_reg_3_val
= value_of_register (raw_reg_3_num
, next_frame
);
4247 raw_reg_3_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4248 raw_reg_3_val
->type ()->length ());
4249 dst_offset
+= raw_reg_3_val
->type ()->length ();
4251 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4253 return pseudo_reg_val
;
4259 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4260 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4261 int raw_reg_1_num
, int raw_reg_2_num
, int raw_reg_3_num
)
4264 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4266 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4267 put_frame_register (next_frame
, raw_reg_1_num
,
4268 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4269 src_offset
+= raw_reg_1_size
;
4271 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4272 put_frame_register (next_frame
, raw_reg_2_num
,
4273 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4274 src_offset
+= raw_reg_2_size
;
4276 int raw_reg_3_size
= register_size (arch
, raw_reg_3_num
);
4277 put_frame_register (next_frame
, raw_reg_3_num
,
4278 pseudo_buf
.slice (src_offset
, raw_reg_3_size
));
4279 src_offset
+= raw_reg_3_size
;
4281 gdb_assert (src_offset
== pseudo_buf
.size ());
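
/* Illustrative sketch (not compiled in): how an architecture's pseudo
   register read/write hooks might use the helpers above.  The register
   numbers are hypothetical; pseudo register PSEUDO_D0 is assumed to be
   the concatenation of raw registers RAW_LO and RAW_HI.  */
#if 0
static value *
example_pseudo_read (const frame_info_ptr &next_frame, int pseudo_d0)
{
  const int raw_lo = 10, raw_hi = 11;   /* Hypothetical raw regnums.  */
  return pseudo_from_concat_raw (next_frame, pseudo_d0, raw_lo, raw_hi);
}

static void
example_pseudo_write (const frame_info_ptr &next_frame,
                      gdb::array_view<const gdb_byte> buf)
{
  const int raw_lo = 10, raw_hi = 11;
  /* Splits BUF and writes each part into its raw register.  */
  pseudo_to_concat_raw (next_frame, buf, raw_lo, raw_hi);
}
#endif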
/* Implementation of the convenience function $_isvoid.  */

static struct value *
isvoid_internal_fn (struct gdbarch *gdbarch,
                    const struct language_defn *language,
                    void *cookie, int argc, struct value **argv)
{
  int ret;

  if (argc != 1)
    error (_("You must provide one argument for $_isvoid."));

  ret = argv[0]->type ()->code () == TYPE_CODE_VOID;

  return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
}

/* Implementation of the convenience function $_creal.  Extracts the
   real part from a complex number.  */

static struct value *
creal_internal_fn (struct gdbarch *gdbarch,
                   const struct language_defn *language,
                   void *cookie, int argc, struct value **argv,
                   enum noside noside)
{
  if (argc != 1)
    error (_("You must provide one argument for $_creal."));

  value *cval = argv[0];
  type *ctype = check_typedef (cval->type ());
  if (ctype->code () != TYPE_CODE_COMPLEX)
    error (_("expected a complex number"));
  if (noside == EVAL_AVOID_SIDE_EFFECTS)
    return value::zero (ctype->target_type (), not_lval);
  return value_real_part (cval);
}

/* Implementation of the convenience function $_cimag.  Extracts the
   imaginary part from a complex number.  */

static struct value *
cimag_internal_fn (struct gdbarch *gdbarch,
                   const struct language_defn *language,
                   void *cookie, int argc,
                   struct value **argv, enum noside noside)
{
  if (argc != 1)
    error (_("You must provide one argument for $_cimag."));

  value *cval = argv[0];
  type *ctype = check_typedef (cval->type ());
  if (ctype->code () != TYPE_CODE_COMPLEX)
    error (_("expected a complex number"));
  if (noside == EVAL_AVOID_SIDE_EFFECTS)
    return value::zero (ctype->target_type (), not_lval);
  return value_imaginary_part (cval);
}
4347 /* Test the ranges_contain function. */
4350 test_ranges_contain ()
4352 std::vector
<range
> ranges
;
4358 ranges
.push_back (r
);
4363 ranges
.push_back (r
);
4366 SELF_CHECK (!ranges_contain (ranges
, 2, 5));
4368 SELF_CHECK (ranges_contain (ranges
, 9, 5));
4370 SELF_CHECK (ranges_contain (ranges
, 10, 2));
4372 SELF_CHECK (ranges_contain (ranges
, 10, 5));
4374 SELF_CHECK (ranges_contain (ranges
, 13, 6));
4376 SELF_CHECK (ranges_contain (ranges
, 14, 5));
4378 SELF_CHECK (!ranges_contain (ranges
, 15, 4));
4380 SELF_CHECK (!ranges_contain (ranges
, 16, 4));
4382 SELF_CHECK (ranges_contain (ranges
, 16, 6));
4384 SELF_CHECK (ranges_contain (ranges
, 21, 1));
4386 SELF_CHECK (ranges_contain (ranges
, 21, 5));
4388 SELF_CHECK (!ranges_contain (ranges
, 26, 3));
4391 /* Check that RANGES contains the same ranges as EXPECTED. */
4394 check_ranges_vector (gdb::array_view
<const range
> ranges
,
4395 gdb::array_view
<const range
> expected
)
4397 return ranges
== expected
;

/* Test the insert_into_bit_range_vector function.  */

static void
test_insert_into_bit_range_vector ()
{
  std::vector<range> ranges;

  /* [10, 14] */
  {
    insert_into_bit_range_vector (&ranges, 10, 5);
    static const range expected[] = {
      {10, 5},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] -- inserting a subrange leaves the vector unchanged.  */
  {
    insert_into_bit_range_vector (&ranges, 11, 4);
    static const range expected = {10, 5};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [20, 24] */
  {
    insert_into_bit_range_vector (&ranges, 20, 5);
    static const range expected[] = {
      {10, 5},
      {20, 5},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 17, 5);
    static const range expected[] = {
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 8] [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 2, 7);
    static const range expected[] = {
      {2, 7},
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] -- [9, 9] bridges the first two ranges.  */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] -- inserting [9, 9] again is a no-op.  */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 33] */
  {
    insert_into_bit_range_vector (&ranges, 4, 30);
    static const range expected = {2, 32};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }
}

static void
test_value_copy ()
{
  type *type = builtin_type (current_inferior ()->arch ())->builtin_int;

  /* Verify that we can copy an entirely optimized out value, that may not have
     its contents allocated.  */
  value_ref_ptr val = release_value (value::allocate_optimized_out (type));
  value_ref_ptr copy = release_value (val->copy ());

  SELF_CHECK (val->entirely_optimized_out ());
  SELF_CHECK (copy->entirely_optimized_out ());
}

} /* namespace selftests */
#endif /* GDB_SELF_TEST */
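
/* In a build configured with self tests, the checks above can be run
   interactively once registered below; a rough sketch of the expected
   interaction:

     (gdb) maintenance selftest ranges_contain
     Running selftest ranges_contain.
     Ran 1 unit tests, 0 failed  */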

INIT_GDB_FILE (values)
{
  cmd_list_element *show_convenience_cmd
    = add_cmd ("convenience", no_class, show_convenience, _("\
Debugger convenience (\"$foo\") variables and functions.\n\
Convenience variables are created when you assign them values;\n\
thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
\n\
A few convenience variables are given values automatically:\n\
\"$_\" holds the last address examined with \"x\" or \"info lines\",\n\
\"$__\" holds the contents of the last address examined with \"x\"."
#ifdef HAVE_PYTHON
"\n\n\
Convenience functions are defined via the Python API."
#endif
               ), &showlist);
  add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4517 add_cmd ("values", no_set_class
, show_values
, _("\
4518 Elements of value history around item number IDX (or last ten)."),
4521 add_com ("init-if-undefined", class_vars
, init_if_undefined_command
, _("\
4522 Initialize a convenience variable if necessary.\n\
4523 init-if-undefined VARIABLE = EXPRESSION\n\
4524 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4525 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4526 VARIABLE is already initialized."));

  add_prefix_cmd ("function", no_class, function_command, _("\
Placeholder command for showing help on convenience functions."),
                  &functionlist, 0, &cmdlist);

  add_internal_function ("_isvoid", _("\
Check whether an expression is void.\n\
Usage: $_isvoid (expression)\n\
Return 1 if the expression is void, zero otherwise."),
                         isvoid_internal_fn, NULL);

  add_internal_function ("_creal", _("\
Extract the real part of a complex number.\n\
Usage: $_creal (expression)\n\
Return the real part of a complex number; the result type depends on\n\
the type of the complex argument."),
                         creal_internal_fn, NULL);

  add_internal_function ("_cimag", _("\
Extract the imaginary part of a complex number.\n\
Usage: $_cimag (expression)\n\
Return the imaginary part of a complex number; the result type depends\n\
on the type of the complex argument."),
                         cimag_internal_fn, NULL);

  add_setshow_zuinteger_unlimited_cmd ("max-value-size",
                                       class_support, &max_value_size, _("\
Set the maximum size of a value gdb will load from the inferior."), _("\
Show the maximum size of a value gdb will load from the inferior."), _("\
Use this to control the maximum size, in bytes, of a value that gdb\n\
will load from the inferior.  Setting this value to 'unlimited'\n\
disables checking.\n\
Setting this does not invalidate already allocated values, it only\n\
prevents future values, larger than this size, from being allocated."),
                                       NULL,
                                       show_max_value_size,
                                       &setlist, &showlist);
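
  /* Usage sketch (illustrative): the limit registered above is driven
     from the CLI, roughly like

       (gdb) set max-value-size 65536
       (gdb) show max-value-size
       Maximum value size is 65536 bytes.

     or "set max-value-size unlimited" to disable the check.  */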

  set_show_commands vsize_limit
    = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
                                           &max_value_size, _("\
Set the maximum number of bytes allowed in a variable-size object."), _("\
Show the maximum number of bytes allowed in a variable-size object."), _("\
Attempts to access an object whose size is not a compile-time constant\n\
and exceeds this limit will cause an error."),
                                           NULL, NULL, &setlist, &showlist);
  deprecate_cmd (vsize_limit.set, "set max-value-size");

#if GDB_SELF_TEST
  selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
  selftests::register_test ("insert_into_bit_range_vector",
                            selftests::test_insert_into_bit_range_vector);
  selftests::register_test ("value_copy", selftests::test_value_copy);
#endif

  /* Destroy any values currently allocated in a final cleanup instead
     of leaving it to global destructors, because that may be too
     late.  For example, the destructors of xmethod values call into
     the Python runtime.  */
  add_final_cleanup ([] ()
    {
      all_values.clear ();
    });
}