1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2021 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "gdbsupport/selftest.h"
45 #include "gdbsupport/array-view.h"
46 #include "cli/cli-style.h"
47
48 /* Definition of a user function. */
49 struct internal_function
50 {
51 /* The name of the function. It is a bit odd to have this in the
52 function itself -- the user might use a differently-named
53 convenience variable to hold the function. */
54 char *name;
55
56 /* The handler. */
57 internal_function_fn handler;
58
59 /* User data for the handler. */
60 void *cookie;
61 };
62
63 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
64
65 struct range
66 {
67 /* Lowest offset in the range. */
68 LONGEST offset;
69
70 /* Length of the range. */
71 LONGEST length;
72
73 /* Returns true if THIS is strictly less than OTHER, useful for
74 searching. We keep ranges sorted by offset and coalesce
75 overlapping and contiguous ranges, so this just compares the
76 starting offset. */
77
78 bool operator< (const range &other) const
79 {
80 return offset < other.offset;
81 }
82
83 /* Returns true if THIS is equal to OTHER. */
84 bool operator== (const range &other) const
85 {
86 return offset == other.offset && length == other.length;
87 }
88 };
89
90 /* Returns true if the ranges defined by [offset1, offset1+len1) and
91 [offset2, offset2+len2) overlap. */
92
93 static int
94 ranges_overlap (LONGEST offset1, LONGEST len1,
95 LONGEST offset2, LONGEST len2)
96 {
97 ULONGEST h, l;
98
99 l = std::max (offset1, offset2);
100 h = std::min (offset1 + len1, offset2 + len2);
101 return (l < h);
102 }
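/* For illustration: because the ranges are half-open, ranges that
   merely touch do not overlap, e.g.

     ranges_overlap (0, 4, 2, 6);   -> [0,4) vs [2,8): l=2, h=4, true
     ranges_overlap (0, 4, 4, 2);   -> [0,4) vs [4,6): l=4, h=4, false

   Contiguous-but-not-overlapping ranges are instead merged by the
   coalescing logic in insert_into_bit_range_vector, further below.  */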
103
104 /* Returns true if RANGES contains any range that overlaps [OFFSET,
105 OFFSET+LENGTH). */
106
107 static int
108 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
109 LONGEST length)
110 {
111 range what;
112
113 what.offset = offset;
114 what.length = length;
115
116 /* We keep ranges sorted by offset and coalesce overlapping and
117 contiguous ranges, so to check if a range list contains a given
118 range, we can do a binary search for the position the given range
119 would be inserted if we only considered the starting OFFSET of
120 ranges. We call that position I. Since we also have LENGTH to
121 care for (this is a range after all), we need to check if the
122 _previous_ range overlaps the I range. E.g.,
123
124 R
125 |---|
126 |---| |---| |------| ... |--|
127 0 1 2 N
128
129 I=1
130
131 In the case above, the binary search would return `I=1', meaning,
132 this OFFSET should be inserted at position 1, and the current
133 position 1 should be pushed further (and become 2). But, `0'
134 overlaps with R.
135
136 Then we also need to check whether the I range itself overlaps R.
137 E.g.,
138
139 R
140 |---|
141 |---| |---| |-------| ... |--|
142 0 1 2 N
143
144 I=1
145 */
146
147
148 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
149
150 if (i > ranges.begin ())
151 {
152 const struct range &bef = *(i - 1);
153
154 if (ranges_overlap (bef.offset, bef.length, offset, length))
155 return 1;
156 }
157
158 if (i < ranges.end ())
159 {
160 const struct range &r = *i;
161
162 if (ranges_overlap (r.offset, r.length, offset, length))
163 return 1;
164 }
165
166 return 0;
167 }
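/* For illustration, assuming a sorted and coalesced vector
   { [0,4), [10,2) } (offsets and lengths in bits):

     std::vector<range> v = { { 0, 4 }, { 10, 2 } };
     ranges_contain (v, 2, 1);    -> 1   (overlaps [0,4))
     ranges_contain (v, 4, 6);    -> 0   (falls in the gap [4,10))
     ranges_contain (v, 11, 5);   -> 1   (overlaps [10,2))
*/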
168
169 static struct cmd_list_element *functionlist;
170
171 /* Note that the fields in this structure are arranged to save a bit
172 of memory. */
173
174 struct value
175 {
176 explicit value (struct type *type_)
177 : modifiable (1),
178 lazy (1),
179 initialized (1),
180 stack (0),
181 type (type_),
182 enclosing_type (type_)
183 {
184 }
185
186 ~value ()
187 {
188 if (VALUE_LVAL (this) == lval_computed)
189 {
190 const struct lval_funcs *funcs = location.computed.funcs;
191
192 if (funcs->free_closure)
193 funcs->free_closure (this);
194 }
195 else if (VALUE_LVAL (this) == lval_xcallable)
196 delete location.xm_worker;
197 }
198
199 DISABLE_COPY_AND_ASSIGN (value);
200
201 /* Type of value; either not an lval, or one of the various
202 different possible kinds of lval. */
203 enum lval_type lval = not_lval;
204
205 /* Is it modifiable? Only relevant if lval != not_lval. */
206 unsigned int modifiable : 1;
207
208 /* If zero, contents of this value are in the contents field. If
209 nonzero, contents are in inferior. If the lval field is lval_memory,
210 the contents are in inferior memory at location.address plus offset.
211 The lval field may also be lval_register.
212
213 WARNING: This field is used by the code which handles watchpoints
214 (see breakpoint.c) to decide whether a particular value can be
215 watched by hardware watchpoints. If the lazy flag is set for
216 some member of a value chain, it is assumed that this member of
217 the chain doesn't need to be watched as part of watching the
218 value itself. This is how GDB avoids watching the entire struct
219 or array when the user wants to watch a single struct member or
220 array element. If you ever change the way lazy flag is set and
221 reset, be sure to consider this use as well! */
222 unsigned int lazy : 1;
223
224 /* If value is a variable, is it initialized or not. */
225 unsigned int initialized : 1;
226
227 /* If value is from the stack. If this is set, read_stack will be
228 used instead of read_memory to enable extra caching. */
229 unsigned int stack : 1;
230
231 /* Location of value (if lval). */
232 union
233 {
234 /* If lval == lval_memory, this is the address in the inferior. */
235 CORE_ADDR address;
236
237 /* If lval == lval_register, the value is from a register. */
238 struct
239 {
240 /* Register number. */
241 int regnum;
242 /* Frame ID of "next" frame to which a register value is relative.
243 If the register value is found relative to frame F, then the
244 frame id of F->next will be stored in next_frame_id. */
245 struct frame_id next_frame_id;
246 } reg;
247
248 /* Pointer to internal variable. */
249 struct internalvar *internalvar;
250
251 /* Pointer to xmethod worker. */
252 struct xmethod_worker *xm_worker;
253
254 /* If lval == lval_computed, this is a set of function pointers
255 to use to access and describe the value, and a closure pointer
256 for them to use. */
257 struct
258 {
259 /* Functions to call. */
260 const struct lval_funcs *funcs;
261
262 /* Closure for those functions to use. */
263 void *closure;
264 } computed;
265 } location {};
266
267 /* Describes offset of a value within lval of a structure in target
268 addressable memory units. Note also the member embedded_offset
269 below. */
270 LONGEST offset = 0;
271
272 /* Only used for bitfields; number of bits contained in them. */
273 LONGEST bitsize = 0;
274
275 /* Only used for bitfields; position of start of field. For
276 little-endian targets, it is the position of the LSB. For
277 big-endian targets, it is the position of the MSB. */
278 LONGEST bitpos = 0;
279
280 /* The number of references to this value. When a value is created,
281 the value chain holds a reference, so REFERENCE_COUNT is 1. If
282 release_value is called, this value is removed from the chain but
283 the caller of release_value now has a reference to this value.
284 The caller must arrange for a call to value_free later. */
285 int reference_count = 1;
286
287 /* Only used for bitfields; the containing value. This allows a
288 single read from the target when displaying multiple
289 bitfields. */
290 value_ref_ptr parent;
291
292 /* Type of the value. */
293 struct type *type;
294
295 /* If a value represents a C++ object, then the `type' field gives
296 the object's compile-time type. If the object actually belongs
297 to some class derived from `type', perhaps with other base
298 classes and additional members, then `type' is just a subobject
299 of the real thing, and the full object is probably larger than
300 `type' would suggest.
301
302 If `type' is a dynamic class (i.e. one with a vtable), then GDB
303 can actually determine the object's run-time type by looking at
304 the run-time type information in the vtable. When this
305 information is available, we may elect to read in the entire
306 object, for several reasons:
307
308 - When printing the value, the user would probably rather see the
309 full object, not just the limited portion apparent from the
310 compile-time type.
311
312 - If `type' has virtual base classes, then even printing `type'
313 alone may require reaching outside the `type' portion of the
314 object to wherever the virtual base class has been stored.
315
316 When we store the entire object, `enclosing_type' is the run-time
317 type -- the complete object -- and `embedded_offset' is the
318 offset of `type' within that larger type, in target addressable memory
319 units. The value_contents() macro takes `embedded_offset' into account,
320 so most GDB code continues to see the `type' portion of the value, just
321 as the inferior would.
322
323 If `type' is a pointer to an object, then `enclosing_type' is a
324 pointer to the object's run-time type, and `pointed_to_offset' is
325 the offset in target addressable memory units from the full object
326 to the pointed-to object -- that is, the value `embedded_offset' would
327 have if we followed the pointer and fetched the complete object.
328 (I don't really see the point. Why not just determine the
329 run-time type when you indirect, and avoid the special case? The
330 contents don't matter until you indirect anyway.)
331
332 If we're not doing anything fancy, `enclosing_type' is equal to
333 `type', and `embedded_offset' is zero, so everything works
334 normally. */
335 struct type *enclosing_type;
336 LONGEST embedded_offset = 0;
337 LONGEST pointed_to_offset = 0;
338
339 /* Actual contents of the value. Target byte-order. NULL or not
340 valid if lazy is nonzero. */
341 gdb::unique_xmalloc_ptr<gdb_byte> contents;
342
343 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
344 rather than available, since the common and default case is for a
345 value to be available. This is filled in at value read time.
346 The unavailable ranges are tracked in bits. Note that a contents
347 bit that has been optimized out doesn't really exist in the
348 program, so it can't be marked unavailable either. */
349 std::vector<range> unavailable;
350
351 /* Likewise, but for optimized out contents (a chunk of the value of
352 a variable that does not actually exist in the program). If LVAL
353 is lval_register, this is a register ($pc, $sp, etc., never a
354 program variable) that has not been saved in the frame. Not
355 saved registers and optimized-out program variable values are
356 treated pretty much the same, except not-saved registers have a
357 different string representation and related error strings. */
358 std::vector<range> optimized_out;
359 };
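/* For illustration of the enclosing_type/embedded_offset comments
   above, given the (hypothetical) C++ hierarchy

     struct Base { virtual ~Base (); int b; };
     struct Derived : Base { int d; };

   a value declared with compile-time type Base but actually referring
   to a Derived object may, once the full object has been read in, have
   `enclosing_type' == Derived, `type' == Base, and `embedded_offset'
   equal to the offset of the Base subobject within Derived, in target
   addressable memory units.  */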
360
361 /* See value.h. */
362
363 struct gdbarch *
364 get_value_arch (const struct value *value)
365 {
366 return get_type_arch (value_type (value));
367 }
368
369 int
370 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
371 {
372 gdb_assert (!value->lazy);
373
374 return !ranges_contain (value->unavailable, offset, length);
375 }
376
377 int
378 value_bytes_available (const struct value *value,
379 LONGEST offset, LONGEST length)
380 {
381 return value_bits_available (value,
382 offset * TARGET_CHAR_BIT,
383 length * TARGET_CHAR_BIT);
384 }
385
386 int
387 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
388 {
389 gdb_assert (!value->lazy);
390
391 return ranges_contain (value->optimized_out, bit_offset, bit_length);
392 }
393
394 int
395 value_entirely_available (struct value *value)
396 {
397 /* We can only tell whether the whole value is available when we try
398 to read it. */
399 if (value->lazy)
400 value_fetch_lazy (value);
401
402 if (value->unavailable.empty ())
403 return 1;
404 return 0;
405 }
406
407 /* Returns true if VALUE is entirely covered by RANGES. If the value
408 is lazy, it'll be read now. */
410
411 static int
412 value_entirely_covered_by_range_vector (struct value *value,
413 const std::vector<range> &ranges)
414 {
415 /* We can only tell whether the whole value is optimized out /
416 unavailable when we try to read it. */
417 if (value->lazy)
418 value_fetch_lazy (value);
419
420 if (ranges.size () == 1)
421 {
422 const struct range &t = ranges[0];
423
424 if (t.offset == 0
425 && t.length == (TARGET_CHAR_BIT
426 * TYPE_LENGTH (value_enclosing_type (value))))
427 return 1;
428 }
429
430 return 0;
431 }
432
433 int
434 value_entirely_unavailable (struct value *value)
435 {
436 return value_entirely_covered_by_range_vector (value, value->unavailable);
437 }
438
439 int
440 value_entirely_optimized_out (struct value *value)
441 {
442 return value_entirely_covered_by_range_vector (value, value->optimized_out);
443 }
444
445 /* Insert into the vector pointed to by VECTORP the bit range starting at
446 OFFSET bits, and extending for the next LENGTH bits. */
447
448 static void
449 insert_into_bit_range_vector (std::vector<range> *vectorp,
450 LONGEST offset, LONGEST length)
451 {
452 range newr;
453
454 /* Insert the range sorted. If there's overlap or the new range
455 would be contiguous with an existing range, merge. */
456
457 newr.offset = offset;
458 newr.length = length;
459
460 /* Do a binary search for the position the given range would be
461 inserted if we only considered the starting OFFSET of ranges.
462 Call that position I. Since we also have LENGTH to care for
463 (this is a range after all), we need to check if the _previous_
464 range overlaps the I range. E.g., calling R the new range:
465
466 #1 - overlaps with previous
467
468 R
469 |-...-|
470 |---| |---| |------| ... |--|
471 0 1 2 N
472
473 I=1
474
475 In the case #1 above, the binary search would return `I=1',
476 meaning, this OFFSET should be inserted at position 1, and the
477 current position 1 should be pushed further (and become 2). But,
478 note that `0' overlaps with R, so we want to merge them.
479
480 A similar consideration needs to be taken if the new range would
481 be contiguous with the previous range:
482
483 #2 - contiguous with previous
484
485 R
486 |-...-|
487 |--| |---| |------| ... |--|
488 0 1 2 N
489
490 I=1
491
492 If there's no overlap with the previous range, as in:
493
494 #3 - not overlapping and not contiguous
495
496 R
497 |-...-|
498 |--| |---| |------| ... |--|
499 0 1 2 N
500
501 I=1
502
503 or if I is 0:
504
505 #4 - R is the range with lowest offset
506
507 R
508 |-...-|
509 |--| |---| |------| ... |--|
510 0 1 2 N
511
512 I=0
513
514 ... we just push the new range to I.
515
516 All the 4 cases above need to consider that the new range may
517 also overlap several of the ranges that follow, or that R may be
518 contiguous with the following range, and merge. E.g.,
519
520 #5 - overlapping following ranges
521
522 R
523 |------------------------|
524 |--| |---| |------| ... |--|
525 0 1 2 N
526
527 I=0
528
529 or:
530
531 R
532 |-------|
533 |--| |---| |------| ... |--|
534 0 1 2 N
535
536 I=1
537
538 */
539
540 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
541 if (i > vectorp->begin ())
542 {
543 struct range &bef = *(i - 1);
544
545 if (ranges_overlap (bef.offset, bef.length, offset, length))
546 {
547 /* #1 */
548 ULONGEST l = std::min (bef.offset, offset);
549 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
550
551 bef.offset = l;
552 bef.length = h - l;
553 i--;
554 }
555 else if (offset == bef.offset + bef.length)
556 {
557 /* #2 */
558 bef.length += length;
559 i--;
560 }
561 else
562 {
563 /* #3 */
564 i = vectorp->insert (i, newr);
565 }
566 }
567 else
568 {
569 /* #4 */
570 i = vectorp->insert (i, newr);
571 }
572
573 /* Check whether the ranges following the one we've just added or
574 touched can be folded in (#5 above). */
575 if (i != vectorp->end () && i + 1 < vectorp->end ())
576 {
577 int removed = 0;
578 auto next = i + 1;
579
580 /* Get the range we just touched. */
581 struct range &t = *i;
582 removed = 0;
583
584 i = next;
585 for (; i < vectorp->end (); i++)
586 {
587 struct range &r = *i;
588 if (r.offset <= t.offset + t.length)
589 {
590 ULONGEST l, h;
591
592 l = std::min (t.offset, r.offset);
593 h = std::max (t.offset + t.length, r.offset + r.length);
594
595 t.offset = l;
596 t.length = h - l;
597
598 removed++;
599 }
600 else
601 {
602 /* If we couldn't merge this one, we won't be able to
603 merge following ones either, since the ranges are
604 always sorted by OFFSET. */
605 break;
606 }
607 }
608
609 if (removed != 0)
610 vectorp->erase (next, next + removed);
611 }
612 }
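/* For illustration of the merging cases documented above:

     std::vector<range> v;
     insert_into_bit_range_vector (&v, 0, 4);     v = { [0,4) }
     insert_into_bit_range_vector (&v, 10, 2);    v = { [0,4), [10,2) }
     insert_into_bit_range_vector (&v, 4, 6);     v = { [0,12) }

   The last insertion is contiguous with [0,4) and touches [10,2),
   so all three ranges are coalesced into a single one.  */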
613
614 void
615 mark_value_bits_unavailable (struct value *value,
616 LONGEST offset, LONGEST length)
617 {
618 insert_into_bit_range_vector (&value->unavailable, offset, length);
619 }
620
621 void
622 mark_value_bytes_unavailable (struct value *value,
623 LONGEST offset, LONGEST length)
624 {
625 mark_value_bits_unavailable (value,
626 offset * TARGET_CHAR_BIT,
627 length * TARGET_CHAR_BIT);
628 }
629
630 /* Find the first range in RANGES that overlaps the range defined by
631 OFFSET and LENGTH, starting at element POS in the RANGES vector.
632 Returns the index into RANGES where such an overlapping range was
633 found, or -1 if none was found. */
634
635 static int
636 find_first_range_overlap (const std::vector<range> *ranges, int pos,
637 LONGEST offset, LONGEST length)
638 {
639 int i;
640
641 for (i = pos; i < ranges->size (); i++)
642 {
643 const range &r = (*ranges)[i];
644 if (ranges_overlap (r.offset, r.length, offset, length))
645 return i;
646 }
647
648 return -1;
649 }
650
651 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
652 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
653 return non-zero.
654
655 It must always be the case that:
656 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
657
658 It is assumed that memory can be accessed from:
659 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
660 to:
661 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
662 / TARGET_CHAR_BIT) */
663 static int
664 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
665 const gdb_byte *ptr2, size_t offset2_bits,
666 size_t length_bits)
667 {
668 gdb_assert (offset1_bits % TARGET_CHAR_BIT
669 == offset2_bits % TARGET_CHAR_BIT);
670
671 if (offset1_bits % TARGET_CHAR_BIT != 0)
672 {
673 size_t bits;
674 gdb_byte mask, b1, b2;
675
676 /* The offset from the base pointers PTR1 and PTR2 is not a complete
677 number of bytes. A number of bits up to either the next exact
678 byte boundary, or LENGTH_BITS (whichever is sooner) will be
679 compared. */
680 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
681 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
682 mask = (1 << bits) - 1;
683
684 if (length_bits < bits)
685 {
686 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
687 bits = length_bits;
688 }
689
690 /* Now load the two bytes and mask off the bits we care about. */
691 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
692 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
693
694 if (b1 != b2)
695 return 1;
696
697 /* Now update the length and offsets to take account of the bits
698 we've just compared. */
699 length_bits -= bits;
700 offset1_bits += bits;
701 offset2_bits += bits;
702 }
703
704 if (length_bits % TARGET_CHAR_BIT != 0)
705 {
706 size_t bits;
707 size_t o1, o2;
708 gdb_byte mask, b1, b2;
709
710 /* The length is not an exact number of bytes. After the previous
711 IF block, the offsets are byte aligned, or the
712 length is zero (in which case this code is not reached). Compare
713 a number of bits at the end of the region, starting from an exact
714 byte boundary. */
715 bits = length_bits % TARGET_CHAR_BIT;
716 o1 = offset1_bits + length_bits - bits;
717 o2 = offset2_bits + length_bits - bits;
718
719 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
720 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
721
722 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
723 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
724
725 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
726 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
727
728 if (b1 != b2)
729 return 1;
730
731 length_bits -= bits;
732 }
733
734 if (length_bits > 0)
735 {
736 /* We've now taken care of any stray "bits" at the start, or end of
737 the region to compare, the remainder can be covered with a simple
738 memcmp. */
739 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
740 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
741 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
742
743 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
744 ptr2 + offset2_bits / TARGET_CHAR_BIT,
745 length_bits / TARGET_CHAR_BIT);
746 }
747
748 /* Length is zero, regions match. */
749 return 0;
750 }
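/* For illustration: when both offsets are byte aligned and the length
   is a whole number of bytes, this reduces to a plain memcmp, e.g.

     memcmp_with_bit_offsets (buf1, 16, buf2, 0, 32)
       == memcmp (buf1 + 2, buf2, 4)

   Any unaligned head or tail bits are masked off and compared one
   partial byte at a time, as implemented above.  */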
751
752 /* Helper struct for find_first_range_overlap_and_match and
753 value_contents_bits_eq. Keep track of which slot of a given ranges
754 vector we have last looked at. */
755
756 struct ranges_and_idx
757 {
758 /* The ranges. */
759 const std::vector<range> *ranges;
760
761 /* The range we've last found in RANGES. Given ranges are sorted,
762 we can start the next lookup here. */
763 int idx;
764 };
765
766 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
767 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
768 ranges starting at OFFSET2 bits. Return true if the ranges match
769 and fill in *L and *H with the overlapping window relative to
770 (both) OFFSET1 or OFFSET2. */
771
772 static int
773 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
774 struct ranges_and_idx *rp2,
775 LONGEST offset1, LONGEST offset2,
776 LONGEST length, ULONGEST *l, ULONGEST *h)
777 {
778 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
779 offset1, length);
780 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
781 offset2, length);
782
783 if (rp1->idx == -1 && rp2->idx == -1)
784 {
785 *l = length;
786 *h = length;
787 return 1;
788 }
789 else if (rp1->idx == -1 || rp2->idx == -1)
790 return 0;
791 else
792 {
793 const range *r1, *r2;
794 ULONGEST l1, h1;
795 ULONGEST l2, h2;
796
797 r1 = &(*rp1->ranges)[rp1->idx];
798 r2 = &(*rp2->ranges)[rp2->idx];
799
800 /* Get the unavailable windows intersected by the incoming
801 ranges. The first and last ranges that overlap the argument
802 range may be wider than the incoming argument range. */
803 l1 = std::max (offset1, r1->offset);
804 h1 = std::min (offset1 + length, r1->offset + r1->length);
805
806 l2 = std::max (offset2, r2->offset);
807 h2 = std::min (offset2 + length, r2->offset + r2->length);
808
809 /* Make them relative to the respective start offsets, so we can
810 compare them for equality. */
811 l1 -= offset1;
812 h1 -= offset1;
813
814 l2 -= offset2;
815 h2 -= offset2;
816
817 /* Different ranges, no match. */
818 if (l1 != l2 || h1 != h2)
819 return 0;
820
821 *h = h1;
822 *l = l1;
823 return 1;
824 }
825 }
826
827 /* Helper function for value_contents_eq. The only difference is that
828 this function is bit rather than byte based.
829
830 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
831 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
832 Return true if the available bits match. */
833
834 static bool
835 value_contents_bits_eq (const struct value *val1, int offset1,
836 const struct value *val2, int offset2,
837 int length)
838 {
839 /* Each array element corresponds to a ranges source (unavailable,
840 optimized out). '1' is for VAL1, '2' for VAL2. */
841 struct ranges_and_idx rp1[2], rp2[2];
842
843 /* See function description in value.h. */
844 gdb_assert (!val1->lazy && !val2->lazy);
845
846 /* We shouldn't be trying to compare past the end of the values. */
847 gdb_assert (offset1 + length
848 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
849 gdb_assert (offset2 + length
850 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
851
852 memset (&rp1, 0, sizeof (rp1));
853 memset (&rp2, 0, sizeof (rp2));
854 rp1[0].ranges = &val1->unavailable;
855 rp2[0].ranges = &val2->unavailable;
856 rp1[1].ranges = &val1->optimized_out;
857 rp2[1].ranges = &val2->optimized_out;
858
859 while (length > 0)
860 {
861 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
862 int i;
863
864 for (i = 0; i < 2; i++)
865 {
866 ULONGEST l_tmp, h_tmp;
867
868 /* The contents only compare equal if the invalid/unavailable
869 contents ranges match as well. */
870 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
871 offset1, offset2, length,
872 &l_tmp, &h_tmp))
873 return false;
874
875 /* We're interested in the lowest/first range found. */
876 if (i == 0 || l_tmp < l)
877 {
878 l = l_tmp;
879 h = h_tmp;
880 }
881 }
882
883 /* Compare the available/valid contents. */
884 if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
885 val2->contents.get (), offset2, l) != 0)
886 return false;
887
888 length -= h;
889 offset1 += h;
890 offset2 += h;
891 }
892
893 return true;
894 }
895
896 bool
897 value_contents_eq (const struct value *val1, LONGEST offset1,
898 const struct value *val2, LONGEST offset2,
899 LONGEST length)
900 {
901 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
902 val2, offset2 * TARGET_CHAR_BIT,
903 length * TARGET_CHAR_BIT);
904 }
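/* For illustration, a caller comparing the first four bytes of two
   non-lazy values might do:

     if (value_contents_eq (val1, 0, val2, 0, 4))
       ...

   which succeeds only if both the byte contents and the
   unavailable/optimized-out ranges within [0,4) agree.  */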
905
906
907 /* The value-history records all the values printed by print commands
908 during this session. */
909
910 static std::vector<value_ref_ptr> value_history;
911
912 \f
913 /* List of all value objects currently allocated
914 (except for those released by calls to release_value).
915 This is so they can be freed after each command. */
916
917 static std::vector<value_ref_ptr> all_values;
918
919 /* Allocate a lazy value for type TYPE. Its actual content is
920 "lazily" allocated too: the content field of the return value is
921 NULL; it will be allocated when it is fetched from the target. */
922
923 struct value *
924 allocate_value_lazy (struct type *type)
925 {
926 struct value *val;
927
928 /* Call check_typedef on our type to make sure that, if TYPE
929 is a TYPE_CODE_TYPEDEF, its length is set to the length
930 of the target type instead of zero. However, we do not
931 replace the typedef type by the target type, because we want
932 to keep the typedef in order to be able to set the VAL's type
933 description correctly. */
934 check_typedef (type);
935
936 val = new struct value (type);
937
938 /* Values start out on the all_values chain. */
939 all_values.emplace_back (val);
940
941 return val;
942 }
943
944 /* The maximum size, in bytes, that GDB will try to allocate for a value.
945 The initial value of 64k was not selected for any specific reason; it is
946 just a reasonable starting point. */
947
948 static int max_value_size = 65536; /* 64k bytes */
949
950 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
951 LONGEST, otherwise GDB will not be able to parse integer values from the
952 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
953 be unable to parse "set max-value-size 2".
954
955 As we want a consistent GDB experience across hosts with different sizes
956 of LONGEST, this arbitrary minimum value was selected; as long as it
957 is bigger than LONGEST on all GDB-supported hosts, we're fine. */
958
959 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
960 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
961
962 /* Implement the "set max-value-size" command. */
963
964 static void
965 set_max_value_size (const char *args, int from_tty,
966 struct cmd_list_element *c)
967 {
968 gdb_assert (max_value_size == -1 || max_value_size >= 0);
969
970 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
971 {
972 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
973 error (_("max-value-size set too low, increasing to %d bytes"),
974 max_value_size);
975 }
976 }
977
978 /* Implement the "show max-value-size" command. */
979
980 static void
981 show_max_value_size (struct ui_file *file, int from_tty,
982 struct cmd_list_element *c, const char *value)
983 {
984 if (max_value_size == -1)
985 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
986 else
987 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
988 max_value_size);
989 }
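/* These two implement the user-visible setting, e.g.

     (gdb) set max-value-size 1048576
     (gdb) set max-value-size unlimited
     (gdb) show max-value-size

   where a stored value of -1 means "unlimited".  */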
990
991 /* Called before we attempt to allocate or reallocate a buffer for the
992 contents of a value. TYPE is the type of the value for which we are
993 allocating the buffer. If the buffer is too large (based on the user
994 controllable setting) then throw an error. If this function returns
995 then we should attempt to allocate the buffer. */
996
997 static void
998 check_type_length_before_alloc (const struct type *type)
999 {
1000 ULONGEST length = TYPE_LENGTH (type);
1001
1002 if (max_value_size > -1 && length > max_value_size)
1003 {
1004 if (type->name () != NULL)
1005 error (_("value of type `%s' requires %s bytes, which is more "
1006 "than max-value-size"), type->name (), pulongest (length));
1007 else
1008 error (_("value requires %s bytes, which is more than "
1009 "max-value-size"), pulongest (length));
1010 }
1011 }
1012
1013 /* Allocate the contents of VAL if it has not been allocated yet. */
1014
1015 static void
1016 allocate_value_contents (struct value *val)
1017 {
1018 if (!val->contents)
1019 {
1020 check_type_length_before_alloc (val->enclosing_type);
1021 val->contents.reset
1022 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1023 }
1024 }
1025
1026 /* Allocate a value and its contents for type TYPE. */
1027
1028 struct value *
1029 allocate_value (struct type *type)
1030 {
1031 struct value *val = allocate_value_lazy (type);
1032
1033 allocate_value_contents (val);
1034 val->lazy = 0;
1035 return val;
1036 }
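/* For illustration, a caller might allocate a non-lazy int value and
   fill in its contents directly (value_from_longest is the usual
   shortcut for this):

     struct type *int_type = builtin_type (gdbarch)->builtin_int;
     struct value *v = allocate_value (int_type);
     store_signed_integer (value_contents_raw (v),
                           TYPE_LENGTH (int_type),
                           gdbarch_byte_order (gdbarch), 42);

   Here GDBARCH stands for the relevant architecture.  */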
1037
1038 /* Allocate a value that has the correct length
1039 for COUNT repetitions of type TYPE. */
1040
1041 struct value *
1042 allocate_repeat_value (struct type *type, int count)
1043 {
1044 /* Despite the fact that we are really creating an array of TYPE here, we
1045 use the string lower bound as the array lower bound. This seems to
1046 work fine for now. */
1047 int low_bound = current_language->string_lower_bound ();
1048 /* FIXME-type-allocation: need a way to free this type when we are
1049 done with it. */
1050 struct type *array_type
1051 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1052
1053 return allocate_value (array_type);
1054 }
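/* For illustration: with C's lower bound of 0,
   allocate_repeat_value (int_type, 3) allocates a value of type
   "int [3]" (bounds 0..2); for a language whose string lower bound
   is 1, the bounds would be 1..3 instead.  */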
1055
1056 struct value *
1057 allocate_computed_value (struct type *type,
1058 const struct lval_funcs *funcs,
1059 void *closure)
1060 {
1061 struct value *v = allocate_value_lazy (type);
1062
1063 VALUE_LVAL (v) = lval_computed;
1064 v->location.computed.funcs = funcs;
1065 v->location.computed.closure = closure;
1066
1067 return v;
1068 }
1069
1070 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1071
1072 struct value *
1073 allocate_optimized_out_value (struct type *type)
1074 {
1075 struct value *retval = allocate_value_lazy (type);
1076
1077 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1078 set_value_lazy (retval, 0);
1079 return retval;
1080 }
1081
1082 /* Accessor methods. */
1083
1084 struct type *
1085 value_type (const struct value *value)
1086 {
1087 return value->type;
1088 }
1089 void
1090 deprecated_set_value_type (struct value *value, struct type *type)
1091 {
1092 value->type = type;
1093 }
1094
1095 LONGEST
1096 value_offset (const struct value *value)
1097 {
1098 return value->offset;
1099 }
1100 void
1101 set_value_offset (struct value *value, LONGEST offset)
1102 {
1103 value->offset = offset;
1104 }
1105
1106 LONGEST
1107 value_bitpos (const struct value *value)
1108 {
1109 return value->bitpos;
1110 }
1111 void
1112 set_value_bitpos (struct value *value, LONGEST bit)
1113 {
1114 value->bitpos = bit;
1115 }
1116
1117 LONGEST
1118 value_bitsize (const struct value *value)
1119 {
1120 return value->bitsize;
1121 }
1122 void
1123 set_value_bitsize (struct value *value, LONGEST bit)
1124 {
1125 value->bitsize = bit;
1126 }
1127
1128 struct value *
1129 value_parent (const struct value *value)
1130 {
1131 return value->parent.get ();
1132 }
1133
1134 /* See value.h. */
1135
1136 void
1137 set_value_parent (struct value *value, struct value *parent)
1138 {
1139 value->parent = value_ref_ptr::new_reference (parent);
1140 }
1141
1142 gdb_byte *
1143 value_contents_raw (struct value *value)
1144 {
1145 struct gdbarch *arch = get_value_arch (value);
1146 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1147
1148 allocate_value_contents (value);
1149 return value->contents.get () + value->embedded_offset * unit_size;
1150 }
1151
1152 gdb_byte *
1153 value_contents_all_raw (struct value *value)
1154 {
1155 allocate_value_contents (value);
1156 return value->contents.get ();
1157 }
1158
1159 struct type *
1160 value_enclosing_type (const struct value *value)
1161 {
1162 return value->enclosing_type;
1163 }
1164
1165 /* Look at value.h for description. */
1166
1167 struct type *
1168 value_actual_type (struct value *value, int resolve_simple_types,
1169 int *real_type_found)
1170 {
1171 struct value_print_options opts;
1172 struct type *result;
1173
1174 get_user_print_options (&opts);
1175
1176 if (real_type_found)
1177 *real_type_found = 0;
1178 result = value_type (value);
1179 if (opts.objectprint)
1180 {
1181 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1182 fetch its rtti type. */
1183 if ((result->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1184 && (check_typedef (TYPE_TARGET_TYPE (result))->code ()
1185 == TYPE_CODE_STRUCT)
1186 && !value_optimized_out (value))
1187 {
1188 struct type *real_type;
1189
1190 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1191 if (real_type)
1192 {
1193 if (real_type_found)
1194 *real_type_found = 1;
1195 result = real_type;
1196 }
1197 }
1198 else if (resolve_simple_types)
1199 {
1200 if (real_type_found)
1201 *real_type_found = 1;
1202 result = value_enclosing_type (value);
1203 }
1204 }
1205
1206 return result;
1207 }
1208
1209 void
1210 error_value_optimized_out (void)
1211 {
1212 error (_("value has been optimized out"));
1213 }
1214
1215 static void
1216 require_not_optimized_out (const struct value *value)
1217 {
1218 if (!value->optimized_out.empty ())
1219 {
1220 if (value->lval == lval_register)
1221 error (_("register has not been saved in frame"));
1222 else
1223 error_value_optimized_out ();
1224 }
1225 }
1226
1227 static void
1228 require_available (const struct value *value)
1229 {
1230 if (!value->unavailable.empty ())
1231 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1232 }
1233
1234 const gdb_byte *
1235 value_contents_for_printing (struct value *value)
1236 {
1237 if (value->lazy)
1238 value_fetch_lazy (value);
1239 return value->contents.get ();
1240 }
1241
1242 const gdb_byte *
1243 value_contents_for_printing_const (const struct value *value)
1244 {
1245 gdb_assert (!value->lazy);
1246 return value->contents.get ();
1247 }
1248
1249 const gdb_byte *
1250 value_contents_all (struct value *value)
1251 {
1252 const gdb_byte *result = value_contents_for_printing (value);
1253 require_not_optimized_out (value);
1254 require_available (value);
1255 return result;
1256 }
1257
1258 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1259 SRC_BIT_OFFSET+BIT_LENGTH) into *DST_RANGE, adjusted. */
1260
1261 static void
1262 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1263 const std::vector<range> &src_range, int src_bit_offset,
1264 int bit_length)
1265 {
1266 for (const range &r : src_range)
1267 {
1268 ULONGEST h, l;
1269
1270 l = std::max (r.offset, (LONGEST) src_bit_offset);
1271 h = std::min (r.offset + r.length,
1272 (LONGEST) src_bit_offset + bit_length);
1273
1274 if (l < h)
1275 insert_into_bit_range_vector (dst_range,
1276 dst_bit_offset + (l - src_bit_offset),
1277 h - l);
1278 }
1279 }
1280
1281 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1282 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1283
1284 static void
1285 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1286 const struct value *src, int src_bit_offset,
1287 int bit_length)
1288 {
1289 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1290 src->unavailable, src_bit_offset,
1291 bit_length);
1292 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1293 src->optimized_out, src_bit_offset,
1294 bit_length);
1295 }
1296
1297 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1298 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1299 contents, starting at DST_OFFSET. If unavailable contents are
1300 being copied from SRC, the corresponding DST contents are marked
1301 unavailable accordingly. Neither DST nor SRC may be lazy
1302 values.
1303
1304 It is assumed the contents of DST in the [DST_OFFSET,
1305 DST_OFFSET+LENGTH) range are wholly available. */
1306
1307 void
1308 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1309 struct value *src, LONGEST src_offset, LONGEST length)
1310 {
1311 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1312 struct gdbarch *arch = get_value_arch (src);
1313 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1314
1315 /* A lazy DST would make this copy operation useless, since as
1316 soon as DST's contents were un-lazied (by a later value_contents
1317 call, say), the contents would be overwritten. A lazy SRC would
1318 mean we'd be copying garbage. */
1319 gdb_assert (!dst->lazy && !src->lazy);
1320
1321 /* The overwritten DST range gets unavailability ORed in, not
1322 replaced. Make sure to remember to implement replacing if it
1323 turns out actually necessary. */
1324 gdb_assert (value_bytes_available (dst, dst_offset, length));
1325 gdb_assert (!value_bits_any_optimized_out (dst,
1326 TARGET_CHAR_BIT * dst_offset,
1327 TARGET_CHAR_BIT * length));
1328
1329 /* Copy the data. */
1330 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1331 value_contents_all_raw (src) + src_offset * unit_size,
1332 length * unit_size);
1333
1334 /* Copy the meta-data, adjusted. */
1335 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1336 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1337 bit_length = length * unit_size * HOST_CHAR_BIT;
1338
1339 value_ranges_copy_adjusted (dst, dst_bit_offset,
1340 src, src_bit_offset,
1341 bit_length);
1342 }
1343
1344 /* Copy LENGTH bytes of SRC value's (all) contents
1345 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1346 (all) contents, starting at DST_OFFSET. If unavailable contents
1347 are being copied from SRC, the corresponding DST contents are
1348 marked unavailable accordingly. DST must not be lazy. If SRC is
1349 lazy, it will be fetched now.
1350
1351 It is assumed the contents of DST in the [DST_OFFSET,
1352 DST_OFFSET+LENGTH) range are wholly available. */
1353
1354 void
1355 value_contents_copy (struct value *dst, LONGEST dst_offset,
1356 struct value *src, LONGEST src_offset, LONGEST length)
1357 {
1358 if (src->lazy)
1359 value_fetch_lazy (src);
1360
1361 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1362 }
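/* For illustration, copying the first N bytes of SRC into DST while
   propagating the unavailable/optimized-out metadata:

     value_contents_copy (dst, 0, src, 0, n);

   DST's [0, n) range must already be wholly available; SRC is fetched
   first if it is still lazy.  */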
1363
1364 int
1365 value_lazy (const struct value *value)
1366 {
1367 return value->lazy;
1368 }
1369
1370 void
1371 set_value_lazy (struct value *value, int val)
1372 {
1373 value->lazy = val;
1374 }
1375
1376 int
1377 value_stack (const struct value *value)
1378 {
1379 return value->stack;
1380 }
1381
1382 void
1383 set_value_stack (struct value *value, int val)
1384 {
1385 value->stack = val;
1386 }
1387
1388 const gdb_byte *
1389 value_contents (struct value *value)
1390 {
1391 const gdb_byte *result = value_contents_writeable (value);
1392 require_not_optimized_out (value);
1393 require_available (value);
1394 return result;
1395 }
1396
1397 gdb_byte *
1398 value_contents_writeable (struct value *value)
1399 {
1400 if (value->lazy)
1401 value_fetch_lazy (value);
1402 return value_contents_raw (value);
1403 }
1404
1405 int
1406 value_optimized_out (struct value *value)
1407 {
1408 /* We can only know if a value is optimized out once we have tried to
1409 fetch it. */
1410 if (value->optimized_out.empty () && value->lazy)
1411 {
1412 try
1413 {
1414 value_fetch_lazy (value);
1415 }
1416 catch (const gdb_exception_error &ex)
1417 {
1418 switch (ex.error)
1419 {
1420 case MEMORY_ERROR:
1421 case OPTIMIZED_OUT_ERROR:
1422 case NOT_AVAILABLE_ERROR:
1423 /* These can normally happen when we try to access an
1424 optimized out or unavailable register, either in a
1425 physical register or spilled to memory. */
1426 break;
1427 default:
1428 throw;
1429 }
1430 }
1431 }
1432
1433 return !value->optimized_out.empty ();
1434 }
1435
1436 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1437 the following LENGTH bytes. */
1438
1439 void
1440 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1441 {
1442 mark_value_bits_optimized_out (value,
1443 offset * TARGET_CHAR_BIT,
1444 length * TARGET_CHAR_BIT);
1445 }
1446
1447 /* See value.h. */
1448
1449 void
1450 mark_value_bits_optimized_out (struct value *value,
1451 LONGEST offset, LONGEST length)
1452 {
1453 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1454 }
1455
1456 int
1457 value_bits_synthetic_pointer (const struct value *value,
1458 LONGEST offset, LONGEST length)
1459 {
1460 if (value->lval != lval_computed
1461 || !value->location.computed.funcs->check_synthetic_pointer)
1462 return 0;
1463 return value->location.computed.funcs->check_synthetic_pointer (value,
1464 offset,
1465 length);
1466 }
1467
1468 LONGEST
1469 value_embedded_offset (const struct value *value)
1470 {
1471 return value->embedded_offset;
1472 }
1473
1474 void
1475 set_value_embedded_offset (struct value *value, LONGEST val)
1476 {
1477 value->embedded_offset = val;
1478 }
1479
1480 LONGEST
1481 value_pointed_to_offset (const struct value *value)
1482 {
1483 return value->pointed_to_offset;
1484 }
1485
1486 void
1487 set_value_pointed_to_offset (struct value *value, LONGEST val)
1488 {
1489 value->pointed_to_offset = val;
1490 }
1491
1492 const struct lval_funcs *
1493 value_computed_funcs (const struct value *v)
1494 {
1495 gdb_assert (value_lval_const (v) == lval_computed);
1496
1497 return v->location.computed.funcs;
1498 }
1499
1500 void *
1501 value_computed_closure (const struct value *v)
1502 {
1503 gdb_assert (v->lval == lval_computed);
1504
1505 return v->location.computed.closure;
1506 }
1507
1508 enum lval_type *
1509 deprecated_value_lval_hack (struct value *value)
1510 {
1511 return &value->lval;
1512 }
1513
1514 enum lval_type
1515 value_lval_const (const struct value *value)
1516 {
1517 return value->lval;
1518 }
1519
1520 CORE_ADDR
1521 value_address (const struct value *value)
1522 {
1523 if (value->lval != lval_memory)
1524 return 0;
1525 if (value->parent != NULL)
1526 return value_address (value->parent.get ()) + value->offset;
1527 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1528 {
1529 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1530 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1531 }
1532
1533 return value->location.address + value->offset;
1534 }
1535
1536 CORE_ADDR
1537 value_raw_address (const struct value *value)
1538 {
1539 if (value->lval != lval_memory)
1540 return 0;
1541 return value->location.address;
1542 }
1543
1544 void
1545 set_value_address (struct value *value, CORE_ADDR addr)
1546 {
1547 gdb_assert (value->lval == lval_memory);
1548 value->location.address = addr;
1549 }
1550
1551 struct internalvar **
1552 deprecated_value_internalvar_hack (struct value *value)
1553 {
1554 return &value->location.internalvar;
1555 }
1556
1557 struct frame_id *
1558 deprecated_value_next_frame_id_hack (struct value *value)
1559 {
1560 gdb_assert (value->lval == lval_register);
1561 return &value->location.reg.next_frame_id;
1562 }
1563
1564 int *
1565 deprecated_value_regnum_hack (struct value *value)
1566 {
1567 gdb_assert (value->lval == lval_register);
1568 return &value->location.reg.regnum;
1569 }
1570
1571 int
1572 deprecated_value_modifiable (const struct value *value)
1573 {
1574 return value->modifiable;
1575 }
1576 \f
1577 /* Return a mark in the value chain. All values allocated after the
1578 mark is obtained (except for those released) are subject to being freed
1579 if a subsequent value_free_to_mark is passed the mark. */
1580 struct value *
1581 value_mark (void)
1582 {
1583 if (all_values.empty ())
1584 return nullptr;
1585 return all_values.back ().get ();
1586 }
1587
1588 /* See value.h. */
1589
1590 void
1591 value_incref (struct value *val)
1592 {
1593 val->reference_count++;
1594 }
1595
1596 /* Release a reference to VAL, which was acquired with value_incref.
1597 This function is also called to deallocate values from the value
1598 chain. */
1599
1600 void
1601 value_decref (struct value *val)
1602 {
1603 if (val != nullptr)
1604 {
1605 gdb_assert (val->reference_count > 0);
1606 val->reference_count--;
1607 if (val->reference_count == 0)
1608 delete val;
1609 }
1610 }
1611
1612 /* Free all values allocated since MARK was obtained by value_mark
1613 (except for those released). */
1614 void
1615 value_free_to_mark (const struct value *mark)
1616 {
1617 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1618 if (iter == all_values.end ())
1619 all_values.clear ();
1620 else
1621 all_values.erase (iter + 1, all_values.end ());
1622 }
1623
1624 /* Remove VAL from the chain all_values
1625 so it will not be freed automatically. */
1626
1627 value_ref_ptr
1628 release_value (struct value *val)
1629 {
1630 if (val == nullptr)
1631 return value_ref_ptr ();
1632
1633 std::vector<value_ref_ptr>::reverse_iterator iter;
1634 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1635 {
1636 if (*iter == val)
1637 {
1638 value_ref_ptr result = *iter;
1639 all_values.erase (iter.base () - 1);
1640 return result;
1641 }
1642 }
1643
1644 /* We must always return an owned reference. Normally this happens
1645 because we transfer the reference from the value chain, but in
1646 this case the value was not on the chain. */
1647 return value_ref_ptr::new_reference (val);
1648 }
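/* For illustration, a caller that wants a value to outlive the
   current command might do:

     value_ref_ptr ref = release_value (val);

   which removes VAL from all_values (if it is on the chain) and hands
   that reference to REF; the value is destroyed once the last
   value_ref_ptr referring to it goes away.  */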
1649
1650 /* See value.h. */
1651
1652 std::vector<value_ref_ptr>
1653 value_release_to_mark (const struct value *mark)
1654 {
1655 std::vector<value_ref_ptr> result;
1656
1657 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1658 if (iter == all_values.end ())
1659 std::swap (result, all_values);
1660 else
1661 {
1662 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1663 all_values.erase (iter + 1, all_values.end ());
1664 }
1665 std::reverse (result.begin (), result.end ());
1666 return result;
1667 }
1668
1669 /* Return a copy of the value ARG.
1670 It contains the same contents, for the same memory address,
1671 but it's a different block of storage. */
1672
1673 struct value *
1674 value_copy (struct value *arg)
1675 {
1676 struct type *encl_type = value_enclosing_type (arg);
1677 struct value *val;
1678
1679 if (value_lazy (arg))
1680 val = allocate_value_lazy (encl_type);
1681 else
1682 val = allocate_value (encl_type);
1683 val->type = arg->type;
1684 VALUE_LVAL (val) = VALUE_LVAL (arg);
1685 val->location = arg->location;
1686 val->offset = arg->offset;
1687 val->bitpos = arg->bitpos;
1688 val->bitsize = arg->bitsize;
1689 val->lazy = arg->lazy;
1690 val->embedded_offset = value_embedded_offset (arg);
1691 val->pointed_to_offset = arg->pointed_to_offset;
1692 val->modifiable = arg->modifiable;
1693 if (!value_lazy (val))
1694 {
1695 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1696 TYPE_LENGTH (value_enclosing_type (arg)));
1697
1698 }
1699 val->unavailable = arg->unavailable;
1700 val->optimized_out = arg->optimized_out;
1701 val->parent = arg->parent;
1702 if (VALUE_LVAL (val) == lval_computed)
1703 {
1704 const struct lval_funcs *funcs = val->location.computed.funcs;
1705
1706 if (funcs->copy_closure)
1707 val->location.computed.closure = funcs->copy_closure (val);
1708 }
1709 return val;
1710 }
1711
1712 /* Return a "const" and/or "volatile" qualified version of the value V.
1713 If CNST is true, then the returned value will be qualified with
1714 "const".
1715 if VOLTL is true, then the returned value will be qualified with
1716 "volatile". */
1717
1718 struct value *
1719 make_cv_value (int cnst, int voltl, struct value *v)
1720 {
1721 struct type *val_type = value_type (v);
1722 struct type *enclosing_type = value_enclosing_type (v);
1723 struct value *cv_val = value_copy (v);
1724
1725 deprecated_set_value_type (cv_val,
1726 make_cv_type (cnst, voltl, val_type, NULL));
1727 set_value_enclosing_type (cv_val,
1728 make_cv_type (cnst, voltl, enclosing_type, NULL));
1729
1730 return cv_val;
1731 }
1732
1733 /* Return a version of ARG that is non-lvalue. */
1734
1735 struct value *
1736 value_non_lval (struct value *arg)
1737 {
1738 if (VALUE_LVAL (arg) != not_lval)
1739 {
1740 struct type *enc_type = value_enclosing_type (arg);
1741 struct value *val = allocate_value (enc_type);
1742
1743 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1744 TYPE_LENGTH (enc_type));
1745 val->type = arg->type;
1746 set_value_embedded_offset (val, value_embedded_offset (arg));
1747 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1748 return val;
1749 }
1750 return arg;
1751 }
1752
1753 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1754
1755 void
1756 value_force_lval (struct value *v, CORE_ADDR addr)
1757 {
1758 gdb_assert (VALUE_LVAL (v) == not_lval);
1759
1760 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1761 v->lval = lval_memory;
1762 v->location.address = addr;
1763 }
1764
1765 void
1766 set_value_component_location (struct value *component,
1767 const struct value *whole)
1768 {
1769 struct type *type;
1770
1771 gdb_assert (whole->lval != lval_xcallable);
1772
1773 if (whole->lval == lval_internalvar)
1774 VALUE_LVAL (component) = lval_internalvar_component;
1775 else
1776 VALUE_LVAL (component) = whole->lval;
1777
1778 component->location = whole->location;
1779 if (whole->lval == lval_computed)
1780 {
1781 const struct lval_funcs *funcs = whole->location.computed.funcs;
1782
1783 if (funcs->copy_closure)
1784 component->location.computed.closure = funcs->copy_closure (whole);
1785 }
1786
1787 /* If the WHOLE value has a dynamically resolved location property then
1788 update the address of the COMPONENT. */
1789 type = value_type (whole);
1790 if (NULL != TYPE_DATA_LOCATION (type)
1791 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1792 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1793
1794 /* Similarly, if the COMPONENT value has a dynamically resolved location
1795 property then update its address. */
1796 type = value_type (component);
1797 if (NULL != TYPE_DATA_LOCATION (type)
1798 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1799 {
1800 /* If the COMPONENT has a dynamic location, and is an
1801 lval_internalvar_component, then we change it to a lval_memory.
1802
1803 Usually a component of an internalvar is created non-lazy, and has
1804 its content immediately copied from the parent internalvar.
1805 However, for components with a dynamic location, the content of
1806 the component is not contained within the parent, but is instead
1807 accessed indirectly. Further, the component will be created as a
1808 lazy value.
1809
1810 By changing the type of the component to lval_memory we ensure
1811 that value_fetch_lazy can successfully load the component.
1812
1813 This solution isn't ideal, but a real fix would require values to
1814 carry around both the parent value contents, and the contents of
1815 any dynamic fields within the parent. This is a substantial
1816 change to how values work in GDB. */
1817 if (VALUE_LVAL (component) == lval_internalvar_component)
1818 {
1819 gdb_assert (value_lazy (component));
1820 VALUE_LVAL (component) = lval_memory;
1821 }
1822 else
1823 gdb_assert (VALUE_LVAL (component) == lval_memory);
1824 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1825 }
1826 }
1827
1828 /* Access to the value history. */
1829
1830 /* Record a new value in the value history.
1831 Returns the absolute history index of the entry. */
1832
1833 int
1834 record_latest_value (struct value *val)
1835 {
1836 /* We don't want this value to have anything to do with the inferior anymore.
1837 In particular, "set $1 = 50" should not affect the variable from which
1838 the value was taken, and fast watchpoints should be able to assume that
1839 a value on the value history never changes. */
1840 if (value_lazy (val))
1841 value_fetch_lazy (val);
1842 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1843 from. This is a bit dubious, because then *&$1 does not just return $1
1844 but the current contents of that location. c'est la vie... */
1845 val->modifiable = 0;
1846
1847 value_history.push_back (release_value (val));
1848
1849 return value_history.size ();
1850 }
1851
1852 /* Return a copy of the value in the history with sequence number NUM. */
1853
1854 struct value *
1855 access_value_history (int num)
1856 {
1857 int absnum = num;
1858
1859 if (absnum <= 0)
1860 absnum += value_history.size ();
1861
1862 if (absnum <= 0)
1863 {
1864 if (num == 0)
1865 error (_("The history is empty."));
1866 else if (num == 1)
1867 error (_("There is only one value in the history."));
1868 else
1869 error (_("History does not go back to $$%d."), -num);
1870 }
1871 if (absnum > value_history.size ())
1872 error (_("History has not yet reached $%d."), absnum);
1873
1874 absnum--;
1875
1876 return value_copy (value_history[absnum].get ());
1877 }
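/* For illustration, history numbers are 1-based and non-positive NUM
   counts back from the most recent entry, matching the CLI notation:

     access_value_history (3);     $3
     access_value_history (0);     $ (equivalently $$0), the last value
     access_value_history (-1);    $$1, the value before that
*/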
1878
1879 static void
1880 show_values (const char *num_exp, int from_tty)
1881 {
1882 int i;
1883 struct value *val;
1884 static int num = 1;
1885
1886 if (num_exp)
1887 {
1888 /* "show values +" should print from the stored position.
1889 "show values <exp>" should print around value number <exp>. */
1890 if (num_exp[0] != '+' || num_exp[1] != '\0')
1891 num = parse_and_eval_long (num_exp) - 5;
1892 }
1893 else
1894 {
1895 /* "show values" means print the last 10 values. */
1896 num = value_history.size () - 9;
1897 }
1898
1899 if (num <= 0)
1900 num = 1;
1901
1902 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1903 {
1904 struct value_print_options opts;
1905
1906 val = access_value_history (i);
1907 printf_filtered (("$%d = "), i);
1908 get_user_print_options (&opts);
1909 value_print (val, gdb_stdout, &opts);
1910 printf_filtered (("\n"));
1911 }
1912
1913 /* The next "show values +" should start after what we just printed. */
1914 num += 10;
1915
1916 /* Hitting just return after this command should do the same thing as
1917 "show values +". If num_exp is null, this is unnecessary, since
1918 "show values +" is not useful after "show values". */
1919 if (from_tty && num_exp)
1920 set_repeat_arguments ("+");
1921 }
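/* For illustration, this implements the CLI command:

     (gdb) show values        the last ten history values
     (gdb) show values 37     ten values centered around $37
     (gdb) show values +      the ten values following the last batch
*/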
1922 \f
1923 enum internalvar_kind
1924 {
1925 /* The internal variable is empty. */
1926 INTERNALVAR_VOID,
1927
1928 /* The value of the internal variable is provided directly as
1929 a GDB value object. */
1930 INTERNALVAR_VALUE,
1931
1932 /* A fresh value is computed via a call-back routine on every
1933 access to the internal variable. */
1934 INTERNALVAR_MAKE_VALUE,
1935
1936 /* The internal variable holds a GDB internal convenience function. */
1937 INTERNALVAR_FUNCTION,
1938
1939 /* The variable holds an integer value. */
1940 INTERNALVAR_INTEGER,
1941
1942 /* The variable holds a GDB-provided string. */
1943 INTERNALVAR_STRING,
1944 };
1945
1946 union internalvar_data
1947 {
1948 /* A value object used with INTERNALVAR_VALUE. */
1949 struct value *value;
1950
1951 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1952 struct
1953 {
1954 /* The functions to call. */
1955 const struct internalvar_funcs *functions;
1956
1957 /* The function's user-data. */
1958 void *data;
1959 } make_value;
1960
1961 /* The internal function used with INTERNALVAR_FUNCTION. */
1962 struct
1963 {
1964 struct internal_function *function;
1965 /* True if this is the canonical name for the function. */
1966 int canonical;
1967 } fn;
1968
1969 /* An integer value used with INTERNALVAR_INTEGER. */
1970 struct
1971 {
1972 /* If type is non-NULL, it will be used as the type to generate
1973 a value for this internal variable. If type is NULL, a default
1974 integer type for the architecture is used. */
1975 struct type *type;
1976 LONGEST val;
1977 } integer;
1978
1979 /* A string value used with INTERNALVAR_STRING. */
1980 char *string;
1981 };
1982
1983 /* Internal variables. These are variables within the debugger
1984 that hold values assigned by debugger commands.
1985 The user refers to them with a '$' prefix
1986 that does not appear in the variable names stored internally. */
1987
1988 struct internalvar
1989 {
1990 struct internalvar *next;
1991 char *name;
1992
1993 /* We support various different kinds of content of an internal variable.
1994 enum internalvar_kind specifies the kind, and union internalvar_data
1995 provides the data associated with this particular kind. */
1996
1997 enum internalvar_kind kind;
1998
1999 union internalvar_data u;
2000 };
2001
2002 static struct internalvar *internalvars;
2003
2004 /* If the variable does not already exist, create it and give it the
2005 specified value. If no value is specified, the default is zero. */
2006 static void
2007 init_if_undefined_command (const char* args, int from_tty)
2008 {
2009 struct internalvar* intvar;
2010
2011 /* Parse the expression - this is taken from set_command(). */
2012 expression_up expr = parse_expression (args);
2013
2014 /* Validate the expression.
2015 Was the expression an assignment?
2016 Or even an expression at all? */
2017 if (expr->nelts == 0 || expr->first_opcode () != BINOP_ASSIGN)
2018 error (_("Init-if-undefined requires an assignment expression."));
2019
2020 /* Extract the variable from the parsed expression.
2021 In the case of an assignment the lvalue will be in elts[1] and elts[2]. */
2022 if (expr->elts[1].opcode != OP_INTERNALVAR)
2023 error (_("The first parameter to init-if-undefined "
2024 "should be a GDB variable."));
2025 intvar = expr->elts[2].internalvar;
2026
2027 /* Only evaluate the expression if the lvalue is void.
2028 This may still fail if the expression is invalid. */
2029 if (intvar->kind == INTERNALVAR_VOID)
2030 evaluate_expression (expr.get ());
2031 }
2032
2033
2034 /* Look up an internal variable with name NAME. NAME should not
2035 normally include a dollar sign.
2036
2037 If the specified internal variable does not exist,
2038 the return value is NULL. */
2039
2040 struct internalvar *
2041 lookup_only_internalvar (const char *name)
2042 {
2043 struct internalvar *var;
2044
2045 for (var = internalvars; var; var = var->next)
2046 if (strcmp (var->name, name) == 0)
2047 return var;
2048
2049 return NULL;
2050 }
2051
2052 /* Complete NAME by comparing it to the names of internal
2053 variables. */
2054
2055 void
2056 complete_internalvar (completion_tracker &tracker, const char *name)
2057 {
2058 struct internalvar *var;
2059 int len;
2060
2061 len = strlen (name);
2062
2063 for (var = internalvars; var; var = var->next)
2064 if (strncmp (var->name, name, len) == 0)
2065 tracker.add_completion (make_unique_xstrdup (var->name));
2066 }
2067
2068 /* Create an internal variable with name NAME and with a void value.
2069 NAME should not normally include a dollar sign. */
2070
2071 struct internalvar *
2072 create_internalvar (const char *name)
2073 {
2074 struct internalvar *var = XNEW (struct internalvar);
2075
2076 var->name = xstrdup (name);
2077 var->kind = INTERNALVAR_VOID;
2078 var->next = internalvars;
2079 internalvars = var;
2080 return var;
2081 }
2082
2083 /* Create an internal variable with name NAME and register the
2084 callbacks in FUNCS with it: FUNCS->make_value is called by
2085 value_of_internalvar to create a value whenever this variable is
2086 referenced, and FUNCS->destroy, if not NULL, is called when the
2087 internal variable is destroyed. DATA is passed uninterpreted to
2088 the callbacks. NAME should not normally include a dollar sign. */
2089
2090 struct internalvar *
2091 create_internalvar_type_lazy (const char *name,
2092 const struct internalvar_funcs *funcs,
2093 void *data)
2094 {
2095 struct internalvar *var = create_internalvar (name);
2096
2097 var->kind = INTERNALVAR_MAKE_VALUE;
2098 var->u.make_value.functions = funcs;
2099 var->u.make_value.data = data;
2100 return var;
2101 }
2102
2103 /* See documentation in value.h. */
2104
2105 int
2106 compile_internalvar_to_ax (struct internalvar *var,
2107 struct agent_expr *expr,
2108 struct axs_value *value)
2109 {
2110 if (var->kind != INTERNALVAR_MAKE_VALUE
2111 || var->u.make_value.functions->compile_to_ax == NULL)
2112 return 0;
2113
2114 var->u.make_value.functions->compile_to_ax (var, expr, value,
2115 var->u.make_value.data);
2116 return 1;
2117 }
2118
2119 /* Look up an internal variable with name NAME. NAME should not
2120 normally include a dollar sign.
2121
2122 If the specified internal variable does not exist,
2123 one is created, with a void value. */
2124
2125 struct internalvar *
2126 lookup_internalvar (const char *name)
2127 {
2128 struct internalvar *var;
2129
2130 var = lookup_only_internalvar (name);
2131 if (var)
2132 return var;
2133
2134 return create_internalvar (name);
2135 }
2136
2137 /* Return current value of internal variable VAR. For variables that
2138 are not inherently typed, use a value type appropriate for GDBARCH. */
2139
2140 struct value *
2141 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2142 {
2143 struct value *val;
2144 struct trace_state_variable *tsv;
2145
2146 /* If there is a trace state variable of the same name, assume that
2147 is what we really want to see. */
2148 tsv = find_trace_state_variable (var->name);
2149 if (tsv)
2150 {
2151 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2152 &(tsv->value));
2153 if (tsv->value_known)
2154 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2155 tsv->value);
2156 else
2157 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2158 return val;
2159 }
2160
2161 switch (var->kind)
2162 {
2163 case INTERNALVAR_VOID:
2164 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2165 break;
2166
2167 case INTERNALVAR_FUNCTION:
2168 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2169 break;
2170
2171 case INTERNALVAR_INTEGER:
2172 if (!var->u.integer.type)
2173 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2174 var->u.integer.val);
2175 else
2176 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2177 break;
2178
2179 case INTERNALVAR_STRING:
2180 val = value_cstring (var->u.string, strlen (var->u.string),
2181 builtin_type (gdbarch)->builtin_char);
2182 break;
2183
2184 case INTERNALVAR_VALUE:
2185 val = value_copy (var->u.value);
2186 if (value_lazy (val))
2187 value_fetch_lazy (val);
2188 break;
2189
2190 case INTERNALVAR_MAKE_VALUE:
2191 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2192 var->u.make_value.data);
2193 break;
2194
2195 default:
2196 internal_error (__FILE__, __LINE__, _("bad kind"));
2197 }
2198
2199 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2200 on this value go back to affect the original internal variable.
2201
2202 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2203 no underlying modifiable state in the internal variable.
2204
2205 Likewise, if the variable's value is a computed lvalue, we want
2206 references to it to produce another computed lvalue, where
2207 references and assignments actually operate through the
2208 computed value's functions.
2209
2210 This means that internal variables with computed values
2211 behave a little differently from other internal variables:
2212 assignments to them don't just replace the previous value
2213 altogether. At the moment, this seems like the behavior we
2214 want. */
2215
2216 if (var->kind != INTERNALVAR_MAKE_VALUE
2217 && val->lval != lval_computed)
2218 {
2219 VALUE_LVAL (val) = lval_internalvar;
2220 VALUE_INTERNALVAR (val) = var;
2221 }
2222
2223 return val;
2224 }
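
/* Illustrative sketch of the usual internal-variable round trip; kept
   out of the build.  GDBARCH stands for any valid architecture and
   "answer" is a made-up variable name.  */
#if 0
static void
example_internalvar_usage (struct gdbarch *gdbarch)
{
  /* Referred to as "$answer" by the user; created on first lookup.  */
  struct internalvar *var = lookup_internalvar ("answer");

  /* Roughly what the CLI command "set $answer = 42" ends up doing.  */
  set_internalvar_integer (var, 42);

  /* Read it back.  The result is an lval_internalvar, so assigning
     through it would update VAR again.  */
  struct value *val = value_of_internalvar (gdbarch, var);
  LONGEST l = value_as_long (val);	/* 42.  */
}
#endif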
2225
2226 int
2227 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2228 {
2229 if (var->kind == INTERNALVAR_INTEGER)
2230 {
2231 *result = var->u.integer.val;
2232 return 1;
2233 }
2234
2235 if (var->kind == INTERNALVAR_VALUE)
2236 {
2237 struct type *type = check_typedef (value_type (var->u.value));
2238
2239 if (type->code () == TYPE_CODE_INT)
2240 {
2241 *result = value_as_long (var->u.value);
2242 return 1;
2243 }
2244 }
2245
2246 return 0;
2247 }
2248
2249 static int
2250 get_internalvar_function (struct internalvar *var,
2251 struct internal_function **result)
2252 {
2253 switch (var->kind)
2254 {
2255 case INTERNALVAR_FUNCTION:
2256 *result = var->u.fn.function;
2257 return 1;
2258
2259 default:
2260 return 0;
2261 }
2262 }
2263
2264 void
2265 set_internalvar_component (struct internalvar *var,
2266 LONGEST offset, LONGEST bitpos,
2267 LONGEST bitsize, struct value *newval)
2268 {
2269 gdb_byte *addr;
2270 struct gdbarch *arch;
2271 int unit_size;
2272
2273 switch (var->kind)
2274 {
2275 case INTERNALVAR_VALUE:
2276 addr = value_contents_writeable (var->u.value);
2277 arch = get_value_arch (var->u.value);
2278 unit_size = gdbarch_addressable_memory_unit_size (arch);
2279
2280 if (bitsize)
2281 modify_field (value_type (var->u.value), addr + offset,
2282 value_as_long (newval), bitpos, bitsize);
2283 else
2284 memcpy (addr + offset * unit_size, value_contents (newval),
2285 TYPE_LENGTH (value_type (newval)));
2286 break;
2287
2288 default:
2289 /* We can never get a component of any other kind. */
2290 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2291 }
2292 }
2293
2294 void
2295 set_internalvar (struct internalvar *var, struct value *val)
2296 {
2297 enum internalvar_kind new_kind;
2298 union internalvar_data new_data = { 0 };
2299
2300 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2301 error (_("Cannot overwrite convenience function %s"), var->name);
2302
2303 /* Prepare new contents. */
2304 switch (check_typedef (value_type (val))->code ())
2305 {
2306 case TYPE_CODE_VOID:
2307 new_kind = INTERNALVAR_VOID;
2308 break;
2309
2310 case TYPE_CODE_INTERNAL_FUNCTION:
2311 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2312 new_kind = INTERNALVAR_FUNCTION;
2313 get_internalvar_function (VALUE_INTERNALVAR (val),
2314 &new_data.fn.function);
2315 /* Copies created here are never canonical. */
2316 break;
2317
2318 default:
2319 new_kind = INTERNALVAR_VALUE;
2320 struct value *copy = value_copy (val);
2321 copy->modifiable = 1;
2322
2323 /* Force the value to be fetched from the target now, to avoid problems
2324 later when this internalvar is referenced and the target is gone or
2325 has changed. */
2326 if (value_lazy (copy))
2327 value_fetch_lazy (copy);
2328
2329 /* Release the value from the value chain to prevent it from being
2330 deleted by free_all_values. From here on this function should not
2331 call error () until new_data is installed into var->u, to avoid
2332 leaking memory. */
2333 new_data.value = release_value (copy).release ();
2334
2335 /* Internal variables which are created from values with a dynamic
2336 location don't need the location property of the origin anymore.
2337 The resolved dynamic location is used before any other address
2338 when accessing the value.
2339 If we kept it, we would still refer to the origin value.
2340 Remove the location property in case it exists. */
2341 value_type (new_data.value)->remove_dyn_prop (DYN_PROP_DATA_LOCATION);
2342
2343 break;
2344 }
2345
2346 /* Clean up old contents. */
2347 clear_internalvar (var);
2348
2349 /* Switch over. */
2350 var->kind = new_kind;
2351 var->u = new_data;
2352 /* End code which must not call error(). */
2353 }
2354
2355 void
2356 set_internalvar_integer (struct internalvar *var, LONGEST l)
2357 {
2358 /* Clean up old contents. */
2359 clear_internalvar (var);
2360
2361 var->kind = INTERNALVAR_INTEGER;
2362 var->u.integer.type = NULL;
2363 var->u.integer.val = l;
2364 }
2365
2366 void
2367 set_internalvar_string (struct internalvar *var, const char *string)
2368 {
2369 /* Clean up old contents. */
2370 clear_internalvar (var);
2371
2372 var->kind = INTERNALVAR_STRING;
2373 var->u.string = xstrdup (string);
2374 }
2375
2376 static void
2377 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2378 {
2379 /* Clean up old contents. */
2380 clear_internalvar (var);
2381
2382 var->kind = INTERNALVAR_FUNCTION;
2383 var->u.fn.function = f;
2384 var->u.fn.canonical = 1;
2385 /* Variables installed here are always the canonical version. */
2386 }
2387
2388 void
2389 clear_internalvar (struct internalvar *var)
2390 {
2391 /* Clean up old contents. */
2392 switch (var->kind)
2393 {
2394 case INTERNALVAR_VALUE:
2395 value_decref (var->u.value);
2396 break;
2397
2398 case INTERNALVAR_STRING:
2399 xfree (var->u.string);
2400 break;
2401
2402 case INTERNALVAR_MAKE_VALUE:
2403 if (var->u.make_value.functions->destroy != NULL)
2404 var->u.make_value.functions->destroy (var->u.make_value.data);
2405 break;
2406
2407 default:
2408 break;
2409 }
2410
2411 /* Reset to void kind. */
2412 var->kind = INTERNALVAR_VOID;
2413 }
2414
2415 const char *
2416 internalvar_name (const struct internalvar *var)
2417 {
2418 return var->name;
2419 }
2420
2421 static struct internal_function *
2422 create_internal_function (const char *name,
2423 internal_function_fn handler, void *cookie)
2424 {
2425 struct internal_function *ifn = XNEW (struct internal_function);
2426
2427 ifn->name = xstrdup (name);
2428 ifn->handler = handler;
2429 ifn->cookie = cookie;
2430 return ifn;
2431 }
2432
2433 const char *
2434 value_internal_function_name (struct value *val)
2435 {
2436 struct internal_function *ifn;
2437 int result;
2438
2439 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2440 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2441 gdb_assert (result);
2442
2443 return ifn->name;
2444 }
2445
2446 struct value *
2447 call_internal_function (struct gdbarch *gdbarch,
2448 const struct language_defn *language,
2449 struct value *func, int argc, struct value **argv)
2450 {
2451 struct internal_function *ifn;
2452 int result;
2453
2454 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2455 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2456 gdb_assert (result);
2457
2458 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2459 }
2460
2461 /* The 'function' command. This does nothing -- it is just a
2462 placeholder to let "help function NAME" work. This is also used as
2463 the implementation of the sub-command that is created when
2464 registering an internal function. */
2465 static void
2466 function_command (const char *command, int from_tty)
2467 {
2468 /* Do nothing. */
2469 }
2470
2471 /* Helper function that does the work for add_internal_function. */
2472
2473 static struct cmd_list_element *
2474 do_add_internal_function (const char *name, const char *doc,
2475 internal_function_fn handler, void *cookie)
2476 {
2477 struct internal_function *ifn;
2478 struct internalvar *var = lookup_internalvar (name);
2479
2480 ifn = create_internal_function (name, handler, cookie);
2481 set_internalvar_function (var, ifn);
2482
2483 return add_cmd (name, no_class, function_command, doc, &functionlist);
2484 }
2485
2486 /* See value.h. */
2487
2488 void
2489 add_internal_function (const char *name, const char *doc,
2490 internal_function_fn handler, void *cookie)
2491 {
2492 do_add_internal_function (name, doc, handler, cookie);
2493 }
2494
2495 /* See value.h. */
2496
2497 void
2498 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2499 gdb::unique_xmalloc_ptr<char> &&doc,
2500 internal_function_fn handler, void *cookie)
2501 {
2502 struct cmd_list_element *cmd
2503 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2504 doc.release ();
2505 cmd->doc_allocated = 1;
2506 name.release ();
2507 cmd->name_allocated = 1;
2508 }
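
/* Illustrative sketch of registering a convenience function from C++
   code; kept out of the build.  The name "_double" (seen by the user
   as "$_double") and the handler are made up for the example, and the
   handler signature assumes internal_function_fn is a plain function
   pointer taking the arguments passed by call_internal_function
   above.  */
#if 0
static struct value *
double_it_handler (struct gdbarch *gdbarch,
		   const struct language_defn *language,
		   void *cookie, int argc, struct value **argv)
{
  if (argc != 1)
    error (_("$_double takes exactly one argument."));
  return value_from_longest (builtin_type (gdbarch)->builtin_long,
			     2 * value_as_long (argv[0]));
}

static void
example_register_function (void)
{
  /* After this, "print $_double (21)" evaluates to 42 and
     "help function _double" shows the doc string.  */
  add_internal_function ("_double", _("Return twice the argument."),
			 double_it_handler, NULL);
}
#endif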
2509
2510 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2511 prevent cycles / duplicates. */
2512
2513 void
2514 preserve_one_value (struct value *value, struct objfile *objfile,
2515 htab_t copied_types)
2516 {
2517 if (TYPE_OBJFILE (value->type) == objfile)
2518 value->type = copy_type_recursive (objfile, value->type, copied_types);
2519
2520 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2521 value->enclosing_type = copy_type_recursive (objfile,
2522 value->enclosing_type,
2523 copied_types);
2524 }
2525
2526 /* Likewise for internal variable VAR. */
2527
2528 static void
2529 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2530 htab_t copied_types)
2531 {
2532 switch (var->kind)
2533 {
2534 case INTERNALVAR_INTEGER:
2535 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2536 var->u.integer.type
2537 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2538 break;
2539
2540 case INTERNALVAR_VALUE:
2541 preserve_one_value (var->u.value, objfile, copied_types);
2542 break;
2543 }
2544 }
2545
2546 /* Update the internal variables and value history when OBJFILE is
2547 discarded; we must copy the types out of the objfile. New global types
2548 will be created for every convenience variable which currently points to
2549 this objfile's types, and the convenience variables will be adjusted to
2550 use the new global types. */
2551
2552 void
2553 preserve_values (struct objfile *objfile)
2554 {
2555 struct internalvar *var;
2556
2557 /* Create the hash table. We allocate on the objfile's obstack, since
2558 it is soon to be deleted. */
2559 htab_up copied_types = create_copied_types_hash (objfile);
2560
2561 for (const value_ref_ptr &item : value_history)
2562 preserve_one_value (item.get (), objfile, copied_types.get ());
2563
2564 for (var = internalvars; var; var = var->next)
2565 preserve_one_internalvar (var, objfile, copied_types.get ());
2566
2567 preserve_ext_lang_values (objfile, copied_types.get ());
2568 }
2569
2570 static void
2571 show_convenience (const char *ignore, int from_tty)
2572 {
2573 struct gdbarch *gdbarch = get_current_arch ();
2574 struct internalvar *var;
2575 int varseen = 0;
2576 struct value_print_options opts;
2577
2578 get_user_print_options (&opts);
2579 for (var = internalvars; var; var = var->next)
2580 {
2581
2582 if (!varseen)
2583 {
2584 varseen = 1;
2585 }
2586 printf_filtered (("$%s = "), var->name);
2587
2588 try
2589 {
2590 struct value *val;
2591
2592 val = value_of_internalvar (gdbarch, var);
2593 value_print (val, gdb_stdout, &opts);
2594 }
2595 catch (const gdb_exception_error &ex)
2596 {
2597 fprintf_styled (gdb_stdout, metadata_style.style (),
2598 _("<error: %s>"), ex.what ());
2599 }
2600
2601 printf_filtered (("\n"));
2602 }
2603 if (!varseen)
2604 {
2605 /* This text does not mention convenience functions on purpose.
2606 The user can't create them except via Python, and if Python support
2607 is installed this message will never be printed ($_streq will
2608 exist). */
2609 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2610 "Convenience variables have "
2611 "names starting with \"$\";\n"
2612 "use \"set\" as in \"set "
2613 "$foo = 5\" to define them.\n"));
2614 }
2615 }
2616 \f
2617
2618 /* See value.h. */
2619
2620 struct value *
2621 value_from_xmethod (xmethod_worker_up &&worker)
2622 {
2623 struct value *v;
2624
2625 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2626 v->lval = lval_xcallable;
2627 v->location.xm_worker = worker.release ();
2628 v->modifiable = 0;
2629
2630 return v;
2631 }
2632
2633 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2634
2635 struct type *
2636 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2637 {
2638 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2639 && method->lval == lval_xcallable && !argv.empty ());
2640
2641 return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2642 }
2643
2644 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2645
2646 struct value *
2647 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2648 {
2649 gdb_assert (value_type (method)->code () == TYPE_CODE_XMETHOD
2650 && method->lval == lval_xcallable && !argv.empty ());
2651
2652 return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2653 }
2654 \f
2655 /* Extract a value as a C integer (a LONGEST).
2656 Knows how to convert fixed-point and floating-point
2657 values to an integer.
2658 Does not deallocate the value. */
2659
2660 LONGEST
2661 value_as_long (struct value *val)
2662 {
2663 /* This coerces arrays and functions, which is necessary (e.g.
2664 in disassemble_command). It also dereferences references, which
2665 I suspect is the most logical thing to do. */
2666 val = coerce_array (val);
2667 return unpack_long (value_type (val), value_contents (val));
2668 }
2669
2670 /* Extract a value as a C pointer. Does not deallocate the value.
2671 Note that val's type may not actually be a pointer; value_as_long
2672 handles all the cases. */
2673 CORE_ADDR
2674 value_as_address (struct value *val)
2675 {
2676 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2677
2678 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2679 whether we want this to be true eventually. */
2680 #if 0
2681 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2682 non-address (e.g. argument to "signal", "info break", etc.), or
2683 for pointers to char, in which the low bits *are* significant. */
2684 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2685 #else
2686
2687 /* There are several targets (IA-64, PowerPC, and others) which
2688 don't represent pointers to functions as simply the address of
2689 the function's entry point. For example, on the IA-64, a
2690 function pointer points to a two-word descriptor, generated by
2691 the linker, which contains the function's entry point, and the
2692 value the IA-64 "global pointer" register should have --- to
2693 support position-independent code. The linker generates
2694 descriptors only for those functions whose addresses are taken.
2695
2696 On such targets, it's difficult for GDB to convert an arbitrary
2697 function address into a function pointer; it has to either find
2698 an existing descriptor for that function, or call malloc and
2699 build its own. On some targets, it is impossible for GDB to
2700 build a descriptor at all: the descriptor must contain a jump
2701 instruction; data memory cannot be executed; and code memory
2702 cannot be modified.
2703
2704 Upon entry to this function, if VAL is a value of type `function'
2705 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2706 value_address (val) is the address of the function. This is what
2707 you'll get if you evaluate an expression like `main'. The call
2708 to COERCE_ARRAY below actually does all the usual unary
2709 conversions, which includes converting values of type `function'
2710 to `pointer to function'. This is the challenging conversion
2711 discussed above. Then, `unpack_long' will convert that pointer
2712 back into an address.
2713
2714 So, suppose the user types `disassemble foo' on an architecture
2715 with a strange function pointer representation, on which GDB
2716 cannot build its own descriptors, and suppose further that `foo'
2717 has no linker-built descriptor. The address->pointer conversion
2718 will signal an error and prevent the command from running, even
2719 though the next step would have been to convert the pointer
2720 directly back into the same address.
2721
2722 The following shortcut avoids this whole mess. If VAL is a
2723 function, just return its address directly. */
2724 if (value_type (val)->code () == TYPE_CODE_FUNC
2725 || value_type (val)->code () == TYPE_CODE_METHOD)
2726 return value_address (val);
2727
2728 val = coerce_array (val);
2729
2730 /* Some architectures (e.g. Harvard) map instruction and data
2731 addresses onto a single large unified address space. For
2732 instance, an architecture may consider a large integer in the
2733 range 0x10000000 .. 0x1000ffff to already represent a data
2734 address (hence not need a pointer-to-address conversion), while
2735 a small integer would still need an integer-to-pointer-to-address
2736 conversion. Just assume such architectures handle all
2737 integer conversions in a single function. */
2738
2739 /* JimB writes:
2740
2741 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2742 must admonish GDB hackers to make sure its behavior matches the
2743 compiler's, whenever possible.
2744
2745 In general, I think GDB should evaluate expressions the same way
2746 the compiler does. When the user copies an expression out of
2747 their source code and hands it to a `print' command, they should
2748 get the same value the compiler would have computed. Any
2749 deviation from this rule can cause major confusion and annoyance,
2750 and needs to be justified carefully. In other words, GDB doesn't
2751 really have the freedom to do these conversions in clever and
2752 useful ways.
2753
2754 AndrewC pointed out that users aren't complaining about how GDB
2755 casts integers to pointers; they are complaining that they can't
2756 take an address from a disassembly listing and give it to `x/i'.
2757 This is certainly important.
2758
2759 Adding an architecture method like integer_to_address() certainly
2760 makes it possible for GDB to "get it right" in all circumstances
2761 --- the target has complete control over how things get done, so
2762 people can Do The Right Thing for their target without breaking
2763 anyone else. The standard doesn't specify how integers get
2764 converted to pointers; usually, the ABI doesn't either, but
2765 ABI-specific code is a more reasonable place to handle it. */
2766
2767 if (value_type (val)->code () != TYPE_CODE_PTR
2768 && !TYPE_IS_REFERENCE (value_type (val))
2769 && gdbarch_integer_to_address_p (gdbarch))
2770 return gdbarch_integer_to_address (gdbarch, value_type (val),
2771 value_contents (val));
2772
2773 return unpack_long (value_type (val), value_contents (val));
2774 #endif
2775 }
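
/* Illustrative sketch; kept out of the build.  PTR_VAL stands for any
   value of pointer (or function) type obtained elsewhere.  */
#if 0
static void
example_value_as_address (struct value *ptr_val)
{
  /* Apply the conversions described above and hand the resulting
     address to a target memory read.  */
  CORE_ADDR addr = value_as_address (ptr_val);
  gdb_byte byte;

  read_memory (addr, &byte, 1);
}
#endif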
2776 \f
2777 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2778 as a long, assuming the raw data is described by type TYPE.
2779 Knows how to convert different sizes of values, including fixed-
2780 and floating-point values, to an integer. We don't assume
2781 any alignment for the raw data. Return value is in host byte order.
2782
2783 If you want functions and arrays to be coerced to pointers, and
2784 references to be dereferenced, call value_as_long() instead.
2785
2786 C++: It is assumed that the front-end has taken care of
2787 all matters concerning pointers to members. A pointer
2788 to member which reaches here is considered to be equivalent
2789 to an INT (or some size). After all, it is only an offset. */
2790
2791 LONGEST
2792 unpack_long (struct type *type, const gdb_byte *valaddr)
2793 {
2794 if (is_fixed_point_type (type))
2795 type = type->fixed_point_type_base_type ();
2796
2797 enum bfd_endian byte_order = type_byte_order (type);
2798 enum type_code code = type->code ();
2799 int len = TYPE_LENGTH (type);
2800 int nosign = type->is_unsigned ();
2801
2802 switch (code)
2803 {
2804 case TYPE_CODE_TYPEDEF:
2805 return unpack_long (check_typedef (type), valaddr);
2806 case TYPE_CODE_ENUM:
2807 case TYPE_CODE_FLAGS:
2808 case TYPE_CODE_BOOL:
2809 case TYPE_CODE_INT:
2810 case TYPE_CODE_CHAR:
2811 case TYPE_CODE_RANGE:
2812 case TYPE_CODE_MEMBERPTR:
2813 {
2814 LONGEST result;
2815
2816 if (type->bit_size_differs_p ())
2817 {
2818 unsigned bit_off = type->bit_offset ();
2819 unsigned bit_size = type->bit_size ();
2820 if (bit_size == 0)
2821 {
2822 /* unpack_bits_as_long doesn't handle this case the
2823 way we'd like, so handle it here. */
2824 result = 0;
2825 }
2826 else
2827 result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
2828 }
2829 else
2830 {
2831 if (nosign)
2832 result = extract_unsigned_integer (valaddr, len, byte_order);
2833 else
2834 result = extract_signed_integer (valaddr, len, byte_order);
2835 }
2836 if (code == TYPE_CODE_RANGE)
2837 result += type->bounds ()->bias;
2838 return result;
2839 }
2840
2841 case TYPE_CODE_FLT:
2842 case TYPE_CODE_DECFLOAT:
2843 return target_float_to_longest (valaddr, type);
2844
2845 case TYPE_CODE_FIXED_POINT:
2846 {
2847 gdb_mpq vq;
2848 vq.read_fixed_point (gdb::make_array_view (valaddr, len),
2849 byte_order, nosign,
2850 type->fixed_point_scaling_factor ());
2851
2852 gdb_mpz vz;
2853 mpz_tdiv_q (vz.val, mpq_numref (vq.val), mpq_denref (vq.val));
2854 return vz.as_integer<LONGEST> ();
2855 }
2856
2857 case TYPE_CODE_PTR:
2858 case TYPE_CODE_REF:
2859 case TYPE_CODE_RVALUE_REF:
2860 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2861 whether we want this to be true eventually. */
2862 return extract_typed_address (valaddr, type);
2863
2864 default:
2865 error (_("Value can't be converted to integer."));
2866 }
2867 }
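
/* Worked example for unpack_long; kept out of the build.  INT32_TYPE
   stands for a 4-byte signed integer type, e.g.
   builtin_type (gdbarch)->builtin_int32.  */
#if 0
static void
example_unpack_long (struct type *int32_type)
{
  const gdb_byte buf[4] = { 0xff, 0xff, 0xff, 0xfe };

  LONGEST v = unpack_long (int32_type, buf);
  /* If the type is little-endian this yields -16777217 (0xfeffffff);
     if big-endian, -2 (0xfffffffe).  */
}
#endif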
2868
2869 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2870 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2871 We don't assume any alignment for the raw data. Return value is in
2872 host byte order.
2873
2874 If you want functions and arrays to be coerced to pointers, and
2875 references to be dereferenced, call value_as_address() instead.
2876
2877 C++: It is assumed that the front-end has taken care of
2878 all matters concerning pointers to members. A pointer
2879 to member which reaches here is considered to be equivalent
2880 to an INT (or some size). After all, it is only an offset. */
2881
2882 CORE_ADDR
2883 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2884 {
2885 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2886 whether we want this to be true eventually. */
2887 return unpack_long (type, valaddr);
2888 }
2889
2890 bool
2891 is_floating_value (struct value *val)
2892 {
2893 struct type *type = check_typedef (value_type (val));
2894
2895 if (is_floating_type (type))
2896 {
2897 if (!target_float_is_valid (value_contents (val), type))
2898 error (_("Invalid floating value found in program."));
2899 return true;
2900 }
2901
2902 return false;
2903 }
2904
2905 \f
2906 /* Get the value of the FIELDNO'th field (which must be static) of
2907 TYPE. */
2908
2909 struct value *
2910 value_static_field (struct type *type, int fieldno)
2911 {
2912 struct value *retval;
2913
2914 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2915 {
2916 case FIELD_LOC_KIND_PHYSADDR:
2917 retval = value_at_lazy (type->field (fieldno).type (),
2918 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2919 break;
2920 case FIELD_LOC_KIND_PHYSNAME:
2921 {
2922 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2923 /* TYPE_FIELD_NAME (type, fieldno); */
2924 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2925
2926 if (sym.symbol == NULL)
2927 {
2928 /* With some compilers, e.g. HP aCC, static data members are
2929 reported as non-debuggable symbols. */
2930 struct bound_minimal_symbol msym
2931 = lookup_minimal_symbol (phys_name, NULL, NULL);
2932 struct type *field_type = type->field (fieldno).type ();
2933
2934 if (!msym.minsym)
2935 retval = allocate_optimized_out_value (field_type);
2936 else
2937 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2938 }
2939 else
2940 retval = value_of_variable (sym.symbol, sym.block);
2941 break;
2942 }
2943 default:
2944 gdb_assert_not_reached ("unexpected field location kind");
2945 }
2946
2947 return retval;
2948 }
2949
2950 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2951 You have to be careful here, since the size of the data area for the value
2952 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2953 than the old enclosing type, you have to allocate more space for the
2954 data. */
2955
2956 void
2957 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2958 {
2959 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2960 {
2961 check_type_length_before_alloc (new_encl_type);
2962 val->contents
2963 .reset ((gdb_byte *) xrealloc (val->contents.release (),
2964 TYPE_LENGTH (new_encl_type)));
2965 }
2966
2967 val->enclosing_type = new_encl_type;
2968 }
2969
2970 /* Given a value ARG1 (offset by OFFSET bytes)
2971 of a struct or union type ARG_TYPE,
2972 extract and return the value of one of its (non-static) fields.
2973 FIELDNO says which field. */
2974
2975 struct value *
2976 value_primitive_field (struct value *arg1, LONGEST offset,
2977 int fieldno, struct type *arg_type)
2978 {
2979 struct value *v;
2980 struct type *type;
2981 struct gdbarch *arch = get_value_arch (arg1);
2982 int unit_size = gdbarch_addressable_memory_unit_size (arch);
2983
2984 arg_type = check_typedef (arg_type);
2985 type = arg_type->field (fieldno).type ();
2986
2987 /* Call check_typedef on our type to make sure that, if TYPE
2988 is a TYPE_CODE_TYPEDEF, its length is set to the length
2989 of the target type instead of zero. However, we do not
2990 replace the typedef type by the target type, because we want
2991 to keep the typedef in order to be able to print the type
2992 description correctly. */
2993 check_typedef (type);
2994
2995 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2996 {
2997 /* Handle packed fields.
2998
2999 Create a new value for the bitfield, with bitpos and bitsize
3000 set. If possible, arrange offset and bitpos so that we can
3001 do a single aligned read of the size of the containing type.
3002 Otherwise, adjust offset to the byte containing the first
3003 bit. Assume that the address, offset, and embedded offset
3004 are sufficiently aligned. */
3005
3006 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
3007 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
3008
3009 v = allocate_value_lazy (type);
3010 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
3011 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
3012 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
3013 v->bitpos = bitpos % container_bitsize;
3014 else
3015 v->bitpos = bitpos % 8;
3016 v->offset = (value_embedded_offset (arg1)
3017 + offset
3018 + (bitpos - v->bitpos) / 8);
3019 set_value_parent (v, arg1);
3020 if (!value_lazy (arg1))
3021 value_fetch_lazy (v);
3022 }
3023 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
3024 {
3025 /* This field is actually a base subobject, so preserve the
3026 entire object's contents for later references to virtual
3027 bases, etc. */
3028 LONGEST boffset;
3029
3030 /* Lazy register values with offsets are not supported. */
3031 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3032 value_fetch_lazy (arg1);
3033
3034 /* We special case virtual inheritance here because this
3035 requires access to the contents, which we would rather avoid
3036 for references to ordinary fields of unavailable values. */
3037 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
3038 boffset = baseclass_offset (arg_type, fieldno,
3039 value_contents (arg1),
3040 value_embedded_offset (arg1),
3041 value_address (arg1),
3042 arg1);
3043 else
3044 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
3045
3046 if (value_lazy (arg1))
3047 v = allocate_value_lazy (value_enclosing_type (arg1));
3048 else
3049 {
3050 v = allocate_value (value_enclosing_type (arg1));
3051 value_contents_copy_raw (v, 0, arg1, 0,
3052 TYPE_LENGTH (value_enclosing_type (arg1)));
3053 }
3054 v->type = type;
3055 v->offset = value_offset (arg1);
3056 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
3057 }
3058 else if (NULL != TYPE_DATA_LOCATION (type))
3059 {
3060 /* Field is a dynamic data member. */
3061
3062 gdb_assert (0 == offset);
3063 /* We expect an already resolved data location. */
3064 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
3065 /* For dynamic data types defer memory allocation
3066 until we actually access the value. */
3067 v = allocate_value_lazy (type);
3068 }
3069 else
3070 {
3071 /* Plain old data member */
3072 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
3073 / (HOST_CHAR_BIT * unit_size));
3074
3075 /* Lazy register values with offsets are not supported. */
3076 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
3077 value_fetch_lazy (arg1);
3078
3079 if (value_lazy (arg1))
3080 v = allocate_value_lazy (type);
3081 else
3082 {
3083 v = allocate_value (type);
3084 value_contents_copy_raw (v, value_embedded_offset (v),
3085 arg1, value_embedded_offset (arg1) + offset,
3086 type_length_units (type));
3087 }
3088 v->offset = (value_offset (arg1) + offset
3089 + value_embedded_offset (arg1));
3090 }
3091 set_value_component_location (v, arg1);
3092 return v;
3093 }
3094
3095 /* Given a value ARG1 of a struct or union type,
3096 extract and return the value of one of its (non-static) fields.
3097 FIELDNO says which field. */
3098
3099 struct value *
3100 value_field (struct value *arg1, int fieldno)
3101 {
3102 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3103 }
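
/* Illustrative sketch; kept out of the build.  STRUCT_VAL stands for
   a value of some struct type whose field 0 is assumed to be a plain
   integer data member.  */
#if 0
static void
example_value_field (struct value *struct_val)
{
  /* The component remembers its parent's location, so if STRUCT_VAL
     is an lvalue in target memory, an assignment through FIELD0 would
     write back to that memory.  */
  struct value *field0 = value_field (struct_val, 0);
  LONGEST member = value_as_long (field0);
}
#endif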
3104
3105 /* Return a non-virtual function as a value.
3106 F is the list of member functions which contains the desired method.
3107 J is an index into F which provides the desired method.
3108
3109 We only use the symbol for its address, so be happy with either a
3110 full symbol or a minimal symbol. */
3111
3112 struct value *
3113 value_fn_field (struct value **arg1p, struct fn_field *f,
3114 int j, struct type *type,
3115 LONGEST offset)
3116 {
3117 struct value *v;
3118 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3119 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3120 struct symbol *sym;
3121 struct bound_minimal_symbol msym;
3122
3123 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3124 if (sym != NULL)
3125 {
3126 memset (&msym, 0, sizeof (msym));
3127 }
3128 else
3129 {
3130 gdb_assert (sym == NULL);
3131 msym = lookup_bound_minimal_symbol (physname);
3132 if (msym.minsym == NULL)
3133 return NULL;
3134 }
3135
3136 v = allocate_value (ftype);
3137 VALUE_LVAL (v) = lval_memory;
3138 if (sym)
3139 {
3140 set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3141 }
3142 else
3143 {
3144 /* The minimal symbol might point to a function descriptor;
3145 resolve it to the actual code address instead. */
3146 struct objfile *objfile = msym.objfile;
3147 struct gdbarch *gdbarch = objfile->arch ();
3148
3149 set_value_address (v,
3150 gdbarch_convert_from_func_ptr_addr
3151 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ()));
3152 }
3153
3154 if (arg1p)
3155 {
3156 if (type != value_type (*arg1p))
3157 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3158 value_addr (*arg1p)));
3159
3160 /* Move the `this' pointer according to the offset.
3161 VALUE_OFFSET (*arg1p) += offset; */
3162 }
3163
3164 return v;
3165 }
3166
3167 \f
3168
3169 /* See value.h. */
3170
3171 LONGEST
3172 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3173 LONGEST bitpos, LONGEST bitsize)
3174 {
3175 enum bfd_endian byte_order = type_byte_order (field_type);
3176 ULONGEST val;
3177 ULONGEST valmask;
3178 int lsbcount;
3179 LONGEST bytes_read;
3180 LONGEST read_offset;
3181
3182 /* Read the minimum number of bytes required; there may not be
3183 enough bytes to read an entire ULONGEST. */
3184 field_type = check_typedef (field_type);
3185 if (bitsize)
3186 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3187 else
3188 {
3189 bytes_read = TYPE_LENGTH (field_type);
3190 bitsize = 8 * bytes_read;
3191 }
3192
3193 read_offset = bitpos / 8;
3194
3195 val = extract_unsigned_integer (valaddr + read_offset,
3196 bytes_read, byte_order);
3197
3198 /* Extract bits. See comment above. */
3199
3200 if (byte_order == BFD_ENDIAN_BIG)
3201 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3202 else
3203 lsbcount = (bitpos % 8);
3204 val >>= lsbcount;
3205
3206 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3207 If the field is signed, and is negative, then sign extend. */
3208
3209 if (bitsize < 8 * (int) sizeof (val))
3210 {
3211 valmask = (((ULONGEST) 1) << bitsize) - 1;
3212 val &= valmask;
3213 if (!field_type->is_unsigned ())
3214 {
3215 if (val & (valmask ^ (valmask >> 1)))
3216 {
3217 val |= ~valmask;
3218 }
3219 }
3220 }
3221
3222 return val;
3223 }
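
/* Worked example for unpack_bits_as_long; kept out of the build.
   UINT8_TYPE stands for a 1-byte unsigned type, e.g.
   builtin_type (gdbarch)->builtin_uint8.  */
#if 0
static void
example_unpack_bits (struct type *uint8_type)
{
  const gdb_byte buf[1] = { 0xe4 };	/* 1110 0100 in binary.  */

  /* Extract the 3-bit field starting at bit position 5.  With a
     little-endian field type the byte is shifted right by 5 and
     masked to 3 bits, yielding 7; with a big-endian type the shift
     count is 8 - 5 - 3 = 0, yielding 4.  */
  LONGEST field = unpack_bits_as_long (uint8_type, buf, 5, 3);
}
#endif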
3224
3225 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3226 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3227 ORIGINAL_VALUE, which must not be NULL. See
3228 unpack_value_bits_as_long for more details. */
3229
3230 int
3231 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3232 LONGEST embedded_offset, int fieldno,
3233 const struct value *val, LONGEST *result)
3234 {
3235 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3236 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3237 struct type *field_type = type->field (fieldno).type ();
3238 int bit_offset;
3239
3240 gdb_assert (val != NULL);
3241
3242 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3243 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3244 || !value_bits_available (val, bit_offset, bitsize))
3245 return 0;
3246
3247 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3248 bitpos, bitsize);
3249 return 1;
3250 }
3251
3252 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3253 object at VALADDR. See unpack_bits_as_long for more details. */
3254
3255 LONGEST
3256 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3257 {
3258 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3259 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3260 struct type *field_type = type->field (fieldno).type ();
3261
3262 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3263 }
3264
3265 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3266 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3267 the contents in DEST_VAL, zero or sign extending if the type of
3268 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3269 VAL. If the VAL's contents required to extract the bitfield from
3270 are unavailable/optimized out, DEST_VAL is correspondingly
3271 marked unavailable/optimized out. */
3272
3273 void
3274 unpack_value_bitfield (struct value *dest_val,
3275 LONGEST bitpos, LONGEST bitsize,
3276 const gdb_byte *valaddr, LONGEST embedded_offset,
3277 const struct value *val)
3278 {
3279 enum bfd_endian byte_order;
3280 int src_bit_offset;
3281 int dst_bit_offset;
3282 struct type *field_type = value_type (dest_val);
3283
3284 byte_order = type_byte_order (field_type);
3285
3286 /* First, unpack and sign extend the bitfield as if it was wholly
3287 valid. Optimized out/unavailable bits are read as zero, but
3288 that's OK, as they'll end up marked below. If the VAL is
3289 wholly-invalid we may have skipped allocating its contents,
3290 though. See allocate_optimized_out_value. */
3291 if (valaddr != NULL)
3292 {
3293 LONGEST num;
3294
3295 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3296 bitpos, bitsize);
3297 store_signed_integer (value_contents_raw (dest_val),
3298 TYPE_LENGTH (field_type), byte_order, num);
3299 }
3300
3301 /* Now copy the optimized out / unavailability ranges to the right
3302 bits. */
3303 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3304 if (byte_order == BFD_ENDIAN_BIG)
3305 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3306 else
3307 dst_bit_offset = 0;
3308 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3309 val, src_bit_offset, bitsize);
3310 }
3311
3312 /* Return a new value with type TYPE, which is the FIELDNO'th field of the
3313 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3314 of VAL. If the VAL's contents required to extract the bitfield
3315 from are unavailable/optimized out, the new value is
3316 correspondingly marked unavailable/optimized out. */
3317
3318 struct value *
3319 value_field_bitfield (struct type *type, int fieldno,
3320 const gdb_byte *valaddr,
3321 LONGEST embedded_offset, const struct value *val)
3322 {
3323 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3324 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3325 struct value *res_val = allocate_value (type->field (fieldno).type ());
3326
3327 unpack_value_bitfield (res_val, bitpos, bitsize,
3328 valaddr, embedded_offset, val);
3329
3330 return res_val;
3331 }
3332
3333 /* Modify the value of a bitfield. ADDR points to a block of memory in
3334 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3335 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3336 indicate which bits (in target bit order) comprise the bitfield.
3337 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3338 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3339
3340 void
3341 modify_field (struct type *type, gdb_byte *addr,
3342 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3343 {
3344 enum bfd_endian byte_order = type_byte_order (type);
3345 ULONGEST oword;
3346 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3347 LONGEST bytesize;
3348
3349 /* Normalize BITPOS. */
3350 addr += bitpos / 8;
3351 bitpos %= 8;
3352
3353 /* If a negative fieldval fits in the field in question, chop
3354 off the sign extension bits. */
3355 if ((~fieldval & ~(mask >> 1)) == 0)
3356 fieldval &= mask;
3357
3358 /* Warn if value is too big to fit in the field in question. */
3359 if (0 != (fieldval & ~mask))
3360 {
3361 /* FIXME: would like to include fieldval in the message, but
3362 we don't have a sprintf_longest. */
3363 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3364
3365 /* Truncate it, otherwise adjoining fields may be corrupted. */
3366 fieldval &= mask;
3367 }
3368
3369 /* Ensure no bytes outside of the modified ones get accessed as it may cause
3370 false valgrind reports. */
3371
3372 bytesize = (bitpos + bitsize + 7) / 8;
3373 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3374
3375 /* Shifting for bit field depends on endianness of the target machine. */
3376 if (byte_order == BFD_ENDIAN_BIG)
3377 bitpos = bytesize * 8 - bitpos - bitsize;
3378
3379 oword &= ~(mask << bitpos);
3380 oword |= fieldval << bitpos;
3381
3382 store_unsigned_integer (addr, bytesize, byte_order, oword);
3383 }
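
/* Worked example for modify_field; kept out of the build.  UINT8_TYPE
   is as in the unpack_bits_as_long example above.  */
#if 0
static void
example_modify_field (struct type *uint8_type)
{
  gdb_byte buf[1] = { 0x00 };

  /* Store the value 5 into the 3-bit field at bit position 4.  Only
     the byte containing the field is read and rewritten.  For a
     little-endian type BUF becomes 0x50 (bits 4..6 set to 101); for a
     big-endian type the bit numbering is mirrored and BUF becomes
     0x0a.  */
  modify_field (uint8_type, buf, 5, 4, 3);
}
#endif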
3384 \f
3385 /* Pack NUM into BUF using a target format of TYPE. */
3386
3387 void
3388 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3389 {
3390 enum bfd_endian byte_order = type_byte_order (type);
3391 LONGEST len;
3392
3393 type = check_typedef (type);
3394 len = TYPE_LENGTH (type);
3395
3396 switch (type->code ())
3397 {
3398 case TYPE_CODE_RANGE:
3399 num -= type->bounds ()->bias;
3400 /* Fall through. */
3401 case TYPE_CODE_INT:
3402 case TYPE_CODE_CHAR:
3403 case TYPE_CODE_ENUM:
3404 case TYPE_CODE_FLAGS:
3405 case TYPE_CODE_BOOL:
3406 case TYPE_CODE_MEMBERPTR:
3407 if (type->bit_size_differs_p ())
3408 {
3409 unsigned bit_off = type->bit_offset ();
3410 unsigned bit_size = type->bit_size ();
3411 num &= ((ULONGEST) 1 << bit_size) - 1;
3412 num <<= bit_off;
3413 }
3414 store_signed_integer (buf, len, byte_order, num);
3415 break;
3416
3417 case TYPE_CODE_REF:
3418 case TYPE_CODE_RVALUE_REF:
3419 case TYPE_CODE_PTR:
3420 store_typed_address (buf, type, (CORE_ADDR) num);
3421 break;
3422
3423 case TYPE_CODE_FLT:
3424 case TYPE_CODE_DECFLOAT:
3425 target_float_from_longest (buf, type, num);
3426 break;
3427
3428 default:
3429 error (_("Unexpected type (%d) encountered for integer constant."),
3430 type->code ());
3431 }
3432 }
3433
3434
3435 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3436
3437 static void
3438 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3439 {
3440 LONGEST len;
3441 enum bfd_endian byte_order;
3442
3443 type = check_typedef (type);
3444 len = TYPE_LENGTH (type);
3445 byte_order = type_byte_order (type);
3446
3447 switch (type->code ())
3448 {
3449 case TYPE_CODE_INT:
3450 case TYPE_CODE_CHAR:
3451 case TYPE_CODE_ENUM:
3452 case TYPE_CODE_FLAGS:
3453 case TYPE_CODE_BOOL:
3454 case TYPE_CODE_RANGE:
3455 case TYPE_CODE_MEMBERPTR:
3456 if (type->bit_size_differs_p ())
3457 {
3458 unsigned bit_off = type->bit_offset ();
3459 unsigned bit_size = type->bit_size ();
3460 num &= ((ULONGEST) 1 << bit_size) - 1;
3461 num <<= bit_off;
3462 }
3463 store_unsigned_integer (buf, len, byte_order, num);
3464 break;
3465
3466 case TYPE_CODE_REF:
3467 case TYPE_CODE_RVALUE_REF:
3468 case TYPE_CODE_PTR:
3469 store_typed_address (buf, type, (CORE_ADDR) num);
3470 break;
3471
3472 case TYPE_CODE_FLT:
3473 case TYPE_CODE_DECFLOAT:
3474 target_float_from_ulongest (buf, type, num);
3475 break;
3476
3477 default:
3478 error (_("Unexpected type (%d) encountered "
3479 "for unsigned integer constant."),
3480 type->code ());
3481 }
3482 }
3483
3484
3485 /* Convert C numbers into newly allocated values. */
3486
3487 struct value *
3488 value_from_longest (struct type *type, LONGEST num)
3489 {
3490 struct value *val = allocate_value (type);
3491
3492 pack_long (value_contents_raw (val), type, num);
3493 return val;
3494 }
3495
3496
3497 /* Convert C unsigned numbers into newly allocated values. */
3498
3499 struct value *
3500 value_from_ulongest (struct type *type, ULONGEST num)
3501 {
3502 struct value *val = allocate_value (type);
3503
3504 pack_unsigned_long (value_contents_raw (val), type, num);
3505
3506 return val;
3507 }
3508
3509
3510 /* Create a value representing a pointer of type TYPE to the address
3511 ADDR. */
3512
3513 struct value *
3514 value_from_pointer (struct type *type, CORE_ADDR addr)
3515 {
3516 struct value *val = allocate_value (type);
3517
3518 store_typed_address (value_contents_raw (val),
3519 check_typedef (type), addr);
3520 return val;
3521 }
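
/* Illustrative sketch of the constructors above; kept out of the
   build.  GDBARCH stands for any valid architecture; on a typical
   architecture the pointer round-trips back to the same address.  */
#if 0
static void
example_value_constructors (struct gdbarch *gdbarch)
{
  struct value *i
    = value_from_longest (builtin_type (gdbarch)->builtin_int, -7);
  struct value *u
    = value_from_ulongest (builtin_type (gdbarch)->builtin_unsigned_long, 7);
  struct value *p
    = value_from_pointer (builtin_type (gdbarch)->builtin_data_ptr, 0x1000);

  gdb_assert (value_as_long (i) == -7);
  gdb_assert (value_as_address (p) == 0x1000);
}
#endif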
3522
3523 /* Create and return a value object of TYPE containing the value D. The
3524 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3525 it is converted to target format. */
3526
3527 struct value *
3528 value_from_host_double (struct type *type, double d)
3529 {
3530 struct value *value = allocate_value (type);
3531 gdb_assert (type->code () == TYPE_CODE_FLT);
3532 target_float_from_host_double (value_contents_raw (value),
3533 value_type (value), d);
3534 return value;
3535 }
3536
3537 /* Create a value of type TYPE whose contents come from VALADDR, if it
3538 is non-null, and whose memory address (in the inferior) is
3539 ADDRESS. The type of the created value may differ from the passed
3540 type TYPE. Make sure to retrieve the value's new type after this call.
3541 Note that TYPE is not passed through resolve_dynamic_type; this is
3542 a special API intended for use only by Ada. */
3543
3544 struct value *
3545 value_from_contents_and_address_unresolved (struct type *type,
3546 const gdb_byte *valaddr,
3547 CORE_ADDR address)
3548 {
3549 struct value *v;
3550
3551 if (valaddr == NULL)
3552 v = allocate_value_lazy (type);
3553 else
3554 v = value_from_contents (type, valaddr);
3555 VALUE_LVAL (v) = lval_memory;
3556 set_value_address (v, address);
3557 return v;
3558 }
3559
3560 /* Create a value of type TYPE whose contents come from VALADDR, if it
3561 is non-null, and whose memory address (in the inferior) is
3562 ADDRESS. The type of the created value may differ from the passed
3563 type TYPE. Make sure to retrieve the value's new type after this call. */
3564
3565 struct value *
3566 value_from_contents_and_address (struct type *type,
3567 const gdb_byte *valaddr,
3568 CORE_ADDR address)
3569 {
3570 gdb::array_view<const gdb_byte> view;
3571 if (valaddr != nullptr)
3572 view = gdb::make_array_view (valaddr, TYPE_LENGTH (type));
3573 struct type *resolved_type = resolve_dynamic_type (type, view, address);
3574 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3575 struct value *v;
3576
3577 if (valaddr == NULL)
3578 v = allocate_value_lazy (resolved_type);
3579 else
3580 v = value_from_contents (resolved_type, valaddr);
3581 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3582 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3583 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3584 VALUE_LVAL (v) = lval_memory;
3585 set_value_address (v, address);
3586 return v;
3587 }
3588
3589 /* Create a value of type TYPE holding the contents CONTENTS.
3590 The new value is `not_lval'. */
3591
3592 struct value *
3593 value_from_contents (struct type *type, const gdb_byte *contents)
3594 {
3595 struct value *result;
3596
3597 result = allocate_value (type);
3598 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3599 return result;
3600 }
3601
3602 /* Extract a value from the value history. Input will be of the form
3603 $digits or $$digits. See block comment above 'write_dollar_variable'
3604 for details. */
3605
3606 struct value *
3607 value_from_history_ref (const char *h, const char **endp)
3608 {
3609 int index, len;
3610
3611 if (h[0] == '$')
3612 len = 1;
3613 else
3614 return NULL;
3615
3616 if (h[1] == '$')
3617 len = 2;
3618
3619 /* Find length of numeral string. */
3620 for (; isdigit (h[len]); len++)
3621 ;
3622
3623 /* Make sure numeral string is not part of an identifier. */
3624 if (h[len] == '_' || isalpha (h[len]))
3625 return NULL;
3626
3627 /* Now collect the index value. */
3628 if (h[1] == '$')
3629 {
3630 if (len == 2)
3631 {
3632 /* For some bizarre reason, "$$" is equivalent to "$$1",
3633 rather than to "$$0" as it ought to be! */
3634 index = -1;
3635 *endp += len;
3636 }
3637 else
3638 {
3639 char *local_end;
3640
3641 index = -strtol (&h[2], &local_end, 10);
3642 *endp = local_end;
3643 }
3644 }
3645 else
3646 {
3647 if (len == 1)
3648 {
3649 /* "$" is equivalent to "$0". */
3650 index = 0;
3651 *endp += len;
3652 }
3653 else
3654 {
3655 char *local_end;
3656
3657 index = strtol (&h[1], &local_end, 10);
3658 *endp = local_end;
3659 }
3660 }
3661
3662 return access_value_history (index);
3663 }
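
/* Illustrative sketch; kept out of the build.  Shows how a history
   reference embedded in a larger expression string is consumed.  */
#if 0
static void
example_history_ref (void)
{
  const char *str = "$$2 + 1";
  const char *end = str;

  /* Consumes "$$2" and leaves END pointing at " + 1".  The result is
     a copy of the value two entries before the most recent one, or an
     error if the history does not reach back that far.  */
  struct value *v = value_from_history_ref (str, &end);
}
#endif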
3664
3665 /* Get the component value (offset by OFFSET bytes) of a struct or
3666 union WHOLE. Component's type is TYPE. */
3667
3668 struct value *
3669 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3670 {
3671 struct value *v;
3672
3673 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3674 v = allocate_value_lazy (type);
3675 else
3676 {
3677 v = allocate_value (type);
3678 value_contents_copy (v, value_embedded_offset (v),
3679 whole, value_embedded_offset (whole) + offset,
3680 type_length_units (type));
3681 }
3682 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3683 set_value_component_location (v, whole);
3684
3685 return v;
3686 }
3687
3688 struct value *
3689 coerce_ref_if_computed (const struct value *arg)
3690 {
3691 const struct lval_funcs *funcs;
3692
3693 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3694 return NULL;
3695
3696 if (value_lval_const (arg) != lval_computed)
3697 return NULL;
3698
3699 funcs = value_computed_funcs (arg);
3700 if (funcs->coerce_ref == NULL)
3701 return NULL;
3702
3703 return funcs->coerce_ref (arg);
3704 }
3705
3706 /* See value.h for a description. */
3707
3708 struct value *
3709 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3710 const struct type *original_type,
3711 struct value *original_value,
3712 CORE_ADDR original_value_address)
3713 {
3714 gdb_assert (original_type->code () == TYPE_CODE_PTR
3715 || TYPE_IS_REFERENCE (original_type));
3716
3717 struct type *original_target_type = TYPE_TARGET_TYPE (original_type);
3718 gdb::array_view<const gdb_byte> view;
3719 struct type *resolved_original_target_type
3720 = resolve_dynamic_type (original_target_type, view,
3721 original_value_address);
3722
3723 /* Re-adjust type. */
3724 deprecated_set_value_type (value, resolved_original_target_type);
3725
3726 /* Add embedding info. */
3727 set_value_enclosing_type (value, enc_type);
3728 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3729
3730 /* We may be pointing to an object of some derived type. */
3731 return value_full_object (value, NULL, 0, 0, 0);
3732 }
3733
3734 struct value *
3735 coerce_ref (struct value *arg)
3736 {
3737 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3738 struct value *retval;
3739 struct type *enc_type;
3740
3741 retval = coerce_ref_if_computed (arg);
3742 if (retval)
3743 return retval;
3744
3745 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3746 return arg;
3747
3748 enc_type = check_typedef (value_enclosing_type (arg));
3749 enc_type = TYPE_TARGET_TYPE (enc_type);
3750
3751 CORE_ADDR addr = unpack_pointer (value_type (arg), value_contents (arg));
3752 retval = value_at_lazy (enc_type, addr);
3753 enc_type = value_type (retval);
3754 return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3755 arg, addr);
3756 }
3757
3758 struct value *
3759 coerce_array (struct value *arg)
3760 {
3761 struct type *type;
3762
3763 arg = coerce_ref (arg);
3764 type = check_typedef (value_type (arg));
3765
3766 switch (type->code ())
3767 {
3768 case TYPE_CODE_ARRAY:
3769 if (!type->is_vector () && current_language->c_style_arrays_p ())
3770 arg = value_coerce_array (arg);
3771 break;
3772 case TYPE_CODE_FUNC:
3773 arg = value_coerce_function (arg);
3774 break;
3775 }
3776 return arg;
3777 }
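
/* Illustrative sketch only (not part of upstream GDB): the typical
   call-site pattern, where ARG is a hypothetical argument value being
   prepared for an inferior function call in a C-like language.  */
#if 0
{
  /* For languages with C-style arrays, non-vector arrays decay to
     pointers and functions decay to function pointers; anything else is
     returned unchanged.  */
  struct value *callee_arg = coerce_array (arg);
}
#endif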
3778 \f
3779
3780 /* Return the return value convention that will be used for the
3781 specified type. */
3782
3783 enum return_value_convention
3784 struct_return_convention (struct gdbarch *gdbarch,
3785 struct value *function, struct type *value_type)
3786 {
3787 enum type_code code = value_type->code ();
3788
3789 if (code == TYPE_CODE_ERROR)
3790 error (_("Function return type unknown."));
3791
3792 /* Probe the architecture for the return-value convention. */
3793 return gdbarch_return_value (gdbarch, function, value_type,
3794 NULL, NULL, NULL);
3795 }
3796
3797 /* Return true if the function returning the specified type is using
3798 the convention of returning structures in memory (passing in the
3799 address as a hidden first parameter). */
3800
3801 int
3802 using_struct_return (struct gdbarch *gdbarch,
3803 struct value *function, struct type *value_type)
3804 {
3805 if (value_type->code () == TYPE_CODE_VOID)
3806 /* A void return value is never in memory. See also corresponding
3807 code in "print_return_value". */
3808 return 0;
3809
3810 return (struct_return_convention (gdbarch, function, value_type)
3811 != RETURN_VALUE_REGISTER_CONVENTION);
3812 }
3813
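/* Illustrative sketch only (not part of upstream GDB): how a caller might
   use the predicate above when setting up an inferior call; RESULT_TYPE is
   a hypothetical name for the callee's return type.  */
#if 0
  if (using_struct_return (gdbarch, function, result_type))
    {
      /* The result is returned in memory: the caller reserves space for
         the aggregate and passes its address as a hidden first argument.  */
    }
  else
    {
      /* The result comes back in registers
         (RETURN_VALUE_REGISTER_CONVENTION).  */
    }
#endif
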
3814 /* Set the initialized field in a value struct. */
3815
3816 void
3817 set_value_initialized (struct value *val, int status)
3818 {
3819 val->initialized = status;
3820 }
3821
3822 /* Return the initialized field in a value struct. */
3823
3824 int
3825 value_initialized (const struct value *val)
3826 {
3827 return val->initialized;
3828 }
3829
3830 /* Helper for value_fetch_lazy when the value is a bitfield. */
3831
3832 static void
3833 value_fetch_lazy_bitfield (struct value *val)
3834 {
3835 gdb_assert (value_bitsize (val) != 0);
3836
3837 /* To read a lazy bitfield, read the entire enclosing value. This
3838 prevents reading the same block of (possibly volatile) memory once
3839 per bitfield. It would be even better to read only the containing
3840 word, but we have no way to record that just specific bits of a
3841 value have been fetched. */
3842 struct value *parent = value_parent (val);
3843
3844 if (value_lazy (parent))
3845 value_fetch_lazy (parent);
3846
3847 unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3848 value_contents_for_printing (parent),
3849 value_offset (val), parent);
3850 }
3851
3852 /* Helper for value_fetch_lazy when the value is in memory. */
3853
3854 static void
3855 value_fetch_lazy_memory (struct value *val)
3856 {
3857 gdb_assert (VALUE_LVAL (val) == lval_memory);
3858
3859 CORE_ADDR addr = value_address (val);
3860 struct type *type = check_typedef (value_enclosing_type (val));
3861
3862 if (TYPE_LENGTH (type))
3863 read_value_memory (val, 0, value_stack (val),
3864 addr, value_contents_all_raw (val),
3865 type_length_units (type));
3866 }
3867
3868 /* Helper for value_fetch_lazy when the value is in a register. */
3869
3870 static void
3871 value_fetch_lazy_register (struct value *val)
3872 {
3873 struct frame_info *next_frame;
3874 int regnum;
3875 struct type *type = check_typedef (value_type (val));
3876 struct value *new_val = val, *mark = value_mark ();
3877
3878 /* Offsets are not supported here; lazy register values must
3879 refer to the entire register. */
3880 gdb_assert (value_offset (val) == 0);
3881
3882 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3883 {
3884 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3885
3886 next_frame = frame_find_by_id (next_frame_id);
3887 regnum = VALUE_REGNUM (new_val);
3888
3889 gdb_assert (next_frame != NULL);
3890
3891 /* Convertible register routines are used for multi-register
3892 values and for interpretation in different types
3893 (e.g. float or int from a double register). Lazy
3894 register values should have the register's natural type,
3895 so they do not apply. */
3896 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3897 regnum, type));
3898
3899 /* FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3900 Since a "->next" operation was performed when setting
3901 this field, we do not need to perform a "next" operation
3902 again when unwinding the register. That's why
3903 frame_unwind_register_value() is called here instead of
3904 get_frame_register_value(). */
3905 new_val = frame_unwind_register_value (next_frame, regnum);
3906
3907 /* If we get another lazy lval_register value, it means the
3908 register is found by reading it from NEXT_FRAME's next frame.
3909 frame_unwind_register_value should never return a value with
3910 the frame id pointing to NEXT_FRAME. If it does, it means we
3911 either have two consecutive frames with the same frame id
3912 in the frame chain, or some code is trying to unwind
3913 behind get_prev_frame's back (e.g., a frame unwind
3914 sniffer trying to unwind), bypassing its validations. In
3915 any case, it should always be an internal error to end up
3916 in this situation. */
3917 if (VALUE_LVAL (new_val) == lval_register
3918 && value_lazy (new_val)
3919 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3920 internal_error (__FILE__, __LINE__,
3921 _("infinite loop while fetching a register"));
3922 }
3923
3924 /* If it's still lazy (for instance, a saved register on the
3925 stack), fetch it. */
3926 if (value_lazy (new_val))
3927 value_fetch_lazy (new_val);
3928
3929 /* Copy the contents and the unavailability/optimized-out
3930 meta-data from NEW_VAL to VAL. */
3931 set_value_lazy (val, 0);
3932 value_contents_copy (val, value_embedded_offset (val),
3933 new_val, value_embedded_offset (new_val),
3934 type_length_units (type));
3935
3936 if (frame_debug)
3937 {
3938 struct gdbarch *gdbarch;
3939 struct frame_info *frame;
3940 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3941 so that the frame level will be shown correctly. */
3942 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3943 regnum = VALUE_REGNUM (val);
3944 gdbarch = get_frame_arch (frame);
3945
3946 fprintf_unfiltered (gdb_stdlog,
3947 "{ value_fetch_lazy "
3948 "(frame=%d,regnum=%d(%s),...) ",
3949 frame_relative_level (frame), regnum,
3950 user_reg_map_regnum_to_name (gdbarch, regnum));
3951
3952 fprintf_unfiltered (gdb_stdlog, "->");
3953 if (value_optimized_out (new_val))
3954 {
3955 fprintf_unfiltered (gdb_stdlog, " ");
3956 val_print_optimized_out (new_val, gdb_stdlog);
3957 }
3958 else
3959 {
3960 int i;
3961 const gdb_byte *buf = value_contents (new_val);
3962
3963 if (VALUE_LVAL (new_val) == lval_register)
3964 fprintf_unfiltered (gdb_stdlog, " register=%d",
3965 VALUE_REGNUM (new_val));
3966 else if (VALUE_LVAL (new_val) == lval_memory)
3967 fprintf_unfiltered (gdb_stdlog, " address=%s",
3968 paddress (gdbarch,
3969 value_address (new_val)));
3970 else
3971 fprintf_unfiltered (gdb_stdlog, " computed");
3972
3973 fprintf_unfiltered (gdb_stdlog, " bytes=");
3974 fprintf_unfiltered (gdb_stdlog, "[");
3975 for (i = 0; i < register_size (gdbarch, regnum); i++)
3976 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3977 fprintf_unfiltered (gdb_stdlog, "]");
3978 }
3979
3980 fprintf_unfiltered (gdb_stdlog, " }\n");
3981 }
3982
3983 /* Dispose of the intermediate values. This prevents
3984 watchpoints from trying to watch the saved frame pointer. */
3985 value_free_to_mark (mark);
3986 }
3987
3988 /* Load the actual content of a lazy value. Fetch the data from the
3989 user's process and clear the lazy flag to indicate that the data in
3990 the buffer is valid.
3991
3992 If the value is zero-length, we avoid calling read_memory, which
3993 would abort. We mark the value as fetched anyway -- all 0 bytes of
3994 it. */
3995
3996 void
3997 value_fetch_lazy (struct value *val)
3998 {
3999 gdb_assert (value_lazy (val));
4000 allocate_value_contents (val);
4001 /* A value is either lazy, or fully fetched. The
4002 availability/validity is only established as we try to fetch a
4003 value. */
4004 gdb_assert (val->optimized_out.empty ());
4005 gdb_assert (val->unavailable.empty ());
4006 if (value_bitsize (val))
4007 value_fetch_lazy_bitfield (val);
4008 else if (VALUE_LVAL (val) == lval_memory)
4009 value_fetch_lazy_memory (val);
4010 else if (VALUE_LVAL (val) == lval_register)
4011 value_fetch_lazy_register (val);
4012 else if (VALUE_LVAL (val) == lval_computed
4013 && value_computed_funcs (val)->read != NULL)
4014 value_computed_funcs (val)->read (val);
4015 else
4016 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
4017
4018 set_value_lazy (val, 0);
4019 }
4020
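/* Illustrative note: the usual caller pattern -- also used by the helpers
   above -- is to test for laziness first,

       if (value_lazy (val))
         value_fetch_lazy (val);

   because value_fetch_lazy asserts that VAL is still lazy on entry.  */
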
4021 /* Implementation of the convenience function $_isvoid. */
4022
4023 static struct value *
4024 isvoid_internal_fn (struct gdbarch *gdbarch,
4025 const struct language_defn *language,
4026 void *cookie, int argc, struct value **argv)
4027 {
4028 int ret;
4029
4030 if (argc != 1)
4031 error (_("You must provide one argument for $_isvoid."));
4032
4033 ret = value_type (argv[0])->code () == TYPE_CODE_VOID;
4034
4035 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4036 }
4037
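/* Illustrative command-line usage only (expected output, not a captured
   session; the variable name is hypothetical):

       (gdb) print $_isvoid ($never_assigned_var)
       $1 = 1
       (gdb) print $_isvoid (3 + 5)
       $2 = 0

   An unassigned convenience variable has void type, so the first call
   yields 1.  */
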
4038 /* Implementation of the convenience function $_creal. Extracts the
4039 real part from a complex number. */
4040
4041 static struct value *
4042 creal_internal_fn (struct gdbarch *gdbarch,
4043 const struct language_defn *language,
4044 void *cookie, int argc, struct value **argv)
4045 {
4046 if (argc != 1)
4047 error (_("You must provide one argument for $_creal."));
4048
4049 value *cval = argv[0];
4050 type *ctype = check_typedef (value_type (cval));
4051 if (ctype->code () != TYPE_CODE_COMPLEX)
4052 error (_("expected a complex number"));
4053 return value_real_part (cval);
4054 }
4055
4056 /* Implementation of the convenience function $_cimag. Extracts the
4057 imaginary part from a complex number. */
4058
4059 static struct value *
4060 cimag_internal_fn (struct gdbarch *gdbarch,
4061 const struct language_defn *language,
4062 void *cookie, int argc,
4063 struct value **argv)
4064 {
4065 if (argc != 1)
4066 error (_("You must provide one argument for $_cimag."));
4067
4068 value *cval = argv[0];
4069 type *ctype = check_typedef (value_type (cval));
4070 if (ctype->code () != TYPE_CODE_COMPLEX)
4071 error (_("expected a complex number"));
4072 return value_imaginary_part (cval);
4073 }
4074
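/* Illustrative command-line usage only (Z stands for a hypothetical
   variable of complex type):

       (gdb) print $_creal (z)
       (gdb) print $_cimag (z)

   Both functions report "expected a complex number" if Z's type, after
   stripping typedefs, is not TYPE_CODE_COMPLEX.  */
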
4075 #if GDB_SELF_TEST
4076 namespace selftests
4077 {
4078
4079 /* Test the ranges_contain function. */
4080
4081 static void
4082 test_ranges_contain ()
4083 {
4084 std::vector<range> ranges;
4085 range r;
4086
4087 /* [10, 14] */
4088 r.offset = 10;
4089 r.length = 5;
4090 ranges.push_back (r);
4091
4092 /* [20, 24] */
4093 r.offset = 20;
4094 r.length = 5;
4095 ranges.push_back (r);
4096
4097 /* [2, 6] */
4098 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4099 /* [9, 13] */
4100 SELF_CHECK (ranges_contain (ranges, 9, 5));
4101 /* [10, 11] */
4102 SELF_CHECK (ranges_contain (ranges, 10, 2));
4103 /* [10, 14] */
4104 SELF_CHECK (ranges_contain (ranges, 10, 5));
4105 /* [13, 18] */
4106 SELF_CHECK (ranges_contain (ranges, 13, 6));
4107 /* [14, 18] */
4108 SELF_CHECK (ranges_contain (ranges, 14, 5));
4109 /* [15, 18] */
4110 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4111 /* [16, 19] */
4112 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4113 /* [16, 21] */
4114 SELF_CHECK (ranges_contain (ranges, 16, 6));
4115 /* [21, 21] */
4116 SELF_CHECK (ranges_contain (ranges, 21, 1));
4117 /* [21, 25] */
4118 SELF_CHECK (ranges_contain (ranges, 21, 5));
4119 /* [26, 28] */
4120 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4121 }
4122
4123 /* Check that RANGES contains the same ranges as EXPECTED. */
4124
4125 static bool
4126 check_ranges_vector (gdb::array_view<const range> ranges,
4127 gdb::array_view<const range> expected)
4128 {
4129 return ranges == expected;
4130 }
4131
4132 /* Test the insert_into_bit_range_vector function. */
4133
4134 static void
4135 test_insert_into_bit_range_vector ()
4136 {
4137 std::vector<range> ranges;
4138
4139 /* [10, 14] */
4140 {
4141 insert_into_bit_range_vector (&ranges, 10, 5);
4142 static const range expected[] = {
4143 {10, 5}
4144 };
4145 SELF_CHECK (check_ranges_vector (ranges, expected));
4146 }
4147
4148 /* [10, 14] */
4149 {
4150 insert_into_bit_range_vector (&ranges, 11, 4);
4151 static const range expected = {10, 5};
4152 SELF_CHECK (check_ranges_vector (ranges, expected));
4153 }
4154
4155 /* [10, 14] [20, 24] */
4156 {
4157 insert_into_bit_range_vector (&ranges, 20, 5);
4158 static const range expected[] = {
4159 {10, 5},
4160 {20, 5},
4161 };
4162 SELF_CHECK (check_ranges_vector (ranges, expected));
4163 }
4164
4165 /* [10, 14] [17, 24] */
4166 {
4167 insert_into_bit_range_vector (&ranges, 17, 5);
4168 static const range expected[] = {
4169 {10, 5},
4170 {17, 8},
4171 };
4172 SELF_CHECK (check_ranges_vector (ranges, expected));
4173 }
4174
4175 /* [2, 8] [10, 14] [17, 24] */
4176 {
4177 insert_into_bit_range_vector (&ranges, 2, 7);
4178 static const range expected[] = {
4179 {2, 7},
4180 {10, 5},
4181 {17, 8},
4182 };
4183 SELF_CHECK (check_ranges_vector (ranges, expected));
4184 }
4185
4186 /* [2, 14] [17, 24] */
4187 {
4188 insert_into_bit_range_vector (&ranges, 9, 1);
4189 static const range expected[] = {
4190 {2, 13},
4191 {17, 8},
4192 };
4193 SELF_CHECK (check_ranges_vector (ranges, expected));
4194 }
4195
4196 /* [2, 14] [17, 24] */
4197 {
4198 insert_into_bit_range_vector (&ranges, 9, 1);
4199 static const range expected[] = {
4200 {2, 13},
4201 {17, 8},
4202 };
4203 SELF_CHECK (check_ranges_vector (ranges, expected));
4204 }
4205
4206 /* [2, 33] */
4207 {
4208 insert_into_bit_range_vector (&ranges, 4, 30);
4209 static const range expected = {2, 32};
4210 SELF_CHECK (check_ranges_vector (ranges, expected));
4211 }
4212 }
4213
4214 } /* namespace selftests */
4215 #endif /* GDB_SELF_TEST */
4216
4217 void _initialize_values ();
4218 void
4219 _initialize_values ()
4220 {
4221 add_cmd ("convenience", no_class, show_convenience, _("\
4222 Debugger convenience (\"$foo\") variables and functions.\n\
4223 Convenience variables are created when you assign them values;\n\
4224 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4225 \n\
4226 A few convenience variables are given values automatically:\n\
4227 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4228 \"$__\" holds the contents of the last address examined with \"x\"."
4229 #ifdef HAVE_PYTHON
4230 "\n\n\
4231 Convenience functions are defined via the Python API."
4232 #endif
4233 ), &showlist);
4234 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4235
4236 add_cmd ("values", no_set_class, show_values, _("\
4237 Elements of value history around item number IDX (or last ten)."),
4238 &showlist);
4239
4240 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4241 Initialize a convenience variable if necessary.\n\
4242 init-if-undefined VARIABLE = EXPRESSION\n\
4243 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4244 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4245 VARIABLE is already initialized."));
4246
4247 add_prefix_cmd ("function", no_class, function_command, _("\
4248 Placeholder command for showing help on convenience functions."),
4249 &functionlist, "function ", 0, &cmdlist);
4250
4251 add_internal_function ("_isvoid", _("\
4252 Check whether an expression is void.\n\
4253 Usage: $_isvoid (expression)\n\
4254 Return 1 if the expression is void, 0 otherwise."),
4255 isvoid_internal_fn, NULL);
4256
4257 add_internal_function ("_creal", _("\
4258 Extract the real part of a complex number.\n\
4259 Usage: $_creal (expression)\n\
4260 Return the real part of a complex number; the type depends on the\n\
4261 type of the complex number."),
4262 creal_internal_fn, NULL);
4263
4264 add_internal_function ("_cimag", _("\
4265 Extract the imaginary part of a complex number.\n\
4266 Usage: $_cimag (expression)\n\
4267 Return the imaginary part of a complex number; the type depends on the\n\
4268 type of the complex number."),
4269 cimag_internal_fn, NULL);
4270
4271 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4272 class_support, &max_value_size, _("\
4273 Set maximum size of a value gdb will load from the inferior."), _("\
4274 Show maximum size of a value gdb will load from the inferior."), _("\
4275 Use this to control the maximum size, in bytes, of a value that gdb\n\
4276 will load from the inferior. Setting this value to 'unlimited'\n\
4277 disables checking.\n\
4278 Setting this does not invalidate already allocated values; it only\n\
4279 prevents future values larger than this size from being allocated."),
4280 set_max_value_size,
4281 show_max_value_size,
4282 &setlist, &showlist);
4283 #if GDB_SELF_TEST
4284 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4285 selftests::register_test ("insert_into_bit_range_vector",
4286 selftests::test_insert_into_bit_range_vector);
4287 #endif
4288 }
4289
4290 /* See value.h. */
4291
4292 void
4293 finalize_values ()
4294 {
4295 all_values.clear ();
4296 }