gdb/value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2019 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include "completer.h"
44 #include "common/selftest.h"
45 #include "common/array-view.h"
46
47 /* Definition of a user function. */
48 struct internal_function
49 {
50 /* The name of the function. It is a bit odd to have this in the
51 function itself -- the user might use a differently-named
52 convenience variable to hold the function. */
53 char *name;
54
55 /* The handler. */
56 internal_function_fn handler;
57
58 /* User data for the handler. */
59 void *cookie;
60 };
61
62 /* Defines an [OFFSET, OFFSET + LENGTH) range. */
63
64 struct range
65 {
66 /* Lowest offset in the range. */
67 LONGEST offset;
68
69 /* Length of the range. */
70 LONGEST length;
71
72 /* Returns true if THIS is strictly less than OTHER, useful for
73 searching. We keep ranges sorted by offset and coalesce
74 overlapping and contiguous ranges, so this just compares the
75 starting offset. */
76
77 bool operator< (const range &other) const
78 {
79 return offset < other.offset;
80 }
81
82 /* Returns true if THIS is equal to OTHER. */
83 bool operator== (const range &other) const
84 {
85 return offset == other.offset && length == other.length;
86 }
87 };
88
89 /* Returns true if the ranges defined by [offset1, offset1+len1) and
90 [offset2, offset2+len2) overlap. */
91
92 static int
93 ranges_overlap (LONGEST offset1, LONGEST len1,
94 LONGEST offset2, LONGEST len2)
95 {
96 ULONGEST h, l;
97
98 l = std::max (offset1, offset2);
99 h = std::min (offset1 + len1, offset2 + len2);
100 return (l < h);
101 }
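/* A minimal sketch of how the overlap test above behaves on concrete
   inputs; the example_* helper below is illustrative only and is not
   part of this file.  */

#if 0
static void
example_ranges_overlap (void)
{
  /* [0, 4) and [2, 10) share [2, 4): l = 2, h = 4, so they overlap.  */
  gdb_assert (ranges_overlap (0, 4, 2, 8));

  /* [0, 4) and [4, 6) only touch: l = 4, h = 4, so they do not.  */
  gdb_assert (!ranges_overlap (0, 4, 4, 2));
}
#endif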
102
103 /* Returns true if RANGES contains any range that overlaps [OFFSET,
104 OFFSET+LENGTH). */
105
106 static int
107 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
108 LONGEST length)
109 {
110 range what;
111
112 what.offset = offset;
113 what.length = length;
114
115 /* We keep ranges sorted by offset and coalesce overlapping and
116 contiguous ranges, so to check if a range list contains a given
117 range, we can do a binary search for the position the given range
118 would be inserted if we only considered the starting OFFSET of
119 ranges. We call that position I. Since we also have LENGTH to
120 care for (this is a range after all), we need to check if the
121 _previous_ range overlaps R (the range being looked up). E.g.,
122
123 R
124 |---|
125 |---| |---| |------| ... |--|
126 0 1 2 N
127
128 I=1
129
130 In the case above, the binary search would return `I=1', meaning,
131 this OFFSET should be inserted at position 1, and the current
132 position 1 should be pushed further (and become 2). But, `0'
133 overlaps with R.
134
135 Then we also need to check whether the range at position I itself
136 overlaps R. E.g.,
137
138 R
139 |---|
140 |---| |---| |-------| ... |--|
141 0 1 2 N
142
143 I=1
144 */
145
146
147 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
148
149 if (i > ranges.begin ())
150 {
151 const struct range &bef = *(i - 1);
152
153 if (ranges_overlap (bef.offset, bef.length, offset, length))
154 return 1;
155 }
156
157 if (i < ranges.end ())
158 {
159 const struct range &r = *i;
160
161 if (ranges_overlap (r.offset, r.length, offset, length))
162 return 1;
163 }
164
165 return 0;
166 }
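/* A minimal sketch (illustrative only, not part of this file) of the
   lookup above: with the sorted, coalesced ranges { [0, 4), [10, 14) },
   a query straddling the first range's tail hits via the "previous
   range" check, while a query falling in the gap misses.  */

#if 0
static void
example_ranges_contain (void)
{
  std::vector<range> rs;

  rs.push_back ({0, 4});	/* [0, 4)  */
  rs.push_back ({10, 4});	/* [10, 14)  */

  gdb_assert (ranges_contain (rs, 3, 2));	/* [3, 5) overlaps [0, 4).  */
  gdb_assert (!ranges_contain (rs, 5, 3));	/* [5, 8) falls in the gap.  */
}
#endif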
167
168 static struct cmd_list_element *functionlist;
169
170 /* Note that the fields in this structure are arranged to save a bit
171 of memory. */
172
173 struct value
174 {
175 explicit value (struct type *type_)
176 : modifiable (1),
177 lazy (1),
178 initialized (1),
179 stack (0),
180 type (type_),
181 enclosing_type (type_)
182 {
183 }
184
185 ~value ()
186 {
187 if (VALUE_LVAL (this) == lval_computed)
188 {
189 const struct lval_funcs *funcs = location.computed.funcs;
190
191 if (funcs->free_closure)
192 funcs->free_closure (this);
193 }
194 else if (VALUE_LVAL (this) == lval_xcallable)
195 delete location.xm_worker;
196 }
197
198 DISABLE_COPY_AND_ASSIGN (value);
199
200 /* Type of value; either not an lval, or one of the various
201 different possible kinds of lval. */
202 enum lval_type lval = not_lval;
203
204 /* Is it modifiable? Only relevant if lval != not_lval. */
205 unsigned int modifiable : 1;
206
207 /* If zero, contents of this value are in the contents field. If
208 nonzero, contents are in inferior. If the lval field is lval_memory,
209 the contents are in inferior memory at location.address plus offset.
210 The lval field may also be lval_register.
211
212 WARNING: This field is used by the code which handles watchpoints
213 (see breakpoint.c) to decide whether a particular value can be
214 watched by hardware watchpoints. If the lazy flag is set for
215 some member of a value chain, it is assumed that this member of
216 the chain doesn't need to be watched as part of watching the
217 value itself. This is how GDB avoids watching the entire struct
218 or array when the user wants to watch a single struct member or
219 array element. If you ever change the way lazy flag is set and
220 reset, be sure to consider this use as well! */
221 unsigned int lazy : 1;
222
223 /* If value is a variable, is it initialized or not. */
224 unsigned int initialized : 1;
225
226 /* If value is from the stack. If this is set, read_stack will be
227 used instead of read_memory to enable extra caching. */
228 unsigned int stack : 1;
229
230 /* Location of value (if lval). */
231 union
232 {
233 /* If lval == lval_memory, this is the address in the inferior */
234 CORE_ADDR address;
235
236 /* If lval == lval_register, the value is from a register. */
237 struct
238 {
239 /* Register number. */
240 int regnum;
241 /* Frame ID of "next" frame to which a register value is relative.
242 If the register value is found relative to frame F, then the
243 frame id of F->next will be stored in next_frame_id. */
244 struct frame_id next_frame_id;
245 } reg;
246
247 /* Pointer to internal variable. */
248 struct internalvar *internalvar;
249
250 /* Pointer to xmethod worker. */
251 struct xmethod_worker *xm_worker;
252
253 /* If lval == lval_computed, this is a set of function pointers
254 to use to access and describe the value, and a closure pointer
255 for them to use. */
256 struct
257 {
258 /* Functions to call. */
259 const struct lval_funcs *funcs;
260
261 /* Closure for those functions to use. */
262 void *closure;
263 } computed;
264 } location {};
265
266 /* Describes offset of a value within lval of a structure in target
267 addressable memory units. Note also the member embedded_offset
268 below. */
269 LONGEST offset = 0;
270
271 /* Only used for bitfields; number of bits contained in them. */
272 LONGEST bitsize = 0;
273
274 /* Only used for bitfields; position of start of field. For
275 gdbarch_bits_big_endian=0 targets, it is the position of the LSB. For
276 gdbarch_bits_big_endian=1 targets, it is the position of the MSB. */
277 LONGEST bitpos = 0;
278
279 /* The number of references to this value. When a value is created,
280 the value chain holds a reference, so REFERENCE_COUNT is 1. If
281 release_value is called, this value is removed from the chain but
282 the caller of release_value now has a reference to this value.
283 The caller must arrange for a call to value_decref later. */
284 int reference_count = 1;
285
286 /* Only used for bitfields; the containing value. This allows a
287 single read from the target when displaying multiple
288 bitfields. */
289 value_ref_ptr parent;
290
291 /* Type of the value. */
292 struct type *type;
293
294 /* If a value represents a C++ object, then the `type' field gives
295 the object's compile-time type. If the object actually belongs
296 to some class derived from `type', perhaps with other base
297 classes and additional members, then `type' is just a subobject
298 of the real thing, and the full object is probably larger than
299 `type' would suggest.
300
301 If `type' is a dynamic class (i.e. one with a vtable), then GDB
302 can actually determine the object's run-time type by looking at
303 the run-time type information in the vtable. When this
304 information is available, we may elect to read in the entire
305 object, for several reasons:
306
307 - When printing the value, the user would probably rather see the
308 full object, not just the limited portion apparent from the
309 compile-time type.
310
311 - If `type' has virtual base classes, then even printing `type'
312 alone may require reaching outside the `type' portion of the
313 object to wherever the virtual base class has been stored.
314
315 When we store the entire object, `enclosing_type' is the run-time
316 type -- the complete object -- and `embedded_offset' is the
317 offset of `type' within that larger type, in target addressable memory
318 units. The value_contents() function takes `embedded_offset' into account,
319 so most GDB code continues to see the `type' portion of the value, just
320 as the inferior would.
321
322 If `type' is a pointer to an object, then `enclosing_type' is a
323 pointer to the object's run-time type, and `pointed_to_offset' is
324 the offset in target addressable memory units from the full object
325 to the pointed-to object -- that is, the value `embedded_offset' would
326 have if we followed the pointer and fetched the complete object.
327 (I don't really see the point. Why not just determine the
328 run-time type when you indirect, and avoid the special case? The
329 contents don't matter until you indirect anyway.)
330
331 If we're not doing anything fancy, `enclosing_type' is equal to
332 `type', and `embedded_offset' is zero, so everything works
333 normally. */
334 struct type *enclosing_type;
335 LONGEST embedded_offset = 0;
336 LONGEST pointed_to_offset = 0;
337
338 /* Actual contents of the value. Target byte-order. NULL or not
339 valid if lazy is nonzero. */
340 gdb::unique_xmalloc_ptr<gdb_byte> contents;
341
342 /* Unavailable ranges in CONTENTS. We mark unavailable ranges,
343 rather than available, since the common and default case is for a
344 value to be available. This is filled in at value read time.
345 The unavailable ranges are tracked in bits. Note that a contents
346 bit that has been optimized out doesn't really exist in the
347 program, so it can't be marked unavailable either. */
348 std::vector<range> unavailable;
349
350 /* Likewise, but for optimized out contents (a chunk of the value of
351 a variable that does not actually exist in the program). If LVAL
352 is lval_register, this is a register ($pc, $sp, etc., never a
353 program variable) that has not been saved in the frame. Not
354 saved registers and optimized-out program variables values are
355 treated pretty much the same, except not-saved registers have a
356 different string representation and related error strings. */
357 std::vector<range> optimized_out;
358 };
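/* A minimal illustration (assumed example, not GDB code) of the
   `type' / `enclosing_type' / `embedded_offset' relationship described
   above, using a small C++ hierarchy.  */

#if 0
struct Base { virtual ~Base () {} int b; };
struct Other { int o; };
struct Derived : Other, Base { int d; };

/* For a `struct value' that describes the Base subobject of a Derived
   object which GDB has read in full:

     type            -- Base (the compile-time type)
     enclosing_type  -- Derived (the run-time type)
     embedded_offset -- offset of the Base subobject within Derived

   value_contents () then yields only the Base portion, while
   value_contents_all () yields the whole Derived object.  */
#endif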
359
360 /* See value.h. */
361
362 struct gdbarch *
363 get_value_arch (const struct value *value)
364 {
365 return get_type_arch (value_type (value));
366 }
367
368 int
369 value_bits_available (const struct value *value, LONGEST offset, LONGEST length)
370 {
371 gdb_assert (!value->lazy);
372
373 return !ranges_contain (value->unavailable, offset, length);
374 }
375
376 int
377 value_bytes_available (const struct value *value,
378 LONGEST offset, LONGEST length)
379 {
380 return value_bits_available (value,
381 offset * TARGET_CHAR_BIT,
382 length * TARGET_CHAR_BIT);
383 }
384
385 int
386 value_bits_any_optimized_out (const struct value *value, int bit_offset, int bit_length)
387 {
388 gdb_assert (!value->lazy);
389
390 return ranges_contain (value->optimized_out, bit_offset, bit_length);
391 }
392
393 int
394 value_entirely_available (struct value *value)
395 {
396 /* We can only tell whether the whole value is available when we try
397 to read it. */
398 if (value->lazy)
399 value_fetch_lazy (value);
400
401 if (value->unavailable.empty ())
402 return 1;
403 return 0;
404 }
405
406 /* Returns true if VALUE is entirely covered by RANGES. If the value
407 is lazy, it'll be read now. Note that RANGES is examined only
408 after the fetch, since reading the value may add entries to it. */
409
410 static int
411 value_entirely_covered_by_range_vector (struct value *value,
412 const std::vector<range> &ranges)
413 {
414 /* We can only tell whether the whole value is optimized out /
415 unavailable when we try to read it. */
416 if (value->lazy)
417 value_fetch_lazy (value);
418
419 if (ranges.size () == 1)
420 {
421 const struct range &t = ranges[0];
422
423 if (t.offset == 0
424 && t.length == (TARGET_CHAR_BIT
425 * TYPE_LENGTH (value_enclosing_type (value))))
426 return 1;
427 }
428
429 return 0;
430 }
431
432 int
433 value_entirely_unavailable (struct value *value)
434 {
435 return value_entirely_covered_by_range_vector (value, value->unavailable);
436 }
437
438 int
439 value_entirely_optimized_out (struct value *value)
440 {
441 return value_entirely_covered_by_range_vector (value, value->optimized_out);
442 }
443
444 /* Insert into the vector pointed to by VECTORP the bit range starting at
445 OFFSET bits, and extending for the next LENGTH bits. */
446
447 static void
448 insert_into_bit_range_vector (std::vector<range> *vectorp,
449 LONGEST offset, LONGEST length)
450 {
451 range newr;
452
453 /* Insert the range sorted. If there's overlap or the new range
454 would be contiguous with an existing range, merge. */
455
456 newr.offset = offset;
457 newr.length = length;
458
459 /* Do a binary search for the position the given range would be
460 inserted if we only considered the starting OFFSET of ranges.
461 Call that position I. Since we also have LENGTH to care for
462 (this is a range after all), we need to check if the _previous_
463 range overlaps the I range. E.g., calling R the new range:
464
465 #1 - overlaps with previous
466
467 R
468 |-...-|
469 |---| |---| |------| ... |--|
470 0 1 2 N
471
472 I=1
473
474 In the case #1 above, the binary search would return `I=1',
475 meaning, this OFFSET should be inserted at position 1, and the
476 current position 1 should be pushed further (and become 2). But,
477 note that `0' overlaps with R, so we want to merge them.
478
479 A similar consideration needs to be taken if the new range would
480 be contiguous with the previous range:
481
482 #2 - contiguous with previous
483
484 R
485 |-...-|
486 |--| |---| |------| ... |--|
487 0 1 2 N
488
489 I=1
490
491 If there's no overlap with the previous range, as in:
492
493 #3 - not overlapping and not contiguous
494
495 R
496 |-...-|
497 |--| |---| |------| ... |--|
498 0 1 2 N
499
500 I=1
501
502 or if I is 0:
503
504 #4 - R is the range with lowest offset
505
506 R
507 |-...-|
508 |--| |---| |------| ... |--|
509 0 1 2 N
510
511 I=0
512
513 ... we just push the new range to I.
514
515 All the 4 cases above need to consider that the new range may
516 also overlap several of the ranges that follow, or that R may be
517 contiguous with the following range, and merge. E.g.,
518
519 #5 - overlapping following ranges
520
521 R
522 |------------------------|
523 |--| |---| |------| ... |--|
524 0 1 2 N
525
526 I=0
527
528 or:
529
530 R
531 |-------|
532 |--| |---| |------| ... |--|
533 0 1 2 N
534
535 I=1
536
537 */
538
539 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
540 if (i > vectorp->begin ())
541 {
542 struct range &bef = *(i - 1);
543
544 if (ranges_overlap (bef.offset, bef.length, offset, length))
545 {
546 /* #1 */
547 ULONGEST l = std::min (bef.offset, offset);
548 ULONGEST h = std::max (bef.offset + bef.length, offset + length);
549
550 bef.offset = l;
551 bef.length = h - l;
552 i--;
553 }
554 else if (offset == bef.offset + bef.length)
555 {
556 /* #2 */
557 bef.length += length;
558 i--;
559 }
560 else
561 {
562 /* #3 */
563 i = vectorp->insert (i, newr);
564 }
565 }
566 else
567 {
568 /* #4 */
569 i = vectorp->insert (i, newr);
570 }
571
572 /* Check whether the ranges following the one we've just added or
573 touched can be folded in (#5 above). */
574 if (i != vectorp->end () && i + 1 < vectorp->end ())
575 {
576 int removed = 0;
577 auto next = i + 1;
578
579 /* Get the range we just touched. */
580 struct range &t = *i;
581 removed = 0;
582
583 i = next;
584 for (; i < vectorp->end (); i++)
585 {
586 struct range &r = *i;
587 if (r.offset <= t.offset + t.length)
588 {
589 ULONGEST l, h;
590
591 l = std::min (t.offset, r.offset);
592 h = std::max (t.offset + t.length, r.offset + r.length);
593
594 t.offset = l;
595 t.length = h - l;
596
597 removed++;
598 }
599 else
600 {
601 /* If we couldn't merge this one, we won't be able to
602 merge following ones either, since the ranges are
603 always sorted by OFFSET. */
604 break;
605 }
606 }
607
608 if (removed != 0)
609 vectorp->erase (next, next + removed);
610 }
611 }
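/* A minimal sketch (illustrative only) of the coalescing behavior
   implemented above: inserting a range that overlaps or is contiguous
   with existing ranges collapses them into a single entry.  */

#if 0
static void
example_insert_into_bit_range_vector (void)
{
  std::vector<range> rs;

  insert_into_bit_range_vector (&rs, 10, 5);	/* { [10, 15) }  */
  insert_into_bit_range_vector (&rs, 20, 5);	/* { [10, 15), [20, 25) }  */

  /* [15, 20) is contiguous with the first range and with the second,
     so all three fold into one.  */
  insert_into_bit_range_vector (&rs, 15, 5);

  gdb_assert (rs.size () == 1);
  gdb_assert (rs[0].offset == 10 && rs[0].length == 15);
}
#endif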
612
613 void
614 mark_value_bits_unavailable (struct value *value,
615 LONGEST offset, LONGEST length)
616 {
617 insert_into_bit_range_vector (&value->unavailable, offset, length);
618 }
619
620 void
621 mark_value_bytes_unavailable (struct value *value,
622 LONGEST offset, LONGEST length)
623 {
624 mark_value_bits_unavailable (value,
625 offset * TARGET_CHAR_BIT,
626 length * TARGET_CHAR_BIT);
627 }
628
629 /* Find the first range in RANGES that overlaps the range defined by
630 OFFSET and LENGTH, starting at element POS in the RANGES vector.
631 Returns the index into RANGES where such an overlapping range was
632 found, or -1 if none was found. */
633
634 static int
635 find_first_range_overlap (const std::vector<range> *ranges, int pos,
636 LONGEST offset, LONGEST length)
637 {
638 int i;
639
640 for (i = pos; i < ranges->size (); i++)
641 {
642 const range &r = (*ranges)[i];
643 if (ranges_overlap (r.offset, r.length, offset, length))
644 return i;
645 }
646
647 return -1;
648 }
649
650 /* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
651 PTR2 + OFFSET2_BITS. Return 0 if the memory is the same, otherwise
652 return non-zero.
653
654 It must always be the case that:
655 OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT
656
657 It is assumed that memory can be accessed from:
658 PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
659 to:
660 PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
661 / TARGET_CHAR_BIT) */
662 static int
663 memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
664 const gdb_byte *ptr2, size_t offset2_bits,
665 size_t length_bits)
666 {
667 gdb_assert (offset1_bits % TARGET_CHAR_BIT
668 == offset2_bits % TARGET_CHAR_BIT);
669
670 if (offset1_bits % TARGET_CHAR_BIT != 0)
671 {
672 size_t bits;
673 gdb_byte mask, b1, b2;
674
675 /* The offset from the base pointers PTR1 and PTR2 is not a complete
676 number of bytes. A number of bits up to either the next exact
677 byte boundary, or LENGTH_BITS (whichever is sooner) will be
678 compared. */
679 bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
680 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
681 mask = (1 << bits) - 1;
682
683 if (length_bits < bits)
684 {
685 mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
686 bits = length_bits;
687 }
688
689 /* Now load the two bytes and mask off the bits we care about. */
690 b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
691 b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;
692
693 if (b1 != b2)
694 return 1;
695
696 /* Now update the length and offsets to take account of the bits
697 we've just compared. */
698 length_bits -= bits;
699 offset1_bits += bits;
700 offset2_bits += bits;
701 }
702
703 if (length_bits % TARGET_CHAR_BIT != 0)
704 {
705 size_t bits;
706 size_t o1, o2;
707 gdb_byte mask, b1, b2;
708
709 /* The length is not an exact number of bytes. After the previous
710 IF.. block, either the offsets are byte aligned, or the
711 length is zero (in which case this code is not reached). Compare
712 a number of bits at the end of the region, starting from an exact
713 byte boundary. */
714 bits = length_bits % TARGET_CHAR_BIT;
715 o1 = offset1_bits + length_bits - bits;
716 o2 = offset2_bits + length_bits - bits;
717
718 gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
719 mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);
720
721 gdb_assert (o1 % TARGET_CHAR_BIT == 0);
722 gdb_assert (o2 % TARGET_CHAR_BIT == 0);
723
724 b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
725 b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;
726
727 if (b1 != b2)
728 return 1;
729
730 length_bits -= bits;
731 }
732
733 if (length_bits > 0)
734 {
735 /* We've now taken care of any stray "bits" at the start, or end of
736 the region to compare, the remainder can be covered with a simple
737 memcmp. */
738 gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
739 gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
740 gdb_assert (length_bits % TARGET_CHAR_BIT == 0);
741
742 return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
743 ptr2 + offset2_bits / TARGET_CHAR_BIT,
744 length_bits / TARGET_CHAR_BIT);
745 }
746
747 /* Length is zero, regions match. */
748 return 0;
749 }
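/* A minimal sketch (illustrative only) of the bit-wise comparison
   above.  Bit offsets count from the most significant bit of each
   byte, matching the masks built in the function.  */

#if 0
static void
example_memcmp_with_bit_offsets (void)
{
  const gdb_byte a[] = { 0xff, 0x0f };
  const gdb_byte b[] = { 0x0f, 0x0f };

  /* Compare 8 bits starting 4 bits in: the low four bits of byte 0
     and the high four bits of byte 1 agree, so the regions match.  */
  gdb_assert (memcmp_with_bit_offsets (a, 4, b, 4, 8) == 0);

  /* Include the leading 4 bits and the buffers differ.  */
  gdb_assert (memcmp_with_bit_offsets (a, 0, b, 0, 8) != 0);
}
#endif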
750
751 /* Helper struct for find_first_range_overlap_and_match and
752 value_contents_bits_eq. Keeps track of which slot of a given ranges
753 vector we last looked at. */
754
755 struct ranges_and_idx
756 {
757 /* The ranges. */
758 const std::vector<range> *ranges;
759
760 /* The range we've last found in RANGES. Given ranges are sorted,
761 we can start the next lookup here. */
762 int idx;
763 };
764
765 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
766 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
767 ranges starting at OFFSET2 bits. Return true if the ranges match
768 and fill in *L and *H with the overlapping window relative to
769 (both) OFFSET1 or OFFSET2. */
770
771 static int
772 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
773 struct ranges_and_idx *rp2,
774 LONGEST offset1, LONGEST offset2,
775 LONGEST length, ULONGEST *l, ULONGEST *h)
776 {
777 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
778 offset1, length);
779 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
780 offset2, length);
781
782 if (rp1->idx == -1 && rp2->idx == -1)
783 {
784 *l = length;
785 *h = length;
786 return 1;
787 }
788 else if (rp1->idx == -1 || rp2->idx == -1)
789 return 0;
790 else
791 {
792 const range *r1, *r2;
793 ULONGEST l1, h1;
794 ULONGEST l2, h2;
795
796 r1 = &(*rp1->ranges)[rp1->idx];
797 r2 = &(*rp2->ranges)[rp2->idx];
798
799 /* Get the unavailable windows intersected by the incoming
800 ranges. The first and last ranges that overlap the argument
801 range may be wider than the incoming argument ranges. */
802 l1 = std::max (offset1, r1->offset);
803 h1 = std::min (offset1 + length, r1->offset + r1->length);
804
805 l2 = std::max (offset2, r2->offset);
806 h2 = std::min (offset2 + length, r2->offset + r2->length);
807
808 /* Make them relative to the respective start offsets, so we can
809 compare them for equality. */
810 l1 -= offset1;
811 h1 -= offset1;
812
813 l2 -= offset2;
814 h2 -= offset2;
815
816 /* Different ranges, no match. */
817 if (l1 != l2 || h1 != h2)
818 return 0;
819
820 *h = h1;
821 *l = l1;
822 return 1;
823 }
824 }
825
826 /* Helper function for value_contents_eq. The only difference is that
827 this function is bit rather than byte based.
828
829 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
830 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
831 Return true if the available bits match. */
832
833 static bool
834 value_contents_bits_eq (const struct value *val1, int offset1,
835 const struct value *val2, int offset2,
836 int length)
837 {
838 /* Each array element corresponds to a ranges source (unavailable,
839 optimized out). '1' is for VAL1, '2' for VAL2. */
840 struct ranges_and_idx rp1[2], rp2[2];
841
842 /* See function description in value.h. */
843 gdb_assert (!val1->lazy && !val2->lazy);
844
845 /* We shouldn't be trying to compare past the end of the values. */
846 gdb_assert (offset1 + length
847 <= TYPE_LENGTH (val1->enclosing_type) * TARGET_CHAR_BIT);
848 gdb_assert (offset2 + length
849 <= TYPE_LENGTH (val2->enclosing_type) * TARGET_CHAR_BIT);
850
851 memset (&rp1, 0, sizeof (rp1));
852 memset (&rp2, 0, sizeof (rp2));
853 rp1[0].ranges = &val1->unavailable;
854 rp2[0].ranges = &val2->unavailable;
855 rp1[1].ranges = &val1->optimized_out;
856 rp2[1].ranges = &val2->optimized_out;
857
858 while (length > 0)
859 {
860 ULONGEST l = 0, h = 0; /* init for gcc -Wall */
861 int i;
862
863 for (i = 0; i < 2; i++)
864 {
865 ULONGEST l_tmp, h_tmp;
866
867 /* The contents only match if the invalid/unavailable
868 contents ranges match as well. */
869 if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
870 offset1, offset2, length,
871 &l_tmp, &h_tmp))
872 return false;
873
874 /* We're interested in the lowest/first range found. */
875 if (i == 0 || l_tmp < l)
876 {
877 l = l_tmp;
878 h = h_tmp;
879 }
880 }
881
882 /* Compare the available/valid contents. */
883 if (memcmp_with_bit_offsets (val1->contents.get (), offset1,
884 val2->contents.get (), offset2, l) != 0)
885 return false;
886
887 length -= h;
888 offset1 += h;
889 offset2 += h;
890 }
891
892 return true;
893 }
894
895 bool
896 value_contents_eq (const struct value *val1, LONGEST offset1,
897 const struct value *val2, LONGEST offset2,
898 LONGEST length)
899 {
900 return value_contents_bits_eq (val1, offset1 * TARGET_CHAR_BIT,
901 val2, offset2 * TARGET_CHAR_BIT,
902 length * TARGET_CHAR_BIT);
903 }
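/* A minimal usage sketch (assumed, not GDB code): two non-lazy values
   compare equal only if both their raw contents and their unavailable /
   optimized-out ranges agree over the compared window.  */

#if 0
static void
example_value_contents_eq (void)
{
  struct type *int_type = builtin_type (target_gdbarch ())->builtin_int;
  struct value *v1 = allocate_value (int_type);
  struct value *v2 = allocate_value (int_type);

  /* Freshly allocated contents are zeroed, so the values match.  */
  gdb_assert (value_contents_eq (v1, 0, v2, 0, TYPE_LENGTH (int_type)));

  /* Marking a byte of V1 unavailable breaks the match, even though
     the raw bytes are still identical.  */
  mark_value_bytes_unavailable (v1, 0, 1);
  gdb_assert (!value_contents_eq (v1, 0, v2, 0, TYPE_LENGTH (int_type)));
}
#endif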
904
905
906 /* The value-history records all the values printed by print commands
907 during this session. */
908
909 static std::vector<value_ref_ptr> value_history;
910
911 \f
912 /* List of all value objects currently allocated
913 (except for those released by calls to release_value).
914 This is so they can be freed after each command. */
915
916 static std::vector<value_ref_ptr> all_values;
917
918 /* Allocate a lazy value for type TYPE. Its actual content is
919 "lazily" allocated too: the content field of the return value is
920 NULL; it will be allocated when it is fetched from the target. */
921
922 struct value *
923 allocate_value_lazy (struct type *type)
924 {
925 struct value *val;
926
927 /* Call check_typedef on our type to make sure that, if TYPE
928 is a TYPE_CODE_TYPEDEF, its length is set to the length
929 of the target type instead of zero. However, we do not
930 replace the typedef type by the target type, because we want
931 to keep the typedef in order to be able to set the VAL's type
932 description correctly. */
933 check_typedef (type);
934
935 val = new struct value (type);
936
937 /* Values start out on the all_values chain. */
938 all_values.emplace_back (val);
939
940 return val;
941 }
942
943 /* The maximum size, in bytes, that GDB will try to allocate for a value.
944 The initial value of 64k was not selected for any specific reason; it is
945 just a reasonable starting point. */
946
947 static int max_value_size = 65536; /* 64k bytes */
948
949 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
950 LONGEST, otherwise GDB will not be able to parse integer values from the
951 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
952 be unable to parse "set max-value-size 2".
953
954 As we want a consistent GDB experience across hosts with different sizes
955 of LONGEST, this arbitrary minimum value was selected; so long as it
956 is bigger than LONGEST on all GDB-supported hosts we're fine. */
957
958 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
959 gdb_static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
960
961 /* Implement the "set max-value-size" command. */
962
963 static void
964 set_max_value_size (const char *args, int from_tty,
965 struct cmd_list_element *c)
966 {
967 gdb_assert (max_value_size == -1 || max_value_size >= 0);
968
969 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
970 {
971 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
972 error (_("max-value-size set too low, increasing to %d bytes"),
973 max_value_size);
974 }
975 }
976
977 /* Implement the "show max-value-size" command. */
978
979 static void
980 show_max_value_size (struct ui_file *file, int from_tty,
981 struct cmd_list_element *c, const char *value)
982 {
983 if (max_value_size == -1)
984 fprintf_filtered (file, _("Maximum value size is unlimited.\n"));
985 else
986 fprintf_filtered (file, _("Maximum value size is %d bytes.\n"),
987 max_value_size);
988 }
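/* For illustration, an assumed session exercising the two commands
   above; the prompt text is hypothetical, but the messages match the
   code in this file.

     (gdb) set max-value-size 2
     max-value-size set too low, increasing to 16 bytes
     (gdb) show max-value-size
     Maximum value size is 16 bytes.  */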
989
990 /* Called before we attempt to allocate or reallocate a buffer for the
991 contents of a value. TYPE is the type of the value for which we are
992 allocating the buffer. If the buffer is too large (based on the user
993 controllable setting) then throw an error. If this function returns
994 then we should attempt to allocate the buffer. */
995
996 static void
997 check_type_length_before_alloc (const struct type *type)
998 {
999 unsigned int length = TYPE_LENGTH (type);
1000
1001 if (max_value_size > -1 && length > max_value_size)
1002 {
1003 if (TYPE_NAME (type) != NULL)
1004 error (_("value of type `%s' requires %u bytes, which is more "
1005 "than max-value-size"), TYPE_NAME (type), length);
1006 else
1007 error (_("value requires %u bytes, which is more than "
1008 "max-value-size"), length);
1009 }
1010 }
1011
1012 /* Allocate the contents of VAL if it has not been allocated yet. */
1013
1014 static void
1015 allocate_value_contents (struct value *val)
1016 {
1017 if (!val->contents)
1018 {
1019 check_type_length_before_alloc (val->enclosing_type);
1020 val->contents.reset
1021 ((gdb_byte *) xzalloc (TYPE_LENGTH (val->enclosing_type)));
1022 }
1023 }
1024
1025 /* Allocate a value and its contents for type TYPE. */
1026
1027 struct value *
1028 allocate_value (struct type *type)
1029 {
1030 struct value *val = allocate_value_lazy (type);
1031
1032 allocate_value_contents (val);
1033 val->lazy = 0;
1034 return val;
1035 }
1036
1037 /* Allocate a value that has the correct length
1038 for COUNT repetitions of type TYPE. */
1039
1040 struct value *
1041 allocate_repeat_value (struct type *type, int count)
1042 {
1043 int low_bound = current_language->string_lower_bound; /* ??? */
1044 /* FIXME-type-allocation: need a way to free this type when we are
1045 done with it. */
1046 struct type *array_type
1047 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1048
1049 return allocate_value (array_type);
1050 }
1051
1052 struct value *
1053 allocate_computed_value (struct type *type,
1054 const struct lval_funcs *funcs,
1055 void *closure)
1056 {
1057 struct value *v = allocate_value_lazy (type);
1058
1059 VALUE_LVAL (v) = lval_computed;
1060 v->location.computed.funcs = funcs;
1061 v->location.computed.closure = closure;
1062
1063 return v;
1064 }
1065
1066 /* Allocate NOT_LVAL value for type TYPE being OPTIMIZED_OUT. */
1067
1068 struct value *
1069 allocate_optimized_out_value (struct type *type)
1070 {
1071 struct value *retval = allocate_value_lazy (type);
1072
1073 mark_value_bytes_optimized_out (retval, 0, TYPE_LENGTH (type));
1074 set_value_lazy (retval, 0);
1075 return retval;
1076 }
1077
1078 /* Accessor methods. */
1079
1080 struct type *
1081 value_type (const struct value *value)
1082 {
1083 return value->type;
1084 }
1085 void
1086 deprecated_set_value_type (struct value *value, struct type *type)
1087 {
1088 value->type = type;
1089 }
1090
1091 LONGEST
1092 value_offset (const struct value *value)
1093 {
1094 return value->offset;
1095 }
1096 void
1097 set_value_offset (struct value *value, LONGEST offset)
1098 {
1099 value->offset = offset;
1100 }
1101
1102 LONGEST
1103 value_bitpos (const struct value *value)
1104 {
1105 return value->bitpos;
1106 }
1107 void
1108 set_value_bitpos (struct value *value, LONGEST bit)
1109 {
1110 value->bitpos = bit;
1111 }
1112
1113 LONGEST
1114 value_bitsize (const struct value *value)
1115 {
1116 return value->bitsize;
1117 }
1118 void
1119 set_value_bitsize (struct value *value, LONGEST bit)
1120 {
1121 value->bitsize = bit;
1122 }
1123
1124 struct value *
1125 value_parent (const struct value *value)
1126 {
1127 return value->parent.get ();
1128 }
1129
1130 /* See value.h. */
1131
1132 void
1133 set_value_parent (struct value *value, struct value *parent)
1134 {
1135 value->parent = value_ref_ptr::new_reference (parent);
1136 }
1137
1138 gdb_byte *
1139 value_contents_raw (struct value *value)
1140 {
1141 struct gdbarch *arch = get_value_arch (value);
1142 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1143
1144 allocate_value_contents (value);
1145 return value->contents.get () + value->embedded_offset * unit_size;
1146 }
1147
1148 gdb_byte *
1149 value_contents_all_raw (struct value *value)
1150 {
1151 allocate_value_contents (value);
1152 return value->contents.get ();
1153 }
1154
1155 struct type *
1156 value_enclosing_type (const struct value *value)
1157 {
1158 return value->enclosing_type;
1159 }
1160
1161 /* Look at value.h for description. */
1162
1163 struct type *
1164 value_actual_type (struct value *value, int resolve_simple_types,
1165 int *real_type_found)
1166 {
1167 struct value_print_options opts;
1168 struct type *result;
1169
1170 get_user_print_options (&opts);
1171
1172 if (real_type_found)
1173 *real_type_found = 0;
1174 result = value_type (value);
1175 if (opts.objectprint)
1176 {
1177 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1178 fetch its rtti type. */
1179 if ((TYPE_CODE (result) == TYPE_CODE_PTR || TYPE_IS_REFERENCE (result))
1180 && TYPE_CODE (check_typedef (TYPE_TARGET_TYPE (result)))
1181 == TYPE_CODE_STRUCT
1182 && !value_optimized_out (value))
1183 {
1184 struct type *real_type;
1185
1186 real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
1187 if (real_type)
1188 {
1189 if (real_type_found)
1190 *real_type_found = 1;
1191 result = real_type;
1192 }
1193 }
1194 else if (resolve_simple_types)
1195 {
1196 if (real_type_found)
1197 *real_type_found = 1;
1198 result = value_enclosing_type (value);
1199 }
1200 }
1201
1202 return result;
1203 }
1204
1205 void
1206 error_value_optimized_out (void)
1207 {
1208 error (_("value has been optimized out"));
1209 }
1210
1211 static void
1212 require_not_optimized_out (const struct value *value)
1213 {
1214 if (!value->optimized_out.empty ())
1215 {
1216 if (value->lval == lval_register)
1217 error (_("register has not been saved in frame"));
1218 else
1219 error_value_optimized_out ();
1220 }
1221 }
1222
1223 static void
1224 require_available (const struct value *value)
1225 {
1226 if (!value->unavailable.empty ())
1227 throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
1228 }
1229
1230 const gdb_byte *
1231 value_contents_for_printing (struct value *value)
1232 {
1233 if (value->lazy)
1234 value_fetch_lazy (value);
1235 return value->contents.get ();
1236 }
1237
1238 const gdb_byte *
1239 value_contents_for_printing_const (const struct value *value)
1240 {
1241 gdb_assert (!value->lazy);
1242 return value->contents.get ();
1243 }
1244
1245 const gdb_byte *
1246 value_contents_all (struct value *value)
1247 {
1248 const gdb_byte *result = value_contents_for_printing (value);
1249 require_not_optimized_out (value);
1250 require_available (value);
1251 return result;
1252 }
1253
1254 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1255 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1256
1257 static void
1258 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1259 const std::vector<range> &src_range, int src_bit_offset,
1260 int bit_length)
1261 {
1262 for (const range &r : src_range)
1263 {
1264 ULONGEST h, l;
1265
1266 l = std::max (r.offset, (LONGEST) src_bit_offset);
1267 h = std::min (r.offset + r.length,
1268 (LONGEST) src_bit_offset + bit_length);
1269
1270 if (l < h)
1271 insert_into_bit_range_vector (dst_range,
1272 dst_bit_offset + (l - src_bit_offset),
1273 h - l);
1274 }
1275 }
1276
1277 /* Copy the ranges metadata in SRC that overlaps [SRC_BIT_OFFSET,
1278 SRC_BIT_OFFSET+BIT_LENGTH) into DST, adjusted. */
1279
1280 static void
1281 value_ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
1282 const struct value *src, int src_bit_offset,
1283 int bit_length)
1284 {
1285 ranges_copy_adjusted (&dst->unavailable, dst_bit_offset,
1286 src->unavailable, src_bit_offset,
1287 bit_length);
1288 ranges_copy_adjusted (&dst->optimized_out, dst_bit_offset,
1289 src->optimized_out, src_bit_offset,
1290 bit_length);
1291 }
1292
1293 /* Copy LENGTH target addressable memory units of SRC value's (all) contents
1294 (value_contents_all) starting at SRC_OFFSET, into DST value's (all)
1295 contents, starting at DST_OFFSET. If unavailable contents are
1296 being copied from SRC, the corresponding DST contents are marked
1297 unavailable accordingly. Neither DST nor SRC may be lazy
1298 values.
1299
1300 It is assumed the contents of DST in the [DST_OFFSET,
1301 DST_OFFSET+LENGTH) range are wholly available. */
1302
1303 void
1304 value_contents_copy_raw (struct value *dst, LONGEST dst_offset,
1305 struct value *src, LONGEST src_offset, LONGEST length)
1306 {
1307 LONGEST src_bit_offset, dst_bit_offset, bit_length;
1308 struct gdbarch *arch = get_value_arch (src);
1309 int unit_size = gdbarch_addressable_memory_unit_size (arch);
1310
1311 /* A lazy DST would make this copy operation useless, since as
1312 soon as DST's contents were un-lazied (by a later value_contents
1313 call, say), the contents would be overwritten. A lazy SRC would
1314 mean we'd be copying garbage. */
1315 gdb_assert (!dst->lazy && !src->lazy);
1316
1317 /* The overwritten DST range gets unavailability ORed in, not
1318 replaced. Make sure to remember to implement replacing if it
1319 turns out actually necessary. */
1320 gdb_assert (value_bytes_available (dst, dst_offset, length));
1321 gdb_assert (!value_bits_any_optimized_out (dst,
1322 TARGET_CHAR_BIT * dst_offset,
1323 TARGET_CHAR_BIT * length));
1324
1325 /* Copy the data. */
1326 memcpy (value_contents_all_raw (dst) + dst_offset * unit_size,
1327 value_contents_all_raw (src) + src_offset * unit_size,
1328 length * unit_size);
1329
1330 /* Copy the meta-data, adjusted. */
1331 src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
1332 dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
1333 bit_length = length * unit_size * HOST_CHAR_BIT;
1334
1335 value_ranges_copy_adjusted (dst, dst_bit_offset,
1336 src, src_bit_offset,
1337 bit_length);
1338 }
1339
1340 /* Copy LENGTH bytes of SRC value's (all) contents
1341 (value_contents_all) starting at SRC_OFFSET byte, into DST value's
1342 (all) contents, starting at DST_OFFSET. If unavailable contents
1343 are being copied from SRC, the corresponding DST contents are
1344 marked unavailable accordingly. DST must not be lazy. If SRC is
1345 lazy, it will be fetched now.
1346
1347 It is assumed the contents of DST in the [DST_OFFSET,
1348 DST_OFFSET+LENGTH) range are wholly available. */
1349
1350 void
1351 value_contents_copy (struct value *dst, LONGEST dst_offset,
1352 struct value *src, LONGEST src_offset, LONGEST length)
1353 {
1354 if (src->lazy)
1355 value_fetch_lazy (src);
1356
1357 value_contents_copy_raw (dst, dst_offset, src, src_offset, length);
1358 }
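/* A minimal sketch (assumed, not GDB code) of the metadata propagation
   above: copying from a partially unavailable source marks the
   corresponding destination bytes unavailable too.  */

#if 0
static void
example_value_contents_copy (void)
{
  struct type *int_type = builtin_type (target_gdbarch ())->builtin_int;
  struct value *src = allocate_value (int_type);
  struct value *dst = allocate_value (int_type);

  mark_value_bytes_unavailable (src, 0, 1);
  value_contents_copy (dst, 0, src, 0, TYPE_LENGTH (int_type));

  /* The first destination byte is now unavailable as well.  */
  gdb_assert (!value_bytes_available (dst, 0, 1));
  gdb_assert (value_bytes_available (dst, 1, 1));
}
#endif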
1359
1360 int
1361 value_lazy (const struct value *value)
1362 {
1363 return value->lazy;
1364 }
1365
1366 void
1367 set_value_lazy (struct value *value, int val)
1368 {
1369 value->lazy = val;
1370 }
1371
1372 int
1373 value_stack (const struct value *value)
1374 {
1375 return value->stack;
1376 }
1377
1378 void
1379 set_value_stack (struct value *value, int val)
1380 {
1381 value->stack = val;
1382 }
1383
1384 const gdb_byte *
1385 value_contents (struct value *value)
1386 {
1387 const gdb_byte *result = value_contents_writeable (value);
1388 require_not_optimized_out (value);
1389 require_available (value);
1390 return result;
1391 }
1392
1393 gdb_byte *
1394 value_contents_writeable (struct value *value)
1395 {
1396 if (value->lazy)
1397 value_fetch_lazy (value);
1398 return value_contents_raw (value);
1399 }
1400
1401 int
1402 value_optimized_out (struct value *value)
1403 {
1404 /* We can only know if a value is optimized out once we have tried to
1405 fetch it. */
1406 if (value->optimized_out.empty () && value->lazy)
1407 {
1408 try
1409 {
1410 value_fetch_lazy (value);
1411 }
1412 catch (const gdb_exception_error &ex)
1413 {
1414 /* Fall back to checking value->optimized_out. */
1415 }
1416 }
1417
1418 return !value->optimized_out.empty ();
1419 }
1420
1421 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes and
1422 extending for the following LENGTH bytes. */
1423
1424 void
1425 mark_value_bytes_optimized_out (struct value *value, int offset, int length)
1426 {
1427 mark_value_bits_optimized_out (value,
1428 offset * TARGET_CHAR_BIT,
1429 length * TARGET_CHAR_BIT);
1430 }
1431
1432 /* See value.h. */
1433
1434 void
1435 mark_value_bits_optimized_out (struct value *value,
1436 LONGEST offset, LONGEST length)
1437 {
1438 insert_into_bit_range_vector (&value->optimized_out, offset, length);
1439 }
1440
1441 int
1442 value_bits_synthetic_pointer (const struct value *value,
1443 LONGEST offset, LONGEST length)
1444 {
1445 if (value->lval != lval_computed
1446 || !value->location.computed.funcs->check_synthetic_pointer)
1447 return 0;
1448 return value->location.computed.funcs->check_synthetic_pointer (value,
1449 offset,
1450 length);
1451 }
1452
1453 LONGEST
1454 value_embedded_offset (const struct value *value)
1455 {
1456 return value->embedded_offset;
1457 }
1458
1459 void
1460 set_value_embedded_offset (struct value *value, LONGEST val)
1461 {
1462 value->embedded_offset = val;
1463 }
1464
1465 LONGEST
1466 value_pointed_to_offset (const struct value *value)
1467 {
1468 return value->pointed_to_offset;
1469 }
1470
1471 void
1472 set_value_pointed_to_offset (struct value *value, LONGEST val)
1473 {
1474 value->pointed_to_offset = val;
1475 }
1476
1477 const struct lval_funcs *
1478 value_computed_funcs (const struct value *v)
1479 {
1480 gdb_assert (value_lval_const (v) == lval_computed);
1481
1482 return v->location.computed.funcs;
1483 }
1484
1485 void *
1486 value_computed_closure (const struct value *v)
1487 {
1488 gdb_assert (v->lval == lval_computed);
1489
1490 return v->location.computed.closure;
1491 }
1492
1493 enum lval_type *
1494 deprecated_value_lval_hack (struct value *value)
1495 {
1496 return &value->lval;
1497 }
1498
1499 enum lval_type
1500 value_lval_const (const struct value *value)
1501 {
1502 return value->lval;
1503 }
1504
1505 CORE_ADDR
1506 value_address (const struct value *value)
1507 {
1508 if (value->lval != lval_memory)
1509 return 0;
1510 if (value->parent != NULL)
1511 return value_address (value->parent.get ()) + value->offset;
1512 if (NULL != TYPE_DATA_LOCATION (value_type (value)))
1513 {
1514 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (value_type (value)));
1515 return TYPE_DATA_LOCATION_ADDR (value_type (value));
1516 }
1517
1518 return value->location.address + value->offset;
1519 }
1520
1521 CORE_ADDR
1522 value_raw_address (const struct value *value)
1523 {
1524 if (value->lval != lval_memory)
1525 return 0;
1526 return value->location.address;
1527 }
1528
1529 void
1530 set_value_address (struct value *value, CORE_ADDR addr)
1531 {
1532 gdb_assert (value->lval == lval_memory);
1533 value->location.address = addr;
1534 }
1535
1536 struct internalvar **
1537 deprecated_value_internalvar_hack (struct value *value)
1538 {
1539 return &value->location.internalvar;
1540 }
1541
1542 struct frame_id *
1543 deprecated_value_next_frame_id_hack (struct value *value)
1544 {
1545 gdb_assert (value->lval == lval_register);
1546 return &value->location.reg.next_frame_id;
1547 }
1548
1549 int *
1550 deprecated_value_regnum_hack (struct value *value)
1551 {
1552 gdb_assert (value->lval == lval_register);
1553 return &value->location.reg.regnum;
1554 }
1555
1556 int
1557 deprecated_value_modifiable (const struct value *value)
1558 {
1559 return value->modifiable;
1560 }
1561 \f
1562 /* Return a mark in the value chain. All values allocated after the
1563 mark is obtained (except for those released) are subject to being freed
1564 if a subsequent value_free_to_mark is passed the mark. */
1565 struct value *
1566 value_mark (void)
1567 {
1568 if (all_values.empty ())
1569 return nullptr;
1570 return all_values.back ().get ();
1571 }
1572
1573 /* See value.h. */
1574
1575 void
1576 value_incref (struct value *val)
1577 {
1578 val->reference_count++;
1579 }
1580
1581 /* Release a reference to VAL, which was acquired with value_incref.
1582 This function is also called to deallocate values from the value
1583 chain. */
1584
1585 void
1586 value_decref (struct value *val)
1587 {
1588 if (val != nullptr)
1589 {
1590 gdb_assert (val->reference_count > 0);
1591 val->reference_count--;
1592 if (val->reference_count == 0)
1593 delete val;
1594 }
1595 }
1596
1597 /* Free all values allocated since MARK was obtained by value_mark
1598 (except for those released). */
1599 void
1600 value_free_to_mark (const struct value *mark)
1601 {
1602 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1603 if (iter == all_values.end ())
1604 all_values.clear ();
1605 else
1606 all_values.erase (iter + 1, all_values.end ());
1607 }
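/* A minimal sketch (assumed usage, not GDB code) of the mark /
   free-to-mark idiom built on the functions above: temporaries
   allocated while evaluating an expression are released in one go.  */

#if 0
static void
example_value_mark_usage (void)
{
  struct value *mark = value_mark ();

  /* ... allocate temporary values, e.g. via allocate_value () ...  */

  /* Free everything allocated since MARK that was not released.  */
  value_free_to_mark (mark);
}
#endif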
1608
1609 /* Remove VAL from the chain all_values
1610 so it will not be freed automatically. */
1611
1612 value_ref_ptr
1613 release_value (struct value *val)
1614 {
1615 if (val == nullptr)
1616 return value_ref_ptr ();
1617
1618 std::vector<value_ref_ptr>::reverse_iterator iter;
1619 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1620 {
1621 if (*iter == val)
1622 {
1623 value_ref_ptr result = *iter;
1624 all_values.erase (iter.base () - 1);
1625 return result;
1626 }
1627 }
1628
1629 /* We must always return an owned reference. Normally this happens
1630 because we transfer the reference from the value chain, but in
1631 this case the value was not on the chain. */
1632 return value_ref_ptr::new_reference (val);
1633 }
1634
1635 /* See value.h. */
1636
1637 std::vector<value_ref_ptr>
1638 value_release_to_mark (const struct value *mark)
1639 {
1640 std::vector<value_ref_ptr> result;
1641
1642 auto iter = std::find (all_values.begin (), all_values.end (), mark);
1643 if (iter == all_values.end ())
1644 std::swap (result, all_values);
1645 else
1646 {
1647 std::move (iter + 1, all_values.end (), std::back_inserter (result));
1648 all_values.erase (iter + 1, all_values.end ());
1649 }
1650 std::reverse (result.begin (), result.end ());
1651 return result;
1652 }
1653
1654 /* Return a copy of the value ARG.
1655 It contains the same contents, for the same memory address,
1656 but it's a different block of storage. */
1657
1658 struct value *
1659 value_copy (struct value *arg)
1660 {
1661 struct type *encl_type = value_enclosing_type (arg);
1662 struct value *val;
1663
1664 if (value_lazy (arg))
1665 val = allocate_value_lazy (encl_type);
1666 else
1667 val = allocate_value (encl_type);
1668 val->type = arg->type;
1669 VALUE_LVAL (val) = VALUE_LVAL (arg);
1670 val->location = arg->location;
1671 val->offset = arg->offset;
1672 val->bitpos = arg->bitpos;
1673 val->bitsize = arg->bitsize;
1674 val->lazy = arg->lazy;
1675 val->embedded_offset = value_embedded_offset (arg);
1676 val->pointed_to_offset = arg->pointed_to_offset;
1677 val->modifiable = arg->modifiable;
1678 if (!value_lazy (val))
1679 {
1680 memcpy (value_contents_all_raw (val), value_contents_all_raw (arg),
1681 TYPE_LENGTH (value_enclosing_type (arg)));
1682
1683 }
1684 val->unavailable = arg->unavailable;
1685 val->optimized_out = arg->optimized_out;
1686 val->parent = arg->parent;
1687 if (VALUE_LVAL (val) == lval_computed)
1688 {
1689 const struct lval_funcs *funcs = val->location.computed.funcs;
1690
1691 if (funcs->copy_closure)
1692 val->location.computed.closure = funcs->copy_closure (val);
1693 }
1694 return val;
1695 }
1696
1697 /* Return a "const" and/or "volatile" qualified version of the value V.
1698 If CNST is true, then the returned value will be qualified with
1699 "const".
1700 If VOLTL is true, then the returned value will be qualified with
1701 "volatile". */
1702
1703 struct value *
1704 make_cv_value (int cnst, int voltl, struct value *v)
1705 {
1706 struct type *val_type = value_type (v);
1707 struct type *enclosing_type = value_enclosing_type (v);
1708 struct value *cv_val = value_copy (v);
1709
1710 deprecated_set_value_type (cv_val,
1711 make_cv_type (cnst, voltl, val_type, NULL));
1712 set_value_enclosing_type (cv_val,
1713 make_cv_type (cnst, voltl, enclosing_type, NULL));
1714
1715 return cv_val;
1716 }
1717
1718 /* Return a version of ARG that is non-lvalue. */
1719
1720 struct value *
1721 value_non_lval (struct value *arg)
1722 {
1723 if (VALUE_LVAL (arg) != not_lval)
1724 {
1725 struct type *enc_type = value_enclosing_type (arg);
1726 struct value *val = allocate_value (enc_type);
1727
1728 memcpy (value_contents_all_raw (val), value_contents_all (arg),
1729 TYPE_LENGTH (enc_type));
1730 val->type = arg->type;
1731 set_value_embedded_offset (val, value_embedded_offset (arg));
1732 set_value_pointed_to_offset (val, value_pointed_to_offset (arg));
1733 return val;
1734 }
1735 return arg;
1736 }
1737
1738 /* Write contents of V at ADDR and set its lval type to be LVAL_MEMORY. */
1739
1740 void
1741 value_force_lval (struct value *v, CORE_ADDR addr)
1742 {
1743 gdb_assert (VALUE_LVAL (v) == not_lval);
1744
1745 write_memory (addr, value_contents_raw (v), TYPE_LENGTH (value_type (v)));
1746 v->lval = lval_memory;
1747 v->location.address = addr;
1748 }
1749
1750 void
1751 set_value_component_location (struct value *component,
1752 const struct value *whole)
1753 {
1754 struct type *type;
1755
1756 gdb_assert (whole->lval != lval_xcallable);
1757
1758 if (whole->lval == lval_internalvar)
1759 VALUE_LVAL (component) = lval_internalvar_component;
1760 else
1761 VALUE_LVAL (component) = whole->lval;
1762
1763 component->location = whole->location;
1764 if (whole->lval == lval_computed)
1765 {
1766 const struct lval_funcs *funcs = whole->location.computed.funcs;
1767
1768 if (funcs->copy_closure)
1769 component->location.computed.closure = funcs->copy_closure (whole);
1770 }
1771
1772 /* If type has a dynamic resolved location property,
1773 update its value address. */
1774 type = value_type (whole);
1775 if (NULL != TYPE_DATA_LOCATION (type)
1776 && TYPE_DATA_LOCATION_KIND (type) == PROP_CONST)
1777 set_value_address (component, TYPE_DATA_LOCATION_ADDR (type));
1778 }
1779
1780 /* Access to the value history. */
1781
1782 /* Record a new value in the value history.
1783 Returns the absolute history index of the entry. */
1784
1785 int
1786 record_latest_value (struct value *val)
1787 {
1788 /* We don't want this value to have anything to do with the inferior anymore.
1789 In particular, "set $1 = 50" should not affect the variable from which
1790 the value was taken, and fast watchpoints should be able to assume that
1791 a value on the value history never changes. */
1792 if (value_lazy (val))
1793 value_fetch_lazy (val);
1794 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1795 from. This is a bit dubious, because then *&$1 does not just return $1
1796 but the current contents of that location. c'est la vie... */
1797 val->modifiable = 0;
1798
1799 value_history.push_back (release_value (val));
1800
1801 return value_history.size ();
1802 }
1803
1804 /* Return a copy of the value in the history with sequence number NUM. */
1805
1806 struct value *
1807 access_value_history (int num)
1808 {
1809 int absnum = num;
1810
1811 if (absnum <= 0)
1812 absnum += value_history.size ();
1813
1814 if (absnum <= 0)
1815 {
1816 if (num == 0)
1817 error (_("The history is empty."));
1818 else if (num == 1)
1819 error (_("There is only one value in the history."));
1820 else
1821 error (_("History does not go back to $$%d."), -num);
1822 }
1823 if (absnum > value_history.size ())
1824 error (_("History has not yet reached $%d."), absnum);
1825
1826 absnum--;
1827
1828 return value_copy (value_history[absnum].get ());
1829 }
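/* A minimal sketch (assumed usage, not GDB code) of the history
   indexing above: record_latest_value returns the new 1-based index,
   and access_value_history counts back from the newest entry when
   given zero or a negative number (the CLI's $, $$ and $$N).  */

#if 0
static void
example_value_history_access (void)
{
  struct value *v
    = allocate_value (builtin_type (target_gdbarch ())->builtin_int);
  int idx = record_latest_value (v);	/* Say this yields index 3 ($3).  */

  struct value *newest = access_value_history (0);	/* $   -> $3  */
  struct value *prev = access_value_history (-1);	/* $$  -> $2  */
  struct value *same = access_value_history (idx);	/* $3  */
}
#endif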
1830
1831 static void
1832 show_values (const char *num_exp, int from_tty)
1833 {
1834 int i;
1835 struct value *val;
1836 static int num = 1;
1837
1838 if (num_exp)
1839 {
1840 /* "show values +" should print from the stored position.
1841 "show values <exp>" should print around value number <exp>. */
1842 if (num_exp[0] != '+' || num_exp[1] != '\0')
1843 num = parse_and_eval_long (num_exp) - 5;
1844 }
1845 else
1846 {
1847 /* "show values" means print the last 10 values. */
1848 num = value_history.size () - 9;
1849 }
1850
1851 if (num <= 0)
1852 num = 1;
1853
1854 for (i = num; i < num + 10 && i <= value_history.size (); i++)
1855 {
1856 struct value_print_options opts;
1857
1858 val = access_value_history (i);
1859 printf_filtered (("$%d = "), i);
1860 get_user_print_options (&opts);
1861 value_print (val, gdb_stdout, &opts);
1862 printf_filtered (("\n"));
1863 }
1864
1865 /* The next "show values +" should start after what we just printed. */
1866 num += 10;
1867
1868 /* Hitting just return after this command should do the same thing as
1869 "show values +". If num_exp is null, this is unnecessary, since
1870 "show values +" is not useful after "show values". */
1871 if (from_tty && num_exp)
1872 set_repeat_arguments ("+");
1873 }
1874 \f
1875 enum internalvar_kind
1876 {
1877 /* The internal variable is empty. */
1878 INTERNALVAR_VOID,
1879
1880 /* The value of the internal variable is provided directly as
1881 a GDB value object. */
1882 INTERNALVAR_VALUE,
1883
1884 /* A fresh value is computed via a call-back routine on every
1885 access to the internal variable. */
1886 INTERNALVAR_MAKE_VALUE,
1887
1888 /* The internal variable holds a GDB internal convenience function. */
1889 INTERNALVAR_FUNCTION,
1890
1891 /* The variable holds an integer value. */
1892 INTERNALVAR_INTEGER,
1893
1894 /* The variable holds a GDB-provided string. */
1895 INTERNALVAR_STRING,
1896 };
1897
1898 union internalvar_data
1899 {
1900 /* A value object used with INTERNALVAR_VALUE. */
1901 struct value *value;
1902
1903 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1904 struct
1905 {
1906 /* The functions to call. */
1907 const struct internalvar_funcs *functions;
1908
1909 /* The function's user-data. */
1910 void *data;
1911 } make_value;
1912
1913 /* The internal function used with INTERNALVAR_FUNCTION. */
1914 struct
1915 {
1916 struct internal_function *function;
1917 /* True if this is the canonical name for the function. */
1918 int canonical;
1919 } fn;
1920
1921 /* An integer value used with INTERNALVAR_INTEGER. */
1922 struct
1923 {
1924 /* If type is non-NULL, it will be used as the type to generate
1925 a value for this internal variable. If type is NULL, a default
1926 integer type for the architecture is used. */
1927 struct type *type;
1928 LONGEST val;
1929 } integer;
1930
1931 /* A string value used with INTERNALVAR_STRING. */
1932 char *string;
1933 };
1934
1935 /* Internal variables. These are variables within the debugger
1936 that hold values assigned by debugger commands.
1937 The user refers to them with a '$' prefix
1938 that does not appear in the variable names stored internally. */
1939
1940 struct internalvar
1941 {
1942 struct internalvar *next;
1943 char *name;
1944
1945 /* We support various different kinds of content of an internal variable.
1946 enum internalvar_kind specifies the kind, and union internalvar_data
1947 provides the data associated with this particular kind. */
1948
1949 enum internalvar_kind kind;
1950
1951 union internalvar_data u;
1952 };
1953
1954 static struct internalvar *internalvars;
1955
1956 /* If the target convenience variable is still void, evaluate the given
1957 assignment expression to initialize it; otherwise leave it unchanged. */
1958 static void
1959 init_if_undefined_command (const char* args, int from_tty)
1960 {
1961 struct internalvar* intvar;
1962
1963 /* Parse the expression - this is taken from set_command(). */
1964 expression_up expr = parse_expression (args);
1965
1966 /* Validate the expression.
1967 Was the expression an assignment?
1968 Or even an expression at all? */
1969 if (expr->nelts == 0 || expr->elts[0].opcode != BINOP_ASSIGN)
1970 error (_("Init-if-undefined requires an assignment expression."));
1971
1972 /* Extract the variable from the parsed expression.
1973 In the case of an assign the lvalue will be in elts[1] and elts[2]. */
1974 if (expr->elts[1].opcode != OP_INTERNALVAR)
1975 error (_("The first parameter to init-if-undefined "
1976 "should be a GDB variable."));
1977 intvar = expr->elts[2].internalvar;
1978
1979 /* Only evaluate the expression if the lvalue is void.
1980 This may still fail if the expression is invalid. */
1981 if (intvar->kind == INTERNALVAR_VOID)
1982 evaluate_expression (expr.get ());
1983 }
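
/* Example of the command implemented above (a usage sketch; "$mycount"
   is just a hypothetical convenience variable name):

     (gdb) init-if-undefined $mycount = 0

   The assignment is evaluated only while $mycount is still void, so
   re-sourcing a script that contains this line does not clobber a
   value the user has already set.  */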
1984
1985
1986 /* Look up an internal variable with name NAME. NAME should not
1987 normally include a dollar sign.
1988
1989 If the specified internal variable does not exist,
1990 the return value is NULL. */
1991
1992 struct internalvar *
1993 lookup_only_internalvar (const char *name)
1994 {
1995 struct internalvar *var;
1996
1997 for (var = internalvars; var; var = var->next)
1998 if (strcmp (var->name, name) == 0)
1999 return var;
2000
2001 return NULL;
2002 }
2003
2004 /* Complete NAME by comparing it to the names of internal
2005 variables. */
2006
2007 void
2008 complete_internalvar (completion_tracker &tracker, const char *name)
2009 {
2010 struct internalvar *var;
2011 int len;
2012
2013 len = strlen (name);
2014
2015 for (var = internalvars; var; var = var->next)
2016 if (strncmp (var->name, name, len) == 0)
2017 {
2018 gdb::unique_xmalloc_ptr<char> copy (xstrdup (var->name));
2019
2020 tracker.add_completion (std::move (copy));
2021 }
2022 }
2023
2024 /* Create an internal variable with name NAME and with a void value.
2025 NAME should not normally include a dollar sign. */
2026
2027 struct internalvar *
2028 create_internalvar (const char *name)
2029 {
2030 struct internalvar *var = XNEW (struct internalvar);
2031
2032 var->name = concat (name, (char *)NULL);
2033 var->kind = INTERNALVAR_VOID;
2034 var->next = internalvars;
2035 internalvars = var;
2036 return var;
2037 }
2038
2039 /* Create an internal variable with name NAME and register the
2040 FUNCS->make_value callback as the routine that value_of_internalvar
2041 uses to create a value whenever this variable is referenced. NAME
2042 should not normally include a dollar sign. DATA is passed
2043 uninterpreted to the callbacks in FUNCS. If FUNCS->destroy is not
2044 NULL, it is called with DATA when the internal variable is destroyed. */
2045
2046 struct internalvar *
2047 create_internalvar_type_lazy (const char *name,
2048 const struct internalvar_funcs *funcs,
2049 void *data)
2050 {
2051 struct internalvar *var = create_internalvar (name);
2052
2053 var->kind = INTERNALVAR_MAKE_VALUE;
2054 var->u.make_value.functions = funcs;
2055 var->u.make_value.data = data;
2056 return var;
2057 }
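
/* A minimal sketch of registering such a variable (hypothetical names;
   the callback signature mirrors the make_value call in
   value_of_internalvar below, and the field names follow the uses of
   struct internalvar_funcs elsewhere in this file):

     static struct value *
     make_answer_value (struct gdbarch *gdbarch, struct internalvar *var,
                        void *ignore)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     }

     static struct internalvar_funcs answer_funcs;

     ...
     answer_funcs.make_value = make_answer_value;
     create_internalvar_type_lazy ("_answer", &answer_funcs, NULL);

   Every read of $_answer then calls make_answer_value afresh via the
   INTERNALVAR_MAKE_VALUE case of value_of_internalvar.  */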
2058
2059 /* See documentation in value.h. */
2060
2061 int
2062 compile_internalvar_to_ax (struct internalvar *var,
2063 struct agent_expr *expr,
2064 struct axs_value *value)
2065 {
2066 if (var->kind != INTERNALVAR_MAKE_VALUE
2067 || var->u.make_value.functions->compile_to_ax == NULL)
2068 return 0;
2069
2070 var->u.make_value.functions->compile_to_ax (var, expr, value,
2071 var->u.make_value.data);
2072 return 1;
2073 }
2074
2075 /* Look up an internal variable with name NAME. NAME should not
2076 normally include a dollar sign.
2077
2078 If the specified internal variable does not exist,
2079 one is created, with a void value. */
2080
2081 struct internalvar *
2082 lookup_internalvar (const char *name)
2083 {
2084 struct internalvar *var;
2085
2086 var = lookup_only_internalvar (name);
2087 if (var)
2088 return var;
2089
2090 return create_internalvar (name);
2091 }
2092
2093 /* Return current value of internal variable VAR. For variables that
2094 are not inherently typed, use a value type appropriate for GDBARCH. */
2095
2096 struct value *
2097 value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
2098 {
2099 struct value *val;
2100 struct trace_state_variable *tsv;
2101
2102 /* If there is a trace state variable of the same name, assume that
2103 is what we really want to see. */
2104 tsv = find_trace_state_variable (var->name);
2105 if (tsv)
2106 {
2107 tsv->value_known = target_get_trace_state_variable_value (tsv->number,
2108 &(tsv->value));
2109 if (tsv->value_known)
2110 val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
2111 tsv->value);
2112 else
2113 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2114 return val;
2115 }
2116
2117 switch (var->kind)
2118 {
2119 case INTERNALVAR_VOID:
2120 val = allocate_value (builtin_type (gdbarch)->builtin_void);
2121 break;
2122
2123 case INTERNALVAR_FUNCTION:
2124 val = allocate_value (builtin_type (gdbarch)->internal_fn);
2125 break;
2126
2127 case INTERNALVAR_INTEGER:
2128 if (!var->u.integer.type)
2129 val = value_from_longest (builtin_type (gdbarch)->builtin_int,
2130 var->u.integer.val);
2131 else
2132 val = value_from_longest (var->u.integer.type, var->u.integer.val);
2133 break;
2134
2135 case INTERNALVAR_STRING:
2136 val = value_cstring (var->u.string, strlen (var->u.string),
2137 builtin_type (gdbarch)->builtin_char);
2138 break;
2139
2140 case INTERNALVAR_VALUE:
2141 val = value_copy (var->u.value);
2142 if (value_lazy (val))
2143 value_fetch_lazy (val);
2144 break;
2145
2146 case INTERNALVAR_MAKE_VALUE:
2147 val = (*var->u.make_value.functions->make_value) (gdbarch, var,
2148 var->u.make_value.data);
2149 break;
2150
2151 default:
2152 internal_error (__FILE__, __LINE__, _("bad kind"));
2153 }
2154
2155 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2156 on this value go back to affect the original internal variable.
2157
2158 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2159 no underlying modifiable state in the internal variable.
2160
2161 Likewise, if the variable's value is a computed lvalue, we want
2162 references to it to produce another computed lvalue, where
2163 references and assignments actually operate through the
2164 computed value's functions.
2165
2166 This means that internal variables with computed values
2167 behave a little differently from other internal variables:
2168 assignments to them don't just replace the previous value
2169 altogether. At the moment, this seems like the behavior we
2170 want. */
2171
2172 if (var->kind != INTERNALVAR_MAKE_VALUE
2173 && val->lval != lval_computed)
2174 {
2175 VALUE_LVAL (val) = lval_internalvar;
2176 VALUE_INTERNALVAR (val) = var;
2177 }
2178
2179 return val;
2180 }
2181
2182 int
2183 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2184 {
2185 if (var->kind == INTERNALVAR_INTEGER)
2186 {
2187 *result = var->u.integer.val;
2188 return 1;
2189 }
2190
2191 if (var->kind == INTERNALVAR_VALUE)
2192 {
2193 struct type *type = check_typedef (value_type (var->u.value));
2194
2195 if (TYPE_CODE (type) == TYPE_CODE_INT)
2196 {
2197 *result = value_as_long (var->u.value);
2198 return 1;
2199 }
2200 }
2201
2202 return 0;
2203 }
2204
2205 static int
2206 get_internalvar_function (struct internalvar *var,
2207 struct internal_function **result)
2208 {
2209 switch (var->kind)
2210 {
2211 case INTERNALVAR_FUNCTION:
2212 *result = var->u.fn.function;
2213 return 1;
2214
2215 default:
2216 return 0;
2217 }
2218 }
2219
2220 void
2221 set_internalvar_component (struct internalvar *var,
2222 LONGEST offset, LONGEST bitpos,
2223 LONGEST bitsize, struct value *newval)
2224 {
2225 gdb_byte *addr;
2226 struct gdbarch *arch;
2227 int unit_size;
2228
2229 switch (var->kind)
2230 {
2231 case INTERNALVAR_VALUE:
2232 addr = value_contents_writeable (var->u.value);
2233 arch = get_value_arch (var->u.value);
2234 unit_size = gdbarch_addressable_memory_unit_size (arch);
2235
2236 if (bitsize)
2237 modify_field (value_type (var->u.value), addr + offset,
2238 value_as_long (newval), bitpos, bitsize);
2239 else
2240 memcpy (addr + offset * unit_size, value_contents (newval),
2241 TYPE_LENGTH (value_type (newval)));
2242 break;
2243
2244 default:
2245 /* We can never get a component of any other kind. */
2246 internal_error (__FILE__, __LINE__, _("set_internalvar_component"));
2247 }
2248 }
2249
2250 void
2251 set_internalvar (struct internalvar *var, struct value *val)
2252 {
2253 enum internalvar_kind new_kind;
2254 union internalvar_data new_data = { 0 };
2255
2256 if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
2257 error (_("Cannot overwrite convenience function %s"), var->name);
2258
2259 /* Prepare new contents. */
2260 switch (TYPE_CODE (check_typedef (value_type (val))))
2261 {
2262 case TYPE_CODE_VOID:
2263 new_kind = INTERNALVAR_VOID;
2264 break;
2265
2266 case TYPE_CODE_INTERNAL_FUNCTION:
2267 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2268 new_kind = INTERNALVAR_FUNCTION;
2269 get_internalvar_function (VALUE_INTERNALVAR (val),
2270 &new_data.fn.function);
2271 /* Copies created here are never canonical. */
2272 break;
2273
2274 default:
2275 new_kind = INTERNALVAR_VALUE;
2276 struct value *copy = value_copy (val);
2277 copy->modifiable = 1;
2278
2279 /* Force the value to be fetched from the target now, to avoid problems
2280 later when this internalvar is referenced and the target is gone or
2281 has changed. */
2282 if (value_lazy (copy))
2283 value_fetch_lazy (copy);
2284
2285 /* Release the value from the value chain to prevent it from being
2286 deleted by free_all_values. From here on this function should not
2287 call error () until new_data is installed into the var->u to avoid
2288 leaking memory. */
2289 new_data.value = release_value (copy).release ();
2290
2291 /* Internal variables which are created from values with a dynamic
2292 location no longer need the location property of the origin.
2293 The resolved dynamic location is used in preference to any other
2294 address when accessing the value.
2295 If we kept it, we would still refer to the origin value.
2296 Remove the location property in case it exists. */
2297 remove_dyn_prop (DYN_PROP_DATA_LOCATION, value_type (new_data.value));
2298
2299 break;
2300 }
2301
2302 /* Clean up old contents. */
2303 clear_internalvar (var);
2304
2305 /* Switch over. */
2306 var->kind = new_kind;
2307 var->u = new_data;
2308 /* End code which must not call error(). */
2309 }
2310
2311 void
2312 set_internalvar_integer (struct internalvar *var, LONGEST l)
2313 {
2314 /* Clean up old contents. */
2315 clear_internalvar (var);
2316
2317 var->kind = INTERNALVAR_INTEGER;
2318 var->u.integer.type = NULL;
2319 var->u.integer.val = l;
2320 }
2321
2322 void
2323 set_internalvar_string (struct internalvar *var, const char *string)
2324 {
2325 /* Clean up old contents. */
2326 clear_internalvar (var);
2327
2328 var->kind = INTERNALVAR_STRING;
2329 var->u.string = xstrdup (string);
2330 }
2331
2332 static void
2333 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2334 {
2335 /* Clean up old contents. */
2336 clear_internalvar (var);
2337
2338 var->kind = INTERNALVAR_FUNCTION;
2339 var->u.fn.function = f;
2340 var->u.fn.canonical = 1;
2341 /* Variables installed here are always the canonical version. */
2342 }
2343
2344 void
2345 clear_internalvar (struct internalvar *var)
2346 {
2347 /* Clean up old contents. */
2348 switch (var->kind)
2349 {
2350 case INTERNALVAR_VALUE:
2351 value_decref (var->u.value);
2352 break;
2353
2354 case INTERNALVAR_STRING:
2355 xfree (var->u.string);
2356 break;
2357
2358 case INTERNALVAR_MAKE_VALUE:
2359 if (var->u.make_value.functions->destroy != NULL)
2360 var->u.make_value.functions->destroy (var->u.make_value.data);
2361 break;
2362
2363 default:
2364 break;
2365 }
2366
2367 /* Reset to void kind. */
2368 var->kind = INTERNALVAR_VOID;
2369 }
2370
2371 char *
2372 internalvar_name (const struct internalvar *var)
2373 {
2374 return var->name;
2375 }
2376
2377 static struct internal_function *
2378 create_internal_function (const char *name,
2379 internal_function_fn handler, void *cookie)
2380 {
2381 struct internal_function *ifn = XNEW (struct internal_function);
2382
2383 ifn->name = xstrdup (name);
2384 ifn->handler = handler;
2385 ifn->cookie = cookie;
2386 return ifn;
2387 }
2388
2389 char *
2390 value_internal_function_name (struct value *val)
2391 {
2392 struct internal_function *ifn;
2393 int result;
2394
2395 gdb_assert (VALUE_LVAL (val) == lval_internalvar);
2396 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2397 gdb_assert (result);
2398
2399 return ifn->name;
2400 }
2401
2402 struct value *
2403 call_internal_function (struct gdbarch *gdbarch,
2404 const struct language_defn *language,
2405 struct value *func, int argc, struct value **argv)
2406 {
2407 struct internal_function *ifn;
2408 int result;
2409
2410 gdb_assert (VALUE_LVAL (func) == lval_internalvar);
2411 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2412 gdb_assert (result);
2413
2414 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2415 }
2416
2417 /* The 'function' command. This does nothing -- it is just a
2418 placeholder to let "help function NAME" work. This is also used as
2419 the implementation of the sub-command that is created when
2420 registering an internal function. */
2421 static void
2422 function_command (const char *command, int from_tty)
2423 {
2424 /* Do nothing. */
2425 }
2426
2427 /* Clean up if an internal function's command is destroyed. */
2428 static void
2429 function_destroyer (struct cmd_list_element *self, void *ignore)
2430 {
2431 xfree ((char *) self->name);
2432 xfree ((char *) self->doc);
2433 }
2434
2435 /* Add a new internal function. NAME is the name of the function; DOC
2436 is a documentation string describing the function. HANDLER is
2437 called when the function is invoked. COOKIE is an arbitrary
2438 pointer which is passed to HANDLER and is intended for "user
2439 data". */
2440 void
2441 add_internal_function (const char *name, const char *doc,
2442 internal_function_fn handler, void *cookie)
2443 {
2444 struct cmd_list_element *cmd;
2445 struct internal_function *ifn;
2446 struct internalvar *var = lookup_internalvar (name);
2447
2448 ifn = create_internal_function (name, handler, cookie);
2449 set_internalvar_function (var, ifn);
2450
2451 cmd = add_cmd (xstrdup (name), no_class, function_command, (char *) doc,
2452 &functionlist);
2453 cmd->destroyer = function_destroyer;
2454 }
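
/* A hypothetical registration sketch ("$_argcount" and its handler are
   made up for illustration; the handler signature mirrors the call in
   call_internal_function above):

     static struct value *
     argcount_handler (struct gdbarch *gdbarch,
                       const struct language_defn *language,
                       void *cookie, int argc, struct value **argv)
     {
       return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
     }

     ...
     add_internal_function ("_argcount", _("Return the argument count."),
                            argcount_handler, NULL);

   After registration, evaluating "$_argcount (1, 2, 3)" yields 3, and
   "help function _argcount" prints the documentation string.  */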
2455
2456 /* Update VALUE before discarding OBJFILE. COPIED_TYPES is used to
2457 prevent cycles / duplicates. */
2458
2459 void
2460 preserve_one_value (struct value *value, struct objfile *objfile,
2461 htab_t copied_types)
2462 {
2463 if (TYPE_OBJFILE (value->type) == objfile)
2464 value->type = copy_type_recursive (objfile, value->type, copied_types);
2465
2466 if (TYPE_OBJFILE (value->enclosing_type) == objfile)
2467 value->enclosing_type = copy_type_recursive (objfile,
2468 value->enclosing_type,
2469 copied_types);
2470 }
2471
2472 /* Likewise for internal variable VAR. */
2473
2474 static void
2475 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2476 htab_t copied_types)
2477 {
2478 switch (var->kind)
2479 {
2480 case INTERNALVAR_INTEGER:
2481 if (var->u.integer.type && TYPE_OBJFILE (var->u.integer.type) == objfile)
2482 var->u.integer.type
2483 = copy_type_recursive (objfile, var->u.integer.type, copied_types);
2484 break;
2485
2486 case INTERNALVAR_VALUE:
2487 preserve_one_value (var->u.value, objfile, copied_types);
2488 break;
2489 }
2490 }
2491
2492 /* Update the internal variables and value history when OBJFILE is
2493 discarded; we must copy the types out of the objfile. New global types
2494 will be created for every convenience variable which currently points to
2495 this objfile's types, and the convenience variables will be adjusted to
2496 use the new global types. */
2497
2498 void
2499 preserve_values (struct objfile *objfile)
2500 {
2501 htab_t copied_types;
2502 struct internalvar *var;
2503
2504 /* Create the hash table. We allocate on the objfile's obstack, since
2505 it is soon to be deleted. */
2506 copied_types = create_copied_types_hash (objfile);
2507
2508 for (const value_ref_ptr &item : value_history)
2509 preserve_one_value (item.get (), objfile, copied_types);
2510
2511 for (var = internalvars; var; var = var->next)
2512 preserve_one_internalvar (var, objfile, copied_types);
2513
2514 preserve_ext_lang_values (objfile, copied_types);
2515
2516 htab_delete (copied_types);
2517 }
2518
2519 static void
2520 show_convenience (const char *ignore, int from_tty)
2521 {
2522 struct gdbarch *gdbarch = get_current_arch ();
2523 struct internalvar *var;
2524 int varseen = 0;
2525 struct value_print_options opts;
2526
2527 get_user_print_options (&opts);
2528 for (var = internalvars; var; var = var->next)
2529 {
2530
2531 if (!varseen)
2532 {
2533 varseen = 1;
2534 }
2535 printf_filtered (("$%s = "), var->name);
2536
2537 try
2538 {
2539 struct value *val;
2540
2541 val = value_of_internalvar (gdbarch, var);
2542 value_print (val, gdb_stdout, &opts);
2543 }
2544 catch (const gdb_exception_error &ex)
2545 {
2546 fprintf_filtered (gdb_stdout, _("<error: %s>"), ex.what ());
2547 }
2548
2549 printf_filtered (("\n"));
2550 }
2551 if (!varseen)
2552 {
2553 /* This text does not mention convenience functions on purpose.
2554 The user can't create them except via Python, and if Python support
2555 is installed this message will never be printed ($_streq will
2556 exist). */
2557 printf_unfiltered (_("No debugger convenience variables now defined.\n"
2558 "Convenience variables have "
2559 "names starting with \"$\";\n"
2560 "use \"set\" as in \"set "
2561 "$foo = 5\" to define them.\n"));
2562 }
2563 }
2564 \f
2565
2566 /* See value.h. */
2567
2568 struct value *
2569 value_from_xmethod (xmethod_worker_up &&worker)
2570 {
2571 struct value *v;
2572
2573 v = allocate_value (builtin_type (target_gdbarch ())->xmethod);
2574 v->lval = lval_xcallable;
2575 v->location.xm_worker = worker.release ();
2576 v->modifiable = 0;
2577
2578 return v;
2579 }
2580
2581 /* Return the type of the result of TYPE_CODE_XMETHOD value METHOD. */
2582
2583 struct type *
2584 result_type_of_xmethod (struct value *method, gdb::array_view<value *> argv)
2585 {
2586 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2587 && method->lval == lval_xcallable && !argv.empty ());
2588
2589 return method->location.xm_worker->get_result_type (argv[0], argv.slice (1));
2590 }
2591
2592 /* Call the xmethod corresponding to the TYPE_CODE_XMETHOD value METHOD. */
2593
2594 struct value *
2595 call_xmethod (struct value *method, gdb::array_view<value *> argv)
2596 {
2597 gdb_assert (TYPE_CODE (value_type (method)) == TYPE_CODE_XMETHOD
2598 && method->lval == lval_xcallable && !argv.empty ());
2599
2600 return method->location.xm_worker->invoke (argv[0], argv.slice (1));
2601 }
2602 \f
2603 /* Extract a value as a C number (either long or double).
2604 Knows how to convert fixed values to double, or
2605 floating values to long.
2606 Does not deallocate the value. */
2607
2608 LONGEST
2609 value_as_long (struct value *val)
2610 {
2611 /* This coerces arrays and functions, which is necessary (e.g.
2612 in disassemble_command). It also dereferences references, which
2613 I suspect is the most logical thing to do. */
2614 val = coerce_array (val);
2615 return unpack_long (value_type (val), value_contents (val));
2616 }
2617
2618 /* Extract a value as a C pointer. Does not deallocate the value.
2619 Note that val's type may not actually be a pointer; value_as_long
2620 handles all the cases. */
2621 CORE_ADDR
2622 value_as_address (struct value *val)
2623 {
2624 struct gdbarch *gdbarch = get_type_arch (value_type (val));
2625
2626 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2627 whether we want this to be true eventually. */
2628 #if 0
2629 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2630 non-address (e.g. argument to "signal", "info break", etc.), or
2631 for pointers to char, in which the low bits *are* significant. */
2632 return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
2633 #else
2634
2635 /* There are several targets (IA-64, PowerPC, and others) which
2636 don't represent pointers to functions as simply the address of
2637 the function's entry point. For example, on the IA-64, a
2638 function pointer points to a two-word descriptor, generated by
2639 the linker, which contains the function's entry point, and the
2640 value the IA-64 "global pointer" register should have --- to
2641 support position-independent code. The linker generates
2642 descriptors only for those functions whose addresses are taken.
2643
2644 On such targets, it's difficult for GDB to convert an arbitrary
2645 function address into a function pointer; it has to either find
2646 an existing descriptor for that function, or call malloc and
2647 build its own. On some targets, it is impossible for GDB to
2648 build a descriptor at all: the descriptor must contain a jump
2649 instruction; data memory cannot be executed; and code memory
2650 cannot be modified.
2651
2652 Upon entry to this function, if VAL is a value of type `function'
2653 (that is, TYPE_CODE (VALUE_TYPE (val)) == TYPE_CODE_FUNC), then
2654 value_address (val) is the address of the function. This is what
2655 you'll get if you evaluate an expression like `main'. The call
2656 to COERCE_ARRAY below actually does all the usual unary
2657 conversions, which includes converting values of type `function'
2658 to `pointer to function'. This is the challenging conversion
2659 discussed above. Then, `unpack_long' will convert that pointer
2660 back into an address.
2661
2662 So, suppose the user types `disassemble foo' on an architecture
2663 with a strange function pointer representation, on which GDB
2664 cannot build its own descriptors, and suppose further that `foo'
2665 has no linker-built descriptor. The address->pointer conversion
2666 will signal an error and prevent the command from running, even
2667 though the next step would have been to convert the pointer
2668 directly back into the same address.
2669
2670 The following shortcut avoids this whole mess. If VAL is a
2671 function, just return its address directly. */
2672 if (TYPE_CODE (value_type (val)) == TYPE_CODE_FUNC
2673 || TYPE_CODE (value_type (val)) == TYPE_CODE_METHOD)
2674 return value_address (val);
2675
2676 val = coerce_array (val);
2677
2678 /* Some architectures (e.g. Harvard) map instruction and data
2679 addresses onto a single large unified address space. For
2680 instance, an architecture may consider a large integer in the
2681 range 0x10000000 .. 0x1000ffff to already represent a data
2682 address (and hence need no pointer-to-address conversion) while
2683 a small integer would still need the full integer-to-pointer-
2684 to-address conversion. Just assume such architectures handle
2685 all integer conversions in a single function. */
2686
2687 /* JimB writes:
2688
2689 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2690 must admonish GDB hackers to make sure its behavior matches the
2691 compiler's, whenever possible.
2692
2693 In general, I think GDB should evaluate expressions the same way
2694 the compiler does. When the user copies an expression out of
2695 their source code and hands it to a `print' command, they should
2696 get the same value the compiler would have computed. Any
2697 deviation from this rule can cause major confusion and annoyance,
2698 and needs to be justified carefully. In other words, GDB doesn't
2699 really have the freedom to do these conversions in clever and
2700 useful ways.
2701
2702 AndrewC pointed out that users aren't complaining about how GDB
2703 casts integers to pointers; they are complaining that they can't
2704 take an address from a disassembly listing and give it to `x/i'.
2705 This is certainly important.
2706
2707 Adding an architecture method like integer_to_address() certainly
2708 makes it possible for GDB to "get it right" in all circumstances
2709 --- the target has complete control over how things get done, so
2710 people can Do The Right Thing for their target without breaking
2711 anyone else. The standard doesn't specify how integers get
2712 converted to pointers; usually, the ABI doesn't either, but
2713 ABI-specific code is a more reasonable place to handle it. */
2714
2715 if (TYPE_CODE (value_type (val)) != TYPE_CODE_PTR
2716 && !TYPE_IS_REFERENCE (value_type (val))
2717 && gdbarch_integer_to_address_p (gdbarch))
2718 return gdbarch_integer_to_address (gdbarch, value_type (val),
2719 value_contents (val));
2720
2721 return unpack_long (value_type (val), value_contents (val));
2722 #endif
2723 }
2724 \f
2725 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2726 as a long, or as a double, assuming the raw data is described
2727 by type TYPE. Knows how to convert different sizes of values
2728 and can convert between fixed and floating point. We don't assume
2729 any alignment for the raw data. Return value is in host byte order.
2730
2731 If you want functions and arrays to be coerced to pointers, and
2732 references to be dereferenced, call value_as_long() instead.
2733
2734 C++: It is assumed that the front-end has taken care of
2735 all matters concerning pointers to members. A pointer
2736 to member which reaches here is considered to be equivalent
2737 to an INT (of some size). After all, it is only an offset. */
2738
2739 LONGEST
2740 unpack_long (struct type *type, const gdb_byte *valaddr)
2741 {
2742 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
2743 enum type_code code = TYPE_CODE (type);
2744 int len = TYPE_LENGTH (type);
2745 int nosign = TYPE_UNSIGNED (type);
2746
2747 switch (code)
2748 {
2749 case TYPE_CODE_TYPEDEF:
2750 return unpack_long (check_typedef (type), valaddr);
2751 case TYPE_CODE_ENUM:
2752 case TYPE_CODE_FLAGS:
2753 case TYPE_CODE_BOOL:
2754 case TYPE_CODE_INT:
2755 case TYPE_CODE_CHAR:
2756 case TYPE_CODE_RANGE:
2757 case TYPE_CODE_MEMBERPTR:
2758 if (nosign)
2759 return extract_unsigned_integer (valaddr, len, byte_order);
2760 else
2761 return extract_signed_integer (valaddr, len, byte_order);
2762
2763 case TYPE_CODE_FLT:
2764 case TYPE_CODE_DECFLOAT:
2765 return target_float_to_longest (valaddr, type);
2766
2767 case TYPE_CODE_PTR:
2768 case TYPE_CODE_REF:
2769 case TYPE_CODE_RVALUE_REF:
2770 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2771 whether we want this to be true eventually. */
2772 return extract_typed_address (valaddr, type);
2773
2774 default:
2775 error (_("Value can't be converted to integer."));
2776 }
2777 }
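
/* A worked example of the unpacking above, using the 64-bit integer
   type that is also used for trace state variables earlier in this
   file (a sketch; GDBARCH stands for whatever architecture is in
   scope):

     gdb_byte buf[8] = { 0x2a, 0, 0, 0, 0, 0, 0, 0 };
     LONGEST v = unpack_long (builtin_type (gdbarch)->builtin_int64, buf);

   On a little-endian target v is 42; on a big-endian target the same
   bytes unpack to 0x2a00000000000000.  A TYPE_CODE_FLT type would
   instead go through target_float_to_longest.  */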
2778
2779 /* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
2780 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2781 We don't assume any alignment for the raw data. Return value is in
2782 host byte order.
2783
2784 If you want functions and arrays to be coerced to pointers, and
2785 references to be dereferenced, call value_as_address() instead.
2786
2787 C++: It is assumed that the front-end has taken care of
2788 all matters concerning pointers to members. A pointer
2789 to member which reaches here is considered to be equivalent
2790 to an INT (of some size). After all, it is only an offset. */
2791
2792 CORE_ADDR
2793 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2794 {
2795 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2796 whether we want this to be true eventually. */
2797 return unpack_long (type, valaddr);
2798 }
2799
2800 bool
2801 is_floating_value (struct value *val)
2802 {
2803 struct type *type = check_typedef (value_type (val));
2804
2805 if (is_floating_type (type))
2806 {
2807 if (!target_float_is_valid (value_contents (val), type))
2808 error (_("Invalid floating value found in program."));
2809 return true;
2810 }
2811
2812 return false;
2813 }
2814
2815 \f
2816 /* Get the value of the FIELDNO'th field (which must be static) of
2817 TYPE. */
2818
2819 struct value *
2820 value_static_field (struct type *type, int fieldno)
2821 {
2822 struct value *retval;
2823
2824 switch (TYPE_FIELD_LOC_KIND (type, fieldno))
2825 {
2826 case FIELD_LOC_KIND_PHYSADDR:
2827 retval = value_at_lazy (TYPE_FIELD_TYPE (type, fieldno),
2828 TYPE_FIELD_STATIC_PHYSADDR (type, fieldno));
2829 break;
2830 case FIELD_LOC_KIND_PHYSNAME:
2831 {
2832 const char *phys_name = TYPE_FIELD_STATIC_PHYSNAME (type, fieldno);
2833 /* TYPE_FIELD_NAME (type, fieldno); */
2834 struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);
2835
2836 if (sym.symbol == NULL)
2837 {
2838 /* With some compilers, e.g. HP aCC, static data members are
2839 reported as non-debuggable symbols. */
2840 struct bound_minimal_symbol msym
2841 = lookup_minimal_symbol (phys_name, NULL, NULL);
2842 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
2843
2844 if (!msym.minsym)
2845 retval = allocate_optimized_out_value (field_type);
2846 else
2847 retval = value_at_lazy (field_type, BMSYMBOL_VALUE_ADDRESS (msym));
2848 }
2849 else
2850 retval = value_of_variable (sym.symbol, sym.block);
2851 break;
2852 }
2853 default:
2854 gdb_assert_not_reached ("unexpected field location kind");
2855 }
2856
2857 return retval;
2858 }
2859
2860 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2861 You have to be careful here, since the size of the data area for the value
2862 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2863 than the old enclosing type, you have to allocate more space for the
2864 data. */
2865
2866 void
2867 set_value_enclosing_type (struct value *val, struct type *new_encl_type)
2868 {
2869 if (TYPE_LENGTH (new_encl_type) > TYPE_LENGTH (value_enclosing_type (val)))
2870 {
2871 check_type_length_before_alloc (new_encl_type);
2872 val->contents
2873 .reset ((gdb_byte *) xrealloc (val->contents.release (),
2874 TYPE_LENGTH (new_encl_type)));
2875 }
2876
2877 val->enclosing_type = new_encl_type;
2878 }
2879
2880 /* Given a value ARG1 (offset by OFFSET bytes)
2881 of a struct or union type ARG_TYPE,
2882 extract and return the value of one of its (non-static) fields.
2883 FIELDNO says which field. */
2884
2885 struct value *
2886 value_primitive_field (struct value *arg1, LONGEST offset,
2887 int fieldno, struct type *arg_type)
2888 {
2889 struct value *v;
2890 struct type *type;
2891 struct gdbarch *arch = get_value_arch (arg1);
2892 int unit_size = gdbarch_addressable_memory_unit_size (arch);
2893
2894 arg_type = check_typedef (arg_type);
2895 type = TYPE_FIELD_TYPE (arg_type, fieldno);
2896
2897 /* Call check_typedef on our type to make sure that, if TYPE
2898 is a TYPE_CODE_TYPEDEF, its length is set to the length
2899 of the target type instead of zero. However, we do not
2900 replace the typedef type by the target type, because we want
2901 to keep the typedef in order to be able to print the type
2902 description correctly. */
2903 check_typedef (type);
2904
2905 if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
2906 {
2907 /* Handle packed fields.
2908
2909 Create a new value for the bitfield, with bitpos and bitsize
2910 set. If possible, arrange offset and bitpos so that we can
2911 do a single aligned read of the size of the containing type.
2912 Otherwise, adjust offset to the byte containing the first
2913 bit. Assume that the address, offset, and embedded offset
2914 are sufficiently aligned. */
2915
2916 LONGEST bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
2917 LONGEST container_bitsize = TYPE_LENGTH (type) * 8;
2918
2919 v = allocate_value_lazy (type);
2920 v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
2921 if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
2922 && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
2923 v->bitpos = bitpos % container_bitsize;
2924 else
2925 v->bitpos = bitpos % 8;
2926 v->offset = (value_embedded_offset (arg1)
2927 + offset
2928 + (bitpos - v->bitpos) / 8);
2929 set_value_parent (v, arg1);
2930 if (!value_lazy (arg1))
2931 value_fetch_lazy (v);
2932 }
2933 else if (fieldno < TYPE_N_BASECLASSES (arg_type))
2934 {
2935 /* This field is actually a base subobject, so preserve the
2936 entire object's contents for later references to virtual
2937 bases, etc. */
2938 LONGEST boffset;
2939
2940 /* Lazy register values with offsets are not supported. */
2941 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2942 value_fetch_lazy (arg1);
2943
2944 /* We special case virtual inheritance here because this
2945 requires access to the contents, which we would rather avoid
2946 for references to ordinary fields of unavailable values. */
2947 if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
2948 boffset = baseclass_offset (arg_type, fieldno,
2949 value_contents (arg1),
2950 value_embedded_offset (arg1),
2951 value_address (arg1),
2952 arg1);
2953 else
2954 boffset = TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
2955
2956 if (value_lazy (arg1))
2957 v = allocate_value_lazy (value_enclosing_type (arg1));
2958 else
2959 {
2960 v = allocate_value (value_enclosing_type (arg1));
2961 value_contents_copy_raw (v, 0, arg1, 0,
2962 TYPE_LENGTH (value_enclosing_type (arg1)));
2963 }
2964 v->type = type;
2965 v->offset = value_offset (arg1);
2966 v->embedded_offset = offset + value_embedded_offset (arg1) + boffset;
2967 }
2968 else if (NULL != TYPE_DATA_LOCATION (type))
2969 {
2970 /* Field is a dynamic data member. */
2971
2972 gdb_assert (0 == offset);
2973 /* We expect an already resolved data location. */
2974 gdb_assert (PROP_CONST == TYPE_DATA_LOCATION_KIND (type));
2975 /* For dynamic data types, defer memory allocation
2976 until we actually access the value. */
2977 v = allocate_value_lazy (type);
2978 }
2979 else
2980 {
2981 /* Plain old data member */
2982 offset += (TYPE_FIELD_BITPOS (arg_type, fieldno)
2983 / (HOST_CHAR_BIT * unit_size));
2984
2985 /* Lazy register values with offsets are not supported. */
2986 if (VALUE_LVAL (arg1) == lval_register && value_lazy (arg1))
2987 value_fetch_lazy (arg1);
2988
2989 if (value_lazy (arg1))
2990 v = allocate_value_lazy (type);
2991 else
2992 {
2993 v = allocate_value (type);
2994 value_contents_copy_raw (v, value_embedded_offset (v),
2995 arg1, value_embedded_offset (arg1) + offset,
2996 type_length_units (type));
2997 }
2998 v->offset = (value_offset (arg1) + offset
2999 + value_embedded_offset (arg1));
3000 }
3001 set_value_component_location (v, arg1);
3002 return v;
3003 }
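
/* A worked example of the packed-field arithmetic above: for a 3-bit
   field whose TYPE_FIELD_BITPOS is 37 and whose (container) type is 4
   bytes wide, container_bitsize is 32, the test (37 % 32) + 3 <= 32
   succeeds, and the new value gets bitpos = 5 with its byte offset
   advanced by (37 - 5) / 8 = 4.  The fetch therefore reads the aligned
   4-byte container at that offset and extracts bits 5..7 from it.  */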
3004
3005 /* Given a value ARG1 of a struct or union type,
3006 extract and return the value of one of its (non-static) fields.
3007 FIELDNO says which field. */
3008
3009 struct value *
3010 value_field (struct value *arg1, int fieldno)
3011 {
3012 return value_primitive_field (arg1, 0, fieldno, value_type (arg1));
3013 }
3014
3015 /* Return a non-virtual function as a value.
3016 F is the list of member functions which contains the desired method.
3017 J is an index into F which provides the desired method.
3018
3019 We only use the symbol for its address, so be happy with either a
3020 full symbol or a minimal symbol. */
3021
3022 struct value *
3023 value_fn_field (struct value **arg1p, struct fn_field *f,
3024 int j, struct type *type,
3025 LONGEST offset)
3026 {
3027 struct value *v;
3028 struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
3029 const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
3030 struct symbol *sym;
3031 struct bound_minimal_symbol msym;
3032
3033 sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
3034 if (sym != NULL)
3035 {
3036 memset (&msym, 0, sizeof (msym));
3037 }
3038 else
3039 {
3040 gdb_assert (sym == NULL);
3041 msym = lookup_bound_minimal_symbol (physname);
3042 if (msym.minsym == NULL)
3043 return NULL;
3044 }
3045
3046 v = allocate_value (ftype);
3047 VALUE_LVAL (v) = lval_memory;
3048 if (sym)
3049 {
3050 set_value_address (v, BLOCK_ENTRY_PC (SYMBOL_BLOCK_VALUE (sym)));
3051 }
3052 else
3053 {
3054 /* The minimal symbol might point to a function descriptor;
3055 resolve it to the actual code address instead. */
3056 struct objfile *objfile = msym.objfile;
3057 struct gdbarch *gdbarch = get_objfile_arch (objfile);
3058
3059 set_value_address (v,
3060 gdbarch_convert_from_func_ptr_addr
3061 (gdbarch, BMSYMBOL_VALUE_ADDRESS (msym), current_top_target ()));
3062 }
3063
3064 if (arg1p)
3065 {
3066 if (type != value_type (*arg1p))
3067 *arg1p = value_ind (value_cast (lookup_pointer_type (type),
3068 value_addr (*arg1p)));
3069
3070 /* Move the `this' pointer according to the offset.
3071 VALUE_OFFSET (*arg1p) += offset; */
3072 }
3073
3074 return v;
3075 }
3076
3077 \f
3078
3079 /* Unpack a bitfield of the specified FIELD_TYPE, from the object at
3080 VALADDR, and return the result as a LONGEST.
3081 The bitfield starts at BITPOS bits and contains BITSIZE bits; if
3082 BITSIZE is zero, then the length is taken from FIELD_TYPE.
3083
3084 Extracting bits depends on endianness of the machine. Compute the
3085 number of least significant bits to discard. For big endian machines,
3086 we compute the total number of bits in the anonymous object, subtract
3087 off the bit count from the MSB of the object to the MSB of the
3088 bitfield, then the size of the bitfield, which leaves the LSB discard
3089 count. For little endian machines, the discard count is simply the
3090 number of bits from the LSB of the anonymous object to the LSB of the
3091 bitfield.
3092
3093 If the field is signed, we also do sign extension. */
3094
3095 static LONGEST
3096 unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
3097 LONGEST bitpos, LONGEST bitsize)
3098 {
3099 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
3100 ULONGEST val;
3101 ULONGEST valmask;
3102 int lsbcount;
3103 LONGEST bytes_read;
3104 LONGEST read_offset;
3105
3106 /* Read the minimum number of bytes required; there may not be
3107 enough bytes to read an entire ULONGEST. */
3108 field_type = check_typedef (field_type);
3109 if (bitsize)
3110 bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
3111 else
3112 {
3113 bytes_read = TYPE_LENGTH (field_type);
3114 bitsize = 8 * bytes_read;
3115 }
3116
3117 read_offset = bitpos / 8;
3118
3119 val = extract_unsigned_integer (valaddr + read_offset,
3120 bytes_read, byte_order);
3121
3122 /* Extract bits. See comment above. */
3123
3124 if (gdbarch_bits_big_endian (get_type_arch (field_type)))
3125 lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
3126 else
3127 lsbcount = (bitpos % 8);
3128 val >>= lsbcount;
3129
3130 /* If the field does not entirely fill a LONGEST, then zero the sign bits.
3131 If the field is signed, and is negative, then sign extend. */
3132
3133 if (bitsize < 8 * (int) sizeof (val))
3134 {
3135 valmask = (((ULONGEST) 1) << bitsize) - 1;
3136 val &= valmask;
3137 if (!TYPE_UNSIGNED (field_type))
3138 {
3139 if (val & (valmask ^ (valmask >> 1)))
3140 {
3141 val |= ~valmask;
3142 }
3143 }
3144 }
3145
3146 return val;
3147 }
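
/* A worked example of the extraction above, assuming little-endian bit
   numbering: unpacking a 3-bit field at BITPOS 5 from the single byte
   0xe0 reads bytes_read = 1 byte, shifts right by lsbcount = 5 leaving
   0x7, and masks with valmask = 0x7.  For an unsigned FIELD_TYPE the
   result is 7; for a signed one the top-bit test
   (val & (valmask ^ (valmask >> 1))) fires and the value is
   sign-extended to -1.  */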
3148
3149 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3150 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3151 VAL, which must not be NULL. See
3152 unpack_bits_as_long for more details. */
3153
3154 int
3155 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3156 LONGEST embedded_offset, int fieldno,
3157 const struct value *val, LONGEST *result)
3158 {
3159 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3160 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3161 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3162 int bit_offset;
3163
3164 gdb_assert (val != NULL);
3165
3166 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3167 if (value_bits_any_optimized_out (val, bit_offset, bitsize)
3168 || !value_bits_available (val, bit_offset, bitsize))
3169 return 0;
3170
3171 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3172 bitpos, bitsize);
3173 return 1;
3174 }
3175
3176 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3177 object at VALADDR. See unpack_bits_as_long for more details. */
3178
3179 LONGEST
3180 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3181 {
3182 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3183 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3184 struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
3185
3186 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3187 }
3188
3189 /* Unpack a bitfield of BITSIZE bits found at BITPOS in the object at
3190 VALADDR + EMBEDDEDOFFSET that has the type of DEST_VAL and store
3191 the contents in DEST_VAL, zero or sign extending if the type of
3192 DEST_VAL is wider than BITSIZE. VALADDR points to the contents of
3193 VAL. If the contents of VAL that are required to extract the
3194 bitfield are unavailable or optimized out, DEST_VAL is
3195 correspondingly marked unavailable/optimized out. */
3196
3197 void
3198 unpack_value_bitfield (struct value *dest_val,
3199 LONGEST bitpos, LONGEST bitsize,
3200 const gdb_byte *valaddr, LONGEST embedded_offset,
3201 const struct value *val)
3202 {
3203 enum bfd_endian byte_order;
3204 int src_bit_offset;
3205 int dst_bit_offset;
3206 struct type *field_type = value_type (dest_val);
3207
3208 byte_order = gdbarch_byte_order (get_type_arch (field_type));
3209
3210 /* First, unpack and sign extend the bitfield as if it was wholly
3211 valid. Optimized out/unavailable bits are read as zero, but
3212 that's OK, as they'll end up marked below. If the VAL is
3213 wholly-invalid we may have skipped allocating its contents,
3214 though. See allocate_optimized_out_value. */
3215 if (valaddr != NULL)
3216 {
3217 LONGEST num;
3218
3219 num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3220 bitpos, bitsize);
3221 store_signed_integer (value_contents_raw (dest_val),
3222 TYPE_LENGTH (field_type), byte_order, num);
3223 }
3224
3225 /* Now copy the optimized out / unavailability ranges to the right
3226 bits. */
3227 src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3228 if (byte_order == BFD_ENDIAN_BIG)
3229 dst_bit_offset = TYPE_LENGTH (field_type) * TARGET_CHAR_BIT - bitsize;
3230 else
3231 dst_bit_offset = 0;
3232 value_ranges_copy_adjusted (dest_val, dst_bit_offset,
3233 val, src_bit_offset, bitsize);
3234 }
3235
3236 /* Return a new value whose type is that of field FIELDNO of TYPE,
3237 unpacked from the object at VALADDR + EMBEDDED_OFFSET. VALADDR points
3238 to the contents of VAL. If the contents of VAL that are required to
3239 extract the bitfield are unavailable or optimized out, the new value
3240 is correspondingly marked unavailable/optimized out. */
3241
3242 struct value *
3243 value_field_bitfield (struct type *type, int fieldno,
3244 const gdb_byte *valaddr,
3245 LONGEST embedded_offset, const struct value *val)
3246 {
3247 int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
3248 int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
3249 struct value *res_val = allocate_value (TYPE_FIELD_TYPE (type, fieldno));
3250
3251 unpack_value_bitfield (res_val, bitpos, bitsize,
3252 valaddr, embedded_offset, val);
3253
3254 return res_val;
3255 }
3256
3257 /* Modify the value of a bitfield. ADDR points to a block of memory in
3258 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3259 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3260 indicate which bits (in target bit order) comprise the bitfield.
3261 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3262 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3263
3264 void
3265 modify_field (struct type *type, gdb_byte *addr,
3266 LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
3267 {
3268 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3269 ULONGEST oword;
3270 ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
3271 LONGEST bytesize;
3272
3273 /* Normalize BITPOS. */
3274 addr += bitpos / 8;
3275 bitpos %= 8;
3276
3277 /* If a negative fieldval fits in the field in question, chop
3278 off the sign extension bits. */
3279 if ((~fieldval & ~(mask >> 1)) == 0)
3280 fieldval &= mask;
3281
3282 /* Warn if value is too big to fit in the field in question. */
3283 if (0 != (fieldval & ~mask))
3284 {
3285 /* FIXME: would like to include fieldval in the message, but
3286 we don't have a sprintf_longest. */
3287 warning (_("Value does not fit in %s bits."), plongest (bitsize));
3288
3289 /* Truncate it, otherwise adjoining fields may be corrupted. */
3290 fieldval &= mask;
3291 }
3292
3293 /* Ensure no bytes outside of the modified ones get accessed, as that
3294 may cause false valgrind reports. */
3295
3296 bytesize = (bitpos + bitsize + 7) / 8;
3297 oword = extract_unsigned_integer (addr, bytesize, byte_order);
3298
3299 /* Shifting for bit field depends on endianness of the target machine. */
3300 if (gdbarch_bits_big_endian (get_type_arch (type)))
3301 bitpos = bytesize * 8 - bitpos - bitsize;
3302
3303 oword &= ~(mask << bitpos);
3304 oword |= fieldval << bitpos;
3305
3306 store_unsigned_integer (addr, bytesize, byte_order, oword);
3307 }
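
/* A worked example of the read-modify-write above, assuming
   little-endian bit numbering: writing FIELDVAL 5 into a 3-bit field
   at BITPOS 2 of a byte that currently holds 0xff computes mask = 0x7
   and bytesize = 1, clears bits 2..4 (0xff & ~(0x7 << 2) == 0xe3), ORs
   in 5 << 2, and stores 0xf7.  The adjoining bits 0..1 and 5..7 are
   left untouched, and only that single byte is read or written.  */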
3308 \f
3309 /* Pack NUM into BUF using a target format of TYPE. */
3310
3311 void
3312 pack_long (gdb_byte *buf, struct type *type, LONGEST num)
3313 {
3314 enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
3315 LONGEST len;
3316
3317 type = check_typedef (type);
3318 len = TYPE_LENGTH (type);
3319
3320 switch (TYPE_CODE (type))
3321 {
3322 case TYPE_CODE_INT:
3323 case TYPE_CODE_CHAR:
3324 case TYPE_CODE_ENUM:
3325 case TYPE_CODE_FLAGS:
3326 case TYPE_CODE_BOOL:
3327 case TYPE_CODE_RANGE:
3328 case TYPE_CODE_MEMBERPTR:
3329 store_signed_integer (buf, len, byte_order, num);
3330 break;
3331
3332 case TYPE_CODE_REF:
3333 case TYPE_CODE_RVALUE_REF:
3334 case TYPE_CODE_PTR:
3335 store_typed_address (buf, type, (CORE_ADDR) num);
3336 break;
3337
3338 case TYPE_CODE_FLT:
3339 case TYPE_CODE_DECFLOAT:
3340 target_float_from_longest (buf, type, num);
3341 break;
3342
3343 default:
3344 error (_("Unexpected type (%d) encountered for integer constant."),
3345 TYPE_CODE (type));
3346 }
3347 }
3348
3349
3350 /* Pack unsigned NUM into BUF using a target format of TYPE. */
3351
3352 static void
3353 pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
3354 {
3355 LONGEST len;
3356 enum bfd_endian byte_order;
3357
3358 type = check_typedef (type);
3359 len = TYPE_LENGTH (type);
3360 byte_order = gdbarch_byte_order (get_type_arch (type));
3361
3362 switch (TYPE_CODE (type))
3363 {
3364 case TYPE_CODE_INT:
3365 case TYPE_CODE_CHAR:
3366 case TYPE_CODE_ENUM:
3367 case TYPE_CODE_FLAGS:
3368 case TYPE_CODE_BOOL:
3369 case TYPE_CODE_RANGE:
3370 case TYPE_CODE_MEMBERPTR:
3371 store_unsigned_integer (buf, len, byte_order, num);
3372 break;
3373
3374 case TYPE_CODE_REF:
3375 case TYPE_CODE_RVALUE_REF:
3376 case TYPE_CODE_PTR:
3377 store_typed_address (buf, type, (CORE_ADDR) num);
3378 break;
3379
3380 case TYPE_CODE_FLT:
3381 case TYPE_CODE_DECFLOAT:
3382 target_float_from_ulongest (buf, type, num);
3383 break;
3384
3385 default:
3386 error (_("Unexpected type (%d) encountered "
3387 "for unsigned integer constant."),
3388 TYPE_CODE (type));
3389 }
3390 }
3391
3392
3393 /* Convert C numbers into newly allocated values. */
3394
3395 struct value *
3396 value_from_longest (struct type *type, LONGEST num)
3397 {
3398 struct value *val = allocate_value (type);
3399
3400 pack_long (value_contents_raw (val), type, num);
3401 return val;
3402 }
3403
3404
3405 /* Convert C unsigned numbers into newly allocated values. */
3406
3407 struct value *
3408 value_from_ulongest (struct type *type, ULONGEST num)
3409 {
3410 struct value *val = allocate_value (type);
3411
3412 pack_unsigned_long (value_contents_raw (val), type, num);
3413
3414 return val;
3415 }
3416
3417
3418 /* Create a value representing a pointer of type TYPE to the address
3419 ADDR. */
3420
3421 struct value *
3422 value_from_pointer (struct type *type, CORE_ADDR addr)
3423 {
3424 struct value *val = allocate_value (type);
3425
3426 store_typed_address (value_contents_raw (val),
3427 check_typedef (type), addr);
3428 return val;
3429 }
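
/* Typical uses of the constructors above (a sketch; GDBARCH stands for
   whatever architecture is in scope, and builtin_data_ptr is assumed
   to be the usual built-in data-pointer type):

     struct value *answer
       = value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
     struct value *ptr
       = value_from_pointer (builtin_type (gdbarch)->builtin_data_ptr,
                             0x1000);

   Both allocate a not_lval value and pack the number into the value's
   contents in target format via pack_long / store_typed_address.  */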
3430
3431 /* Create and return a value object of TYPE containing the value D. The
3432 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3433 it is converted to target format. */
3434
3435 struct value *
3436 value_from_host_double (struct type *type, double d)
3437 {
3438 struct value *value = allocate_value (type);
3439 gdb_assert (TYPE_CODE (type) == TYPE_CODE_FLT);
3440 target_float_from_host_double (value_contents_raw (value),
3441 value_type (value), d);
3442 return value;
3443 }
3444
3445 /* Create a value of type TYPE whose contents come from VALADDR, if it
3446 is non-null, and whose memory address (in the inferior) is
3447 ADDRESS. The type of the created value may differ from the passed
3448 type TYPE. Make sure to retrieve the value's new type after this call.
3449 Note that TYPE is not passed through resolve_dynamic_type; this is
3450 a special API intended for use only by Ada. */
3451
3452 struct value *
3453 value_from_contents_and_address_unresolved (struct type *type,
3454 const gdb_byte *valaddr,
3455 CORE_ADDR address)
3456 {
3457 struct value *v;
3458
3459 if (valaddr == NULL)
3460 v = allocate_value_lazy (type);
3461 else
3462 v = value_from_contents (type, valaddr);
3463 VALUE_LVAL (v) = lval_memory;
3464 set_value_address (v, address);
3465 return v;
3466 }
3467
3468 /* Create a value of type TYPE whose contents come from VALADDR, if it
3469 is non-null, and whose memory address (in the inferior) is
3470 ADDRESS. The type of the created value may differ from the passed
3471 type TYPE. Make sure to retrieve the value's new type after this call. */
3472
3473 struct value *
3474 value_from_contents_and_address (struct type *type,
3475 const gdb_byte *valaddr,
3476 CORE_ADDR address)
3477 {
3478 struct type *resolved_type = resolve_dynamic_type (type, valaddr, address);
3479 struct type *resolved_type_no_typedef = check_typedef (resolved_type);
3480 struct value *v;
3481
3482 if (valaddr == NULL)
3483 v = allocate_value_lazy (resolved_type);
3484 else
3485 v = value_from_contents (resolved_type, valaddr);
3486 if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
3487 && TYPE_DATA_LOCATION_KIND (resolved_type_no_typedef) == PROP_CONST)
3488 address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
3489 VALUE_LVAL (v) = lval_memory;
3490 set_value_address (v, address);
3491 return v;
3492 }
3493
3494 /* Create a value of type TYPE holding the contents CONTENTS.
3495 The new value is `not_lval'. */
3496
3497 struct value *
3498 value_from_contents (struct type *type, const gdb_byte *contents)
3499 {
3500 struct value *result;
3501
3502 result = allocate_value (type);
3503 memcpy (value_contents_raw (result), contents, TYPE_LENGTH (type));
3504 return result;
3505 }
3506
3507 /* Extract a value from the value history. Input will be of the form
3508 $digits or $$digits. See block comment above 'write_dollar_variable'
3509 for details. */
3510
3511 struct value *
3512 value_from_history_ref (const char *h, const char **endp)
3513 {
3514 int index, len;
3515
3516 if (h[0] == '$')
3517 len = 1;
3518 else
3519 return NULL;
3520
3521 if (h[1] == '$')
3522 len = 2;
3523
3524 /* Find length of numeral string. */
3525 for (; isdigit (h[len]); len++)
3526 ;
3527
3528 /* Make sure numeral string is not part of an identifier. */
3529 if (h[len] == '_' || isalpha (h[len]))
3530 return NULL;
3531
3532 /* Now collect the index value. */
3533 if (h[1] == '$')
3534 {
3535 if (len == 2)
3536 {
3537 /* For some bizarre reason, "$$" is equivalent to "$$1",
3538 rather than to "$$0" as it ought to be! */
3539 index = -1;
3540 *endp += len;
3541 }
3542 else
3543 {
3544 char *local_end;
3545
3546 index = -strtol (&h[2], &local_end, 10);
3547 *endp = local_end;
3548 }
3549 }
3550 else
3551 {
3552 if (len == 1)
3553 {
3554 /* "$" is equivalent to "$0". */
3555 index = 0;
3556 *endp += len;
3557 }
3558 else
3559 {
3560 char *local_end;
3561
3562 index = strtol (&h[1], &local_end, 10);
3563 *endp = local_end;
3564 }
3565 }
3566
3567 return access_value_history (index);
3568 }
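
/* Examples of the parsing above (illustrative): "$" yields index 0,
   "$7" yields index 7, "$$" yields index -1 (per the "$$1" equivalence
   noted above), and "$$2" yields index -2.  The index is then handed to
   access_value_history, where zero and negative numbers count back
   from the most recent history entry.  "$result" returns NULL, because
   the '$' is followed by an identifier character rather than digits,
   leaving the caller to treat it as a convenience variable instead.  */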
3569
3570 /* Get the component value (offset by OFFSET bytes) of a struct or
3571 union WHOLE. Component's type is TYPE. */
3572
3573 struct value *
3574 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3575 {
3576 struct value *v;
3577
3578 if (VALUE_LVAL (whole) == lval_memory && value_lazy (whole))
3579 v = allocate_value_lazy (type);
3580 else
3581 {
3582 v = allocate_value (type);
3583 value_contents_copy (v, value_embedded_offset (v),
3584 whole, value_embedded_offset (whole) + offset,
3585 type_length_units (type));
3586 }
3587 v->offset = value_offset (whole) + offset + value_embedded_offset (whole);
3588 set_value_component_location (v, whole);
3589
3590 return v;
3591 }
3592
3593 struct value *
3594 coerce_ref_if_computed (const struct value *arg)
3595 {
3596 const struct lval_funcs *funcs;
3597
3598 if (!TYPE_IS_REFERENCE (check_typedef (value_type (arg))))
3599 return NULL;
3600
3601 if (value_lval_const (arg) != lval_computed)
3602 return NULL;
3603
3604 funcs = value_computed_funcs (arg);
3605 if (funcs->coerce_ref == NULL)
3606 return NULL;
3607
3608 return funcs->coerce_ref (arg);
3609 }
3610
3611 /* Look at value.h for description. */
3612
3613 struct value *
3614 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3615 const struct type *original_type,
3616 const struct value *original_value)
3617 {
3618 /* Re-adjust type. */
3619 deprecated_set_value_type (value, TYPE_TARGET_TYPE (original_type));
3620
3621 /* Add embedding info. */
3622 set_value_enclosing_type (value, enc_type);
3623 set_value_embedded_offset (value, value_pointed_to_offset (original_value));
3624
3625 /* We may be pointing to an object of some derived type. */
3626 return value_full_object (value, NULL, 0, 0, 0);
3627 }
3628
3629 struct value *
3630 coerce_ref (struct value *arg)
3631 {
3632 struct type *value_type_arg_tmp = check_typedef (value_type (arg));
3633 struct value *retval;
3634 struct type *enc_type;
3635
3636 retval = coerce_ref_if_computed (arg);
3637 if (retval)
3638 return retval;
3639
3640 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3641 return arg;
3642
3643 enc_type = check_typedef (value_enclosing_type (arg));
3644 enc_type = TYPE_TARGET_TYPE (enc_type);
3645
3646 retval = value_at_lazy (enc_type,
3647 unpack_pointer (value_type (arg),
3648 value_contents (arg)));
3649 enc_type = value_type (retval);
3650 return readjust_indirect_value_type (retval, enc_type,
3651 value_type_arg_tmp, arg);
3652 }
3653
3654 struct value *
3655 coerce_array (struct value *arg)
3656 {
3657 struct type *type;
3658
3659 arg = coerce_ref (arg);
3660 type = check_typedef (value_type (arg));
3661
3662 switch (TYPE_CODE (type))
3663 {
3664 case TYPE_CODE_ARRAY:
3665 if (!TYPE_VECTOR (type) && current_language->c_style_arrays)
3666 arg = value_coerce_array (arg);
3667 break;
3668 case TYPE_CODE_FUNC:
3669 arg = value_coerce_function (arg);
3670 break;
3671 }
3672 return arg;
3673 }
3674 \f
3675
3676 /* Return the return value convention that will be used for the
3677 specified type. */
3678
3679 enum return_value_convention
3680 struct_return_convention (struct gdbarch *gdbarch,
3681 struct value *function, struct type *value_type)
3682 {
3683 enum type_code code = TYPE_CODE (value_type);
3684
3685 if (code == TYPE_CODE_ERROR)
3686 error (_("Function return type unknown."));
3687
3688 /* Probe the architecture for the return-value convention. */
3689 return gdbarch_return_value (gdbarch, function, value_type,
3690 NULL, NULL, NULL);
3691 }
3692
3693 /* Return true if the function returning the specified type is using
3694 the convention of returning structures in memory (passing in the
3695 address as a hidden first parameter). */
3696
3697 int
3698 using_struct_return (struct gdbarch *gdbarch,
3699 struct value *function, struct type *value_type)
3700 {
3701 if (TYPE_CODE (value_type) == TYPE_CODE_VOID)
3702 /* A void return value is never in memory. See also corresponding
3703 code in "print_return_value". */
3704 return 0;
3705
3706 return (struct_return_convention (gdbarch, function, value_type)
3707 != RETURN_VALUE_REGISTER_CONVENTION);
3708 }
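/* Editorial sketch, not part of value.c: call-setup code typically
   branches on this predicate, along the lines of (names hypothetical;
   RETTYPE derived from the function's type):

     struct type *rettype = TYPE_TARGET_TYPE (value_type (function));
     if (using_struct_return (gdbarch, function, rettype))
       {
         -- reserve space for the return value in the inferior and
         -- pass its address as a hidden first argument
       }
*/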
3709
3710 /* Set the initialized field in a value struct. */
3711
3712 void
3713 set_value_initialized (struct value *val, int status)
3714 {
3715 val->initialized = status;
3716 }
3717
3718 /* Return the initialized field in a value struct. */
3719
3720 int
3721 value_initialized (const struct value *val)
3722 {
3723 return val->initialized;
3724 }
3725
3726 /* Helper for value_fetch_lazy when the value is a bitfield. */
3727
3728 static void
3729 value_fetch_lazy_bitfield (struct value *val)
3730 {
3731 gdb_assert (value_bitsize (val) != 0);
3732
3733 /* To read a lazy bitfield, read the entire enclosing value. This
3734 prevents reading the same block of (possibly volatile) memory once
3735 per bitfield. It would be even better to read only the containing
3736 word, but we have no way to record that just specific bits of a
3737 value have been fetched. */
3738 struct value *parent = value_parent (val);
3739
3740 if (value_lazy (parent))
3741 value_fetch_lazy (parent);
3742
3743 unpack_value_bitfield (val, value_bitpos (val), value_bitsize (val),
3744 value_contents_for_printing (parent),
3745 value_offset (val), parent);
3746 }
3747
3748 /* Helper for value_fetch_lazy when the value is in memory. */
3749
3750 static void
3751 value_fetch_lazy_memory (struct value *val)
3752 {
3753 gdb_assert (VALUE_LVAL (val) == lval_memory);
3754
3755 CORE_ADDR addr = value_address (val);
3756 struct type *type = check_typedef (value_enclosing_type (val));
3757
3758 if (TYPE_LENGTH (type))
3759 read_value_memory (val, 0, value_stack (val),
3760 addr, value_contents_all_raw (val),
3761 type_length_units (type));
3762 }
3763
3764 /* Helper for value_fetch_lazy when the value is in a register. */
3765
3766 static void
3767 value_fetch_lazy_register (struct value *val)
3768 {
3769 struct frame_info *next_frame;
3770 int regnum;
3771 struct type *type = check_typedef (value_type (val));
3772 struct value *new_val = val, *mark = value_mark ();
3773
3774 /* Offsets are not supported here; lazy register values must
3775 refer to the entire register. */
3776 gdb_assert (value_offset (val) == 0);
3777
3778 while (VALUE_LVAL (new_val) == lval_register && value_lazy (new_val))
3779 {
3780 struct frame_id next_frame_id = VALUE_NEXT_FRAME_ID (new_val);
3781
3782 next_frame = frame_find_by_id (next_frame_id);
3783 regnum = VALUE_REGNUM (new_val);
3784
3785 gdb_assert (next_frame != NULL);
3786
3787 /* Convertible register routines are used for multi-register
3788 values and for interpretation in different types
3789 (e.g. float or int from a double register). Lazy
3790 register values should have the register's natural type,
3791 so they do not apply. */
3792 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
3793 regnum, type));
3794
3795 /* NEXT_FRAME was obtained, above, via VALUE_NEXT_FRAME_ID.
3796 Since a "->next" operation was performed when setting
3797 this field, we do not need to perform a "next" operation
3798 again when unwinding the register. That's why
3799 frame_unwind_register_value() is called here instead of
3800 get_frame_register_value(). */
3801 new_val = frame_unwind_register_value (next_frame, regnum);
3802
3803 /* If we get another lazy lval_register value, it means the
3804 register is found by reading it from NEXT_FRAME's next frame.
3805 frame_unwind_register_value should never return a value with
3806 the frame id pointing to NEXT_FRAME. If it does, it means we
3807 either have two consecutive frames with the same frame id
3808 in the frame chain, or some code is trying to unwind
3809 behind get_prev_frame's back (e.g., a frame unwind
3810 sniffer trying to unwind), bypassing its validations. In
3811 any case, it should always be an internal error to end up
3812 in this situation. */
3813 if (VALUE_LVAL (new_val) == lval_register
3814 && value_lazy (new_val)
3815 && frame_id_eq (VALUE_NEXT_FRAME_ID (new_val), next_frame_id))
3816 internal_error (__FILE__, __LINE__,
3817 _("infinite loop while fetching a register"));
3818 }
3819
3820 /* If it's still lazy (for instance, a saved register on the
3821 stack), fetch it. */
3822 if (value_lazy (new_val))
3823 value_fetch_lazy (new_val);
3824
3825 /* Copy the contents and the unavailability/optimized-out
3826 meta-data from NEW_VAL to VAL. */
3827 set_value_lazy (val, 0);
3828 value_contents_copy (val, value_embedded_offset (val),
3829 new_val, value_embedded_offset (new_val),
3830 type_length_units (type));
3831
3832 if (frame_debug)
3833 {
3834 struct gdbarch *gdbarch;
3835 struct frame_info *frame;
3836 /* VALUE_FRAME_ID is used here, instead of VALUE_NEXT_FRAME_ID,
3837 so that the frame level will be shown correctly. */
3838 frame = frame_find_by_id (VALUE_FRAME_ID (val));
3839 regnum = VALUE_REGNUM (val);
3840 gdbarch = get_frame_arch (frame);
3841
3842 fprintf_unfiltered (gdb_stdlog,
3843 "{ value_fetch_lazy "
3844 "(frame=%d,regnum=%d(%s),...) ",
3845 frame_relative_level (frame), regnum,
3846 user_reg_map_regnum_to_name (gdbarch, regnum));
3847
3848 fprintf_unfiltered (gdb_stdlog, "->");
3849 if (value_optimized_out (new_val))
3850 {
3851 fprintf_unfiltered (gdb_stdlog, " ");
3852 val_print_optimized_out (new_val, gdb_stdlog);
3853 }
3854 else
3855 {
3856 int i;
3857 const gdb_byte *buf = value_contents (new_val);
3858
3859 if (VALUE_LVAL (new_val) == lval_register)
3860 fprintf_unfiltered (gdb_stdlog, " register=%d",
3861 VALUE_REGNUM (new_val));
3862 else if (VALUE_LVAL (new_val) == lval_memory)
3863 fprintf_unfiltered (gdb_stdlog, " address=%s",
3864 paddress (gdbarch,
3865 value_address (new_val)));
3866 else
3867 fprintf_unfiltered (gdb_stdlog, " computed");
3868
3869 fprintf_unfiltered (gdb_stdlog, " bytes=");
3870 fprintf_unfiltered (gdb_stdlog, "[");
3871 for (i = 0; i < register_size (gdbarch, regnum); i++)
3872 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3873 fprintf_unfiltered (gdb_stdlog, "]");
3874 }
3875
3876 fprintf_unfiltered (gdb_stdlog, " }\n");
3877 }
3878
3879 /* Dispose of the intermediate values. This prevents
3880 watchpoints from trying to watch the saved frame pointer. */
3881 value_free_to_mark (mark);
3882 }
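/* Editorial example, not part of value.c: with "set debug frame 1" the
   block above produces trace lines of roughly this shape (register
   name, numbers and bytes invented for illustration):

     { value_fetch_lazy (frame=1,regnum=6(rbp),...) -> address=0x7fffffffe2a0 bytes=[0011223344556677] }
*/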
3883
3884 /* Load the actual content of a lazy value. Fetch the data from the
3885 user's process and clear the lazy flag to indicate that the data in
3886 the buffer is valid.
3887
3888 If the value is zero-length, we avoid calling read_memory, which
3889 would abort. We mark the value as fetched anyway -- all 0 bytes of
3890 it. */
3891
3892 void
3893 value_fetch_lazy (struct value *val)
3894 {
3895 gdb_assert (value_lazy (val));
3896 allocate_value_contents (val);
3897 /* A value is either lazy, or fully fetched. The
3898 availability/validity is only established as we try to fetch a
3899 value. */
3900 gdb_assert (val->optimized_out.empty ());
3901 gdb_assert (val->unavailable.empty ());
3902 if (value_bitsize (val))
3903 value_fetch_lazy_bitfield (val);
3904 else if (VALUE_LVAL (val) == lval_memory)
3905 value_fetch_lazy_memory (val);
3906 else if (VALUE_LVAL (val) == lval_register)
3907 value_fetch_lazy_register (val);
3908 else if (VALUE_LVAL (val) == lval_computed
3909 && value_computed_funcs (val)->read != NULL)
3910 value_computed_funcs (val)->read (val);
3911 else
3912 internal_error (__FILE__, __LINE__, _("Unexpected lazy value type."));
3913
3914 set_value_lazy (val, 0);
3915 }
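/* Editorial sketch, not part of value.c: the canonical caller pattern,
   used throughout GDB, is

     if (value_lazy (val))
       value_fetch_lazy (val);

   after which value_contents and related accessors may be used.  */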
3916
3917 /* Implementation of the convenience function $_isvoid. */
3918
3919 static struct value *
3920 isvoid_internal_fn (struct gdbarch *gdbarch,
3921 const struct language_defn *language,
3922 void *cookie, int argc, struct value **argv)
3923 {
3924 int ret;
3925
3926 if (argc != 1)
3927 error (_("You must provide one argument for $_isvoid."));
3928
3929 ret = TYPE_CODE (value_type (argv[0])) == TYPE_CODE_VOID;
3930
3931 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
3932 }
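/* Editorial example, not part of value.c: from the GDB command line
   (transcript illustrative, not captured from a real session):

     (gdb) print $_isvoid ($never_set)
     $1 = 1
     (gdb) print $_isvoid (3 + 4)
     $2 = 0
*/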
3933
3934 /* Implementation of the convenience function $_creal. Extracts the
3935 real part from a complex number. */
3936
3937 static struct value *
3938 creal_internal_fn (struct gdbarch *gdbarch,
3939 const struct language_defn *language,
3940 void *cookie, int argc, struct value **argv)
3941 {
3942 if (argc != 1)
3943 error (_("You must provide one argument for $_creal."));
3944
3945 value *cval = argv[0];
3946 type *ctype = check_typedef (value_type (cval));
3947 if (TYPE_CODE (ctype) != TYPE_CODE_COMPLEX)
3948 error (_("expected a complex number"));
3949 return value_from_component (cval, TYPE_TARGET_TYPE (ctype), 0);
3950 }
3951
3952 /* Implementation of the convenience function $_cimag. Extracts the
3953 imaginary part from a complex number. */
3954
3955 static struct value *
3956 cimag_internal_fn (struct gdbarch *gdbarch,
3957 const struct language_defn *language,
3958 void *cookie, int argc,
3959 struct value **argv)
3960 {
3961 if (argc != 1)
3962 error (_("You must provide one argument for $_cimag."));
3963
3964 value *cval = argv[0];
3965 type *ctype = check_typedef (value_type (cval));
3966 if (TYPE_CODE (ctype) != TYPE_CODE_COMPLEX)
3967 error (_("expected a complex number"));
3968 return value_from_component (cval, TYPE_TARGET_TYPE (ctype),
3969 TYPE_LENGTH (TYPE_TARGET_TYPE (ctype)));
3970 }
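/* Editorial example, not part of value.c: with `double _Complex z'
   equal to 1.5 + 2.5i in the inferior, one would expect (transcript
   illustrative):

     (gdb) print $_creal (z)
     $1 = 1.5
     (gdb) print $_cimag (z)
     $2 = 2.5

   The result type is the component type, here `double'.  */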
3971
3972 #if GDB_SELF_TEST
3973 namespace selftests
3974 {
3975
3976 /* Test the ranges_contain function. */
3977
3978 static void
3979 test_ranges_contain ()
3980 {
3981 std::vector<range> ranges;
3982 range r;
3983
3984 /* [10, 14] */
3985 r.offset = 10;
3986 r.length = 5;
3987 ranges.push_back (r);
3988
3989 /* [20, 24] */
3990 r.offset = 20;
3991 r.length = 5;
3992 ranges.push_back (r);
3993
3994 /* [2, 6] */
3995 SELF_CHECK (!ranges_contain (ranges, 2, 5));
3996 /* [9, 13] */
3997 SELF_CHECK (ranges_contain (ranges, 9, 5));
3998 /* [10, 11] */
3999 SELF_CHECK (ranges_contain (ranges, 10, 2));
4000 /* [10, 14] */
4001 SELF_CHECK (ranges_contain (ranges, 10, 5));
4002 /* [13, 18] */
4003 SELF_CHECK (ranges_contain (ranges, 13, 6));
4004 /* [14, 18] */
4005 SELF_CHECK (ranges_contain (ranges, 14, 5));
4006 /* [15, 18] */
4007 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4008 /* [16, 19] */
4009 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4010 /* [16, 21] */
4011 SELF_CHECK (ranges_contain (ranges, 16, 6));
4012 /* [21, 21] */
4013 SELF_CHECK (ranges_contain (ranges, 21, 1));
4014 /* [21, 25] */
4015 SELF_CHECK (ranges_contain (ranges, 21, 5));
4016 /* [26, 28] */
4017 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4018 }
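/* Editorial note, not part of value.c: the bracketed comments above are
   inclusive intervals, so (offset = 10, length = 5) is written [10, 14].
   For instance ranges_contain (ranges, 13, 6) is true because the query
   [13, 18] overlaps the stored range [10, 14].  */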
4019
4020 /* Check that RANGES contains the same ranges as EXPECTED. */
4021
4022 static bool
4023 check_ranges_vector (gdb::array_view<const range> ranges,
4024 gdb::array_view<const range> expected)
4025 {
4026 return ranges == expected;
4027 }
4028
4029 /* Test the insert_into_bit_range_vector function. */
4030
4031 static void
4032 test_insert_into_bit_range_vector ()
4033 {
4034 std::vector<range> ranges;
4035
4036 /* [10, 14] */
4037 {
4038 insert_into_bit_range_vector (&ranges, 10, 5);
4039 static const range expected[] = {
4040 {10, 5}
4041 };
4042 SELF_CHECK (check_ranges_vector (ranges, expected));
4043 }
4044
4045 /* [10, 14] */
4046 {
4047 insert_into_bit_range_vector (&ranges, 11, 4);
4048 static const range expected = {10, 5};
4049 SELF_CHECK (check_ranges_vector (ranges, expected));
4050 }
4051
4052 /* [10, 14] [20, 24] */
4053 {
4054 insert_into_bit_range_vector (&ranges, 20, 5);
4055 static const range expected[] = {
4056 {10, 5},
4057 {20, 5},
4058 };
4059 SELF_CHECK (check_ranges_vector (ranges, expected));
4060 }
4061
4062 /* [10, 14] [17, 24] */
4063 {
4064 insert_into_bit_range_vector (&ranges, 17, 5);
4065 static const range expected[] = {
4066 {10, 5},
4067 {17, 8},
4068 };
4069 SELF_CHECK (check_ranges_vector (ranges, expected));
4070 }
4071
4072 /* [2, 8] [10, 14] [17, 24] */
4073 {
4074 insert_into_bit_range_vector (&ranges, 2, 7);
4075 static const range expected[] = {
4076 {2, 7},
4077 {10, 5},
4078 {17, 8},
4079 };
4080 SELF_CHECK (check_ranges_vector (ranges, expected));
4081 }
4082
4083 /* [2, 14] [17, 24] */
4084 {
4085 insert_into_bit_range_vector (&ranges, 9, 1);
4086 static const range expected[] = {
4087 {2, 13},
4088 {17, 8},
4089 };
4090 SELF_CHECK (check_ranges_vector (ranges, expected));
4091 }
4092
4093 /* [2, 14] [17, 24] -- inserting a range already fully covered is a no-op. */
4094 {
4095 insert_into_bit_range_vector (&ranges, 9, 1);
4096 static const range expected[] = {
4097 {2, 13},
4098 {17, 8},
4099 };
4100 SELF_CHECK (check_ranges_vector (ranges, expected));
4101 }
4102
4103 /* [2, 33] */
4104 {
4105 insert_into_bit_range_vector (&ranges, 4, 30);
4106 static const range expected = {2, 32};
4107 SELF_CHECK (check_ranges_vector (ranges, expected));
4108 }
4109 }
4110
4111 } /* namespace selftests */
4112 #endif /* GDB_SELF_TEST */
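/* Editorial note, not part of value.c: in a GDB configured with
   self-tests, the checks registered below can be run with the
   "maintenance selftest" command, optionally filtered by name, e.g.

     (gdb) maintenance selftest ranges_contain
*/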
4113
4114 void
4115 _initialize_values (void)
4116 {
4117 add_cmd ("convenience", no_class, show_convenience, _("\
4118 Debugger convenience (\"$foo\") variables and functions.\n\
4119 Convenience variables are created when you assign them values;\n\
4120 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4121 \n\
4122 A few convenience variables are given values automatically:\n\
4123 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4124 \"$__\" holds the contents of the last address examined with \"x\"."
4125 #ifdef HAVE_PYTHON
4126 "\n\n\
4127 Convenience functions are defined via the Python API."
4128 #endif
4129 ), &showlist);
4130 add_alias_cmd ("conv", "convenience", no_class, 1, &showlist);
4131
4132 add_cmd ("values", no_set_class, show_values, _("\
4133 Elements of value history around item number IDX (or last ten)."),
4134 &showlist);
4135
4136 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4137 Initialize a convenience variable if necessary.\n\
4138 init-if-undefined VARIABLE = EXPRESSION\n\
4139 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4140 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4141 VARIABLE is already initialized."));
4142
4143 add_prefix_cmd ("function", no_class, function_command, _("\
4144 Placeholder command for showing help on convenience functions."),
4145 &functionlist, "function ", 0, &cmdlist);
4146
4147 add_internal_function ("_isvoid", _("\
4148 Check whether an expression is void.\n\
4149 Usage: $_isvoid (expression)\n\
4150 Return 1 if the expression is void, zero otherwise."),
4151 isvoid_internal_fn, NULL);
4152
4153 add_internal_function ("_creal", _("\
4154 Extract the real part of a complex number.\n\
4155 Usage: $_creal (expression)\n\
4156 Return the real part of a complex number; its type depends on the\n\
4157 type of the complex number."),
4158 creal_internal_fn, NULL);
4159
4160 add_internal_function ("_cimag", _("\
4161 Extract the imaginary part of a complex number.\n\
4162 Usage: $_cimag (expression)\n\
4163 Return the imaginary part of a complex number; its type depends on the\n\
4164 type of the complex number."),
4165 cimag_internal_fn, NULL);
4166
4167 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4168 class_support, &max_value_size, _("\
4169 Set the maximum size of a value gdb will load from the inferior."), _("\
4170 Show the maximum size of a value gdb will load from the inferior."), _("\
4171 Use this to control the maximum size, in bytes, of a value that gdb\n\
4172 will load from the inferior. Setting this value to 'unlimited'\n\
4173 disables checking.\n\
4174 Setting this does not invalidate already allocated values; it only\n\
4175 prevents values larger than this size from being allocated in the future."),
4176 set_max_value_size,
4177 show_max_value_size,
4178 &setlist, &showlist);
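/* Editorial example, not part of value.c: the setting registered above
   is used from the GDB command line like

     (gdb) set max-value-size 65536
     (gdb) set max-value-size unlimited

   Values are in bytes; the exact "show max-value-size" wording may
   differ.  */
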
4179 #if GDB_SELF_TEST
4180 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4181 selftests::register_test ("insert_into_bit_range_vector",
4182 selftests::test_insert_into_bit_range_vector);
4183 #endif
4184 }
4185
4186 /* See value.h. */
4187
4188 void
4189 finalize_values ()
4190 {
4191 all_values.clear ();
4192 }