]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/value.c
Update copyright year range in header of all files managed by GDB
[thirdparty/binutils-gdb.git] / gdb / value.c
1 /* Low level packing and unpacking of values for GDB, the GNU Debugger.
2
3 Copyright (C) 1986-2024 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "arch-utils.h"
22 #include "symtab.h"
23 #include "gdbtypes.h"
24 #include "value.h"
25 #include "gdbcore.h"
26 #include "command.h"
27 #include "gdbcmd.h"
28 #include "target.h"
29 #include "language.h"
30 #include "demangle.h"
31 #include "regcache.h"
32 #include "block.h"
33 #include "target-float.h"
34 #include "objfiles.h"
35 #include "valprint.h"
36 #include "cli/cli-decode.h"
37 #include "extension.h"
38 #include <ctype.h>
39 #include "tracepoint.h"
40 #include "cp-abi.h"
41 #include "user-regs.h"
42 #include <algorithm>
43 #include <iterator>
44 #include <map>
45 #include <utility>
46 #include <vector>
47 #include "completer.h"
48 #include "gdbsupport/selftest.h"
49 #include "gdbsupport/array-view.h"
50 #include "cli/cli-style.h"
51 #include "expop.h"
52 #include "inferior.h"
53 #include "varobj.h"
54
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  Invoked with COOKIE when the function is called;
     see internal_function_fn for the exact signature.  */
  internal_function_fn handler;

  /* User data for the handler.  Opaque to this file; ownership is
     presumably with whoever registered the function -- it is never
     freed here.  */
  void *cookie;
};
69
70 /* Returns true if the ranges defined by [offset1, offset1+len1) and
71 [offset2, offset2+len2) overlap. */
72
73 static bool
74 ranges_overlap (LONGEST offset1, ULONGEST len1,
75 LONGEST offset2, ULONGEST len2)
76 {
77 LONGEST h, l;
78
79 l = std::max (offset1, offset2);
80 h = std::min (offset1 + len1, offset2 + len2);
81 return (l < h);
82 }
83
84 /* Returns true if RANGES contains any range that overlaps [OFFSET,
85 OFFSET+LENGTH). */
86
87 static bool
88 ranges_contain (const std::vector<range> &ranges, LONGEST offset,
89 ULONGEST length)
90 {
91 range what;
92
93 what.offset = offset;
94 what.length = length;
95
96 /* We keep ranges sorted by offset and coalesce overlapping and
97 contiguous ranges, so to check if a range list contains a given
98 range, we can do a binary search for the position the given range
99 would be inserted if we only considered the starting OFFSET of
100 ranges. We call that position I. Since we also have LENGTH to
101 care for (this is a range afterall), we need to check if the
102 _previous_ range overlaps the I range. E.g.,
103
104 R
105 |---|
106 |---| |---| |------| ... |--|
107 0 1 2 N
108
109 I=1
110
111 In the case above, the binary search would return `I=1', meaning,
112 this OFFSET should be inserted at position 1, and the current
113 position 1 should be pushed further (and before 2). But, `0'
114 overlaps with R.
115
116 Then we need to check if the I range overlaps the I range itself.
117 E.g.,
118
119 R
120 |---|
121 |---| |---| |-------| ... |--|
122 0 1 2 N
123
124 I=1
125 */
126
127
128 auto i = std::lower_bound (ranges.begin (), ranges.end (), what);
129
130 if (i > ranges.begin ())
131 {
132 const struct range &bef = *(i - 1);
133
134 if (ranges_overlap (bef.offset, bef.length, offset, length))
135 return true;
136 }
137
138 if (i < ranges.end ())
139 {
140 const struct range &r = *i;
141
142 if (ranges_overlap (r.offset, r.length, offset, length))
143 return true;
144 }
145
146 return false;
147 }
148
149 static struct cmd_list_element *functionlist;
150
/* Destructor.  Perform location-specific cleanup: a computed lvalue
   may own an opaque closure that its implementation must release, and
   an xcallable value owns its xmethod worker.  */

value::~value ()
{
  if (this->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = m_location.computed.funcs;

      /* The free_closure callback is optional.  */
      if (funcs->free_closure)
	funcs->free_closure (this);
    }
  else if (this->lval () == lval_xcallable)
    delete m_location.xm_worker;
}
163
/* See value.h.  */

struct gdbarch *
value::arch () const
{
  /* A value's architecture is that of its type.  */
  return type ()->arch ();
}
171
172 bool
173 value::bits_available (LONGEST offset, ULONGEST length) const
174 {
175 gdb_assert (!m_lazy);
176
177 /* Don't pretend we have anything available there in the history beyond
178 the boundaries of the value recorded. It's not like inferior memory
179 where there is actual stuff underneath. */
180 ULONGEST val_len = TARGET_CHAR_BIT * enclosing_type ()->length ();
181 return !((m_in_history
182 && (offset < 0 || offset + length > val_len))
183 || ranges_contain (m_unavailable, offset, length));
184 }
185
/* Byte-granularity wrapper around bits_available: return true if the
   LENGTH bytes starting at byte OFFSET are available.  */

bool
value::bytes_available (LONGEST offset, ULONGEST length) const
{
  /* SIGN is the sign bit of a ULONGEST, pre-divided by TARGET_CHAR_BIT;
     MASK covers every magnitude that can be multiplied by
     TARGET_CHAR_BIT without overflowing.  */
  ULONGEST sign = (1ULL << (sizeof (ULONGEST) * 8 - 1)) / TARGET_CHAR_BIT;
  ULONGEST mask = (sign << 1) - 1;

  /* Reject OFFSET or LENGTH values that would change when truncated to
     the scalable range (i.e., whose conversion to bit counts would
     overflow), and an OFFSET+LENGTH sum that would wrap.  */
  if (offset != ((offset & mask) ^ sign) - sign
      || length != ((length & mask) ^ sign) - sign
      || (length > 0 && (~offset & (offset + length - 1) & sign) != 0))
    error (_("Integer overflow in data location calculation"));

  return bits_available (offset * TARGET_CHAR_BIT, length * TARGET_CHAR_BIT);
}
199
/* Return true if any bit in [BIT_OFFSET, BIT_OFFSET+BIT_LENGTH) of
   this value's contents has been marked optimized out.  The value
   must not be lazy.  */

bool
value::bits_any_optimized_out (int bit_offset, int bit_length) const
{
  gdb_assert (!m_lazy);

  return ranges_contain (m_optimized_out, bit_offset, bit_length);
}
207
208 bool
209 value::entirely_available ()
210 {
211 /* We can only tell whether the whole value is available when we try
212 to read it. */
213 if (m_lazy)
214 fetch_lazy ();
215
216 if (m_unavailable.empty ())
217 return true;
218 return false;
219 }
220
221 /* See value.h. */
222
223 bool
224 value::entirely_covered_by_range_vector (const std::vector<range> &ranges)
225 {
226 /* We can only tell whether the whole value is optimized out /
227 unavailable when we try to read it. */
228 if (m_lazy)
229 fetch_lazy ();
230
231 if (ranges.size () == 1)
232 {
233 const struct range &t = ranges[0];
234
235 if (t.offset == 0
236 && t.length == TARGET_CHAR_BIT * enclosing_type ()->length ())
237 return true;
238 }
239
240 return false;
241 }
242
243 /* Insert into the vector pointed to by VECTORP the bit range starting of
244 OFFSET bits, and extending for the next LENGTH bits. */
245
246 static void
247 insert_into_bit_range_vector (std::vector<range> *vectorp,
248 LONGEST offset, ULONGEST length)
249 {
250 range newr;
251
252 /* Insert the range sorted. If there's overlap or the new range
253 would be contiguous with an existing range, merge. */
254
255 newr.offset = offset;
256 newr.length = length;
257
258 /* Do a binary search for the position the given range would be
259 inserted if we only considered the starting OFFSET of ranges.
260 Call that position I. Since we also have LENGTH to care for
261 (this is a range afterall), we need to check if the _previous_
262 range overlaps the I range. E.g., calling R the new range:
263
264 #1 - overlaps with previous
265
266 R
267 |-...-|
268 |---| |---| |------| ... |--|
269 0 1 2 N
270
271 I=1
272
273 In the case #1 above, the binary search would return `I=1',
274 meaning, this OFFSET should be inserted at position 1, and the
275 current position 1 should be pushed further (and become 2). But,
276 note that `0' overlaps with R, so we want to merge them.
277
278 A similar consideration needs to be taken if the new range would
279 be contiguous with the previous range:
280
281 #2 - contiguous with previous
282
283 R
284 |-...-|
285 |--| |---| |------| ... |--|
286 0 1 2 N
287
288 I=1
289
290 If there's no overlap with the previous range, as in:
291
292 #3 - not overlapping and not contiguous
293
294 R
295 |-...-|
296 |--| |---| |------| ... |--|
297 0 1 2 N
298
299 I=1
300
301 or if I is 0:
302
303 #4 - R is the range with lowest offset
304
305 R
306 |-...-|
307 |--| |---| |------| ... |--|
308 0 1 2 N
309
310 I=0
311
312 ... we just push the new range to I.
313
314 All the 4 cases above need to consider that the new range may
315 also overlap several of the ranges that follow, or that R may be
316 contiguous with the following range, and merge. E.g.,
317
318 #5 - overlapping following ranges
319
320 R
321 |------------------------|
322 |--| |---| |------| ... |--|
323 0 1 2 N
324
325 I=0
326
327 or:
328
329 R
330 |-------|
331 |--| |---| |------| ... |--|
332 0 1 2 N
333
334 I=1
335
336 */
337
338 auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
339 if (i > vectorp->begin ())
340 {
341 struct range &bef = *(i - 1);
342
343 if (ranges_overlap (bef.offset, bef.length, offset, length))
344 {
345 /* #1 */
346 LONGEST l = std::min (bef.offset, offset);
347 LONGEST h = std::max (bef.offset + bef.length, offset + length);
348
349 bef.offset = l;
350 bef.length = h - l;
351 i--;
352 }
353 else if (offset == bef.offset + bef.length)
354 {
355 /* #2 */
356 bef.length += length;
357 i--;
358 }
359 else
360 {
361 /* #3 */
362 i = vectorp->insert (i, newr);
363 }
364 }
365 else
366 {
367 /* #4 */
368 i = vectorp->insert (i, newr);
369 }
370
371 /* Check whether the ranges following the one we've just added or
372 touched can be folded in (#5 above). */
373 if (i != vectorp->end () && i + 1 < vectorp->end ())
374 {
375 int removed = 0;
376 auto next = i + 1;
377
378 /* Get the range we just touched. */
379 struct range &t = *i;
380 removed = 0;
381
382 i = next;
383 for (; i < vectorp->end (); i++)
384 {
385 struct range &r = *i;
386 if (r.offset <= t.offset + t.length)
387 {
388 LONGEST l, h;
389
390 l = std::min (t.offset, r.offset);
391 h = std::max (t.offset + t.length, r.offset + r.length);
392
393 t.offset = l;
394 t.length = h - l;
395
396 removed++;
397 }
398 else
399 {
400 /* If we couldn't merge this one, we won't be able to
401 merge following ones either, since the ranges are
402 always sorted by OFFSET. */
403 break;
404 }
405 }
406
407 if (removed != 0)
408 vectorp->erase (next, next + removed);
409 }
410 }
411
/* Mark the bit range [OFFSET, OFFSET+LENGTH) of this value's contents
   as unavailable, coalescing with any overlapping or adjacent range.  */

void
value::mark_bits_unavailable (LONGEST offset, ULONGEST length)
{
  insert_into_bit_range_vector (&m_unavailable, offset, length);
}
417
/* Byte-granularity counterpart of mark_bits_unavailable: mark the
   byte range [OFFSET, OFFSET+LENGTH) as unavailable.  */

void
value::mark_bytes_unavailable (LONGEST offset, ULONGEST length)
{
  mark_bits_unavailable (offset * TARGET_CHAR_BIT,
			 length * TARGET_CHAR_BIT);
}
424
425 /* Find the first range in RANGES that overlaps the range defined by
426 OFFSET and LENGTH, starting at element POS in the RANGES vector,
427 Returns the index into RANGES where such overlapping range was
428 found, or -1 if none was found. */
429
430 static int
431 find_first_range_overlap (const std::vector<range> *ranges, int pos,
432 LONGEST offset, LONGEST length)
433 {
434 int i;
435
436 for (i = pos; i < ranges->size (); i++)
437 {
438 const range &r = (*ranges)[i];
439 if (ranges_overlap (r.offset, r.length, offset, length))
440 return i;
441 }
442
443 return -1;
444 }
445
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (which ever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  /* The compared region ends inside this byte; clear the
	     low-order mask bits that fall beyond it.  */
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF.. block then the offsets are byte aligned, or the
	 length is zero (in which case this code is not reached).  Compare
	 a number of bits at the end of the region, starting from an exact
	 byte boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
546
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector have we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  Borrowed, not owned -- points at a value's
     unavailable or optimized-out vector.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  -1 once no further overlap
     exists.  */
  int idx;
};
560
561 /* Helper function for value_contents_bits_eq. Compare LENGTH bits of
562 RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
563 ranges starting at OFFSET2 bits. Return true if the ranges match
564 and fill in *L and *H with the overlapping window relative to
565 (both) OFFSET1 or OFFSET2. */
566
567 static int
568 find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
569 struct ranges_and_idx *rp2,
570 LONGEST offset1, LONGEST offset2,
571 ULONGEST length, ULONGEST *l, ULONGEST *h)
572 {
573 rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
574 offset1, length);
575 rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
576 offset2, length);
577
578 if (rp1->idx == -1 && rp2->idx == -1)
579 {
580 *l = length;
581 *h = length;
582 return 1;
583 }
584 else if (rp1->idx == -1 || rp2->idx == -1)
585 return 0;
586 else
587 {
588 const range *r1, *r2;
589 ULONGEST l1, h1;
590 ULONGEST l2, h2;
591
592 r1 = &(*rp1->ranges)[rp1->idx];
593 r2 = &(*rp2->ranges)[rp2->idx];
594
595 /* Get the unavailable windows intersected by the incoming
596 ranges. The first and last ranges that overlap the argument
597 range may be wider than said incoming arguments ranges. */
598 l1 = std::max (offset1, r1->offset);
599 h1 = std::min (offset1 + length, r1->offset + r1->length);
600
601 l2 = std::max (offset2, r2->offset);
602 h2 = std::min (offset2 + length, offset2 + r2->length);
603
604 /* Make them relative to the respective start offsets, so we can
605 compare them for equality. */
606 l1 -= offset1;
607 h1 -= offset1;
608
609 l2 -= offset2;
610 h2 -= offset2;
611
612 /* Different ranges, no match. */
613 if (l1 != l2 || h1 != h2)
614 return 0;
615
616 *h = h1;
617 *l = l1;
618 return 1;
619 }
620 }
621
/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

bool
value::contents_bits_eq (int offset1, const struct value *val2, int offset2,
			 int length) const
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!m_lazy && !val2->m_lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
	      <= m_enclosing_type->length () * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
	      <= val2->m_enclosing_type->length () * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &m_unavailable;
  rp2[0].ranges = &val2->m_unavailable;
  rp1[1].ranges = &m_optimized_out;
  rp2[1].ranges = &val2->m_optimized_out;

  /* Walk the compared window left to right.  Each iteration handles
     one chunk: [0, L) is valid on both sides and is compared with
     memcmp; [L, H) is the matching invalid window, which is skipped.  */
  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
	{
	  ULONGEST l_tmp, h_tmp;

	  /* The contents only match equal if the invalid/unavailable
	     contents ranges match as well.  */
	  if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
						   offset1, offset2, length,
						   &l_tmp, &h_tmp))
	    return false;

	  /* We're interested in the lowest/first range found.  */
	  if (i == 0 || l_tmp < l)
	    {
	      l = l_tmp;
	      h = h_tmp;
	    }
	}

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (m_contents.get (), offset1,
				   val2->m_contents.get (), offset2, l) != 0)
	return false;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}
689
/* See value.h.  */

bool
value::contents_eq (LONGEST offset1,
		    const struct value *val2, LONGEST offset2,
		    LONGEST length) const
{
  /* Byte-based wrapper around the bit-based comparison.  */
  return contents_bits_eq (offset1 * TARGET_CHAR_BIT,
			   val2, offset2 * TARGET_CHAR_BIT,
			   length * TARGET_CHAR_BIT);
}
701
702 /* See value.h. */
703
704 bool
705 value::contents_eq (const struct value *val2) const
706 {
707 ULONGEST len1 = check_typedef (enclosing_type ())->length ();
708 ULONGEST len2 = check_typedef (val2->enclosing_type ())->length ();
709 if (len1 != len2)
710 return false;
711 return contents_eq (0, val2, 0, len1);
712 }
713
714 /* The value-history records all the values printed by print commands
715 during this session. */
716
717 static std::vector<value_ref_ptr> value_history;
718
719 \f
720 /* List of all value objects currently allocated
721 (except for those released by calls to release_value)
722 This is so they can be freed after each command. */
723
724 static std::vector<value_ref_ptr> all_values;
725
/* See value.h.  */

struct value *
value::allocate_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  val = new struct value (type);

  /* Values start out on the all_values chain.  */
  all_values.emplace_back (val);

  return val;
}
748
749 /* The maximum size, in bytes, that GDB will try to allocate for a value.
750 The initial value of 64k was not selected for any specific reason, it is
751 just a reasonable starting point. */
752
753 static int max_value_size = 65536; /* 64k bytes */
754
755 /* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
756 LONGEST, otherwise GDB will not be able to parse integer values from the
757 CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
758 be unable to parse "set max-value-size 2".
759
760 As we want a consistent GDB experience across hosts with different sizes
761 of LONGEST, this arbitrary minimum value was selected, so long as this
762 is bigger than LONGEST on all GDB supported hosts we're fine. */
763
764 #define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
765 static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
766
767 /* Implement the "set max-value-size" command. */
768
769 static void
770 set_max_value_size (const char *args, int from_tty,
771 struct cmd_list_element *c)
772 {
773 gdb_assert (max_value_size == -1 || max_value_size >= 0);
774
775 if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
776 {
777 max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
778 error (_("max-value-size set too low, increasing to %d bytes"),
779 max_value_size);
780 }
781 }
782
783 /* Implement the "show max-value-size" command. */
784
785 static void
786 show_max_value_size (struct ui_file *file, int from_tty,
787 struct cmd_list_element *c, const char *value)
788 {
789 if (max_value_size == -1)
790 gdb_printf (file, _("Maximum value size is unlimited.\n"));
791 else
792 gdb_printf (file, _("Maximum value size is %d bytes.\n"),
793 max_value_size);
794 }
795
796 /* Called before we attempt to allocate or reallocate a buffer for the
797 contents of a value. TYPE is the type of the value for which we are
798 allocating the buffer. If the buffer is too large (based on the user
799 controllable setting) then throw an error. If this function returns
800 then we should attempt to allocate the buffer. */
801
802 static void
803 check_type_length_before_alloc (const struct type *type)
804 {
805 ULONGEST length = type->length ();
806
807 if (exceeds_max_value_size (length))
808 {
809 if (type->name () != NULL)
810 error (_("value of type `%s' requires %s bytes, which is more "
811 "than max-value-size"), type->name (), pulongest (length));
812 else
813 error (_("value requires %s bytes, which is more than "
814 "max-value-size"), pulongest (length));
815 }
816 }
817
818 /* See value.h. */
819
820 bool
821 exceeds_max_value_size (ULONGEST length)
822 {
823 return max_value_size > -1 && length > max_value_size;
824 }
825
826 /* When this has a value, it is used to limit the number of array elements
827 of an array that are loaded into memory when an array value is made
828 non-lazy. */
829 static std::optional<int> array_length_limiting_element_count;
830
/* See value.h.  */
scoped_array_length_limiting::scoped_array_length_limiting (int elements)
{
  /* Save the previous limit so the destructor can restore it, then
     install ELEMENTS as the global element-count limit.  */
  m_old_value = array_length_limiting_element_count;
  array_length_limiting_element_count.emplace (elements);
}
837
/* See value.h.  */
scoped_array_length_limiting::~scoped_array_length_limiting ()
{
  /* Restore whatever limit (possibly none) was in effect before this
     scope was entered.  */
  array_length_limiting_element_count = m_old_value;
}
843
844 /* Find the inner element type for ARRAY_TYPE. */
845
846 static struct type *
847 find_array_element_type (struct type *array_type)
848 {
849 array_type = check_typedef (array_type);
850 gdb_assert (array_type->code () == TYPE_CODE_ARRAY);
851
852 if (current_language->la_language == language_fortran)
853 while (array_type->code () == TYPE_CODE_ARRAY)
854 {
855 array_type = array_type->target_type ();
856 array_type = check_typedef (array_type);
857 }
858 else
859 {
860 array_type = array_type->target_type ();
861 array_type = check_typedef (array_type);
862 }
863
864 return array_type;
865 }
866
/* Return the limited length of ARRAY_TYPE, which must be of
   TYPE_CODE_ARRAY.  This function can only be called when the global
   ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.

   The limited length of an array is the smallest of either (1) the total
   size of the array type, or (2) the array target type multiplied by the
   array_length_limiting_element_count.  */

static ULONGEST
calculate_limited_array_length (struct type *array_type)
{
  gdb_assert (array_length_limiting_element_count.has_value ());

  array_type = check_typedef (array_type);
  gdb_assert (array_type->code () == TYPE_CODE_ARRAY);

  /* Size of the element-count-limited prefix of the array.  */
  struct type *elm_type = find_array_element_type (array_type);
  ULONGEST len = (elm_type->length ()
		  * (*array_length_limiting_element_count));
  /* Never report more than the array actually occupies.  */
  len = std::min (len, array_type->length ());

  return len;
}
890
/* See value.h.  */

bool
value::set_limited_array_length ()
{
  /* An existing non-zero M_LIMITED_LENGTH caps how much we may load;
     zero means no limit has been recorded yet.  */
  ULONGEST limit = m_limited_length;
  ULONGEST len = type ()->length ();

  if (array_length_limiting_element_count.has_value ())
    len = calculate_limited_array_length (type ());

  if (limit != 0 && len > limit)
    len = limit;
  if (len > max_value_size)
    /* Even the limited length is too big; the caller cannot load the
       contents at all.  */
    return false;

  /* NOTE(review): the recorded limit is MAX_VALUE_SIZE itself (not
     LEN), i.e. up to max-value-size bytes of the array will be
     loaded; this matches the use in allocate_contents.  */
  m_limited_length = max_value_size;
  return true;
}
910
/* See value.h.  */

void
value::allocate_contents (bool check_size)
{
  /* Idempotent: do nothing if the contents buffer already exists.  */
  if (!m_contents)
    {
      struct type *enc_type = enclosing_type ();
      ULONGEST len = enc_type->length ();

      if (check_size)
	{
	  /* If we are allocating the contents of an array, which
	     is greater in size than max_value_size, and there is
	     an element limit in effect, then we can possibly try
	     to load only a sub-set of the array contents into
	     GDB's memory.  */
	  if (type () == enc_type
	      && type ()->code () == TYPE_CODE_ARRAY
	      && len > max_value_size
	      && set_limited_array_length ())
	    len = m_limited_length;
	  else
	    check_type_length_before_alloc (enc_type);
	}

      /* Zero-filled so unfetched bytes read as 0.  */
      m_contents.reset ((gdb_byte *) xzalloc (len));
    }
}
940
941 /* Allocate a value and its contents for type TYPE. If CHECK_SIZE is true,
942 then apply the usual max-value-size checks. */
943
944 struct value *
945 value::allocate (struct type *type, bool check_size)
946 {
947 struct value *val = value::allocate_lazy (type);
948
949 val->allocate_contents (check_size);
950 val->m_lazy = false;
951 return val;
952 }
953
/* Allocate a value and its contents for type TYPE.  */

struct value *
value::allocate (struct type *type)
{
  /* Apply the max-value-size check by default.  */
  return allocate (type, true);
}
961
/* See value.h */

value *
value::allocate_register_lazy (frame_info_ptr next_frame, int regnum,
			       struct type *type)
{
  /* With no explicit TYPE, use the architecture's type for REGNUM.  */
  if (type == nullptr)
    type = register_type (frame_unwind_arch (next_frame), regnum);

  value *result = value::allocate_lazy (type);

  result->set_lval (lval_register);
  result->m_location.reg.regnum = regnum;

  /* If this register value is created during unwind (while computing a frame
     id), and NEXT_FRAME is a frame inlined in the frame being unwound, then
     NEXT_FRAME will not have a valid frame id yet.  Find the next non-inline
     frame (possibly the sentinel frame).  This is where registers are unwound
     from anyway.  */
  while (get_frame_type (next_frame) == INLINE_FRAME)
    next_frame = get_next_frame_sentinel_okay (next_frame);

  result->m_location.reg.next_frame_id = get_frame_id (next_frame);

  /* We should have a next frame with a valid id.  */
  gdb_assert (frame_id_p (result->m_location.reg.next_frame_id));

  return result;
}
991
992 /* See value.h */
993
994 value *
995 value::allocate_register (frame_info_ptr next_frame, int regnum,
996 struct type *type)
997 {
998 value *result = value::allocate_register_lazy (next_frame, regnum, type);
999 result->set_lazy (false);
1000 return result;
1001 }
1002
1003 /* Allocate a value that has the correct length
1004 for COUNT repetitions of type TYPE. */
1005
1006 struct value *
1007 allocate_repeat_value (struct type *type, int count)
1008 {
1009 /* Despite the fact that we are really creating an array of TYPE here, we
1010 use the string lower bound as the array lower bound. This seems to
1011 work fine for now. */
1012 int low_bound = current_language->string_lower_bound ();
1013 /* FIXME-type-allocation: need a way to free this type when we are
1014 done with it. */
1015 struct type *array_type
1016 = lookup_array_range_type (type, low_bound, count + low_bound - 1);
1017
1018 return value::allocate (array_type);
1019 }
1020
1021 struct value *
1022 value::allocate_computed (struct type *type,
1023 const struct lval_funcs *funcs,
1024 void *closure)
1025 {
1026 struct value *v = value::allocate_lazy (type);
1027
1028 v->set_lval (lval_computed);
1029 v->m_location.computed.funcs = funcs;
1030 v->m_location.computed.closure = closure;
1031
1032 return v;
1033 }
1034
1035 /* See value.h. */
1036
1037 struct value *
1038 value::allocate_optimized_out (struct type *type)
1039 {
1040 struct value *retval = value::allocate_lazy (type);
1041
1042 retval->mark_bytes_optimized_out (0, type->length ());
1043 retval->set_lazy (false);
1044 return retval;
1045 }
1046
1047 /* Accessor methods. */
1048
1049 gdb::array_view<gdb_byte>
1050 value::contents_raw ()
1051 {
1052 int unit_size = gdbarch_addressable_memory_unit_size (arch ());
1053
1054 allocate_contents (true);
1055
1056 ULONGEST length = type ()->length ();
1057 return gdb::make_array_view
1058 (m_contents.get () + m_embedded_offset * unit_size, length);
1059 }
1060
1061 gdb::array_view<gdb_byte>
1062 value::contents_all_raw ()
1063 {
1064 allocate_contents (true);
1065
1066 ULONGEST length = enclosing_type ()->length ();
1067 return gdb::make_array_view (m_contents.get (), length);
1068 }
1069
/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
		   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  /* REAL_TYPE_FOUND is an optional out-parameter.  */
  if (real_type_found)
    *real_type_found = 0;
  result = value->type ();
  /* Only attempt dynamic-type discovery when the user has "set print
     object on".  */
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
	 fetch its rtti type.  */
      if (result->is_pointer_or_reference ()
	  && (check_typedef (result->target_type ())->code ()
	      == TYPE_CODE_STRUCT)
	  && !value->optimized_out ())
	{
	  struct type *real_type;

	  real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
	  if (real_type)
	    {
	      if (real_type_found)
		*real_type_found = 1;
	      result = real_type;
	    }
	}
      else if (resolve_simple_types)
	{
	  /* For non-struct-pointer values, the "actual" type is the
	     enclosing type when simple resolution was requested.  */
	  if (real_type_found)
	    *real_type_found = 1;
	  result = value->enclosing_type ();
	}
    }

  return result;
}
1113
/* Throw the standard OPTIMIZED_OUT_ERROR for a value that has been
   optimized out.  */

void
error_value_optimized_out (void)
{
  throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
}
1119
1120 void
1121 value::require_not_optimized_out () const
1122 {
1123 if (!m_optimized_out.empty ())
1124 {
1125 if (m_lval == lval_register)
1126 throw_error (OPTIMIZED_OUT_ERROR,
1127 _("register has not been saved in frame"));
1128 else
1129 error_value_optimized_out ();
1130 }
1131 }
1132
/* Throw NOT_AVAILABLE_ERROR if any part of this value has been marked
   unavailable.  */

void
value::require_available () const
{
  if (!m_unavailable.empty ())
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}
1139
1140 gdb::array_view<const gdb_byte>
1141 value::contents_for_printing ()
1142 {
1143 if (m_lazy)
1144 fetch_lazy ();
1145
1146 ULONGEST length = enclosing_type ()->length ();
1147 return gdb::make_array_view (m_contents.get (), length);
1148 }
1149
1150 gdb::array_view<const gdb_byte>
1151 value::contents_for_printing () const
1152 {
1153 gdb_assert (!m_lazy);
1154
1155 ULONGEST length = enclosing_type ()->length ();
1156 return gdb::make_array_view (m_contents.get (), length);
1157 }
1158
1159 gdb::array_view<const gdb_byte>
1160 value::contents_all ()
1161 {
1162 gdb::array_view<const gdb_byte> result = contents_for_printing ();
1163 require_not_optimized_out ();
1164 require_available ();
1165 return result;
1166 }
1167
1168 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1169 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1170
1171 static void
1172 ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
1173 const std::vector<range> &src_range, int src_bit_offset,
1174 unsigned int bit_length)
1175 {
1176 for (const range &r : src_range)
1177 {
1178 LONGEST h, l;
1179
1180 l = std::max (r.offset, (LONGEST) src_bit_offset);
1181 h = std::min ((LONGEST) (r.offset + r.length),
1182 (LONGEST) src_bit_offset + bit_length);
1183
1184 if (l < h)
1185 insert_into_bit_range_vector (dst_range,
1186 dst_bit_offset + (l - src_bit_offset),
1187 h - l);
1188 }
1189 }
1190
1191 /* See value.h. */
1192
void
value::ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
			     int src_bit_offset, int bit_length) const
{
  /* Propagate both kinds of metadata ranges -- unavailable and
     optimized-out -- from this value into DST, shifted to
     DST_BIT_OFFSET.  */
  ::ranges_copy_adjusted (&dst->m_unavailable, dst_bit_offset,
			  m_unavailable, src_bit_offset,
			  bit_length);
  ::ranges_copy_adjusted (&dst->m_optimized_out, dst_bit_offset,
			  m_optimized_out, src_bit_offset,
			  bit_length);
}
1204
1205 /* See value.h. */
1206
void
value::contents_copy_raw (struct value *dst, LONGEST dst_offset,
			  LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !m_lazy);

  /* If this value's contents are artificially truncated (see
     m_limited_length), only copy the bytes that were actually
     fetched.  */
  ULONGEST copy_length = length;
  ULONGEST limit = m_limited_length;
  if (limit > 0 && src_offset + length > limit)
    copy_length = src_offset > limit ? 0 : limit - src_offset;

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (dst->bytes_available (dst_offset, length));
  gdb_assert (!dst->bits_any_optimized_out (TARGET_CHAR_BIT * dst_offset,
					    TARGET_CHAR_BIT * length));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents
    = dst->contents_all_raw ().slice (dst_offset * unit_size,
				      copy_length * unit_size);
  gdb::array_view<const gdb_byte> src_contents
    = contents_all_raw ().slice (src_offset * unit_size,
				 copy_length * unit_size);
  gdb::copy (src_contents, dst_contents);

  /* Copy the meta-data, adjusted.  Note the ranges cover the full
     requested LENGTH, not just COPY_LENGTH.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  ranges_copy_adjusted (dst, dst_bit_offset,
			src_bit_offset, bit_length);
}
1249
1250 /* See value.h. */
1251
void
value::contents_copy_raw_bitwise (struct value *dst, LONGEST dst_bit_offset,
				  LONGEST src_bit_offset,
				  LONGEST bit_length)
{
  /* A lazy DST would make that this copy operation useless, since as
     soon as DST's contents were un-lazied (by a later value_contents
     call, say), the contents would be overwritten.  A lazy SRC would
     mean we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !m_lazy);

  /* If this value's contents are artificially truncated (see
     m_limited_length), only copy the bits that were actually
     fetched.  */
  ULONGEST copy_bit_length = bit_length;
  ULONGEST bit_limit = m_limited_length * TARGET_CHAR_BIT;
  if (bit_limit > 0 && src_bit_offset + bit_length > bit_limit)
    copy_bit_length = (src_bit_offset > bit_limit ? 0
		       : bit_limit - src_bit_offset);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  LONGEST dst_offset = dst_bit_offset / TARGET_CHAR_BIT;
  LONGEST length = bit_length / TARGET_CHAR_BIT;
  gdb_assert (dst->bytes_available (dst_offset, length));
  gdb_assert (!dst->bits_any_optimized_out (dst_bit_offset,
					    bit_length));

  /* Copy the data.  The endianness argument selects whether bit 0 is
     the most- or least-significant bit of the first byte.  */
  gdb::array_view<gdb_byte> dst_contents = dst->contents_all_raw ();
  gdb::array_view<const gdb_byte> src_contents = contents_all_raw ();
  copy_bitwise (dst_contents.data (), dst_bit_offset,
		src_contents.data (), src_bit_offset,
		copy_bit_length,
		type_byte_order (type ()) == BFD_ENDIAN_BIG);

  /* Copy the meta-data.  */
  ranges_copy_adjusted (dst, dst_bit_offset, src_bit_offset, bit_length);
}
1289
1290 /* See value.h. */
1291
void
value::contents_copy (struct value *dst, LONGEST dst_offset,
		      LONGEST src_offset, LONGEST length)
{
  /* Fetch this (source) value first if needed; the raw variant
     requires both values to be non-lazy.  */
  if (m_lazy)
    fetch_lazy ();

  contents_copy_raw (dst, dst_offset, src_offset, length);
}
1301
1302 gdb::array_view<const gdb_byte>
1303 value::contents ()
1304 {
1305 gdb::array_view<const gdb_byte> result = contents_writeable ();
1306 require_not_optimized_out ();
1307 require_available ();
1308 return result;
1309 }
1310
/* Return a modifiable view of this value's contents, fetching a lazy
   value first.  */

gdb::array_view<gdb_byte>
value::contents_writeable ()
{
  if (m_lazy)
    fetch_lazy ();
  return contents_raw ();
}
1318
/* Return true if any bit of this value is marked optimized out.  For
   a lazy value this may require fetching it first.  */

bool
value::optimized_out ()
{
  if (m_lazy)
    {
      /* See if we can compute the result without fetching the
	 value.  */
      if (this->lval () == lval_memory)
	return false;
      else if (this->lval () == lval_computed)
	{
	  const struct lval_funcs *funcs = m_location.computed.funcs;

	  if (funcs->is_optimized_out != nullptr)
	    return funcs->is_optimized_out (this);
	}

      /* Fall back to fetching.  */
      try
	{
	  fetch_lazy ();
	}
      catch (const gdb_exception_error &ex)
	{
	  switch (ex.error)
	    {
	    case MEMORY_ERROR:
	    case OPTIMIZED_OUT_ERROR:
	    case NOT_AVAILABLE_ERROR:
	      /* These can normally happen when we try to access an
		 optimized out or unavailable register, either in a
		 physical register or spilled to memory.  */
	      break;
	    default:
	      throw;
	    }
	}
    }

  /* Whatever fetch_lazy recorded (if anything) is now in the
     optimized-out range vector.  */
  return !m_optimized_out.empty ();
}
1360
1361 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1362 the following LENGTH bytes. */
1363
void
value::mark_bytes_optimized_out (int offset, int length)
{
  /* Convert the byte range to bits and delegate.  */
  mark_bits_optimized_out (offset * TARGET_CHAR_BIT,
			   length * TARGET_CHAR_BIT);
}
1370
1371 /* See value.h. */
1372
/* Record [OFFSET, OFFSET+LENGTH) (in bits) as optimized out.  */

void
value::mark_bits_optimized_out (LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&m_optimized_out, offset, length);
}
1378
1379 bool
1380 value::bits_synthetic_pointer (LONGEST offset, LONGEST length) const
1381 {
1382 if (m_lval != lval_computed
1383 || !m_location.computed.funcs->check_synthetic_pointer)
1384 return false;
1385 return m_location.computed.funcs->check_synthetic_pointer (this, offset,
1386 length);
1387 }
1388
/* Return the lval_funcs of this computed lvalue.  Must only be
   called on an lval_computed value.  */

const struct lval_funcs *
value::computed_funcs () const
{
  gdb_assert (m_lval == lval_computed);

  return m_location.computed.funcs;
}
1396
/* Return the opaque closure of this computed lvalue.  Must only be
   called on an lval_computed value.  */

void *
value::computed_closure () const
{
  gdb_assert (m_lval == lval_computed);

  return m_location.computed.closure;
}
1404
1405 CORE_ADDR
1406 value::address () const
1407 {
1408 if (m_lval != lval_memory)
1409 return 0;
1410 if (m_parent != NULL)
1411 return m_parent->address () + m_offset;
1412 if (NULL != TYPE_DATA_LOCATION (type ()))
1413 {
1414 gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
1415 return TYPE_DATA_LOCATION_ADDR (type ());
1416 }
1417
1418 return m_location.address + m_offset;
1419 }
1420
/* Return the stored address without applying parent/offset or
   data-location adjustments; 0 for a non-memory lvalue.  */

CORE_ADDR
value::raw_address () const
{
  if (m_lval != lval_memory)
    return 0;
  return m_location.address;
}
1428
/* Set the inferior address of this lval_memory value.  */

void
value::set_address (CORE_ADDR addr)
{
  gdb_assert (m_lval == lval_memory);
  m_location.address = addr;
}
1435
1436 /* Return a mark in the value chain. All values allocated after the
1437 mark is obtained (except for those released) are subject to being freed
1438 if a subsequent value_free_to_mark is passed the mark. */
struct value *
value_mark (void)
{
  /* An empty chain is represented by a null mark; value_free_to_mark
     treats a mark not found on the chain as "free everything".  */
  if (all_values.empty ())
    return nullptr;
  return all_values.back ().get ();
}
1446
1447 /* Release a reference to VAL, which was acquired with value_incref.
1448 This function is also called to deallocate values from the value
1449 chain. */
1450
void
value::decref ()
{
  gdb_assert (m_reference_count > 0);
  m_reference_count--;
  /* The last reference owns the object.  */
  if (m_reference_count == 0)
    delete this;
}
1459
1460 /* Free all values allocated since MARK was obtained by value_mark
1461 (except for those released). */
void
value_free_to_mark (const struct value *mark)
{
  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    /* MARK is not on the chain (e.g. the null mark of an
       initially-empty chain), so free everything.  */
    all_values.clear ();
  else
    /* Keep the mark itself; free everything allocated after it.  */
    all_values.erase (iter + 1, all_values.end ());
}
1471
1472 /* Remove VAL from the chain all_values
1473 so it will not be freed automatically. */
1474
1475 value_ref_ptr
1476 release_value (struct value *val)
1477 {
1478 if (val == nullptr)
1479 return value_ref_ptr ();
1480
1481 std::vector<value_ref_ptr>::reverse_iterator iter;
1482 for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
1483 {
1484 if (*iter == val)
1485 {
1486 value_ref_ptr result = *iter;
1487 all_values.erase (iter.base () - 1);
1488 return result;
1489 }
1490 }
1491
1492 /* We must always return an owned reference. Normally this happens
1493 because we transfer the reference from the value chain, but in
1494 this case the value was not on the chain. */
1495 return value_ref_ptr::new_reference (val);
1496 }
1497
1498 /* See value.h. */
1499
std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    /* MARK is not on the chain; release everything.  */
    std::swap (result, all_values);
  else
    {
      /* Move out everything after the mark, then drop the now-empty
	 chain slots.  */
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  /* Return the values oldest-last, matching the swap case's order
     relative to the erase case.  */
  std::reverse (result.begin (), result.end ());
  return result;
}
1516
1517 /* See value.h. */
1518
struct value *
value::copy () const
{
  struct type *encl_type = enclosing_type ();
  struct value *val;

  /* Start from a lazy allocation so no buffer is created unless the
     source actually has contents to duplicate (see below).  */
  val = value::allocate_lazy (encl_type);
  val->m_type = m_type;
  val->set_lval (m_lval);
  val->m_location = m_location;
  val->m_offset = m_offset;
  val->m_bitpos = m_bitpos;
  val->m_bitsize = m_bitsize;
  val->m_lazy = m_lazy;
  val->m_embedded_offset = embedded_offset ();
  val->m_pointed_to_offset = m_pointed_to_offset;
  val->m_modifiable = m_modifiable;
  val->m_stack = m_stack;
  val->m_is_zero = m_is_zero;
  val->m_in_history = m_in_history;
  val->m_initialized = m_initialized;
  val->m_unavailable = m_unavailable;
  val->m_optimized_out = m_optimized_out;
  val->m_parent = m_parent;
  val->m_limited_length = m_limited_length;

  /* Duplicate the contents only when the source has some: a lazy or
     entirely optimized-out/unavailable value has no buffer worth
     copying.  */
  if (!val->lazy ()
      && !(val->entirely_optimized_out ()
	   || val->entirely_unavailable ()))
    {
      /* Respect an artificial length limit, if one is in effect.  */
      ULONGEST length = val->m_limited_length;
      if (length == 0)
	length = val->enclosing_type ()->length ();

      gdb_assert (m_contents != nullptr);
      const auto &arg_view
	= gdb::make_array_view (m_contents.get (), length);

      val->allocate_contents (false);
      gdb::array_view<gdb_byte> val_contents
	= val->contents_all_raw ().slice (0, length);

      gdb::copy (arg_view, val_contents);
    }

  /* A computed lvalue's closure may need deep-copying; delegate to
     its lval functions.  */
  if (val->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = val->m_location.computed.funcs;

      if (funcs->copy_closure)
	val->m_location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}
1573
1574 /* Return a "const" and/or "volatile" qualified version of the value V.
1575 If CNST is true, then the returned value will be qualified with
1576 "const".
1577 if VOLTL is true, then the returned value will be qualified with
1578 "volatile". */
1579
1580 struct value *
1581 make_cv_value (int cnst, int voltl, struct value *v)
1582 {
1583 struct type *val_type = v->type ();
1584 struct type *m_enclosing_type = v->enclosing_type ();
1585 struct value *cv_val = v->copy ();
1586
1587 cv_val->deprecated_set_type (make_cv_type (cnst, voltl, val_type, NULL));
1588 cv_val->set_enclosing_type (make_cv_type (cnst, voltl, m_enclosing_type, NULL));
1589
1590 return cv_val;
1591 }
1592
1593 /* See value.h. */
1594
struct value *
value::non_lval ()
{
  /* An lvalue is detached by copying its contents into a fresh
     not_lval value; a value that is already not an lvalue is
     returned as-is.  */
  if (this->lval () != not_lval)
    {
      struct type *enc_type = enclosing_type ();
      struct value *val = value::allocate (enc_type);

      gdb::copy (contents_all (), val->contents_all_raw ());
      val->m_type = m_type;
      val->set_embedded_offset (embedded_offset ());
      val->set_pointed_to_offset (pointed_to_offset ());
      return val;
    }
  return this;
}
1611
1612 /* See value.h. */
1613
/* Turn this not_lval value into an lval_memory value by writing its
   contents to ADDR in the inferior.  */

void
value::force_lval (CORE_ADDR addr)
{
  gdb_assert (this->lval () == not_lval);

  write_memory (addr, contents_raw ().data (), type ()->length ());
  m_lval = lval_memory;
  m_location.address = addr;
}
1623
/* Make this value's location be a component of WHOLE's location:
   copy the lval kind and location, duplicating a computed closure
   where needed, and resolve constant data-location properties.  */

void
value::set_component_location (const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->m_lval != lval_xcallable);

  /* A component of an internal variable gets its own lval kind so
     writes can be routed back to the variable.  */
  if (whole->m_lval == lval_internalvar)
    m_lval = lval_internalvar_component;
  else
    m_lval = whole->m_lval;

  m_location = whole->m_location;
  if (whole->m_lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->m_location.computed.funcs;

      if (funcs->copy_closure)
	m_location.computed.closure = funcs->copy_closure (whole);
    }

  /* If the WHOLE value has a dynamically resolved location property then
     update the address of the COMPONENT.  */
  type = whole->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION (type)->is_constant ())
    set_address (TYPE_DATA_LOCATION_ADDR (type));

  /* Similarly, if the COMPONENT value has a dynamically resolved location
     property then update its address.  */
  type = this->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION (type)->is_constant ())
    {
      /* If the COMPONENT has a dynamic location, and is an
	 lval_internalvar_component, then we change it to a lval_memory.

	 Usually a component of an internalvar is created non-lazy, and has
	 its content immediately copied from the parent internalvar.
	 However, for components with a dynamic location, the content of
	 the component is not contained within the parent, but is instead
	 accessed indirectly.  Further, the component will be created as a
	 lazy value.

	 By changing the type of the component to lval_memory we ensure
	 that value_fetch_lazy can successfully load the component.

	 This solution isn't ideal, but a real fix would require values to
	 carry around both the parent value contents, and the contents of
	 any dynamic fields within the parent.  This is a substantial
	 change to how values work in GDB.  */
      if (this->lval () == lval_internalvar_component)
	{
	  gdb_assert (lazy ());
	  m_lval = lval_memory;
	}
      else
	gdb_assert (this->lval () == lval_memory);
      set_address (TYPE_DATA_LOCATION_ADDR (type));
    }
}
1685
1686 /* Access to the value history. */
1687
1688 /* Record a new value in the value history.
1689 Returns the absolute history index of the entry. */
1690
int
value::record_latest ()
{
  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (lazy ())
    {
      /* We know that this is a _huge_ array, any attempt to fetch this
	 is going to cause GDB to throw an error.  However, to allow
	 the array to still be displayed we fetch its contents up to
	 `max_value_size' and mark anything beyond "unavailable" in
	 the history.  */
      if (m_type->code () == TYPE_CODE_ARRAY
	  && m_type->length () > max_value_size
	  && array_length_limiting_element_count.has_value ()
	  && m_enclosing_type == m_type
	  && calculate_limited_array_length (m_type) <= max_value_size)
	m_limited_length = max_value_size;

      fetch_lazy ();
    }

  /* Mark the truncated tail (if any) as unavailable so printing the
     history entry shows it as such rather than as garbage.  */
  ULONGEST limit = m_limited_length;
  if (limit != 0)
    mark_bytes_unavailable (limit, m_enclosing_type->length () - limit);

  /* Mark the value as recorded in the history for the availability check.  */
  m_in_history = true;

  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  set_modifiable (false);

  value_history.push_back (release_value (this));

  /* History numbers are 1-based.  */
  return value_history.size ();
}
1731
1732 /* Return a copy of the value in the history with sequence number NUM. */
1733
1734 struct value *
1735 access_value_history (int num)
1736 {
1737 int absnum = num;
1738
1739 if (absnum <= 0)
1740 absnum += value_history.size ();
1741
1742 if (absnum <= 0)
1743 {
1744 if (num == 0)
1745 error (_("The history is empty."));
1746 else if (num == 1)
1747 error (_("There is only one value in the history."));
1748 else
1749 error (_("History does not go back to $$%d."), -num);
1750 }
1751 if (absnum > value_history.size ())
1752 error (_("History has not yet reached $%d."), absnum);
1753
1754 absnum--;
1755
1756 return value_history[absnum]->copy ();
1757 }
1758
1759 /* See value.h. */
1760
/* Return the number of entries currently in the value history.  */

ULONGEST
value_history_count ()
{
  return value_history.size ();
}
1766
/* Implement the "show values" command: print ten history entries
   starting around the requested position.  The static NUM remembers
   where the next "show values +" should resume.  */

static void
show_values (const char *num_exp, int from_tty)
{
  int i;
  struct value *val;
  static int num = 1;

  if (num_exp)
    {
      /* "show values +" should print from the stored position.
	 "show values <exp>" should print around value number <exp>.  */
      if (num_exp[0] != '+' || num_exp[1] != '\0')
	num = parse_and_eval_long (num_exp) - 5;
    }
  else
    {
      /* "show values" means print the last 10 values.  */
      num = value_history.size () - 9;
    }

  /* Clamp to the first history entry.  */
  if (num <= 0)
    num = 1;

  for (i = num; i < num + 10 && i <= value_history.size (); i++)
    {
      struct value_print_options opts;

      val = access_value_history (i);
      gdb_printf (("$%d = "), i);
      get_user_print_options (&opts);
      value_print (val, gdb_stdout, &opts);
      gdb_printf (("\n"));
    }

  /* The next "show values +" should start after what we just printed.  */
  num += 10;

  /* Hitting just return after this command should do the same thing as
     "show values +".  If num_exp is null, this is unnecessary, since
     "show values +" is not useful after "show values".  */
  if (from_tty && num_exp)
    set_repeat_arguments ("+");
}
1810 \f
/* The possible kinds of content held by an internal variable.  */

enum internalvar_kind
{
  /* The internal variable is empty.  */
  INTERNALVAR_VOID,

  /* The value of the internal variable is provided directly as
     a GDB value object.  */
  INTERNALVAR_VALUE,

  /* A fresh value is computed via a call-back routine on every
     access to the internal variable.  */
  INTERNALVAR_MAKE_VALUE,

  /* The internal variable holds a GDB internal convenience function.  */
  INTERNALVAR_FUNCTION,

  /* The variable holds an integer value.  */
  INTERNALVAR_INTEGER,

  /* The variable holds a GDB-provided string.  */
  INTERNALVAR_STRING,
};
1833
/* Data associated with an internal variable; which member is valid
   is determined by the corresponding enum internalvar_kind.  */

union internalvar_data
{
  /* A value object used with INTERNALVAR_VALUE.  */
  struct value *value;

  /* The call-back routine used with INTERNALVAR_MAKE_VALUE.  */
  struct
  {
    /* The functions to call.  */
    const struct internalvar_funcs *functions;

    /* The function's user-data.  */
    void *data;
  } make_value;

  /* The internal function used with INTERNALVAR_FUNCTION.  */
  struct
  {
    struct internal_function *function;
    /* True if this is the canonical name for the function.  */
    int canonical;
  } fn;

  /* An integer value used with INTERNALVAR_INTEGER.  */
  struct
  {
    /* If type is non-NULL, it will be used as the type to generate
       a value for this internal variable.  If type is NULL, a default
       integer type for the architecture is used.  */
    struct type *type;
    LONGEST val;
  } integer;

  /* A string value used with INTERNALVAR_STRING.  */
  char *string;
};
1870
1871 /* Internal variables. These are variables within the debugger
1872 that hold values assigned by debugger commands.
1873 The user refers to them with a '$' prefix
1874 that does not appear in the variable names stored internally. */
1875
struct internalvar
{
  internalvar (std::string name)
    : name (std::move (name))
  {}

  /* The variable's name, stored without the user-visible '$'
     prefix.  */
  std::string name;

  /* We support various different kinds of content of an internal variable.
     enum internalvar_kind specifies the kind, and union internalvar_data
     provides the data associated with this particular kind.  */

  enum internalvar_kind kind = INTERNALVAR_VOID;

  union internalvar_data u {};
};
1892
/* Use std::map, a sorted container, to make the order of iteration (and
   therefore the output of "show convenience") stable.  Keys are the
   variable names without the '$' prefix.  */

static std::map<std::string, internalvar> internalvars;
1897
1898 /* If the variable does not already exist create it and give it the
1899 value given. If no value is given then the default is zero. */
static void
init_if_undefined_command (const char* args, int from_tty)
{
  struct internalvar *intvar = nullptr;

  /* Parse the expression - this is taken from set_command().  */
  expression_up expr = parse_expression (args);

  /* Validate the expression.
     Was the expression an assignment?
     Or even an expression at all?  */
  if (expr->first_opcode () != BINOP_ASSIGN)
    error (_("Init-if-undefined requires an assignment expression."));

  /* Extract the variable from the parsed expression.  The left-hand
     side must be an internal variable reference.  */
  expr::assign_operation *assign
    = dynamic_cast<expr::assign_operation *> (expr->op.get ());
  if (assign != nullptr)
    {
      expr::operation *lhs = assign->get_lhs ();
      expr::internalvar_operation *ivarop
	= dynamic_cast<expr::internalvar_operation *> (lhs);
      if (ivarop != nullptr)
	intvar = ivarop->get_internalvar ();
    }

  if (intvar == nullptr)
    error (_("The first parameter to init-if-undefined "
	     "should be a GDB variable."));

  /* Only evaluate the expression if the lvalue is void.
     This may still fail if the expression is invalid.  */
  if (intvar->kind == INTERNALVAR_VOID)
    expr->evaluate ();
}
1935
1936
1937 /* Look up an internal variable with name NAME. NAME should not
1938 normally include a dollar sign.
1939
1940 If the specified internal variable does not exist,
1941 the return value is NULL. */
1942
1943 struct internalvar *
1944 lookup_only_internalvar (const char *name)
1945 {
1946 auto it = internalvars.find (name);
1947 if (it == internalvars.end ())
1948 return nullptr;
1949
1950 return &it->second;
1951 }
1952
1953 /* Complete NAME by comparing it to the names of internal
1954 variables. */
1955
1956 void
1957 complete_internalvar (completion_tracker &tracker, const char *name)
1958 {
1959 int len = strlen (name);
1960
1961 for (auto &pair : internalvars)
1962 {
1963 const internalvar &var = pair.second;
1964
1965 if (var.name.compare (0, len, name) == 0)
1966 tracker.add_completion (make_unique_xstrdup (var.name.c_str ()));
1967 }
1968 }
1969
1970 /* Create an internal variable with name NAME and with a void value.
1971 NAME should not normally include a dollar sign.
1972
1973 An internal variable with that name must not exist already. */
1974
1975 struct internalvar *
1976 create_internalvar (const char *name)
1977 {
1978 auto pair = internalvars.emplace (std::make_pair (name, internalvar (name)));
1979 gdb_assert (pair.second);
1980
1981 return &pair.first->second;
1982 }
1983
1984 /* Create an internal variable with name NAME and register FUN as the
1985 function that value_of_internalvar uses to create a value whenever
1986 this variable is referenced. NAME should not normally include a
1987 dollar sign. DATA is passed uninterpreted to FUN when it is
1988 called. CLEANUP, if not NULL, is called when the internal variable
1989 is destroyed. It is passed DATA as its only argument. */
1990
struct internalvar *
create_internalvar_type_lazy (const char *name,
			      const struct internalvar_funcs *funcs,
			      void *data)
{
  struct internalvar *var = create_internalvar (name);

  /* Turn the fresh void variable into one backed by the make-value
     callback; DATA is passed through to FUNCS uninterpreted.  */
  var->kind = INTERNALVAR_MAKE_VALUE;
  var->u.make_value.functions = funcs;
  var->u.make_value.data = data;
  return var;
}
2003
2004 /* See documentation in value.h. */
2005
2006 int
2007 compile_internalvar_to_ax (struct internalvar *var,
2008 struct agent_expr *expr,
2009 struct axs_value *value)
2010 {
2011 if (var->kind != INTERNALVAR_MAKE_VALUE
2012 || var->u.make_value.functions->compile_to_ax == NULL)
2013 return 0;
2014
2015 var->u.make_value.functions->compile_to_ax (var, expr, value,
2016 var->u.make_value.data);
2017 return 1;
2018 }
2019
2020 /* Look up an internal variable with name NAME. NAME should not
2021 normally include a dollar sign.
2022
2023 If the specified internal variable does not exist,
2024 one is created, with a void value. */
2025
2026 struct internalvar *
2027 lookup_internalvar (const char *name)
2028 {
2029 struct internalvar *var;
2030
2031 var = lookup_only_internalvar (name);
2032 if (var)
2033 return var;
2034
2035 return create_internalvar (name);
2036 }
2037
2038 /* Return current value of internal variable VAR. For variables that
2039 are not inherently typed, use a value type appropriate for GDBARCH. */
2040
struct value *
value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var)
{
  struct value *val;
  struct trace_state_variable *tsv;

  /* If there is a trace state variable of the same name, assume that
     is what we really want to see.  */
  tsv = find_trace_state_variable (var->name.c_str ());
  if (tsv)
    {
      tsv->value_known = target_get_trace_state_variable_value (tsv->number,
								&(tsv->value));
      /* An unknown trace state variable value is presented as void.  */
      if (tsv->value_known)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int64,
				  tsv->value);
      else
	val = value::allocate (builtin_type (gdbarch)->builtin_void);
      return val;
    }

  /* Build a value object appropriate for the variable's kind.  */
  switch (var->kind)
    {
    case INTERNALVAR_VOID:
      val = value::allocate (builtin_type (gdbarch)->builtin_void);
      break;

    case INTERNALVAR_FUNCTION:
      val = value::allocate (builtin_type (gdbarch)->internal_fn);
      break;

    case INTERNALVAR_INTEGER:
      if (!var->u.integer.type)
	val = value_from_longest (builtin_type (gdbarch)->builtin_int,
				  var->u.integer.val);
      else
	val = value_from_longest (var->u.integer.type, var->u.integer.val);
      break;

    case INTERNALVAR_STRING:
      val = current_language->value_string (gdbarch,
					    var->u.string,
					    strlen (var->u.string));
      break;

    case INTERNALVAR_VALUE:
      /* Hand out a copy so the caller cannot mutate the stored value
	 behind the variable's back.  */
      val = var->u.value->copy ();
      if (val->lazy ())
	val->fetch_lazy ();
      break;

    case INTERNALVAR_MAKE_VALUE:
      val = (*var->u.make_value.functions->make_value) (gdbarch, var,
							var->u.make_value.data);
      break;

    default:
      internal_error (_("bad kind"));
    }

  /* Change the VALUE_LVAL to lval_internalvar so that future operations
     on this value go back to affect the original internal variable.

     Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
     no underlying modifiable state in the internal variable.

     Likewise, if the variable's value is a computed lvalue, we want
     references to it to produce another computed lvalue, where
     references and assignments actually operate through the
     computed value's functions.

     This means that internal variables with computed values
     behave a little differently from other internal variables:
     assignments to them don't just replace the previous value
     altogether.  At the moment, this seems like the behavior we
     want.  */

  if (var->kind != INTERNALVAR_MAKE_VALUE
      && val->lval () != lval_computed)
    {
      val->set_lval (lval_internalvar);
      VALUE_INTERNALVAR (val) = var;
    }

  return val;
}
2127
2128 int
2129 get_internalvar_integer (struct internalvar *var, LONGEST *result)
2130 {
2131 if (var->kind == INTERNALVAR_INTEGER)
2132 {
2133 *result = var->u.integer.val;
2134 return 1;
2135 }
2136
2137 if (var->kind == INTERNALVAR_VALUE)
2138 {
2139 struct type *type = check_typedef (var->u.value->type ());
2140
2141 if (type->code () == TYPE_CODE_INT)
2142 {
2143 *result = value_as_long (var->u.value);
2144 return 1;
2145 }
2146 }
2147
2148 if (var->kind == INTERNALVAR_MAKE_VALUE)
2149 {
2150 struct gdbarch *gdbarch = get_current_arch ();
2151 struct value *val
2152 = (*var->u.make_value.functions->make_value) (gdbarch, var,
2153 var->u.make_value.data);
2154 struct type *type = check_typedef (val->type ());
2155
2156 if (type->code () == TYPE_CODE_INT)
2157 {
2158 *result = value_as_long (val);
2159 return 1;
2160 }
2161 }
2162
2163 return 0;
2164 }
2165
2166 static int
2167 get_internalvar_function (struct internalvar *var,
2168 struct internal_function **result)
2169 {
2170 switch (var->kind)
2171 {
2172 case INTERNALVAR_FUNCTION:
2173 *result = var->u.fn.function;
2174 return 1;
2175
2176 default:
2177 return 0;
2178 }
2179 }
2180
/* Write NEWVAL into the part of VAR's stored value selected by
   OFFSET/BITPOS/BITSIZE.  A non-zero BITSIZE selects a bitfield
   write; otherwise NEWVAL's bytes are copied at OFFSET.  */

void
set_internalvar_component (struct internalvar *var,
			   LONGEST offset, LONGEST bitpos,
			   LONGEST bitsize, struct value *newval)
{
  gdb_byte *addr;
  struct gdbarch *gdbarch;
  int unit_size;

  switch (var->kind)
    {
    case INTERNALVAR_VALUE:
      addr = var->u.value->contents_writeable ().data ();
      gdbarch = var->u.value->arch ();
      unit_size = gdbarch_addressable_memory_unit_size (gdbarch);

      if (bitsize)
	modify_field (var->u.value->type (), addr + offset,
		      value_as_long (newval), bitpos, bitsize);
      else
	memcpy (addr + offset * unit_size, newval->contents ().data (),
		newval->type ()->length ());
      break;

    default:
      /* We can never get a component of any other kind.  */
      internal_error (_("set_internalvar_component"));
    }
}
2210
/* Set internal variable VAR to a copy of the value VAL.  Errors out if
   VAR is a canonical convenience function (those cannot be
   overwritten).  New contents are fully prepared before the old ones
   are discarded, so a failure part-way through cannot leave VAR in a
   half-updated state.  */

void
set_internalvar (struct internalvar *var, struct value *val)
{
  enum internalvar_kind new_kind;
  union internalvar_data new_data = { 0 };

  if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical)
    error (_("Cannot overwrite convenience function %s"), var->name.c_str ());

  /* Prepare new contents.  */
  switch (check_typedef (val->type ())->code ())
    {
    case TYPE_CODE_VOID:
      new_kind = INTERNALVAR_VOID;
      break;

    case TYPE_CODE_INTERNAL_FUNCTION:
      gdb_assert (val->lval () == lval_internalvar);
      new_kind = INTERNALVAR_FUNCTION;
      get_internalvar_function (VALUE_INTERNALVAR (val),
				&new_data.fn.function);
      /* Copies created here are never canonical.  */
      break;

    default:
      new_kind = INTERNALVAR_VALUE;
      struct value *copy = val->copy ();
      copy->set_modifiable (true);

      /* Force the value to be fetched from the target now, to avoid problems
	 later when this internalvar is referenced and the target is gone or
	 has changed.  */
      if (copy->lazy ())
	copy->fetch_lazy ();

      /* Release the value from the value chain to prevent it from being
	 deleted by free_all_values.  From here on this function should not
	 call error () until new_data is installed into the var->u to avoid
	 leaking memory.  */
      new_data.value = release_value (copy).release ();

      /* Internal variables which are created from values with a dynamic
	 location don't need the location property of the origin anymore.
	 The resolved dynamic location is used prior then any other address
	 when accessing the value.
	 If we keep it, we would still refer to the origin value.
	 Remove the location property in case it exist.  */
      new_data.value->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION);

      break;
    }

  /* Clean up old contents.  */
  clear_internalvar (var);

  /* Switch over.  */
  var->kind = new_kind;
  var->u = new_data;
  /* End code which must not call error ().  */
}
2271
2272 void
2273 set_internalvar_integer (struct internalvar *var, LONGEST l)
2274 {
2275 /* Clean up old contents. */
2276 clear_internalvar (var);
2277
2278 var->kind = INTERNALVAR_INTEGER;
2279 var->u.integer.type = NULL;
2280 var->u.integer.val = l;
2281 }
2282
2283 void
2284 set_internalvar_string (struct internalvar *var, const char *string)
2285 {
2286 /* Clean up old contents. */
2287 clear_internalvar (var);
2288
2289 var->kind = INTERNALVAR_STRING;
2290 var->u.string = xstrdup (string);
2291 }
2292
2293 static void
2294 set_internalvar_function (struct internalvar *var, struct internal_function *f)
2295 {
2296 /* Clean up old contents. */
2297 clear_internalvar (var);
2298
2299 var->kind = INTERNALVAR_FUNCTION;
2300 var->u.fn.function = f;
2301 var->u.fn.canonical = 1;
2302 /* Variables installed here are always the canonical version. */
2303 }
2304
2305 void
2306 clear_internalvar (struct internalvar *var)
2307 {
2308 /* Clean up old contents. */
2309 switch (var->kind)
2310 {
2311 case INTERNALVAR_VALUE:
2312 var->u.value->decref ();
2313 break;
2314
2315 case INTERNALVAR_STRING:
2316 xfree (var->u.string);
2317 break;
2318
2319 default:
2320 break;
2321 }
2322
2323 /* Reset to void kind. */
2324 var->kind = INTERNALVAR_VOID;
2325 }
2326
2327 const char *
2328 internalvar_name (const struct internalvar *var)
2329 {
2330 return var->name.c_str ();
2331 }
2332
2333 static struct internal_function *
2334 create_internal_function (const char *name,
2335 internal_function_fn handler, void *cookie)
2336 {
2337 struct internal_function *ifn = XNEW (struct internal_function);
2338
2339 ifn->name = xstrdup (name);
2340 ifn->handler = handler;
2341 ifn->cookie = cookie;
2342 return ifn;
2343 }
2344
2345 const char *
2346 value_internal_function_name (struct value *val)
2347 {
2348 struct internal_function *ifn;
2349 int result;
2350
2351 gdb_assert (val->lval () == lval_internalvar);
2352 result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
2353 gdb_assert (result);
2354
2355 return ifn->name;
2356 }
2357
2358 struct value *
2359 call_internal_function (struct gdbarch *gdbarch,
2360 const struct language_defn *language,
2361 struct value *func, int argc, struct value **argv)
2362 {
2363 struct internal_function *ifn;
2364 int result;
2365
2366 gdb_assert (func->lval () == lval_internalvar);
2367 result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
2368 gdb_assert (result);
2369
2370 return (*ifn->handler) (gdbarch, language, ifn->cookie, argc, argv);
2371 }
2372
/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (const char *command, int from_tty)
{
  /* Intentionally empty.  */
}
2382
2383 /* Helper function that does the work for add_internal_function. */
2384
2385 static struct cmd_list_element *
2386 do_add_internal_function (const char *name, const char *doc,
2387 internal_function_fn handler, void *cookie)
2388 {
2389 struct internal_function *ifn;
2390 struct internalvar *var = lookup_internalvar (name);
2391
2392 ifn = create_internal_function (name, handler, cookie);
2393 set_internalvar_function (var, ifn);
2394
2395 return add_cmd (name, no_class, function_command, doc, &functionlist);
2396 }
2397
2398 /* See value.h. */
2399
2400 void
2401 add_internal_function (const char *name, const char *doc,
2402 internal_function_fn handler, void *cookie)
2403 {
2404 do_add_internal_function (name, doc, handler, cookie);
2405 }
2406
2407 /* See value.h. */
2408
2409 void
2410 add_internal_function (gdb::unique_xmalloc_ptr<char> &&name,
2411 gdb::unique_xmalloc_ptr<char> &&doc,
2412 internal_function_fn handler, void *cookie)
2413 {
2414 struct cmd_list_element *cmd
2415 = do_add_internal_function (name.get (), doc.get (), handler, cookie);
2416
2417 /* Manually transfer the ownership of the doc and name strings to CMD by
2418 setting the appropriate flags. */
2419 (void) doc.release ();
2420 cmd->doc_allocated = 1;
2421 (void) name.release ();
2422 cmd->name_allocated = 1;
2423 }
2424
2425 void
2426 value::preserve (struct objfile *objfile, htab_t copied_types)
2427 {
2428 if (m_type->objfile_owner () == objfile)
2429 m_type = copy_type_recursive (m_type, copied_types);
2430
2431 if (m_enclosing_type->objfile_owner () == objfile)
2432 m_enclosing_type = copy_type_recursive (m_enclosing_type, copied_types);
2433 }
2434
2435 /* Likewise for internal variable VAR. */
2436
2437 static void
2438 preserve_one_internalvar (struct internalvar *var, struct objfile *objfile,
2439 htab_t copied_types)
2440 {
2441 switch (var->kind)
2442 {
2443 case INTERNALVAR_INTEGER:
2444 if (var->u.integer.type
2445 && var->u.integer.type->objfile_owner () == objfile)
2446 var->u.integer.type
2447 = copy_type_recursive (var->u.integer.type, copied_types);
2448 break;
2449
2450 case INTERNALVAR_VALUE:
2451 var->u.value->preserve (objfile, copied_types);
2452 break;
2453 }
2454 }
2455
2456 /* Make sure that all types and values referenced by VAROBJ are updated before
2457 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2458 duplicates. */
2459
2460 static void
2461 preserve_one_varobj (struct varobj *varobj, struct objfile *objfile,
2462 htab_t copied_types)
2463 {
2464 if (varobj->type->is_objfile_owned ()
2465 && varobj->type->objfile_owner () == objfile)
2466 {
2467 varobj->type
2468 = copy_type_recursive (varobj->type, copied_types);
2469 }
2470
2471 if (varobj->value != nullptr)
2472 varobj->value->preserve (objfile, copied_types);
2473 }
2474
2475 /* Update the internal variables and value history when OBJFILE is
2476 discarded; we must copy the types out of the objfile. New global types
2477 will be created for every convenience variable which currently points to
2478 this objfile's types, and the convenience variables will be adjusted to
2479 use the new global types. */
2480
2481 void
2482 preserve_values (struct objfile *objfile)
2483 {
2484 /* Create the hash table. We allocate on the objfile's obstack, since
2485 it is soon to be deleted. */
2486 htab_up copied_types = create_copied_types_hash ();
2487
2488 for (const value_ref_ptr &item : value_history)
2489 item->preserve (objfile, copied_types.get ());
2490
2491 for (auto &pair : internalvars)
2492 preserve_one_internalvar (&pair.second, objfile, copied_types.get ());
2493
2494 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2495 all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj)
2496 {
2497 preserve_one_varobj (varobj, objfile,
2498 copied_types.get ());
2499 });
2500
2501 preserve_ext_lang_values (objfile, copied_types.get ());
2502 }
2503
2504 static void
2505 show_convenience (const char *ignore, int from_tty)
2506 {
2507 struct gdbarch *gdbarch = get_current_arch ();
2508 int varseen = 0;
2509 struct value_print_options opts;
2510
2511 get_user_print_options (&opts);
2512 for (auto &pair : internalvars)
2513 {
2514 internalvar &var = pair.second;
2515
2516 if (!varseen)
2517 {
2518 varseen = 1;
2519 }
2520 gdb_printf (("$%s = "), var.name.c_str ());
2521
2522 try
2523 {
2524 struct value *val;
2525
2526 val = value_of_internalvar (gdbarch, &var);
2527 value_print (val, gdb_stdout, &opts);
2528 }
2529 catch (const gdb_exception_error &ex)
2530 {
2531 fprintf_styled (gdb_stdout, metadata_style.style (),
2532 _("<error: %s>"), ex.what ());
2533 }
2534
2535 gdb_printf (("\n"));
2536 }
2537 if (!varseen)
2538 {
2539 /* This text does not mention convenience functions on purpose.
2540 The user can't create them except via Python, and if Python support
2541 is installed this message will never be printed ($_streq will
2542 exist). */
2543 gdb_printf (_("No debugger convenience variables now defined.\n"
2544 "Convenience variables have "
2545 "names starting with \"$\";\n"
2546 "use \"set\" as in \"set "
2547 "$foo = 5\" to define them.\n"));
2548 }
2549 }
2550 \f
2551
2552 /* See value.h. */
2553
2554 struct value *
2555 value::from_xmethod (xmethod_worker_up &&worker)
2556 {
2557 struct value *v;
2558
2559 v = value::allocate (builtin_type (current_inferior ()->arch ())->xmethod);
2560 v->m_lval = lval_xcallable;
2561 v->m_location.xm_worker = worker.release ();
2562 v->m_modifiable = false;
2563
2564 return v;
2565 }
2566
2567 /* See value.h. */
2568
2569 struct type *
2570 value::result_type_of_xmethod (gdb::array_view<value *> argv)
2571 {
2572 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2573 && m_lval == lval_xcallable && !argv.empty ());
2574
2575 return m_location.xm_worker->get_result_type (argv[0], argv.slice (1));
2576 }
2577
2578 /* See value.h. */
2579
2580 struct value *
2581 value::call_xmethod (gdb::array_view<value *> argv)
2582 {
2583 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2584 && m_lval == lval_xcallable && !argv.empty ());
2585
2586 return m_location.xm_worker->invoke (argv[0], argv.slice (1));
2587 }
2588 \f
2589 /* Extract a value as a C number (either long or double).
2590 Knows how to convert fixed values to double, or
2591 floating values to long.
2592 Does not deallocate the value. */
2593
2594 LONGEST
2595 value_as_long (struct value *val)
2596 {
2597 /* This coerces arrays and functions, which is necessary (e.g.
2598 in disassemble_command). It also dereferences references, which
2599 I suspect is the most logical thing to do. */
2600 val = coerce_array (val);
2601 return unpack_long (val->type (), val->contents ().data ());
2602 }
2603
2604 /* See value.h. */
2605
2606 gdb_mpz
2607 value_as_mpz (struct value *val)
2608 {
2609 val = coerce_array (val);
2610 struct type *type = check_typedef (val->type ());
2611
2612 switch (type->code ())
2613 {
2614 case TYPE_CODE_ENUM:
2615 case TYPE_CODE_BOOL:
2616 case TYPE_CODE_INT:
2617 case TYPE_CODE_CHAR:
2618 case TYPE_CODE_RANGE:
2619 break;
2620
2621 default:
2622 return gdb_mpz (value_as_long (val));
2623 }
2624
2625 gdb_mpz result;
2626
2627 gdb::array_view<const gdb_byte> valbytes = val->contents ();
2628 enum bfd_endian byte_order = type_byte_order (type);
2629
2630 /* Handle integers that are either not a multiple of the word size,
2631 or that are stored at some bit offset. */
2632 unsigned bit_off = 0, bit_size = 0;
2633 if (type->bit_size_differs_p ())
2634 {
2635 bit_size = type->bit_size ();
2636 if (bit_size == 0)
2637 {
2638 /* We can just handle this immediately. */
2639 return result;
2640 }
2641
2642 bit_off = type->bit_offset ();
2643
2644 unsigned n_bytes = ((bit_off % 8) + bit_size + 7) / 8;
2645 valbytes = valbytes.slice (bit_off / 8, n_bytes);
2646
2647 if (byte_order == BFD_ENDIAN_BIG)
2648 bit_off = (n_bytes * 8 - bit_off % 8 - bit_size);
2649 else
2650 bit_off %= 8;
2651 }
2652
2653 result.read (val->contents (), byte_order, type->is_unsigned ());
2654
2655 /* Shift off any low bits, if needed. */
2656 if (bit_off != 0)
2657 result >>= bit_off;
2658
2659 /* Mask off any high bits, if needed. */
2660 if (bit_size)
2661 result.mask (bit_size);
2662
2663 /* Now handle any range bias. */
2664 if (type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0)
2665 {
2666 /* Unfortunately we have to box here, because LONGEST is
2667 probably wider than long. */
2668 result += gdb_mpz (type->bounds ()->bias);
2669 }
2670
2671 return result;
2672 }
2673
/* Extract a value as a C pointer.  Functions and methods short-circuit
   to their raw address; other values are coerced and then converted
   either via the architecture's integer_to_address hook or by
   unpack_pointer.  */

CORE_ADDR
value_as_address (struct value *val)
{
  struct gdbarch *gdbarch = val->type ()->arch ();

  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
#if 0
  /* gdbarch_addr_bits_remove is wrong if we are being called for a
     non-address (e.g. argument to "signal", "info break", etc.), or
     for pointers to char, in which the low bits *are* significant.  */
  return gdbarch_addr_bits_remove (gdbarch, value_as_long (val));
#else

  /* There are several targets (IA-64, PowerPC, and others) which
     don't represent pointers to functions as simply the address of
     the function's entry point.  For example, on the IA-64, a
     function pointer points to a two-word descriptor, generated by
     the linker, which contains the function's entry point, and the
     value the IA-64 "global pointer" register should have --- to
     support position-independent code.  The linker generates
     descriptors only for those functions whose addresses are taken.

     On such targets, it's difficult for GDB to convert an arbitrary
     function address into a function pointer; it has to either find
     an existing descriptor for that function, or call malloc and
     build its own.  On some targets, it is impossible for GDB to
     build a descriptor at all: the descriptor must contain a jump
     instruction; data memory cannot be executed; and code memory
     cannot be modified.

     Upon entry to this function, if VAL is a value of type `function'
     (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
     val->address () is the address of the function.  This is what
     you'll get if you evaluate an expression like `main'.  The call
     to COERCE_ARRAY below actually does all the usual unary
     conversions, which includes converting values of type `function'
     to `pointer to function'.  This is the challenging conversion
     discussed above.  Then, `unpack_pointer' will convert that pointer
     back into an address.

     So, suppose the user types `disassemble foo' on an architecture
     with a strange function pointer representation, on which GDB
     cannot build its own descriptors, and suppose further that `foo'
     has no linker-built descriptor.  The address->pointer conversion
     will signal an error and prevent the command from running, even
     though the next step would have been to convert the pointer
     directly back into the same address.

     The following shortcut avoids this whole mess.  If VAL is a
     function, just return its address directly.  */
  if (val->type ()->code () == TYPE_CODE_FUNC
      || val->type ()->code () == TYPE_CODE_METHOD)
    return val->address ();

  val = coerce_array (val);

  /* Some architectures (e.g. Harvard), map instruction and data
     addresses onto a single large unified address space.  For
     instance: An architecture may consider a large integer in the
     range 0x10000000 .. 0x1000ffff to already represent a data
     addresses (hence not need a pointer to address conversion) while
     a small integer would still need to be converted integer to
     pointer to address.  Just assume such architectures handle all
     integer conversions in a single function.  */

  /* JimB writes:

     I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
     must admonish GDB hackers to make sure its behavior matches the
     compiler's, whenever possible.

     In general, I think GDB should evaluate expressions the same way
     the compiler does.  When the user copies an expression out of
     their source code and hands it to a `print' command, they should
     get the same value the compiler would have computed.  Any
     deviation from this rule can cause major confusion and annoyance,
     and needs to be justified carefully.  In other words, GDB doesn't
     really have the freedom to do these conversions in clever and
     useful ways.

     AndrewC pointed out that users aren't complaining about how GDB
     casts integers to pointers; they are complaining that they can't
     take an address from a disassembly listing and give it to `x/i'.
     This is certainly important.

     Adding an architecture method like integer_to_address() certainly
     makes it possible for GDB to "get it right" in all circumstances
     --- the target has complete control over how things get done, so
     people can Do The Right Thing for their target without breaking
     anyone else.  The standard doesn't specify how integers get
     converted to pointers; usually, the ABI doesn't either, but
     ABI-specific code is a more reasonable place to handle it.  */

  /* Non-pointer values on an architecture with a custom
     integer-to-address conversion go through that hook.  */
  if (!val->type ()->is_pointer_or_reference ()
      && gdbarch_integer_to_address_p (gdbarch))
    return gdbarch_integer_to_address (gdbarch, val->type (),
				       val->contents ().data ());

  return unpack_pointer (val->type (), val->contents ().data ());
#endif
}
2778 \f
/* Unpack raw data (copied from debugee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
  /* Fixed-point values are unpacked as their underlying base type.  */
  if (is_fixed_point_type (type))
    type = type->fixed_point_type_base_type ();

  enum bfd_endian byte_order = type_byte_order (type);
  enum type_code code = type->code ();
  int len = type->length ();
  int nosign = type->is_unsigned ();

  switch (code)
    {
    case TYPE_CODE_TYPEDEF:
      /* Strip the typedef and retry with the underlying type.  */
      return unpack_long (check_typedef (type), valaddr);
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      {
	LONGEST result;

	/* Types whose bit size differs from their byte length need
	   bit-level extraction.  */
	if (type->bit_size_differs_p ())
	  {
	    unsigned bit_off = type->bit_offset ();
	    unsigned bit_size = type->bit_size ();
	    if (bit_size == 0)
	      {
		/* unpack_bits_as_long doesn't handle this case the
		   way we'd like, so handle it here.  */
		result = 0;
	      }
	    else
	      result = unpack_bits_as_long (type, valaddr, bit_off, bit_size);
	  }
	else
	  {
	    if (nosign)
	      result = extract_unsigned_integer (valaddr, len, byte_order);
	    else
	      result = extract_signed_integer (valaddr, len, byte_order);
	  }
	/* Range types may store values with a bias applied.  */
	if (code == TYPE_CODE_RANGE)
	  result += type->bounds ()->bias;
	return result;
      }

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      return target_float_to_longest (valaddr, type);

    case TYPE_CODE_FIXED_POINT:
      {
	/* Read as an exact rational, then truncate to integer.  */
	gdb_mpq vq;
	vq.read_fixed_point (gdb::make_array_view (valaddr, len),
			     byte_order, nosign,
			     type->fixed_point_scaling_factor ());

	gdb_mpz vz = vq.as_integer ();
	return vz.as_integer<LONGEST> ();
      }

    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
	 whether we want this to be true eventually.  */
      return extract_typed_address (valaddr, type);

    default:
      error (_("Value can't be converted to integer."));
    }
}
2869
2870 /* Unpack raw data (copied from debugee, target byte order) at VALADDR
2871 as a CORE_ADDR, assuming the raw data is described by type TYPE.
2872 We don't assume any alignment for the raw data. Return value is in
2873 host byte order.
2874
2875 If you want functions and arrays to be coerced to pointers, and
2876 references to be dereferenced, call value_as_address() instead.
2877
2878 C++: It is assumed that the front-end has taken care of
2879 all matters concerning pointers to members. A pointer
2880 to member which reaches here is considered to be equivalent
2881 to an INT (or some size). After all, it is only an offset. */
2882
2883 CORE_ADDR
2884 unpack_pointer (struct type *type, const gdb_byte *valaddr)
2885 {
2886 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2887 whether we want this to be true eventually. */
2888 return unpack_long (type, valaddr);
2889 }
2890
2891 bool
2892 is_floating_value (struct value *val)
2893 {
2894 struct type *type = check_typedef (val->type ());
2895
2896 if (is_floating_type (type))
2897 {
2898 if (!target_float_is_valid (val->contents ().data (), type))
2899 error (_("Invalid floating value found in program."));
2900 return true;
2901 }
2902
2903 return false;
2904 }
2905
2906 \f
/* Get the value of the FIELDNO'th field (which must be static) of
   TYPE.  The member may be located either by a raw address or by its
   physical (mangled) name; in the latter case we fall back from full
   debug symbols to minimal symbols, and finally to an
   optimized-out placeholder when no symbol can be found.  */

struct value *
value_static_field (struct type *type, int fieldno)
{
  struct value *retval;

  switch (type->field (fieldno).loc_kind ())
    {
    case FIELD_LOC_KIND_PHYSADDR:
      /* The debug info gives the member's address directly; read it
	 lazily from memory.  */
      retval = value_at_lazy (type->field (fieldno).type (),
			      type->field (fieldno).loc_physaddr ());
      break;
    case FIELD_LOC_KIND_PHYSNAME:
    {
      const char *phys_name = type->field (fieldno).loc_physname ();
      /* type->field (fieldno).name (); */
      struct block_symbol sym = lookup_symbol (phys_name, 0, VAR_DOMAIN, 0);

      if (sym.symbol == NULL)
	{
	  /* With some compilers, e.g. HP aCC, static data members are
	     reported as non-debuggable symbols.  */
	  struct bound_minimal_symbol msym
	    = lookup_minimal_symbol (phys_name, NULL, NULL);
	  struct type *field_type = type->field (fieldno).type ();

	  if (!msym.minsym)
	    retval = value::allocate_optimized_out (field_type);
	  else
	    retval = value_at_lazy (field_type, msym.value_address ());
	}
      else
	retval = value_of_variable (sym.symbol, sym.block);
      break;
    }
    default:
      gdb_assert_not_reached ("unexpected field location kind");
    }

  return retval;
}
2950
2951 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2952 You have to be careful here, since the size of the data area for the value
2953 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
2954 than the old enclosing type, you have to allocate more space for the
2955 data. */
2956
2957 void
2958 value::set_enclosing_type (struct type *new_encl_type)
2959 {
2960 if (new_encl_type->length () > enclosing_type ()->length ())
2961 {
2962 check_type_length_before_alloc (new_encl_type);
2963 m_contents.reset ((gdb_byte *) xrealloc (m_contents.release (),
2964 new_encl_type->length ()));
2965 }
2966
2967 m_enclosing_type = new_encl_type;
2968 }
2969
/* See value.h.  Extract field FIELDNO of ARG_TYPE from this value, at
   byte OFFSET within it.  Handles four distinct cases: packed
   bitfields, base-class subobjects, dynamically-located members, and
   plain data members.  */

struct value *
value::primitive_field (LONGEST offset, int fieldno, struct type *arg_type)
{
  struct value *v;
  struct type *type;
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  arg_type = check_typedef (arg_type);
  type = arg_type->field (fieldno).type ();

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to print the type
     description correctly.  */
  check_typedef (type);

  if (arg_type->field (fieldno).bitsize ())
    {
      /* Handle packed fields.

	 Create a new value for the bitfield, with bitpos and bitsize
	 set.  If possible, arrange offset and bitpos so that we can
	 do a single aligned read of the size of the containing type.
	 Otherwise, adjust offset to the byte containing the first
	 bit.  Assume that the address, offset, and embedded offset
	 are sufficiently aligned.  */

      LONGEST bitpos = arg_type->field (fieldno).loc_bitpos ();
      LONGEST container_bitsize = type->length () * 8;

      v = value::allocate_lazy (type);
      v->set_bitsize (arg_type->field (fieldno).bitsize ());
      if ((bitpos % container_bitsize) + v->bitsize () <= container_bitsize
	  && type->length () <= (int) sizeof (LONGEST))
	v->set_bitpos (bitpos % container_bitsize);
      else
	v->set_bitpos (bitpos % 8);
      v->set_offset ((embedded_offset ()
		      + offset
		      + (bitpos - v->bitpos ()) / 8));
      v->set_parent (this);
      /* Only fetch now if the parent's contents are already here;
	 otherwise leave the bitfield lazy too.  */
      if (!lazy ())
	v->fetch_lazy ();
    }
  else if (fieldno < TYPE_N_BASECLASSES (arg_type))
    {
      /* This field is actually a base subobject, so preserve the
	 entire object's contents for later references to virtual
	 bases, etc.  */
      LONGEST boffset;

      /* Lazy register values with offsets are not supported.  */
      if (this->lval () == lval_register && lazy ())
	fetch_lazy ();

      /* We special case virtual inheritance here because this
	 requires access to the contents, which we would rather avoid
	 for references to ordinary fields of unavailable values.  */
      if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno))
	boffset = baseclass_offset (arg_type, fieldno,
				    contents ().data (),
				    embedded_offset (),
				    address (),
				    this);
      else
	boffset = arg_type->field (fieldno).loc_bitpos () / 8;

      if (lazy ())
	v = value::allocate_lazy (enclosing_type ());
      else
	{
	  v = value::allocate (enclosing_type ());
	  contents_copy_raw (v, 0, 0, enclosing_type ()->length ());
	}
      v->deprecated_set_type (type);
      v->set_offset (this->offset ());
      v->set_embedded_offset (offset + embedded_offset () + boffset);
    }
  else if (NULL != TYPE_DATA_LOCATION (type))
    {
      /* Field is a dynamic data member.  */

      gdb_assert (0 == offset);
      /* We expect an already resolved data location.  */
      gdb_assert (TYPE_DATA_LOCATION (type)->is_constant ());
      /* For dynamic data types defer memory allocation
	 until we actual access the value.  */
      v = value::allocate_lazy (type);
    }
  else
    {
      /* Plain old data member */
      offset += (arg_type->field (fieldno).loc_bitpos ()
		 / (HOST_CHAR_BIT * unit_size));

      /* Lazy register values with offsets are not supported.  */
      if (this->lval () == lval_register && lazy ())
	fetch_lazy ();

      if (lazy ())
	v = value::allocate_lazy (type);
      else
	{
	  v = value::allocate (type);
	  contents_copy_raw (v, v->embedded_offset (),
			     embedded_offset () + offset,
			     type_length_units (type));
	}
      v->set_offset (this->offset () + offset + embedded_offset ());
    }
  v->set_component_location (this);
  return v;
}
3087
3088 /* Given a value ARG1 of a struct or union type,
3089 extract and return the value of one of its (non-static) fields.
3090 FIELDNO says which field. */
3091
3092 struct value *
3093 value_field (struct value *arg1, int fieldno)
3094 {
3095 return arg1->primitive_field (0, fieldno, arg1->type ());
3096 }
3097
/* Return a non-virtual function as a value.
   F is the list of member functions which contains the desired method.
   J is an index into F which provides the desired method.

   We only use the symbol for its address, so be happy with either a
   full symbol or a minimal symbol.  */

struct value *
value_fn_field (struct value **arg1p, struct fn_field *f,
		int j, struct type *type,
		LONGEST offset)
{
  struct value *v;
  struct type *ftype = TYPE_FN_FIELD_TYPE (f, j);
  const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j);
  struct symbol *sym;
  struct bound_minimal_symbol msym;

  /* Look up the method by its physical (mangled) name, preferring a
     full debug symbol; fall back to a minimal symbol.  */
  sym = lookup_symbol (physname, 0, VAR_DOMAIN, 0).symbol;
  if (sym == nullptr)
    {
      msym = lookup_bound_minimal_symbol (physname);
      if (msym.minsym == NULL)
	return NULL;
    }

  v = value::allocate (ftype);
  v->set_lval (lval_memory);
  if (sym)
    {
      /* Full symbol: use the function block's entry PC directly.  */
      v->set_address (sym->value_block ()->entry_pc ());
    }
  else
    {
      /* The minimal symbol might point to a function descriptor;
	 resolve it to the actual code address instead.  */
      struct objfile *objfile = msym.objfile;
      struct gdbarch *gdbarch = objfile->arch ();

      v->set_address (gdbarch_convert_from_func_ptr_addr
		      (gdbarch, msym.value_address (),
		       current_inferior ()->top_target ()));
    }

  if (arg1p)
    {
      /* Recast the `this' value to the type the method expects.  */
      if (type != (*arg1p)->type ())
	*arg1p = value_ind (value_cast (lookup_pointer_type (type),
					value_addr (*arg1p)));

      /* Move the `this' pointer according to the offset.
	 (*arg1p)->offset () += offset; */
    }

  return v;
}
3154
3155 \f
3156
/* See value.h.  Extract a bitfield of BITSIZE bits starting at bit
   BITPOS from the buffer at VALADDR, interpreting it per FIELD_TYPE's
   byte order and signedness.  A BITSIZE of 0 means "use the whole
   field type".  */

LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (field_type);
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      /* No explicit bit size: the field occupies the whole type.  */
      bytes_read = field_type->length ();
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  /* Count how many low-order bits to discard; on big-endian targets
     the field's least significant bit sits at the high end of the
     bytes we read.  */
  if (byte_order == BFD_ENDIAN_BIG)
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!field_type->is_unsigned ())
	{
	  /* The top bit of the field is the sign bit; when set, fill
	     the bits above the field with ones.  */
	  if (val & (valmask ^ (valmask >> 1)))
	    {
	      val |= ~valmask;
	    }
	}
    }

  return val;
}
3212
3213 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3214 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3215 ORIGINAL_VALUE, which must not be NULL. See
3216 unpack_value_bits_as_long for more details. */
3217
3218 int
3219 unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr,
3220 LONGEST embedded_offset, int fieldno,
3221 const struct value *val, LONGEST *result)
3222 {
3223 int bitpos = type->field (fieldno).loc_bitpos ();
3224 int bitsize = type->field (fieldno).bitsize ();
3225 struct type *field_type = type->field (fieldno).type ();
3226 int bit_offset;
3227
3228 gdb_assert (val != NULL);
3229
3230 bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
3231 if (val->bits_any_optimized_out (bit_offset, bitsize)
3232 || !val->bits_available (bit_offset, bitsize))
3233 return 0;
3234
3235 *result = unpack_bits_as_long (field_type, valaddr + embedded_offset,
3236 bitpos, bitsize);
3237 return 1;
3238 }
3239
3240 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3241 object at VALADDR. See unpack_bits_as_long for more details. */
3242
3243 LONGEST
3244 unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
3245 {
3246 int bitpos = type->field (fieldno).loc_bitpos ();
3247 int bitsize = type->field (fieldno).bitsize ();
3248 struct type *field_type = type->field (fieldno).type ();
3249
3250 return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
3251 }
3252
3253 /* See value.h. */
3254
void
value::unpack_bitfield (struct value *dest_val,
			LONGEST bitpos, LONGEST bitsize,
			const gdb_byte *valaddr, LONGEST embedded_offset)
  const
{
  enum bfd_endian byte_order;
  int src_bit_offset;
  int dst_bit_offset;
  struct type *field_type = dest_val->type ();

  byte_order = type_byte_order (field_type);

  /* First, unpack and sign extend the bitfield as if it was wholly
     valid.  Optimized out/unavailable bits are read as zero, but
     that's OK, as they'll end up marked below.  If the VAL is
     wholly-invalid we may have skipped allocating its contents,
     though.  See value::allocate_optimized_out.  */
  if (valaddr != NULL)
    {
      LONGEST num;

      num = unpack_bits_as_long (field_type, valaddr + embedded_offset,
				 bitpos, bitsize);
      store_signed_integer (dest_val->contents_raw ().data (),
			    field_type->length (), byte_order, num);
    }

  /* Now copy the optimized out / unavailability ranges to the right
     bits.  On big-endian targets the unpacked field occupies the
     high-order (trailing) end of DEST_VAL's buffer, so the
     destination bit offset is shifted accordingly.  */
  src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos;
  if (byte_order == BFD_ENDIAN_BIG)
    dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize;
  else
    dst_bit_offset = 0;
  ranges_copy_adjusted (dest_val, dst_bit_offset, src_bit_offset, bitsize);
}
3292
3293 /* Return a new value with type TYPE, which is FIELDNO field of the
3294 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3295 of VAL. If the VAL's contents required to extract the bitfield
3296 from are unavailable/optimized out, the new value is
3297 correspondingly marked unavailable/optimized out. */
3298
3299 struct value *
3300 value_field_bitfield (struct type *type, int fieldno,
3301 const gdb_byte *valaddr,
3302 LONGEST embedded_offset, const struct value *val)
3303 {
3304 int bitpos = type->field (fieldno).loc_bitpos ();
3305 int bitsize = type->field (fieldno).bitsize ();
3306 struct value *res_val = value::allocate (type->field (fieldno).type ());
3307
3308 val->unpack_bitfield (res_val, bitpos, bitsize, valaddr, embedded_offset);
3309
3310 return res_val;
3311 }
3312
3313 /* Modify the value of a bitfield. ADDR points to a block of memory in
3314 target byte order; the bitfield starts in the byte pointed to. FIELDVAL
3315 is the desired value of the field, in host byte order. BITPOS and BITSIZE
3316 indicate which bits (in target bit order) comprise the bitfield.
3317 Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
3318 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */
3319
void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  /* Mask covering the low BITSIZE bits.  */
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */

  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  /* Clear the field's old bits, then merge in the new value.  */
  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
3364 \f
3365 /* Pack NUM into BUF using a target format of TYPE. */
3366
void
pack_long (gdb_byte *buf, struct type *type, LONGEST num)
{
  enum bfd_endian byte_order = type_byte_order (type);
  LONGEST len;

  type = check_typedef (type);
  len = type->length ();

  switch (type->code ())
    {
    case TYPE_CODE_RANGE:
      /* Range types are stored biased; remove the bias before
	 encoding, then fall through to the integer case.  */
      num -= type->bounds ()->bias;
      [[fallthrough]];
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_MEMBERPTR:
      if (type->bit_size_differs_p ())
	{
	  /* The value bits occupy only part of the storage unit: keep
	     just BIT_SIZE bits and slide them to the type's bit
	     offset.  */
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_signed_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_longest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered for integer constant."),
	     type->code ());
    }
}
3413
3414
/* Pack unsigned NUM into BUF using a target format of TYPE.  Unsigned
   counterpart of pack_long, above.  */

static void
pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num)
{
  LONGEST len;
  enum bfd_endian byte_order;

  type = check_typedef (type);
  len = type->length ();
  byte_order = type_byte_order (type);

  switch (type->code ())
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_FLAGS:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_MEMBERPTR:
      if (type->bit_size_differs_p ())
	{
	  /* The value bits occupy only part of the storage unit: keep
	     just BIT_SIZE bits and slide them to the type's bit
	     offset.  */
	  unsigned bit_off = type->bit_offset ();
	  unsigned bit_size = type->bit_size ();
	  num &= ((ULONGEST) 1 << bit_size) - 1;
	  num <<= bit_off;
	}
      store_unsigned_integer (buf, len, byte_order, num);
      break;

    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_PTR:
      store_typed_address (buf, type, (CORE_ADDR) num);
      break;

    case TYPE_CODE_FLT:
    case TYPE_CODE_DECFLOAT:
      target_float_from_ulongest (buf, type, num);
      break;

    default:
      error (_("Unexpected type (%d) encountered "
	       "for unsigned integer constant."),
	     type->code ());
    }
}
3463
3464 /* See value.h. */
3465
3466 struct value *
3467 value::zero (struct type *type, enum lval_type lv)
3468 {
3469 struct value *val = value::allocate_lazy (type);
3470
3471 val->set_lval (lv == lval_computed ? not_lval : lv);
3472 val->m_is_zero = true;
3473 return val;
3474 }
3475
3476 /* Convert C numbers into newly allocated values. */
3477
3478 struct value *
3479 value_from_longest (struct type *type, LONGEST num)
3480 {
3481 struct value *val = value::allocate (type);
3482
3483 pack_long (val->contents_raw ().data (), type, num);
3484 return val;
3485 }
3486
3487
3488 /* Convert C unsigned numbers into newly allocated values. */
3489
3490 struct value *
3491 value_from_ulongest (struct type *type, ULONGEST num)
3492 {
3493 struct value *val = value::allocate (type);
3494
3495 pack_unsigned_long (val->contents_raw ().data (), type, num);
3496
3497 return val;
3498 }
3499
3500 /* See value.h. */
3501
struct value *
value_from_mpz (struct type *type, const gdb_mpz &v)
{
  struct type *real_type = check_typedef (type);

  /* V is read-only.  Whenever a transformation (bias removal, bit
     repositioning) is required, the number is first copied into
     STORAGE and VAL is re-pointed there.  */
  const gdb_mpz *val = &v;
  gdb_mpz storage;
  if (real_type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0)
    {
      storage = *val;
      val = &storage;
      storage -= type->bounds ()->bias;
    }

  if (type->bit_size_differs_p ())
    {
      unsigned bit_off = type->bit_offset ();
      unsigned bit_size = type->bit_size ();

      /* Copy into STORAGE only if the bias branch above has not
	 already done so.  */
      if (val != &storage)
	{
	  storage = *val;
	  val = &storage;
	}

      /* Keep just BIT_SIZE low bits, then shift them into the type's
	 bit position.  */
      storage.mask (bit_size);
      storage <<= bit_off;
    }

  struct value *result = value::allocate (type);
  val->truncate (result->contents_raw (), type_byte_order (type),
		 type->is_unsigned ());
  return result;
}
3536
3537 /* Create a value representing a pointer of type TYPE to the address
3538 ADDR. */
3539
3540 struct value *
3541 value_from_pointer (struct type *type, CORE_ADDR addr)
3542 {
3543 struct value *val = value::allocate (type);
3544
3545 store_typed_address (val->contents_raw ().data (),
3546 check_typedef (type), addr);
3547 return val;
3548 }
3549
3550 /* Create and return a value object of TYPE containing the value D. The
3551 TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
3552 it is converted to target format. */
3553
3554 struct value *
3555 value_from_host_double (struct type *type, double d)
3556 {
3557 struct value *value = value::allocate (type);
3558 gdb_assert (type->code () == TYPE_CODE_FLT);
3559 target_float_from_host_double (value->contents_raw ().data (),
3560 value->type (), d);
3561 return value;
3562 }
3563
3564 /* Create a value of type TYPE whose contents come from VALADDR, if it
3565 is non-null, and whose memory address (in the inferior) is
3566 ADDRESS. The type of the created value may differ from the passed
3567 type TYPE. Make sure to retrieve values new type after this call.
3568 Note that TYPE is not passed through resolve_dynamic_type; this is
3569 a special API intended for use only by Ada. */
3570
3571 struct value *
3572 value_from_contents_and_address_unresolved (struct type *type,
3573 const gdb_byte *valaddr,
3574 CORE_ADDR address)
3575 {
3576 struct value *v;
3577
3578 if (valaddr == NULL)
3579 v = value::allocate_lazy (type);
3580 else
3581 v = value_from_contents (type, valaddr);
3582 v->set_lval (lval_memory);
3583 v->set_address (address);
3584 return v;
3585 }
3586
3587 /* Create a value of type TYPE whose contents come from VALADDR, if it
3588 is non-null, and whose memory address (in the inferior) is
3589 ADDRESS. The type of the created value may differ from the passed
3590 type TYPE. Make sure to retrieve values new type after this call. */
3591
struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address,
				 frame_info_ptr frame)
{
  /* Resolve dynamic properties of TYPE against the supplied contents
     (when available) and ADDRESS; this is why the resulting value's
     type may differ from TYPE.  */
  gdb::array_view<const gdb_byte> view;
  if (valaddr != nullptr)
    view = gdb::make_array_view (valaddr, type->length ());
  struct type *resolved_type = resolve_dynamic_type (type, view, address,
						     &frame);
  struct type *resolved_type_no_typedef = check_typedef (resolved_type);
  struct value *v;

  if (valaddr == NULL)
    v = value::allocate_lazy (resolved_type);
  else
    v = value_from_contents (resolved_type, valaddr);
  /* A constant data-location property on the resolved type overrides
     the caller-supplied address.  */
  if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL
      && TYPE_DATA_LOCATION (resolved_type_no_typedef)->is_constant ())
    address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef);
  v->set_lval (lval_memory);
  v->set_address (address);
  return v;
}
3617
3618 /* Create a value of type TYPE holding the contents CONTENTS.
3619 The new value is `not_lval'. */
3620
3621 struct value *
3622 value_from_contents (struct type *type, const gdb_byte *contents)
3623 {
3624 struct value *result;
3625
3626 result = value::allocate (type);
3627 memcpy (result->contents_raw ().data (), contents, type->length ());
3628 return result;
3629 }
3630
3631 /* Extract a value from the history file. Input will be of the form
3632 $digits or $$digits. See block comment above 'write_dollar_variable'
3633 for details. */
3634
3635 struct value *
3636 value_from_history_ref (const char *h, const char **endp)
3637 {
3638 int index, len;
3639
3640 if (h[0] == '$')
3641 len = 1;
3642 else
3643 return NULL;
3644
3645 if (h[1] == '$')
3646 len = 2;
3647
3648 /* Find length of numeral string. */
3649 for (; isdigit (h[len]); len++)
3650 ;
3651
3652 /* Make sure numeral string is not part of an identifier. */
3653 if (h[len] == '_' || isalpha (h[len]))
3654 return NULL;
3655
3656 /* Now collect the index value. */
3657 if (h[1] == '$')
3658 {
3659 if (len == 2)
3660 {
3661 /* For some bizarre reason, "$$" is equivalent to "$$1",
3662 rather than to "$$0" as it ought to be! */
3663 index = -1;
3664 *endp += len;
3665 }
3666 else
3667 {
3668 char *local_end;
3669
3670 index = -strtol (&h[2], &local_end, 10);
3671 *endp = local_end;
3672 }
3673 }
3674 else
3675 {
3676 if (len == 1)
3677 {
3678 /* "$" is equivalent to "$0". */
3679 index = 0;
3680 *endp += len;
3681 }
3682 else
3683 {
3684 char *local_end;
3685
3686 index = strtol (&h[1], &local_end, 10);
3687 *endp = local_end;
3688 }
3689 }
3690
3691 return access_value_history (index);
3692 }
3693
3694 /* Get the component value (offset by OFFSET bytes) of a struct or
3695 union WHOLE. Component's type is TYPE. */
3696
3697 struct value *
3698 value_from_component (struct value *whole, struct type *type, LONGEST offset)
3699 {
3700 struct value *v;
3701
3702 if (whole->lval () == lval_memory && whole->lazy ())
3703 v = value::allocate_lazy (type);
3704 else
3705 {
3706 v = value::allocate (type);
3707 whole->contents_copy (v, v->embedded_offset (),
3708 whole->embedded_offset () + offset,
3709 type_length_units (type));
3710 }
3711 v->set_offset (whole->offset () + offset + whole->embedded_offset ());
3712 v->set_component_location (whole);
3713
3714 return v;
3715 }
3716
3717 /* See value.h. */
3718
struct value *
value::from_component_bitsize (struct type *type,
			       LONGEST bit_offset, LONGEST bit_length)
{
  gdb_assert (!lazy ());

  /* Preserve lvalue-ness if possible.  This is needed to avoid
     array-printing failures (including crashes) when printing Ada
     arrays in programs compiled with -fgnat-encodings=all.  */
  if ((bit_offset % TARGET_CHAR_BIT) == 0
      && (bit_length % TARGET_CHAR_BIT) == 0
      && bit_length == TARGET_CHAR_BIT * type->length ())
    return value_from_component (this, type, bit_offset / TARGET_CHAR_BIT);

  struct value *v = value::allocate (type);

  /* For big-endian scalars narrower than TYPE, place the bits at the
     tail of the destination buffer, where the least-significant part
     of a big-endian scalar lives.  */
  LONGEST dst_offset = TARGET_CHAR_BIT * v->embedded_offset ();
  if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG)
    dst_offset += TARGET_CHAR_BIT * type->length () - bit_length;

  contents_copy_raw_bitwise (v, dst_offset,
			     TARGET_CHAR_BIT
			     * embedded_offset ()
			     + bit_offset,
			     bit_length);
  return v;
}
3746
3747 struct value *
3748 coerce_ref_if_computed (const struct value *arg)
3749 {
3750 const struct lval_funcs *funcs;
3751
3752 if (!TYPE_IS_REFERENCE (check_typedef (arg->type ())))
3753 return NULL;
3754
3755 if (arg->lval () != lval_computed)
3756 return NULL;
3757
3758 funcs = arg->computed_funcs ();
3759 if (funcs->coerce_ref == NULL)
3760 return NULL;
3761
3762 return funcs->coerce_ref (arg);
3763 }
3764
3765 /* Look at value.h for description. */
3766
3767 struct value *
3768 readjust_indirect_value_type (struct value *value, struct type *enc_type,
3769 const struct type *original_type,
3770 struct value *original_value,
3771 CORE_ADDR original_value_address)
3772 {
3773 gdb_assert (original_type->is_pointer_or_reference ());
3774
3775 struct type *original_target_type = original_type->target_type ();
3776 gdb::array_view<const gdb_byte> view;
3777 struct type *resolved_original_target_type
3778 = resolve_dynamic_type (original_target_type, view,
3779 original_value_address);
3780
3781 /* Re-adjust type. */
3782 value->deprecated_set_type (resolved_original_target_type);
3783
3784 /* Add embedding info. */
3785 value->set_enclosing_type (enc_type);
3786 value->set_embedded_offset (original_value->pointed_to_offset ());
3787
3788 /* We may be pointing to an object of some derived type. */
3789 return value_full_object (value, NULL, 0, 0, 0);
3790 }
3791
3792 struct value *
3793 coerce_ref (struct value *arg)
3794 {
3795 struct type *value_type_arg_tmp = check_typedef (arg->type ());
3796 struct value *retval;
3797 struct type *enc_type;
3798
3799 retval = coerce_ref_if_computed (arg);
3800 if (retval)
3801 return retval;
3802
3803 if (!TYPE_IS_REFERENCE (value_type_arg_tmp))
3804 return arg;
3805
3806 enc_type = check_typedef (arg->enclosing_type ());
3807 enc_type = enc_type->target_type ();
3808
3809 CORE_ADDR addr = unpack_pointer (arg->type (), arg->contents ().data ());
3810 retval = value_at_lazy (enc_type, addr);
3811 enc_type = retval->type ();
3812 return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp,
3813 arg, addr);
3814 }
3815
3816 struct value *
3817 coerce_array (struct value *arg)
3818 {
3819 struct type *type;
3820
3821 arg = coerce_ref (arg);
3822 type = check_typedef (arg->type ());
3823
3824 switch (type->code ())
3825 {
3826 case TYPE_CODE_ARRAY:
3827 if (!type->is_vector () && current_language->c_style_arrays_p ())
3828 arg = value_coerce_array (arg);
3829 break;
3830 case TYPE_CODE_FUNC:
3831 arg = value_coerce_function (arg);
3832 break;
3833 }
3834 return arg;
3835 }
3836 \f
3837
3838 /* Return the return value convention that will be used for the
3839 specified type. */
3840
3841 enum return_value_convention
3842 struct_return_convention (struct gdbarch *gdbarch,
3843 struct value *function, struct type *value_type)
3844 {
3845 enum type_code code = value_type->code ();
3846
3847 if (code == TYPE_CODE_ERROR)
3848 error (_("Function return type unknown."));
3849
3850 /* Probe the architecture for the return-value convention. */
3851 return gdbarch_return_value_as_value (gdbarch, function, value_type,
3852 NULL, NULL, NULL);
3853 }
3854
3855 /* Return true if the function returning the specified type is using
3856 the convention of returning structures in memory (passing in the
3857 address as a hidden first parameter). */
3858
3859 int
3860 using_struct_return (struct gdbarch *gdbarch,
3861 struct value *function, struct type *value_type)
3862 {
3863 if (value_type->code () == TYPE_CODE_VOID)
3864 /* A void return value is never in memory. See also corresponding
3865 code in "print_return_value". */
3866 return 0;
3867
3868 return (struct_return_convention (gdbarch, function, value_type)
3869 != RETURN_VALUE_REGISTER_CONVENTION);
3870 }
3871
3872 /* See value.h. */
3873
void
value::fetch_lazy_bitfield ()
{
  gdb_assert (bitsize () != 0);

  /* To read a lazy bitfield, read the entire enclosing value.  This
     prevents reading the same block of (possibly volatile) memory once
     per bitfield.  It would be even better to read only the containing
     word, but we have no way to record that just specific bits of a
     value have been fetched.  */
  struct value *parent = this->parent ();

  if (parent->lazy ())
    parent->fetch_lazy ();

  /* Extract this value's bits out of the now-fetched parent.  */
  parent->unpack_bitfield (this, bitpos (), bitsize (),
			   parent->contents_for_printing ().data (),
			   offset ());
}
3893
3894 /* See value.h. */
3895
void
value::fetch_lazy_memory ()
{
  gdb_assert (m_lval == lval_memory);

  CORE_ADDR addr = address ();
  struct type *type = check_typedef (enclosing_type ());

  /* Figure out how much we should copy from memory.  Usually, this is just
     the size of the type, but, for arrays, we might only be loading a
     small part of the array (this is only done for very large arrays).  */
  int len = 0;
  if (m_limited_length > 0)
    {
      /* A limited length is only ever set for array values.  */
      gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY);
      len = m_limited_length;
    }
  else if (type->length () > 0)
    len = type_length_units (type);

  gdb_assert (len >= 0);

  /* Zero-length types have nothing to read.  */
  if (len > 0)
    read_value_memory (this, 0, stack (), addr,
		       contents_all_raw ().data (), len);
}
3922
3923 /* See value.h. */
3924
void
value::fetch_lazy_register ()
{
  struct type *type = check_typedef (this->type ());
  struct value *new_val = this;

  /* Mark the value chain so intermediate values created below are
     released when we return.  */
  scoped_value_mark mark;

  /* Offsets are not supported here; lazy register values must
     refer to the entire register.  */
  gdb_assert (offset () == 0);

  /* Follow the chain of lazy lval_register values; each iteration
     unwinds the register through one more frame.  */
  while (new_val->lval () == lval_register && new_val->lazy ())
    {
      frame_id next_frame_id = new_val->next_frame_id ();
      frame_info_ptr next_frame = frame_find_by_id (next_frame_id);
      gdb_assert (next_frame != NULL);

      int regnum = new_val->regnum ();

      /* Convertible register routines are used for multi-register
	 values and for interpretation in different types
	 (e.g. float or int from a double register).  Lazy
	 register values should have the register's natural type,
	 so they do not apply.  */
      gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame),
					       regnum, type));

      new_val = frame_unwind_register_value (next_frame, regnum);

      /* If we get another lazy lval_register value, it means the
	 register is found by reading it from NEXT_FRAME's next frame.
	 frame_unwind_register_value should never return a value with
	 the frame id pointing to NEXT_FRAME.  If it does, it means we
	 either have two consecutive frames with the same frame id
	 in the frame chain, or some code is trying to unwind
	 behind get_prev_frame's back (e.g., a frame unwind
	 sniffer trying to unwind), bypassing its validations.  In
	 any case, it should always be an internal error to end up
	 in this situation.  */
      if (new_val->lval () == lval_register
	  && new_val->lazy ()
	  && new_val->next_frame_id () == next_frame_id)
	internal_error (_("infinite loop while fetching a register"));
    }

  /* If it's still lazy (for instance, a saved register on the
     stack), fetch it.  */
  if (new_val->lazy ())
    new_val->fetch_lazy ();

  /* Copy the contents and the unavailability/optimized-out
     meta-data from NEW_VAL to VAL.  */
  set_lazy (false);
  new_val->contents_copy (this, embedded_offset (),
			  new_val->embedded_offset (),
			  type_length_units (type));

  /* With "set debug frame on", describe where the register was
     eventually found.  */
  if (frame_debug)
    {
      frame_info_ptr frame = frame_find_by_id (this->next_frame_id ());
      frame = get_prev_frame_always (frame);
      int regnum = this->regnum ();
      gdbarch *gdbarch = get_frame_arch (frame);

      string_file debug_file;
      gdb_printf (&debug_file,
		  "(frame=%d, regnum=%d(%s), ...) ",
		  frame_relative_level (frame), regnum,
		  user_reg_map_regnum_to_name (gdbarch, regnum));

      gdb_printf (&debug_file, "->");
      if (new_val->optimized_out ())
	{
	  gdb_printf (&debug_file, " ");
	  val_print_optimized_out (new_val, &debug_file);
	}
      else
	{
	  int i;
	  gdb::array_view<const gdb_byte> buf = new_val->contents ();

	  if (new_val->lval () == lval_register)
	    gdb_printf (&debug_file, " register=%d", new_val->regnum ());
	  else if (new_val->lval () == lval_memory)
	    gdb_printf (&debug_file, " address=%s",
			paddress (gdbarch,
				  new_val->address ()));
	  else
	    gdb_printf (&debug_file, " computed");

	  gdb_printf (&debug_file, " bytes=");
	  gdb_printf (&debug_file, "[");
	  for (i = 0; i < register_size (gdbarch, regnum); i++)
	    gdb_printf (&debug_file, "%02x", buf[i]);
	  gdb_printf (&debug_file, "]");
	}

      frame_debug_printf ("%s", debug_file.c_str ());
    }
}
4026
4027 /* See value.h. */
4028
void
value::fetch_lazy ()
{
  gdb_assert (lazy ());

  /* Make sure the contents buffer exists before fetching into it.  */
  allocate_contents (true);

  /* A value is either lazy, or fully fetched.  The
     availability/validity is only established as we try to fetch a
     value.  */
  gdb_assert (m_optimized_out.empty ());
  gdb_assert (m_unavailable.empty ());
  if (m_is_zero)
    {
      /* Nothing.  */
    }
  else if (bitsize ())
    /* Bitfields are fetched via their parent value.  */
    fetch_lazy_bitfield ();
  else if (this->lval () == lval_memory)
    fetch_lazy_memory ();
  else if (this->lval () == lval_register)
    fetch_lazy_register ();
  else if (this->lval () == lval_computed
	   && computed_funcs ()->read != NULL)
    computed_funcs ()->read (this);
  else
    internal_error (_("Unexpected lazy value type."));

  set_lazy (false);
}
4057
4058 /* See value.h. */
4059
4060 value *
4061 pseudo_from_raw_part (frame_info_ptr next_frame, int pseudo_reg_num,
4062 int raw_reg_num, int raw_offset)
4063 {
4064 value *pseudo_reg_val
4065 = value::allocate_register (next_frame, pseudo_reg_num);
4066 value *raw_reg_val = value_of_register (raw_reg_num, next_frame);
4067 raw_reg_val->contents_copy (pseudo_reg_val, 0, raw_offset,
4068 pseudo_reg_val->type ()->length ());
4069 return pseudo_reg_val;
4070 }
4071
4072 /* See value.h. */
4073
4074 void
4075 pseudo_to_raw_part (frame_info_ptr next_frame,
4076 gdb::array_view<const gdb_byte> pseudo_buf,
4077 int raw_reg_num, int raw_offset)
4078 {
4079 int raw_reg_size
4080 = register_size (frame_unwind_arch (next_frame), raw_reg_num);
4081
4082 /* When overflowing a register, put_frame_register_bytes writes to the
4083 subsequent registers. We don't want that behavior here, so make sure
4084 the write is wholly within register RAW_REG_NUM. */
4085 gdb_assert (raw_offset + pseudo_buf.size () <= raw_reg_size);
4086 put_frame_register_bytes (next_frame, raw_reg_num, raw_offset, pseudo_buf);
4087 }
4088
4089 /* See value.h. */
4090
4091 value *
4092 pseudo_from_concat_raw (frame_info_ptr next_frame, int pseudo_reg_num,
4093 int raw_reg_1_num, int raw_reg_2_num)
4094 {
4095 value *pseudo_reg_val
4096 = value::allocate_register (next_frame, pseudo_reg_num);
4097 int dst_offset = 0;
4098
4099 value *raw_reg_1_val = value_of_register (raw_reg_1_num, next_frame);
4100 raw_reg_1_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4101 raw_reg_1_val->type ()->length ());
4102 dst_offset += raw_reg_1_val->type ()->length ();
4103
4104 value *raw_reg_2_val = value_of_register (raw_reg_2_num, next_frame);
4105 raw_reg_2_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4106 raw_reg_2_val->type ()->length ());
4107 dst_offset += raw_reg_2_val->type ()->length ();
4108
4109 gdb_assert (dst_offset == pseudo_reg_val->type ()->length ());
4110
4111 return pseudo_reg_val;
4112 }
4113
4114 /* See value.h. */
4115
4116 void
4117 pseudo_to_concat_raw (frame_info_ptr next_frame,
4118 gdb::array_view<const gdb_byte> pseudo_buf,
4119 int raw_reg_1_num, int raw_reg_2_num)
4120 {
4121 int src_offset = 0;
4122 gdbarch *arch = frame_unwind_arch (next_frame);
4123
4124 int raw_reg_1_size = register_size (arch, raw_reg_1_num);
4125 put_frame_register (next_frame, raw_reg_1_num,
4126 pseudo_buf.slice (src_offset, raw_reg_1_size));
4127 src_offset += raw_reg_1_size;
4128
4129 int raw_reg_2_size = register_size (arch, raw_reg_2_num);
4130 put_frame_register (next_frame, raw_reg_2_num,
4131 pseudo_buf.slice (src_offset, raw_reg_2_size));
4132 src_offset += raw_reg_2_size;
4133
4134 gdb_assert (src_offset == pseudo_buf.size ());
4135 }
4136
4137 /* See value.h. */
4138
4139 value *
4140 pseudo_from_concat_raw (frame_info_ptr next_frame, int pseudo_reg_num,
4141 int raw_reg_1_num, int raw_reg_2_num,
4142 int raw_reg_3_num)
4143 {
4144 value *pseudo_reg_val
4145 = value::allocate_register (next_frame, pseudo_reg_num);
4146 int dst_offset = 0;
4147
4148 value *raw_reg_1_val = value_of_register (raw_reg_1_num, next_frame);
4149 raw_reg_1_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4150 raw_reg_1_val->type ()->length ());
4151 dst_offset += raw_reg_1_val->type ()->length ();
4152
4153 value *raw_reg_2_val = value_of_register (raw_reg_2_num, next_frame);
4154 raw_reg_2_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4155 raw_reg_2_val->type ()->length ());
4156 dst_offset += raw_reg_2_val->type ()->length ();
4157
4158 value *raw_reg_3_val = value_of_register (raw_reg_3_num, next_frame);
4159 raw_reg_3_val->contents_copy (pseudo_reg_val, dst_offset, 0,
4160 raw_reg_3_val->type ()->length ());
4161 dst_offset += raw_reg_3_val->type ()->length ();
4162
4163 gdb_assert (dst_offset == pseudo_reg_val->type ()->length ());
4164
4165 return pseudo_reg_val;
4166 }
4167
4168 /* See value.h. */
4169
4170 void
4171 pseudo_to_concat_raw (frame_info_ptr next_frame,
4172 gdb::array_view<const gdb_byte> pseudo_buf,
4173 int raw_reg_1_num, int raw_reg_2_num, int raw_reg_3_num)
4174 {
4175 int src_offset = 0;
4176 gdbarch *arch = frame_unwind_arch (next_frame);
4177
4178 int raw_reg_1_size = register_size (arch, raw_reg_1_num);
4179 put_frame_register (next_frame, raw_reg_1_num,
4180 pseudo_buf.slice (src_offset, raw_reg_1_size));
4181 src_offset += raw_reg_1_size;
4182
4183 int raw_reg_2_size = register_size (arch, raw_reg_2_num);
4184 put_frame_register (next_frame, raw_reg_2_num,
4185 pseudo_buf.slice (src_offset, raw_reg_2_size));
4186 src_offset += raw_reg_2_size;
4187
4188 int raw_reg_3_size = register_size (arch, raw_reg_3_num);
4189 put_frame_register (next_frame, raw_reg_3_num,
4190 pseudo_buf.slice (src_offset, raw_reg_3_size));
4191 src_offset += raw_reg_3_size;
4192
4193 gdb_assert (src_offset == pseudo_buf.size ());
4194 }
4195
4196 /* Implementation of the convenience function $_isvoid. */
4197
4198 static struct value *
4199 isvoid_internal_fn (struct gdbarch *gdbarch,
4200 const struct language_defn *language,
4201 void *cookie, int argc, struct value **argv)
4202 {
4203 int ret;
4204
4205 if (argc != 1)
4206 error (_("You must provide one argument for $_isvoid."));
4207
4208 ret = argv[0]->type ()->code () == TYPE_CODE_VOID;
4209
4210 return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
4211 }
4212
4213 /* Implementation of the convenience function $_creal. Extracts the
4214 real part from a complex number. */
4215
4216 static struct value *
4217 creal_internal_fn (struct gdbarch *gdbarch,
4218 const struct language_defn *language,
4219 void *cookie, int argc, struct value **argv)
4220 {
4221 if (argc != 1)
4222 error (_("You must provide one argument for $_creal."));
4223
4224 value *cval = argv[0];
4225 type *ctype = check_typedef (cval->type ());
4226 if (ctype->code () != TYPE_CODE_COMPLEX)
4227 error (_("expected a complex number"));
4228 return value_real_part (cval);
4229 }
4230
4231 /* Implementation of the convenience function $_cimag. Extracts the
4232 imaginary part from a complex number. */
4233
4234 static struct value *
4235 cimag_internal_fn (struct gdbarch *gdbarch,
4236 const struct language_defn *language,
4237 void *cookie, int argc,
4238 struct value **argv)
4239 {
4240 if (argc != 1)
4241 error (_("You must provide one argument for $_cimag."));
4242
4243 value *cval = argv[0];
4244 type *ctype = check_typedef (cval->type ());
4245 if (ctype->code () != TYPE_CODE_COMPLEX)
4246 error (_("expected a complex number"));
4247 return value_imaginary_part (cval);
4248 }
4249
4250 #if GDB_SELF_TEST
4251 namespace selftests
4252 {
4253
4254 /* Test the ranges_contain function. */
4255
4256 static void
4257 test_ranges_contain ()
4258 {
4259 std::vector<range> ranges;
4260 range r;
4261
4262 /* [10, 14] */
4263 r.offset = 10;
4264 r.length = 5;
4265 ranges.push_back (r);
4266
4267 /* [20, 24] */
4268 r.offset = 20;
4269 r.length = 5;
4270 ranges.push_back (r);
4271
4272 /* [2, 6] */
4273 SELF_CHECK (!ranges_contain (ranges, 2, 5));
4274 /* [9, 13] */
4275 SELF_CHECK (ranges_contain (ranges, 9, 5));
4276 /* [10, 11] */
4277 SELF_CHECK (ranges_contain (ranges, 10, 2));
4278 /* [10, 14] */
4279 SELF_CHECK (ranges_contain (ranges, 10, 5));
4280 /* [13, 18] */
4281 SELF_CHECK (ranges_contain (ranges, 13, 6));
4282 /* [14, 18] */
4283 SELF_CHECK (ranges_contain (ranges, 14, 5));
4284 /* [15, 18] */
4285 SELF_CHECK (!ranges_contain (ranges, 15, 4));
4286 /* [16, 19] */
4287 SELF_CHECK (!ranges_contain (ranges, 16, 4));
4288 /* [16, 21] */
4289 SELF_CHECK (ranges_contain (ranges, 16, 6));
4290 /* [21, 21] */
4291 SELF_CHECK (ranges_contain (ranges, 21, 1));
4292 /* [21, 25] */
4293 SELF_CHECK (ranges_contain (ranges, 21, 5));
4294 /* [26, 28] */
4295 SELF_CHECK (!ranges_contain (ranges, 26, 3));
4296 }
4297
4298 /* Check that RANGES contains the same ranges as EXPECTED. */
4299
4300 static bool
4301 check_ranges_vector (gdb::array_view<const range> ranges,
4302 gdb::array_view<const range> expected)
4303 {
4304 return ranges == expected;
4305 }
4306
4307 /* Test the insert_into_bit_range_vector function. */
4308
4309 static void
4310 test_insert_into_bit_range_vector ()
4311 {
4312 std::vector<range> ranges;
4313
4314 /* [10, 14] */
4315 {
4316 insert_into_bit_range_vector (&ranges, 10, 5);
4317 static const range expected[] = {
4318 {10, 5}
4319 };
4320 SELF_CHECK (check_ranges_vector (ranges, expected));
4321 }
4322
4323 /* [10, 14] */
4324 {
4325 insert_into_bit_range_vector (&ranges, 11, 4);
4326 static const range expected = {10, 5};
4327 SELF_CHECK (check_ranges_vector (ranges, expected));
4328 }
4329
4330 /* [10, 14] [20, 24] */
4331 {
4332 insert_into_bit_range_vector (&ranges, 20, 5);
4333 static const range expected[] = {
4334 {10, 5},
4335 {20, 5},
4336 };
4337 SELF_CHECK (check_ranges_vector (ranges, expected));
4338 }
4339
4340 /* [10, 14] [17, 24] */
4341 {
4342 insert_into_bit_range_vector (&ranges, 17, 5);
4343 static const range expected[] = {
4344 {10, 5},
4345 {17, 8},
4346 };
4347 SELF_CHECK (check_ranges_vector (ranges, expected));
4348 }
4349
4350 /* [2, 8] [10, 14] [17, 24] */
4351 {
4352 insert_into_bit_range_vector (&ranges, 2, 7);
4353 static const range expected[] = {
4354 {2, 7},
4355 {10, 5},
4356 {17, 8},
4357 };
4358 SELF_CHECK (check_ranges_vector (ranges, expected));
4359 }
4360
4361 /* [2, 14] [17, 24] */
4362 {
4363 insert_into_bit_range_vector (&ranges, 9, 1);
4364 static const range expected[] = {
4365 {2, 13},
4366 {17, 8},
4367 };
4368 SELF_CHECK (check_ranges_vector (ranges, expected));
4369 }
4370
4371 /* [2, 14] [17, 24] */
4372 {
4373 insert_into_bit_range_vector (&ranges, 9, 1);
4374 static const range expected[] = {
4375 {2, 13},
4376 {17, 8},
4377 };
4378 SELF_CHECK (check_ranges_vector (ranges, expected));
4379 }
4380
4381 /* [2, 33] */
4382 {
4383 insert_into_bit_range_vector (&ranges, 4, 30);
4384 static const range expected = {2, 32};
4385 SELF_CHECK (check_ranges_vector (ranges, expected));
4386 }
4387 }
4388
4389 static void
4390 test_value_copy ()
4391 {
4392 type *type = builtin_type (current_inferior ()->arch ())->builtin_int;
4393
4394 /* Verify that we can copy an entirely optimized out value, that may not have
4395 its contents allocated. */
4396 value_ref_ptr val = release_value (value::allocate_optimized_out (type));
4397 value_ref_ptr copy = release_value (val->copy ());
4398
4399 SELF_CHECK (val->entirely_optimized_out ());
4400 SELF_CHECK (copy->entirely_optimized_out ());
4401 }
4402
4403 } /* namespace selftests */
4404 #endif /* GDB_SELF_TEST */
4405
4406 void _initialize_values ();
4407 void
4408 _initialize_values ()
4409 {
4410 cmd_list_element *show_convenience_cmd
4411 = add_cmd ("convenience", no_class, show_convenience, _("\
4412 Debugger convenience (\"$foo\") variables and functions.\n\
4413 Convenience variables are created when you assign them values;\n\
4414 thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\
4415 \n\
4416 A few convenience variables are given values automatically:\n\
4417 \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\
4418 \"$__\" holds the contents of the last address examined with \"x\"."
4419 #ifdef HAVE_PYTHON
4420 "\n\n\
4421 Convenience functions are defined via the Python API."
4422 #endif
4423 ), &showlist);
4424 add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
4425
4426 add_cmd ("values", no_set_class, show_values, _("\
4427 Elements of value history around item number IDX (or last ten)."),
4428 &showlist);
4429
4430 add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
4431 Initialize a convenience variable if necessary.\n\
4432 init-if-undefined VARIABLE = EXPRESSION\n\
4433 Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
4434 exist or does not contain a value. The EXPRESSION is not evaluated if the\n\
4435 VARIABLE is already initialized."));
4436
4437 add_prefix_cmd ("function", no_class, function_command, _("\
4438 Placeholder command for showing help on convenience functions."),
4439 &functionlist, 0, &cmdlist);
4440
4441 add_internal_function ("_isvoid", _("\
4442 Check whether an expression is void.\n\
4443 Usage: $_isvoid (expression)\n\
4444 Return 1 if the expression is void, zero otherwise."),
4445 isvoid_internal_fn, NULL);
4446
4447 add_internal_function ("_creal", _("\
4448 Extract the real part of a complex number.\n\
4449 Usage: $_creal (expression)\n\
4450 Return the real part of a complex number, the type depends on the\n\
4451 type of a complex number."),
4452 creal_internal_fn, NULL);
4453
4454 add_internal_function ("_cimag", _("\
4455 Extract the imaginary part of a complex number.\n\
4456 Usage: $_cimag (expression)\n\
4457 Return the imaginary part of a complex number, the type depends on the\n\
4458 type of a complex number."),
4459 cimag_internal_fn, NULL);
4460
4461 add_setshow_zuinteger_unlimited_cmd ("max-value-size",
4462 class_support, &max_value_size, _("\
4463 Set maximum sized value gdb will load from the inferior."), _("\
4464 Show maximum sized value gdb will load from the inferior."), _("\
4465 Use this to control the maximum size, in bytes, of a value that gdb\n\
4466 will load from the inferior. Setting this value to 'unlimited'\n\
4467 disables checking.\n\
4468 Setting this does not invalidate already allocated values, it only\n\
4469 prevents future values, larger than this size, from being allocated."),
4470 set_max_value_size,
4471 show_max_value_size,
4472 &setlist, &showlist);
4473 set_show_commands vsize_limit
4474 = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
4475 &max_value_size, _("\
4476 Set the maximum number of bytes allowed in a variable-size object."), _("\
4477 Show the maximum number of bytes allowed in a variable-size object."), _("\
4478 Attempts to access an object whose size is not a compile-time constant\n\
4479 and exceeds this limit will cause an error."),
4480 NULL, NULL, &setlist, &showlist);
4481 deprecate_cmd (vsize_limit.set, "set max-value-size");
4482
4483 #if GDB_SELF_TEST
4484 selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
4485 selftests::register_test ("insert_into_bit_range_vector",
4486 selftests::test_insert_into_bit_range_vector);
4487 selftests::register_test ("value_copy", selftests::test_value_copy);
4488 #endif
4489 }
4490
/* See value.h.  */

void
finalize_values ()
{
  /* Release every value reference still held in the module-level
     ALL_VALUES list (declared outside this chunk); presumably called
     during GDB shutdown -- confirm against callers in value.h.  */
  all_values.clear ();
}