/* Low level packing and unpacking of values for GDB, the GNU Debugger.

   Copyright (C) 1986-2025 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "arch-utils.h"
#include "extract-store-integer.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "command.h"
#include "cli/cli-cmds.h"
#include "target.h"
#include "language.h"
#include "demangle.h"
#include "regcache.h"
#include "block.h"
#include "target-float.h"
#include "objfiles.h"
#include "valprint.h"
#include "cli/cli-decode.h"
#include "extension.h"
#include <ctype.h>
#include "tracepoint.h"
#include "cp-abi.h"
#include "user-regs.h"
#include <algorithm>
#include <iterator>
#include <map>
#include <utility>
#include <vector>
#include "completer.h"
#include "gdbsupport/selftest.h"
#include "gdbsupport/array-view.h"
#include "cli/cli-style.h"
#include "expop.h"
#include "inferior.h"
#include "varobj.h"

/* Definition of a user function.  */
struct internal_function
{
  internal_function (std::string name, internal_function_fn_noside handler,
                     void *cookie)
    : name (std::move (name)),
      handler (handler),
      cookie (cookie)
  {}

  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  std::string name;

  /* The handler.  */
  internal_function_fn_noside handler;

  /* User data for the handler.  */
  void *cookie;
};

using internal_function_up = std::unique_ptr<internal_function>;

/* Returns true if the ranges defined by [offset1, offset1+len1) and
   [offset2, offset2+len2) overlap.  */

static bool
ranges_overlap (LONGEST offset1, ULONGEST len1,
                LONGEST offset2, ULONGEST len2)
{
  LONGEST h, l;

  l = std::max (offset1, offset2);
  h = std::min (offset1 + len1, offset2 + len2);
  return (l < h);
}
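
/* For illustration (the intervals are half-open):

     ranges_overlap (0, 10, 5, 10)   => true, [0, 10) and [5, 15)
                                        share bits [5, 10)
     ranges_overlap (0, 5, 5, 5)     => false, [0, 5) and [5, 10)
                                        are merely contiguous  */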

/* Returns true if RANGES contains any range that overlaps [OFFSET,
   OFFSET+LENGTH).  */

static bool
ranges_contain (const std::vector<range> &ranges, LONGEST offset,
                ULONGEST length)
{
  range what;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

         R
         |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and end up before 2).  But,
     `0' overlaps with R.

     Then we need to check if R overlaps the range at position I
     itself.  E.g.,

                R
                |---|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1
  */

  auto i = std::lower_bound (ranges.begin (), ranges.end (), what);

  if (i > ranges.begin ())
    {
      const struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
        return true;
    }

  if (i < ranges.end ())
    {
      const struct range &r = *i;

      if (ranges_overlap (r.offset, r.length, offset, length))
        return true;
    }

  return false;
}
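
/* For illustration: with RANGES = {{4, 4}, {20, 4}} (bits [4, 8) and
   [20, 24)), ranges_contain (ranges, 2, 3) is true because [2, 5)
   overlaps [4, 8), while ranges_contain (ranges, 8, 4) is false
   because [8, 12) falls entirely in the gap between the ranges.  */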

static struct cmd_list_element *functionlist;

value::~value ()
{
  if (this->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = m_location.computed.funcs;

      if (funcs->free_closure)
        funcs->free_closure (this);
    }
  else if (this->lval () == lval_xcallable)
    delete m_location.xm_worker;
}

/* See value.h.  */

struct gdbarch *
value::arch () const
{
  return type ()->arch ();
}

bool
value::bits_available (LONGEST offset, ULONGEST length) const
{
  gdb_assert (!m_lazy);

  /* Don't pretend we have anything available there in the history beyond
     the boundaries of the value recorded.  It's not like inferior memory
     where there is actual stuff underneath.  */
  ULONGEST val_len = TARGET_CHAR_BIT * enclosing_type ()->length ();
  return !((m_in_history
            && (offset < 0 || offset + length > val_len))
           || ranges_contain (m_unavailable, offset, length));
}

bool
value::bytes_available (LONGEST offset, ULONGEST length) const
{
  ULONGEST sign = (1ULL << (sizeof (ULONGEST) * 8 - 1)) / TARGET_CHAR_BIT;
  ULONGEST mask = (sign << 1) - 1;

  if (offset != ((offset & mask) ^ sign) - sign
      || length != ((length & mask) ^ sign) - sign
      || (length > 0 && (~offset & (offset + length - 1) & sign) != 0))
    error (_("Integer overflow in data location calculation"));

  return bits_available (offset * TARGET_CHAR_BIT, length * TARGET_CHAR_BIT);
}
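
/* For illustration: with a 64-bit ULONGEST and 8-bit target chars,
   SIGN is 2**60 and the checks above reject any OFFSET or LENGTH that
   does not survive sign-extension from bit 60, i.e. anything whose
   conversion to a bit count would overflow a LONGEST.  E.g.,
   bytes_available (1ULL << 61, 1) errors out instead of silently
   wrapping when multiplied by TARGET_CHAR_BIT.  */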

bool
value::bits_any_optimized_out (int bit_offset, int bit_length) const
{
  gdb_assert (!m_lazy);

  return ranges_contain (m_optimized_out, bit_offset, bit_length);
}

bool
value::entirely_available ()
{
  /* We can only tell whether the whole value is available when we try
     to read it.  */
  if (m_lazy)
    fetch_lazy ();

  if (m_unavailable.empty ())
    return true;
  return false;
}

/* See value.h.  */

bool
value::entirely_covered_by_range_vector (const std::vector<range> &ranges)
{
  /* We can only tell whether the whole value is optimized out /
     unavailable when we try to read it.  */
  if (m_lazy)
    fetch_lazy ();

  if (ranges.size () == 1)
    {
      const struct range &t = ranges[0];

      if (t.offset == 0
          && t.length == TARGET_CHAR_BIT * enclosing_type ()->length ())
        return true;
    }

  return false;
}

/* Insert into the vector pointed to by VECTORP the bit range starting at
   OFFSET bits, and extending for the next LENGTH bits.  */

static void
insert_into_bit_range_vector (std::vector<range> *vectorp,
                              LONGEST offset, ULONGEST length)
{
  range newr;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

         R
         |-...-|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

           R
           |-...-|
       |--|       |---|  |------| ... |--|
       0          1      2            N

       I=1

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

             R
             |-...-|
       |--|          |---|  |------| ... |--|
       0             1      2            N

       I=1

     or if I is 0:

       #4 - R is the range with lowest offset

         R
         |-...-|
                 |--|       |---|  |------| ... |--|
                 0          1      2            N

       I=0

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

         R
         |------------------------|
       |--|       |---|  |------| ... |--|
       0          1      2            N

       I=0

     or:

              R
              |-------|
       |--|       |---|  |------| ... |--|
       0          1      2            N

       I=1

  */

  auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
  if (i > vectorp->begin ())
    {
      struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
        {
          /* #1 */
          LONGEST l = std::min (bef.offset, offset);
          LONGEST h = std::max (bef.offset + bef.length, offset + length);

          bef.offset = l;
          bef.length = h - l;
          i--;
        }
      else if (offset == bef.offset + bef.length)
        {
          /* #2 */
          bef.length += length;
          i--;
        }
      else
        {
          /* #3 */
          i = vectorp->insert (i, newr);
        }
    }
  else
    {
      /* #4 */
      i = vectorp->insert (i, newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i != vectorp->end () && i + 1 < vectorp->end ())
    {
      int removed = 0;
      auto next = i + 1;

      /* Get the range we just touched.  */
      struct range &t = *i;

      i = next;
      for (; i < vectorp->end (); i++)
        {
          struct range &r = *i;
          if (r.offset <= t.offset + t.length)
            {
              LONGEST l, h;

              l = std::min (t.offset, r.offset);
              h = std::max (t.offset + t.length, r.offset + r.length);

              t.offset = l;
              t.length = h - l;

              removed++;
            }
          else
            {
              /* If we couldn't merge this one, we won't be able to
                 merge following ones either, since the ranges are
                 always sorted by OFFSET.  */
              break;
            }
        }

      if (removed != 0)
        vectorp->erase (next, next + removed);
    }
}
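
/* For illustration: starting from {{0, 4}, {8, 4}} (bits [0, 4) and
   [8, 12)), inserting the range [3, 9) overlaps the first range (#1)
   and then folds the second one in (#5):

     std::vector<range> v = { {0, 4}, {8, 4} };
     insert_into_bit_range_vector (&v, 3, 6);
     // v now holds the single coalesced range {0, 12}.  */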

void
value::mark_bits_unavailable (LONGEST offset, ULONGEST length)
{
  insert_into_bit_range_vector (&m_unavailable, offset, length);
}

void
value::mark_bytes_unavailable (LONGEST offset, ULONGEST length)
{
  mark_bits_unavailable (offset * TARGET_CHAR_BIT,
                         length * TARGET_CHAR_BIT);
}

/* Find the first range in RANGES that overlaps the range defined by
   OFFSET and LENGTH, starting at element POS in the RANGES vector.
   Returns the index into RANGES where such overlapping range was
   found, or -1 if none was found.  */

static int
find_first_range_overlap (const std::vector<range> *ranges, int pos,
                          LONGEST offset, LONGEST length)
{
  int i;

  for (i = pos; i < ranges->size (); i++)
    {
      const range &r = (*ranges)[i];
      if (ranges_overlap (r.offset, r.length, offset, length))
        return i;
    }

  return -1;
}

/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
            / TARGET_CHAR_BIT)  */
static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
                         const gdb_byte *ptr2, size_t offset2_bits,
                         size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
              == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
         number of bytes.  A number of bits up to either the next exact
         byte boundary, or LENGTH_BITS (whichever is sooner) will be
         compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
        {
          mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
          bits = length_bits;
        }

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      /* Now update the length and offsets to take account of the bits
         we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
         IF block, the offsets are byte aligned, or the length is zero
         (in which case this code is not reached).  Compare a number of
         bits at the end of the region, starting from an exact byte
         boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
        return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
         the region to compare, the remainder can be covered with a simple
         memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
                     ptr2 + offset2_bits / TARGET_CHAR_BIT,
                     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
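
/* For illustration: comparing 4 bits at bit offset 4 within two single
   bytes masks both with 0x0f, so for bytes 0xab and 0xcb only the low
   nibbles (0xb vs 0xb) are compared and the function returns 0 even
   though the full bytes differ.  */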

/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keep track of which slot of a given ranges
   vector we last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};

/* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
   RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
   ranges starting at OFFSET2 bits.  Return true if the ranges match
   and fill in *L and *H with the overlapping window relative to
   (both) OFFSET1 or OFFSET2.  */

static int
find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
                                    struct ranges_and_idx *rp2,
                                    LONGEST offset1, LONGEST offset2,
                                    ULONGEST length, ULONGEST *l, ULONGEST *h)
{
  rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
                                       offset1, length);
  rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
                                       offset2, length);

  if (rp1->idx == -1 && rp2->idx == -1)
    {
      *l = length;
      *h = length;
      return 1;
    }
  else if (rp1->idx == -1 || rp2->idx == -1)
    return 0;
  else
    {
      const range *r1, *r2;
      ULONGEST l1, h1;
      ULONGEST l2, h2;

      r1 = &(*rp1->ranges)[rp1->idx];
      r2 = &(*rp2->ranges)[rp2->idx];

      /* Get the unavailable windows intersected by the incoming
         ranges.  The first and last ranges that overlap the argument
         range may be wider than said incoming arguments ranges.  */
      l1 = std::max (offset1, r1->offset);
      h1 = std::min (offset1 + length, r1->offset + r1->length);

      l2 = std::max (offset2, r2->offset);
      h2 = std::min (offset2 + length, r2->offset + r2->length);

      /* Make them relative to the respective start offsets, so we can
         compare them for equality.  */
      l1 -= offset1;
      h1 -= offset1;

      l2 -= offset2;
      h2 -= offset2;

      /* Different ranges, no match.  */
      if (l1 != l2 || h1 != h2)
        return 0;

      *h = h1;
      *l = l1;
      return 1;
    }
}

/* Helper function for value_contents_eq.  The only difference is that
   this function is bit rather than byte based.

   Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
   with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
   Return true if the available bits match.  */

bool
value::contents_bits_eq (int offset1, const struct value *val2, int offset2,
                         int length) const
{
  /* Each array element corresponds to a ranges source (unavailable,
     optimized out).  '1' is for VAL1, '2' for VAL2.  */
  struct ranges_and_idx rp1[2], rp2[2];

  /* See function description in value.h.  */
  gdb_assert (!m_lazy && !val2->m_lazy);

  /* We shouldn't be trying to compare past the end of the values.  */
  gdb_assert (offset1 + length
              <= m_enclosing_type->length () * TARGET_CHAR_BIT);
  gdb_assert (offset2 + length
              <= val2->m_enclosing_type->length () * TARGET_CHAR_BIT);

  memset (&rp1, 0, sizeof (rp1));
  memset (&rp2, 0, sizeof (rp2));
  rp1[0].ranges = &m_unavailable;
  rp2[0].ranges = &val2->m_unavailable;
  rp1[1].ranges = &m_optimized_out;
  rp2[1].ranges = &val2->m_optimized_out;

  while (length > 0)
    {
      ULONGEST l = 0, h = 0; /* init for gcc -Wall */
      int i;

      for (i = 0; i < 2; i++)
        {
          ULONGEST l_tmp, h_tmp;

          /* The contents only match if the invalid/unavailable
             contents ranges match as well.  */
          if (!find_first_range_overlap_and_match (&rp1[i], &rp2[i],
                                                   offset1, offset2, length,
                                                   &l_tmp, &h_tmp))
            return false;

          /* We're interested in the lowest/first range found.  */
          if (i == 0 || l_tmp < l)
            {
              l = l_tmp;
              h = h_tmp;
            }
        }

      /* Compare the available/valid contents.  */
      if (memcmp_with_bit_offsets (m_contents.get (), offset1,
                                   val2->m_contents.get (), offset2, l) != 0)
        return false;

      length -= h;
      offset1 += h;
      offset2 += h;
    }

  return true;
}

/* See value.h.  */

bool
value::contents_eq (LONGEST offset1,
                    const struct value *val2, LONGEST offset2,
                    LONGEST length) const
{
  return contents_bits_eq (offset1 * TARGET_CHAR_BIT,
                           val2, offset2 * TARGET_CHAR_BIT,
                           length * TARGET_CHAR_BIT);
}

/* See value.h.  */

bool
value::contents_eq (const struct value *val2) const
{
  ULONGEST len1 = check_typedef (enclosing_type ())->length ();
  ULONGEST len2 = check_typedef (val2->enclosing_type ())->length ();
  if (len1 != len2)
    return false;
  return contents_eq (0, val2, 0, len1);
}
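
/* For illustration (INT_TYPE is hypothetical): a freshly allocated
   value compares equal to its own copy, since both the contents and
   the unavailable/optimized-out ranges are duplicated:

     value *v1 = value::allocate (int_type);
     value *v2 = v1->copy ();
     gdb_assert (v1->contents_eq (v2));  */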

/* The value-history records all the values printed by print commands
   during this session.  */

static std::vector<value_ref_ptr> value_history;

\f
/* List of all value objects currently allocated
   (except for those released by calls to release_value)
   This is so they can be freed after each command.  */

static std::vector<value_ref_ptr> all_values;

/* See value.h.  */

struct value *
value::allocate_lazy (struct type *type)
{
  struct value *val;

  /* Call check_typedef on our type to make sure that, if TYPE
     is a TYPE_CODE_TYPEDEF, its length is set to the length
     of the target type instead of zero.  However, we do not
     replace the typedef type by the target type, because we want
     to keep the typedef in order to be able to set the VAL's type
     description correctly.  */
  check_typedef (type);

  val = new struct value (type);

  /* Values start out on the all_values chain.  */
  all_values.emplace_back (val);

  return val;
}

/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason; it
   is just a reasonable starting point.  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected, so long as this
   is bigger than LONGEST on all GDB supported hosts we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);

/* Implement the "set max-value-size" command.  */

static void
set_max_value_size (const char *args, int from_tty,
                    struct cmd_list_element *c)
{
  gdb_assert (max_value_size == -1 || max_value_size >= 0);

  if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
    {
      max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
      error (_("max-value-size set too low, increasing to %d bytes"),
             max_value_size);
    }
}

/* Implement the "show max-value-size" command.  */

static void
show_max_value_size (struct ui_file *file, int from_tty,
                     struct cmd_list_element *c, const char *value)
{
  if (max_value_size == -1)
    gdb_printf (file, _("Maximum value size is unlimited.\n"));
  else
    gdb_printf (file, _("Maximum value size is %d bytes.\n"),
                max_value_size);
}

/* Called before we attempt to allocate or reallocate a buffer for the
   contents of a value.  TYPE is the type of the value for which we are
   allocating the buffer.  If the buffer is too large (based on the user
   controllable setting) then throw an error.  If this function returns
   then we should attempt to allocate the buffer.  */

static void
check_type_length_before_alloc (const struct type *type)
{
  ULONGEST length = type->length ();

  if (exceeds_max_value_size (length))
    {
      if (type->name () != NULL)
        error (_("value of type `%s' requires %s bytes, which is more "
                 "than max-value-size"), type->name (), pulongest (length));
      else
        error (_("value requires %s bytes, which is more than "
                 "max-value-size"), pulongest (length));
    }
}

/* See value.h.  */

bool
exceeds_max_value_size (ULONGEST length)
{
  return max_value_size > -1 && length > max_value_size;
}

/* When this has a value, it is used to limit the number of array elements
   of an array that are loaded into memory when an array value is made
   non-lazy.  */
static std::optional<int> array_length_limiting_element_count;

/* See value.h.  */
scoped_array_length_limiting::scoped_array_length_limiting (int elements)
{
  m_old_value = array_length_limiting_element_count;
  array_length_limiting_element_count.emplace (elements);
}

/* See value.h.  */
scoped_array_length_limiting::~scoped_array_length_limiting ()
{
  array_length_limiting_element_count = m_old_value;
}

/* Find the inner element type for ARRAY_TYPE.  */

static struct type *
find_array_element_type (struct type *array_type)
{
  array_type = check_typedef (array_type);
  gdb_assert (array_type->code () == TYPE_CODE_ARRAY);

  if (current_language->la_language == language_fortran)
    while (array_type->code () == TYPE_CODE_ARRAY)
      {
        array_type = array_type->target_type ();
        array_type = check_typedef (array_type);
      }
  else
    {
      array_type = array_type->target_type ();
      array_type = check_typedef (array_type);
    }

  return array_type;
}

/* Return the limited length of ARRAY_TYPE, which must be of
   TYPE_CODE_ARRAY.  This function can only be called when the global
   ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.

   The limited length of an array is the smallest of either (1) the total
   size of the array type, or (2) the length of the array element type
   multiplied by the array_length_limiting_element_count.  */

static ULONGEST
calculate_limited_array_length (struct type *array_type)
{
  gdb_assert (array_length_limiting_element_count.has_value ());

  array_type = check_typedef (array_type);
  gdb_assert (array_type->code () == TYPE_CODE_ARRAY);

  struct type *elm_type = find_array_element_type (array_type);
  ULONGEST len = (elm_type->length ()
                  * (*array_length_limiting_element_count));
  len = std::min (len, array_type->length ());

  return len;
}
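
/* For illustration: with a limit of 100 elements in effect, an array
   of 1000 4-byte integers is limited to std::min (4 * 100, 4000) = 400
   bytes, while an array already smaller than the element limit keeps
   its full length.  */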

/* See value.h.  */

bool
value::set_limited_array_length ()
{
  ULONGEST limit = m_limited_length;
  ULONGEST len = type ()->length ();

  if (array_length_limiting_element_count.has_value ())
    len = calculate_limited_array_length (type ());

  if (limit != 0 && len > limit)
    len = limit;
  if (len > max_value_size)
    return false;

  m_limited_length = max_value_size;
  return true;
}

/* See value.h.  */

void
value::allocate_contents (bool check_size)
{
  if (!m_contents)
    {
      struct type *enc_type = enclosing_type ();
      ULONGEST len = enc_type->length ();

      if (check_size)
        {
          /* If we are allocating the contents of an array, which
             is greater in size than max_value_size, and there is
             an element limit in effect, then we can possibly try
             to load only a sub-set of the array contents into
             GDB's memory.  */
          if (type () == enc_type
              && type ()->code () == TYPE_CODE_ARRAY
              && len > max_value_size
              && set_limited_array_length ())
            len = m_limited_length;
          else
            check_type_length_before_alloc (enc_type);
        }

      m_contents.reset ((gdb_byte *) xzalloc (len));
    }
}

/* Allocate a value and its contents for type TYPE.  If CHECK_SIZE is true,
   then apply the usual max-value-size checks.  */

struct value *
value::allocate (struct type *type, bool check_size)
{
  struct value *val = value::allocate_lazy (type);

  val->allocate_contents (check_size);
  val->m_lazy = false;
  return val;
}

/* Allocate a value and its contents for type TYPE.  */

struct value *
value::allocate (struct type *type)
{
  return allocate (type, true);
}

/* See value.h.  */

value *
value::allocate_register_lazy (const frame_info_ptr &initial_next_frame,
                               int regnum, struct type *type)
{
  if (type == nullptr)
    type = register_type (frame_unwind_arch (initial_next_frame), regnum);

  value *result = value::allocate_lazy (type);

  result->set_lval (lval_register);
  result->m_location.reg.regnum = regnum;

  /* If this register value is created during unwind (while computing a frame
     id), and NEXT_FRAME is a frame inlined in the frame being unwound, then
     NEXT_FRAME will not have a valid frame id yet.  Find the next non-inline
     frame (possibly the sentinel frame).  This is where registers are unwound
     from anyway.  */
  frame_info_ptr next_frame = initial_next_frame;
  while (get_frame_type (next_frame) == INLINE_FRAME)
    next_frame = get_next_frame_sentinel_okay (next_frame);

  result->m_location.reg.next_frame_id = get_frame_id (next_frame);

  /* We should have a next frame with a valid id.  */
  gdb_assert (frame_id_p (result->m_location.reg.next_frame_id));

  return result;
}

/* See value.h.  */

value *
value::allocate_register (const frame_info_ptr &next_frame, int regnum,
                          struct type *type)
{
  value *result = value::allocate_register_lazy (next_frame, regnum, type);
  result->set_lazy (false);
  return result;
}

/* Allocate a value that has the correct length
   for COUNT repetitions of type TYPE.  */

struct value *
allocate_repeat_value (struct type *type, int count)
{
  /* Despite the fact that we are really creating an array of TYPE here, we
     use the string lower bound as the array lower bound.  This seems to
     work fine for now.  */
  int low_bound = current_language->string_lower_bound ();
  /* FIXME-type-allocation: need a way to free this type when we are
     done with it.  */
  struct type *array_type
    = lookup_array_range_type (type, low_bound, count + low_bound - 1);

  return value::allocate (array_type);
}
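
/* For illustration: allocate_repeat_value (int_type, 8), with a
   hypothetical 4-byte INT_TYPE and a C-like lower bound of 0, builds
   the array type int [0..7] and returns a 32-byte value.  */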

struct value *
value::allocate_computed (struct type *type,
                          const struct lval_funcs *funcs,
                          void *closure)
{
  struct value *v = value::allocate_lazy (type);

  v->set_lval (lval_computed);
  v->m_location.computed.funcs = funcs;
  v->m_location.computed.closure = closure;

  return v;
}

/* See value.h.  */

struct value *
value::allocate_optimized_out (struct type *type)
{
  struct value *retval = value::allocate_lazy (type);

  retval->mark_bytes_optimized_out (0, type->length ());
  retval->set_lazy (false);
  return retval;
}

/* Accessor methods.  */

gdb::array_view<gdb_byte>
value::contents_raw ()
{
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  allocate_contents (true);

  ULONGEST length = type ()->length ();
  return gdb::make_array_view
    (m_contents.get () + m_embedded_offset * unit_size, length);
}

gdb::array_view<gdb_byte>
value::contents_all_raw ()
{
  allocate_contents (true);

  ULONGEST length = enclosing_type ()->length ();
  return gdb::make_array_view (m_contents.get (), length);
}

/* Look at value.h for description.  */

struct type *
value_actual_type (struct value *value, int resolve_simple_types,
                   int *real_type_found)
{
  struct value_print_options opts;
  struct type *result;

  get_user_print_options (&opts);

  if (real_type_found)
    *real_type_found = 0;
  result = value->type ();
  if (opts.objectprint)
    {
      /* If result's target type is TYPE_CODE_STRUCT, proceed to
         fetch its rtti type.  */
      if (result->is_pointer_or_reference ()
          && (check_typedef (result->target_type ())->code ()
              == TYPE_CODE_STRUCT)
          && !value->optimized_out ())
        {
          struct type *real_type;

          real_type = value_rtti_indirect_type (value, NULL, NULL, NULL);
          if (real_type)
            {
              if (real_type_found)
                *real_type_found = 1;
              result = real_type;
            }
        }
      else if (resolve_simple_types)
        {
          if (real_type_found)
            *real_type_found = 1;
          result = value->enclosing_type ();
        }
    }

  return result;
}

void
error_value_optimized_out (void)
{
  throw_error (OPTIMIZED_OUT_ERROR, _("value has been optimized out"));
}

void
value::require_not_optimized_out () const
{
  if (!m_optimized_out.empty ())
    {
      if (m_lval == lval_register)
        throw_error (OPTIMIZED_OUT_ERROR,
                     _("register has not been saved in frame"));
      else
        error_value_optimized_out ();
    }
}

void
value::require_available () const
{
  if (!m_unavailable.empty ())
    throw_error (NOT_AVAILABLE_ERROR, _("value is not available"));
}

gdb::array_view<const gdb_byte>
value::contents_for_printing ()
{
  if (m_lazy)
    fetch_lazy ();

  ULONGEST length = enclosing_type ()->length ();
  return gdb::make_array_view (m_contents.get (), length);
}

gdb::array_view<const gdb_byte>
value::contents_for_printing () const
{
  gdb_assert (!m_lazy);

  ULONGEST length = enclosing_type ()->length ();
  return gdb::make_array_view (m_contents.get (), length);
}

gdb::array_view<const gdb_byte>
value::contents_all ()
{
  gdb::array_view<const gdb_byte> result = contents_for_printing ();
  require_not_optimized_out ();
  require_available ();
  return result;
}

/* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
   SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted.  */

static void
ranges_copy_adjusted (std::vector<range> *dst_range, int dst_bit_offset,
                      const std::vector<range> &src_range, int src_bit_offset,
                      unsigned int bit_length)
{
  for (const range &r : src_range)
    {
      LONGEST h, l;

      l = std::max (r.offset, (LONGEST) src_bit_offset);
      h = std::min ((LONGEST) (r.offset + r.length),
                    (LONGEST) src_bit_offset + bit_length);

      if (l < h)
        insert_into_bit_range_vector (dst_range,
                                      dst_bit_offset + (l - src_bit_offset),
                                      h - l);
    }
}
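
/* For illustration: with SRC_RANGE = {{10, 8}} (bits [10, 18)), a copy
   window of SRC_BIT_OFFSET = 12 and BIT_LENGTH = 8 intersects it at
   [12, 18), so with DST_BIT_OFFSET = 0 the destination vector gains
   the range {0, 6}.  */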

/* See value.h.  */

void
value::ranges_copy_adjusted (struct value *dst, int dst_bit_offset,
                             int src_bit_offset, int bit_length) const
{
  ::ranges_copy_adjusted (&dst->m_unavailable, dst_bit_offset,
                          m_unavailable, src_bit_offset,
                          bit_length);
  ::ranges_copy_adjusted (&dst->m_optimized_out, dst_bit_offset,
                          m_optimized_out, src_bit_offset,
                          bit_length);
}

/* See value.h.  */

void
value::contents_copy_raw (struct value *dst, LONGEST dst_offset,
                          LONGEST src_offset, LONGEST length)
{
  LONGEST src_bit_offset, dst_bit_offset, bit_length;
  int unit_size = gdbarch_addressable_memory_unit_size (arch ());

  /* A lazy DST would make this copy operation useless, since as soon
     as DST's contents were un-lazied (by a later value_contents call,
     say), the contents would be overwritten.  A lazy SRC would mean
     we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !m_lazy);

  ULONGEST copy_length = length;
  ULONGEST limit = m_limited_length;
  if (limit > 0 && src_offset + length > limit)
    copy_length = src_offset > limit ? 0 : limit - src_offset;

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  gdb_assert (dst->bytes_available (dst_offset, length));
  gdb_assert (!dst->bits_any_optimized_out (TARGET_CHAR_BIT * dst_offset,
                                            TARGET_CHAR_BIT * length));

  if ((src_offset + copy_length) * unit_size > enclosing_type ()->length ())
    error (_("access outside bounds of object"));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents
    = dst->contents_all_raw ().slice (dst_offset * unit_size,
                                      copy_length * unit_size);
  gdb::array_view<const gdb_byte> src_contents
    = contents_all_raw ().slice (src_offset * unit_size,
                                 copy_length * unit_size);
  gdb::copy (src_contents, dst_contents);

  /* Copy the meta-data, adjusted.  */
  src_bit_offset = src_offset * unit_size * HOST_CHAR_BIT;
  dst_bit_offset = dst_offset * unit_size * HOST_CHAR_BIT;
  bit_length = length * unit_size * HOST_CHAR_BIT;

  ranges_copy_adjusted (dst, dst_bit_offset,
                        src_bit_offset, bit_length);
}

/* See value.h.  */

void
value::contents_copy_raw_bitwise (struct value *dst, LONGEST dst_bit_offset,
                                  LONGEST src_bit_offset,
                                  LONGEST bit_length)
{
  /* A lazy DST would make this copy operation useless, since as soon
     as DST's contents were un-lazied (by a later value_contents call,
     say), the contents would be overwritten.  A lazy SRC would mean
     we'd be copying garbage.  */
  gdb_assert (!dst->m_lazy && !m_lazy);

  ULONGEST copy_bit_length = bit_length;
  ULONGEST bit_limit = m_limited_length * TARGET_CHAR_BIT;
  if (bit_limit > 0 && src_bit_offset + bit_length > bit_limit)
    copy_bit_length = (src_bit_offset > bit_limit ? 0
                       : bit_limit - src_bit_offset);

  /* The overwritten DST range gets unavailability ORed in, not
     replaced.  Make sure to remember to implement replacing if it
     turns out actually necessary.  */
  LONGEST dst_offset = dst_bit_offset / TARGET_CHAR_BIT;
  LONGEST length = bit_length / TARGET_CHAR_BIT;
  gdb_assert (dst->bytes_available (dst_offset, length));
  gdb_assert (!dst->bits_any_optimized_out (dst_bit_offset,
                                            bit_length));

  /* Copy the data.  */
  gdb::array_view<gdb_byte> dst_contents = dst->contents_all_raw ();
  gdb::array_view<const gdb_byte> src_contents = contents_all_raw ();
  copy_bitwise (dst_contents.data (), dst_bit_offset,
                src_contents.data (), src_bit_offset,
                copy_bit_length,
                type_byte_order (type ()) == BFD_ENDIAN_BIG);

  /* Copy the meta-data.  */
  ranges_copy_adjusted (dst, dst_bit_offset, src_bit_offset, bit_length);
}

/* See value.h.  */

void
value::contents_copy (struct value *dst, LONGEST dst_offset,
                      LONGEST src_offset, LONGEST length)
{
  if (m_lazy)
    fetch_lazy ();

  contents_copy_raw (dst, dst_offset, src_offset, length);
}

gdb::array_view<const gdb_byte>
value::contents ()
{
  gdb::array_view<const gdb_byte> result = contents_writeable ();
  require_not_optimized_out ();
  require_available ();
  return result;
}

gdb::array_view<gdb_byte>
value::contents_writeable ()
{
  if (m_lazy)
    fetch_lazy ();
  return contents_raw ();
}

bool
value::optimized_out ()
{
  if (m_lazy)
    {
      /* See if we can compute the result without fetching the
         value.  */
      if (this->lval () == lval_memory)
        return false;
      else if (this->lval () == lval_computed)
        {
          const struct lval_funcs *funcs = m_location.computed.funcs;

          if (funcs->is_optimized_out != nullptr)
            return funcs->is_optimized_out (this);
        }

      /* Fall back to fetching.  */
      try
        {
          fetch_lazy ();
        }
      catch (const gdb_exception_error &ex)
        {
          switch (ex.error)
            {
            case MEMORY_ERROR:
            case OPTIMIZED_OUT_ERROR:
            case NOT_AVAILABLE_ERROR:
              /* These can normally happen when we try to access an
                 optimized out or unavailable register, either in a
                 physical register or spilled to memory.  */
              break;
            default:
              throw;
            }
        }
    }

  return !m_optimized_out.empty ();
}

/* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
   the following LENGTH bytes.  */

void
value::mark_bytes_optimized_out (int offset, int length)
{
  mark_bits_optimized_out (offset * TARGET_CHAR_BIT,
                           length * TARGET_CHAR_BIT);
}

/* See value.h.  */

void
value::mark_bits_optimized_out (LONGEST offset, LONGEST length)
{
  insert_into_bit_range_vector (&m_optimized_out, offset, length);
}

bool
value::bits_synthetic_pointer (LONGEST offset, LONGEST length) const
{
  if (m_lval != lval_computed
      || !m_location.computed.funcs->check_synthetic_pointer)
    return false;
  return m_location.computed.funcs->check_synthetic_pointer (this, offset,
                                                             length);
}

const struct lval_funcs *
value::computed_funcs () const
{
  gdb_assert (m_lval == lval_computed);

  return m_location.computed.funcs;
}

void *
value::computed_closure () const
{
  gdb_assert (m_lval == lval_computed);

  return m_location.computed.closure;
}

CORE_ADDR
value::address () const
{
  if (m_lval != lval_memory)
    return 0;
  if (m_parent != NULL)
    return m_parent->address () + m_offset;
  if (NULL != TYPE_DATA_LOCATION (type ()))
    {
      gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
      return TYPE_DATA_LOCATION_ADDR (type ());
    }

  return m_location.address + m_offset;
}

CORE_ADDR
value::raw_address () const
{
  if (m_lval != lval_memory)
    return 0;
  return m_location.address;
}

void
value::set_address (CORE_ADDR addr)
{
  gdb_assert (m_lval == lval_memory);
  m_location.address = addr;
}

/* Return a mark in the value chain.  All values allocated after the
   mark is obtained (except for those released) are subject to being freed
   if a subsequent value_free_to_mark is passed the mark.  */
struct value *
value_mark (void)
{
  if (all_values.empty ())
    return nullptr;
  return all_values.back ().get ();
}

/* Release a reference to VAL, which was acquired with value_incref.
   This function is also called to deallocate values from the value
   chain.  */

void
value::decref ()
{
  gdb_assert (m_reference_count > 0);
  m_reference_count--;
  if (m_reference_count == 0)
    delete this;
}

/* Free all values allocated since MARK was obtained by value_mark
   (except for those released).  */
void
value_free_to_mark (const struct value *mark)
{
  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    all_values.clear ();
  else
    all_values.erase (iter + 1, all_values.end ());
}
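
/* For illustration: a typical mark/free pairing around temporary
   values (MY_TYPE is hypothetical):

     value *mark = value_mark ();
     value *tmp = value::allocate (my_type);
     ...
     value_free_to_mark (mark);
     // tmp is gone unless release_value was called on it.  */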

/* Remove VAL from the chain all_values
   so it will not be freed automatically.  */

value_ref_ptr
release_value (struct value *val)
{
  if (val == nullptr)
    return value_ref_ptr ();

  std::vector<value_ref_ptr>::reverse_iterator iter;
  for (iter = all_values.rbegin (); iter != all_values.rend (); ++iter)
    {
      if (*iter == val)
        {
          value_ref_ptr result = *iter;
          all_values.erase (iter.base () - 1);
          return result;
        }
    }

  /* We must always return an owned reference.  Normally this happens
     because we transfer the reference from the value chain, but in
     this case the value was not on the chain.  */
  return value_ref_ptr::new_reference (val);
}

/* See value.h.  */

std::vector<value_ref_ptr>
value_release_to_mark (const struct value *mark)
{
  std::vector<value_ref_ptr> result;

  auto iter = std::find (all_values.begin (), all_values.end (), mark);
  if (iter == all_values.end ())
    std::swap (result, all_values);
  else
    {
      std::move (iter + 1, all_values.end (), std::back_inserter (result));
      all_values.erase (iter + 1, all_values.end ());
    }
  std::reverse (result.begin (), result.end ());
  return result;
}

/* See value.h.  */

struct value *
value::copy () const
{
  struct type *encl_type = enclosing_type ();
  struct value *val;

  val = value::allocate_lazy (encl_type);
  val->m_type = m_type;
  val->set_lval (m_lval);
  val->m_location = m_location;
  val->m_offset = m_offset;
  val->m_bitpos = m_bitpos;
  val->m_bitsize = m_bitsize;
  val->m_lazy = m_lazy;
  val->m_embedded_offset = embedded_offset ();
  val->m_pointed_to_offset = m_pointed_to_offset;
  val->m_modifiable = m_modifiable;
  val->m_stack = m_stack;
  val->m_is_zero = m_is_zero;
  val->m_in_history = m_in_history;
  val->m_initialized = m_initialized;
  val->m_unavailable = m_unavailable;
  val->m_optimized_out = m_optimized_out;
  val->m_parent = m_parent;
  val->m_limited_length = m_limited_length;

  if (!val->lazy ()
      && !(val->entirely_optimized_out ()
           || val->entirely_unavailable ()))
    {
      ULONGEST length = val->m_limited_length;
      if (length == 0)
        length = val->enclosing_type ()->length ();

      gdb_assert (m_contents != nullptr);
      const auto &arg_view
        = gdb::make_array_view (m_contents.get (), length);

      val->allocate_contents (false);
      gdb::array_view<gdb_byte> val_contents
        = val->contents_all_raw ().slice (0, length);

      gdb::copy (arg_view, val_contents);
    }

  if (val->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = val->m_location.computed.funcs;

      if (funcs->copy_closure)
        val->m_location.computed.closure = funcs->copy_closure (val);
    }
  return val;
}

/* Return a "const" and/or "volatile" qualified version of the value V.
   If CNST is true, then the returned value will be qualified with
   "const".
   If VOLTL is true, then the returned value will be qualified with
   "volatile".  */

struct value *
make_cv_value (int cnst, int voltl, struct value *v)
{
  struct type *val_type = v->type ();
  struct type *m_enclosing_type = v->enclosing_type ();
  struct value *cv_val = v->copy ();

  cv_val->deprecated_set_type (make_cv_type (cnst, voltl, val_type, NULL));
  cv_val->set_enclosing_type (make_cv_type (cnst, voltl, m_enclosing_type,
                                            NULL));

  return cv_val;
}

/* See value.h.  */

struct value *
value::non_lval ()
{
  if (this->lval () != not_lval)
    {
      struct type *enc_type = enclosing_type ();
      struct value *val = value::allocate (enc_type);

      gdb::copy (contents_all (), val->contents_all_raw ());
      val->m_type = m_type;
      val->set_embedded_offset (embedded_offset ());
      val->set_pointed_to_offset (pointed_to_offset ());
      return val;
    }
  return this;
}

/* See value.h.  */

void
value::force_lval (CORE_ADDR addr)
{
  gdb_assert (this->lval () == not_lval);

  write_memory (addr, contents_raw ().data (), type ()->length ());
  m_lval = lval_memory;
  m_location.address = addr;
}

void
value::set_component_location (const struct value *whole)
{
  struct type *type;

  gdb_assert (whole->m_lval != lval_xcallable);

  if (whole->m_lval == lval_internalvar)
    m_lval = lval_internalvar_component;
  else
    m_lval = whole->m_lval;

  m_location = whole->m_location;
  if (whole->m_lval == lval_computed)
    {
      const struct lval_funcs *funcs = whole->m_location.computed.funcs;

      if (funcs->copy_closure)
        m_location.computed.closure = funcs->copy_closure (whole);
    }

  /* If the WHOLE value has a dynamically resolved location property then
     update the address of the COMPONENT.  */
  type = whole->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION (type)->is_constant ())
    set_address (TYPE_DATA_LOCATION_ADDR (type));

  /* Similarly, if the COMPONENT value has a dynamically resolved location
     property then update its address.  */
  type = this->type ();
  if (NULL != TYPE_DATA_LOCATION (type)
      && TYPE_DATA_LOCATION (type)->is_constant ())
    {
      /* If the COMPONENT has a dynamic location, and is an
         lval_internalvar_component, then we change it to a lval_memory.

         Usually a component of an internalvar is created non-lazy, and has
         its content immediately copied from the parent internalvar.
         However, for components with a dynamic location, the content of
         the component is not contained within the parent, but is instead
         accessed indirectly.  Further, the component will be created as a
         lazy value.

         By changing the type of the component to lval_memory we ensure
         that value_fetch_lazy can successfully load the component.

         This solution isn't ideal, but a real fix would require values to
         carry around both the parent value contents, and the contents of
         any dynamic fields within the parent.  This is a substantial
         change to how values work in GDB.  */
      if (this->lval () == lval_internalvar_component)
        {
          gdb_assert (lazy ());
          m_lval = lval_memory;
        }
      else
        gdb_assert (this->lval () == lval_memory);
      set_address (TYPE_DATA_LOCATION_ADDR (type));
    }
}

/* Access to the value history.  */

/* Record a new value in the value history.
   Returns the absolute history index of the entry.  */

int
value::record_latest ()
{
  /* We don't want this value to have anything to do with the inferior anymore.
     In particular, "set $1 = 50" should not affect the variable from which
     the value was taken, and fast watchpoints should be able to assume that
     a value on the value history never changes.  */
  if (lazy ())
    fetch_lazy ();

  /* Mark the value as recorded in the history for the availability check.  */
  m_in_history = true;

  /* We preserve VALUE_LVAL so that the user can find out where it was fetched
     from.  This is a bit dubious, because then *&$1 does not just return $1
     but the current contents of that location.  c'est la vie...  */
  set_modifiable (false);

  value_history.push_back (release_value (this));

  return value_history.size ();
}

/* Return a copy of the value in the history with sequence number NUM.  */

struct value *
access_value_history (int num)
{
  int absnum = num;

  if (absnum <= 0)
    absnum += value_history.size ();

  if (absnum <= 0)
    {
      if (num == 0)
        error (_("The history is empty."));
      else if (num == 1)
        error (_("There is only one value in the history."));
      else
        error (_("History does not go back to $$%d."), -num);
1745 | } | |
1746 | if (absnum > value_history.size ()) | |
1747 | error (_("History has not yet reached $%d."), absnum); | |
1748 | ||
1749 | absnum--; | |
1750 | ||
1751 | return value_history[absnum]->copy (); | |
1752 | } | |
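/* For example, with three values recorded: NUM 3 and NUM 0 ("$$0")
   both return the most recent value, NUM -1 ("$$1") returns the one
   before it, and NUM -3 ("$$3") fails with "History does not go back
   to $$3.".  */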
1753 | ||
1754 | /* See value.h. */ | |
1755 | ||
1756 | ULONGEST | |
1757 | value_history_count () | |
1758 | { | |
1759 | return value_history.size (); | |
1760 | } | |
1761 | ||
1762 | static void | |
1763 | show_values (const char *num_exp, int from_tty) | |
1764 | { | |
1765 | int i; | |
1766 | struct value *val; | |
1767 | static int num = 1; | |
1768 | ||
1769 | if (num_exp) | |
1770 | { | |
1771 | /* "show values +" should print from the stored position. | |
1772 | "show values <exp>" should print around value number <exp>. */ | |
1773 | if (num_exp[0] != '+' || num_exp[1] != '\0') | |
1774 | num = parse_and_eval_long (num_exp) - 5; | |
1775 | } | |
1776 | else | |
1777 | { | |
1778 | /* "show values" means print the last 10 values. */ | |
1779 | num = value_history.size () - 9; | |
1780 | } | |
1781 | ||
1782 | if (num <= 0) | |
1783 | num = 1; | |
1784 | ||
1785 | for (i = num; i < num + 10 && i <= value_history.size (); i++) | |
1786 | { | |
1787 | struct value_print_options opts; | |
1788 | ||
1789 | val = access_value_history (i); | |
1790 | gdb_printf (("$%d = "), i); | |
1791 | get_user_print_options (&opts); | |
1792 | value_print (val, gdb_stdout, &opts); | |
1793 | gdb_printf (("\n")); | |
1794 | } | |
1795 | ||
1796 | /* The next "show values +" should start after what we just printed. */ | |
1797 | num += 10; | |
1798 | ||
1799 | /* Hitting just return after this command should do the same thing as | |
1800 | "show values +". If num_exp is null, this is unnecessary, since | |
1801 | "show values +" is not useful after "show values". */ | |
1802 | if (from_tty && num_exp) | |
1803 | set_repeat_arguments ("+"); | |
1804 | } | |
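/* For example, "show values 23" prints the ten values $18 through $27
   (NUM is set to 23 - 5 so that $23 lands in the middle of the
   window), and a subsequent "show values +" or a bare RET continues
   with $28 onward.  */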
1805 | \f | |
1806 | enum internalvar_kind | |
1807 | { | |
1808 | /* The internal variable is empty. */ | |
1809 | INTERNALVAR_VOID, | |
1810 | ||
1811 | /* The value of the internal variable is provided directly as | |
1812 | a GDB value object. */ | |
1813 | INTERNALVAR_VALUE, | |
1814 | ||
1815 | /* A fresh value is computed via a call-back routine on every | |
1816 | access to the internal variable. */ | |
1817 | INTERNALVAR_MAKE_VALUE, | |
1818 | ||
1819 | /* The internal variable holds a GDB internal convenience function. */ | |
1820 | INTERNALVAR_FUNCTION, | |
1821 | ||
1822 | /* The variable holds an integer value. */ | |
1823 | INTERNALVAR_INTEGER, | |
1824 | ||
1825 | /* The variable holds a GDB-provided string. */ | |
1826 | INTERNALVAR_STRING, | |
1827 | }; | |
1828 | ||
1829 | union internalvar_data | |
1830 | { | |
1831 | /* A value object used with INTERNALVAR_VALUE. */ | |
1832 | struct value *value; | |
1833 | ||
1834 | /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */ | |
1835 | struct | |
1836 | { | |
1837 | /* The functions to call. */ | |
1838 | const struct internalvar_funcs *functions; | |
1839 | ||
1840 | /* The function's user-data. */ | |
1841 | void *data; | |
1842 | } make_value; | |
1843 | ||
1844 | /* The internal function used with INTERNALVAR_FUNCTION. */ | |
1845 | struct | |
1846 | { | |
1847 | struct internal_function *function; | |
1848 | /* True if this is the canonical name for the function. */ | |
1849 | int canonical; | |
1850 | } fn; | |
1851 | ||
1852 | /* An integer value used with INTERNALVAR_INTEGER. */ | |
1853 | struct | |
1854 | { | |
1855 | /* If type is non-NULL, it will be used as the type to generate | |
1856 | a value for this internal variable. If type is NULL, a default | |
1857 | integer type for the architecture is used. */ | |
1858 | struct type *type; | |
1859 | LONGEST val; | |
1860 | } integer; | |
1861 | ||
1862 | /* A string value used with INTERNALVAR_STRING. */ | |
1863 | char *string; | |
1864 | }; | |
1865 | ||
1866 | /* Internal variables. These are variables within the debugger | |
1867 | that hold values assigned by debugger commands. | |
1868 | The user refers to them with a '$' prefix | |
1869 | that does not appear in the variable names stored internally. */ | |
1870 | ||
1871 | struct internalvar | |
1872 | { | |
1873 | internalvar (std::string name) | |
1874 | : name (std::move (name)) | |
1875 | {} | |
1876 | ||
1877 | internalvar (internalvar &&other) | |
1878 | : name (std::move (other.name)), | |
1879 | kind (other.kind), | |
1880 | u (other.u) | |
1881 | { | |
1882 | other.kind = INTERNALVAR_VOID; | |
1883 | } | |
1884 | ||
1885 | ~internalvar () | |
1886 | { | |
1887 | clear_internalvar (this); | |
1888 | } | |
1889 | ||
1890 | std::string name; | |
1891 | ||
1892 | /* We support various different kinds of content of an internal variable. | |
1893 | enum internalvar_kind specifies the kind, and union internalvar_data | |
1894 | provides the data associated with this particular kind. */ | |
1895 | ||
1896 | enum internalvar_kind kind = INTERNALVAR_VOID; | |
1897 | ||
1898 | union internalvar_data u {}; | |
1899 | }; | |
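/* For example, "set $foo = 5" leaves $foo with kind INTERNALVAR_VALUE
   holding a copy of the value (see set_internalvar below), while
   set_internalvar_integer stores a bare LONGEST and yields
   INTERNALVAR_INTEGER; KIND always selects which member of U is
   valid.  */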
1900 | ||
1901 | /* Use std::map, a sorted container, to make the order of iteration (and | |
1902 | therefore the output of "show convenience") stable. */ | |
1903 | ||
1904 | static std::map<std::string, internalvar> internalvars; | |
1905 | ||
1906 | /* If the variable does not already exist, create it and give it the | |
1907 | value of the assignment expression; otherwise leave it unchanged. */ | |
1908 | static void | |
1909 | init_if_undefined_command (const char* args, int from_tty) | |
1910 | { | |
1911 | struct internalvar *intvar = nullptr; | |
1912 | ||
1913 | /* Parse the expression - this is taken from set_command(). */ | |
1914 | expression_up expr = parse_expression (args); | |
1915 | ||
1916 | /* Validate the expression. | |
1917 | Was the expression an assignment? | |
1918 | Or even an expression at all? */ | |
1919 | if (expr->first_opcode () != BINOP_ASSIGN) | |
1920 | error (_("Init-if-undefined requires an assignment expression.")); | |
1921 | ||
1922 | /* Extract the variable from the parsed expression. */ | |
1923 | expr::assign_operation *assign | |
1924 | = dynamic_cast<expr::assign_operation *> (expr->op.get ()); | |
1925 | if (assign != nullptr) | |
1926 | { | |
1927 | expr::operation *lhs = assign->get_lhs (); | |
1928 | expr::internalvar_operation *ivarop | |
1929 | = dynamic_cast<expr::internalvar_operation *> (lhs); | |
1930 | if (ivarop != nullptr) | |
1931 | intvar = ivarop->get_internalvar (); | |
1932 | } | |
1933 | ||
1934 | if (intvar == nullptr) | |
1935 | error (_("The first parameter to init-if-undefined " | |
1936 | "should be a GDB variable.")); | |
1937 | ||
1938 | /* Only evaluate the expression if the lvalue is void. | |
1939 | This may still fail if the expression is invalid. */ | |
1940 | if (intvar->kind == INTERNALVAR_VOID) | |
1941 | expr->evaluate (); | |
1942 | } | |
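/* For example, "init-if-undefined $seen = 0" assigns 0 to $seen only
   while $seen is still void; once the user has given it a value, the
   assignment is no longer evaluated.  */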
1943 | ||
1944 | ||
1945 | /* Look up an internal variable with name NAME. NAME should not | |
1946 | normally include a dollar sign. | |
1947 | ||
1948 | If the specified internal variable does not exist, | |
1949 | the return value is NULL. */ | |
1950 | ||
1951 | struct internalvar * | |
1952 | lookup_only_internalvar (const char *name) | |
1953 | { | |
1954 | auto it = internalvars.find (name); | |
1955 | if (it == internalvars.end ()) | |
1956 | return nullptr; | |
1957 | ||
1958 | return &it->second; | |
1959 | } | |
1960 | ||
1961 | /* Complete NAME by comparing it to the names of internal | |
1962 | variables. */ | |
1963 | ||
1964 | void | |
1965 | complete_internalvar (completion_tracker &tracker, const char *name) | |
1966 | { | |
1967 | int len = strlen (name); | |
1968 | ||
1969 | for (auto &pair : internalvars) | |
1970 | { | |
1971 | const internalvar &var = pair.second; | |
1972 | ||
1973 | if (var.name.compare (0, len, name) == 0) | |
1974 | tracker.add_completion (make_unique_xstrdup (var.name.c_str ())); | |
1975 | } | |
1976 | } | |
1977 | ||
1978 | /* Create an internal variable with name NAME and with a void value. | |
1979 | NAME should not normally include a dollar sign. | |
1980 | ||
1981 | An internal variable with that name must not exist already. */ | |
1982 | ||
1983 | struct internalvar * | |
1984 | create_internalvar (const char *name) | |
1985 | { | |
1986 | auto pair = internalvars.emplace (std::make_pair (name, internalvar (name))); | |
1987 | gdb_assert (pair.second); | |
1988 | ||
1989 | return &pair.first->second; | |
1990 | } | |
1991 | ||
1992 | /* Create an internal variable with name NAME and register FUN as the | |
1993 | function that value_of_internalvar uses to create a value whenever | |
1994 | this variable is referenced. NAME should not normally include a | |
1995 | dollar sign. DATA is passed uninterpreted to FUN when it is | |
1996 | called. CLEANUP, if not NULL, is called when the internal variable | |
1997 | is destroyed. It is passed DATA as its only argument. */ | |
1998 | ||
1999 | struct internalvar * | |
2000 | create_internalvar_type_lazy (const char *name, | |
2001 | const struct internalvar_funcs *funcs, | |
2002 | void *data) | |
2003 | { | |
2004 | struct internalvar *var = create_internalvar (name); | |
2005 | ||
2006 | var->kind = INTERNALVAR_MAKE_VALUE; | |
2007 | var->u.make_value.functions = funcs; | |
2008 | var->u.make_value.data = data; | |
2009 | return var; | |
2010 | } | |
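/* A minimal sketch of a caller, kept out of the build with #if 0; the
   "$_fortytwo" name and the callback are illustrative only, and the
   internalvar_funcs layout assumed here (a make_value callback plus an
   optional compile_to_ax hook) is the one declared in value.h.  Every
   reference to the variable re-runs the callback:  */
#if 0
static struct value *
fortytwo_make_value (struct gdbarch *gdbarch, struct internalvar *var,
		     void *data)
{
  /* Build a fresh int value on each access.  */
  return value_from_longest (builtin_type (gdbarch)->builtin_int, 42);
}

static const struct internalvar_funcs fortytwo_funcs =
{
  fortytwo_make_value,
  nullptr /* compile_to_ax */,
};

/* Typically done from an _initialize_* function.  */
create_internalvar_type_lazy ("_fortytwo", &fortytwo_funcs, nullptr);
#endif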
2011 | ||
2012 | /* See value.h. */ | |
2013 | ||
2014 | int | |
2015 | compile_internalvar_to_ax (struct internalvar *var, | |
2016 | struct agent_expr *expr, | |
2017 | struct axs_value *value) | |
2018 | { | |
2019 | if (var->kind != INTERNALVAR_MAKE_VALUE | |
2020 | || var->u.make_value.functions->compile_to_ax == NULL) | |
2021 | return 0; | |
2022 | ||
2023 | var->u.make_value.functions->compile_to_ax (var, expr, value, | |
2024 | var->u.make_value.data); | |
2025 | return 1; | |
2026 | } | |
2027 | ||
2028 | /* Look up an internal variable with name NAME. NAME should not | |
2029 | normally include a dollar sign. | |
2030 | ||
2031 | If the specified internal variable does not exist, | |
2032 | one is created, with a void value. */ | |
2033 | ||
2034 | struct internalvar * | |
2035 | lookup_internalvar (const char *name) | |
2036 | { | |
2037 | struct internalvar *var; | |
2038 | ||
2039 | var = lookup_only_internalvar (name); | |
2040 | if (var) | |
2041 | return var; | |
2042 | ||
2043 | return create_internalvar (name); | |
2044 | } | |
2045 | ||
2046 | /* Return current value of internal variable VAR. For variables that | |
2047 | are not inherently typed, use a value type appropriate for GDBARCH. */ | |
2048 | ||
2049 | struct value * | |
2050 | value_of_internalvar (struct gdbarch *gdbarch, struct internalvar *var) | |
2051 | { | |
2052 | struct value *val; | |
2053 | struct trace_state_variable *tsv; | |
2054 | ||
2055 | /* If there is a trace state variable of the same name, assume that | |
2056 | is what we really want to see. */ | |
2057 | tsv = find_trace_state_variable (var->name.c_str ()); | |
2058 | if (tsv) | |
2059 | { | |
2060 | tsv->value_known = target_get_trace_state_variable_value (tsv->number, | |
2061 | &(tsv->value)); | |
2062 | if (tsv->value_known) | |
2063 | val = value_from_longest (builtin_type (gdbarch)->builtin_int64, | |
2064 | tsv->value); | |
2065 | else | |
2066 | val = value::allocate (builtin_type (gdbarch)->builtin_void); | |
2067 | return val; | |
2068 | } | |
2069 | ||
2070 | switch (var->kind) | |
2071 | { | |
2072 | case INTERNALVAR_VOID: | |
2073 | val = value::allocate (builtin_type (gdbarch)->builtin_void); | |
2074 | break; | |
2075 | ||
2076 | case INTERNALVAR_FUNCTION: | |
2077 | val = value::allocate (builtin_type (gdbarch)->internal_fn); | |
2078 | break; | |
2079 | ||
2080 | case INTERNALVAR_INTEGER: | |
2081 | if (!var->u.integer.type) | |
2082 | val = value_from_longest (builtin_type (gdbarch)->builtin_int, | |
2083 | var->u.integer.val); | |
2084 | else | |
2085 | val = value_from_longest (var->u.integer.type, var->u.integer.val); | |
2086 | break; | |
2087 | ||
2088 | case INTERNALVAR_STRING: | |
2089 | val = current_language->value_string (gdbarch, | |
2090 | var->u.string, | |
2091 | strlen (var->u.string)); | |
2092 | break; | |
2093 | ||
2094 | case INTERNALVAR_VALUE: | |
2095 | val = var->u.value->copy (); | |
2096 | if (val->lazy ()) | |
2097 | val->fetch_lazy (); | |
2098 | break; | |
2099 | ||
2100 | case INTERNALVAR_MAKE_VALUE: | |
2101 | val = (*var->u.make_value.functions->make_value) (gdbarch, var, | |
2102 | var->u.make_value.data); | |
2103 | break; | |
2104 | ||
2105 | default: | |
2106 | internal_error (_("bad kind")); | |
2107 | } | |
2108 | ||
2109 | /* Change the VALUE_LVAL to lval_internalvar so that future operations | |
2110 | on this value go back to affect the original internal variable. | |
2111 | ||
2112 | Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have | |
2113 | no underlying modifiable state in the internal variable. | |
2114 | ||
2115 | Likewise, if the variable's value is a computed lvalue, we want | |
2116 | references to it to produce another computed lvalue, where | |
2117 | references and assignments actually operate through the | |
2118 | computed value's functions. | |
2119 | ||
2120 | This means that internal variables with computed values | |
2121 | behave a little differently from other internal variables: | |
2122 | assignments to them don't just replace the previous value | |
2123 | altogether. At the moment, this seems like the behavior we | |
2124 | want. */ | |
2125 | ||
2126 | if (var->kind != INTERNALVAR_MAKE_VALUE | |
2127 | && val->lval () != lval_computed) | |
2128 | { | |
2129 | val->set_lval (lval_internalvar); | |
2130 | VALUE_INTERNALVAR (val) = var; | |
2131 | } | |
2132 | ||
2133 | return val; | |
2134 | } | |
2135 | ||
2136 | int | |
2137 | get_internalvar_integer (struct internalvar *var, LONGEST *result) | |
2138 | { | |
2139 | if (var->kind == INTERNALVAR_INTEGER) | |
2140 | { | |
2141 | *result = var->u.integer.val; | |
2142 | return 1; | |
2143 | } | |
2144 | ||
2145 | if (var->kind == INTERNALVAR_VALUE) | |
2146 | { | |
2147 | struct type *type = check_typedef (var->u.value->type ()); | |
2148 | ||
2149 | if (type->code () == TYPE_CODE_INT) | |
2150 | { | |
2151 | *result = value_as_long (var->u.value); | |
2152 | return 1; | |
2153 | } | |
2154 | } | |
2155 | ||
2156 | if (var->kind == INTERNALVAR_MAKE_VALUE) | |
2157 | { | |
2158 | struct gdbarch *gdbarch = get_current_arch (); | |
2159 | struct value *val | |
2160 | = (*var->u.make_value.functions->make_value) (gdbarch, var, | |
2161 | var->u.make_value.data); | |
2162 | struct type *type = check_typedef (val->type ()); | |
2163 | ||
2164 | if (type->code () == TYPE_CODE_INT) | |
2165 | { | |
2166 | *result = value_as_long (val); | |
2167 | return 1; | |
2168 | } | |
2169 | } | |
2170 | ||
2171 | return 0; | |
2172 | } | |
2173 | ||
2174 | static int | |
2175 | get_internalvar_function (struct internalvar *var, | |
2176 | struct internal_function **result) | |
2177 | { | |
2178 | switch (var->kind) | |
2179 | { | |
2180 | case INTERNALVAR_FUNCTION: | |
2181 | *result = var->u.fn.function; | |
2182 | return 1; | |
2183 | ||
2184 | default: | |
2185 | return 0; | |
2186 | } | |
2187 | } | |
2188 | ||
2189 | void | |
2190 | set_internalvar_component (struct internalvar *var, | |
2191 | LONGEST offset, LONGEST bitpos, | |
2192 | LONGEST bitsize, struct value *newval) | |
2193 | { | |
2194 | gdb_byte *addr; | |
2195 | struct gdbarch *gdbarch; | |
2196 | int unit_size; | |
2197 | ||
2198 | switch (var->kind) | |
2199 | { | |
2200 | case INTERNALVAR_VALUE: | |
2201 | addr = var->u.value->contents_writeable ().data (); | |
2202 | gdbarch = var->u.value->arch (); | |
2203 | unit_size = gdbarch_addressable_memory_unit_size (gdbarch); | |
2204 | ||
2205 | if (bitsize) | |
2206 | modify_field (var->u.value->type (), addr + offset, | |
2207 | value_as_long (newval), bitpos, bitsize); | |
2208 | else | |
2209 | memcpy (addr + offset * unit_size, newval->contents ().data (), | |
2210 | newval->type ()->length ()); | |
2211 | break; | |
2212 | ||
2213 | default: | |
2214 | /* We can never get a component of any other kind. */ | |
2215 | internal_error (_("set_internalvar_component")); | |
2216 | } | |
2217 | } | |
2218 | ||
2219 | void | |
2220 | set_internalvar (struct internalvar *var, struct value *val) | |
2221 | { | |
2222 | enum internalvar_kind new_kind; | |
2223 | union internalvar_data new_data = { 0 }; | |
2224 | ||
2225 | if (var->kind == INTERNALVAR_FUNCTION && var->u.fn.canonical) | |
2226 | error (_("Cannot overwrite convenience function %s"), var->name.c_str ()); | |
2227 | ||
2228 | /* Prepare new contents. */ | |
2229 | switch (check_typedef (val->type ())->code ()) | |
2230 | { | |
2231 | case TYPE_CODE_VOID: | |
2232 | new_kind = INTERNALVAR_VOID; | |
2233 | break; | |
2234 | ||
2235 | case TYPE_CODE_INTERNAL_FUNCTION: | |
2236 | gdb_assert (val->lval () == lval_internalvar); | |
2237 | new_kind = INTERNALVAR_FUNCTION; | |
2238 | get_internalvar_function (VALUE_INTERNALVAR (val), | |
2239 | &new_data.fn.function); | |
2240 | /* Copies created here are never canonical. */ | |
2241 | break; | |
2242 | ||
2243 | default: | |
2244 | new_kind = INTERNALVAR_VALUE; | |
2245 | struct value *copy = val->copy (); | |
2246 | copy->set_modifiable (true); | |
2247 | ||
2248 | /* Force the value to be fetched from the target now, to avoid problems | |
2249 | later when this internalvar is referenced and the target is gone or | |
2250 | has changed. */ | |
2251 | if (copy->lazy ()) | |
2252 | copy->fetch_lazy (); | |
2253 | ||
2254 | /* Release the value from the value chain to prevent it from being | |
2255 | deleted by free_all_values. From here on this function should not | |
2256 | call error () until new_data is installed into the var->u to avoid | |
2257 | leaking memory. */ | |
2258 | new_data.value = release_value (copy).release (); | |
2259 | ||
2260 | /* Internal variables which are created from values with a dynamic | |
2261 | location don't need the location property of the origin anymore. | |
2262 | The resolved dynamic location is used in preference to any other | |
2263 | address when accessing the value. | |
2264 | If we kept it, we would still refer to the origin value. | |
2265 | Remove the location property in case it exists. */ | |
2266 | new_data.value->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION); | |
2267 | ||
2268 | break; | |
2269 | } | |
2270 | ||
2271 | /* Clean up old contents. */ | |
2272 | clear_internalvar (var); | |
2273 | ||
2274 | /* Switch over. */ | |
2275 | var->kind = new_kind; | |
2276 | var->u = new_data; | |
2277 | /* End code which must not call error(). */ | |
2278 | } | |
2279 | ||
2280 | void | |
2281 | set_internalvar_integer (struct internalvar *var, LONGEST l) | |
2282 | { | |
2283 | /* Clean up old contents. */ | |
2284 | clear_internalvar (var); | |
2285 | ||
2286 | var->kind = INTERNALVAR_INTEGER; | |
2287 | var->u.integer.type = NULL; | |
2288 | var->u.integer.val = l; | |
2289 | } | |
2290 | ||
2291 | void | |
2292 | set_internalvar_string (struct internalvar *var, const char *string) | |
2293 | { | |
2294 | /* Clean up old contents. */ | |
2295 | clear_internalvar (var); | |
2296 | ||
2297 | var->kind = INTERNALVAR_STRING; | |
2298 | var->u.string = xstrdup (string); | |
2299 | } | |
2300 | ||
2301 | static void | |
2302 | set_internalvar_function (internalvar *var, internal_function_up f) | |
2303 | { | |
2304 | /* Clean up old contents. */ | |
2305 | clear_internalvar (var); | |
2306 | ||
2307 | var->kind = INTERNALVAR_FUNCTION; | |
2308 | var->u.fn.function = f.release (); | |
2309 | var->u.fn.canonical = 1; | |
2310 | /* Variables installed here are always the canonical version. */ | |
2311 | } | |
2312 | ||
2313 | void | |
2314 | clear_internalvar (struct internalvar *var) | |
2315 | { | |
2316 | /* Clean up old contents. */ | |
2317 | switch (var->kind) | |
2318 | { | |
2319 | case INTERNALVAR_VALUE: | |
2320 | var->u.value->decref (); | |
2321 | break; | |
2322 | ||
2323 | case INTERNALVAR_STRING: | |
2324 | xfree (var->u.string); | |
2325 | break; | |
2326 | ||
2327 | case INTERNALVAR_FUNCTION: | |
2328 | delete var->u.fn.function; | |
2329 | break; | |
2330 | ||
2331 | default: | |
2332 | break; | |
2333 | } | |
2334 | ||
2335 | /* Reset to void kind. */ | |
2336 | var->kind = INTERNALVAR_VOID; | |
2337 | } | |
2338 | ||
2339 | const char * | |
2340 | internalvar_name (const struct internalvar *var) | |
2341 | { | |
2342 | return var->name.c_str (); | |
2343 | } | |
2344 | ||
2345 | const char * | |
2346 | value_internal_function_name (struct value *val) | |
2347 | { | |
2348 | struct internal_function *ifn; | |
2349 | int result; | |
2350 | ||
2351 | gdb_assert (val->lval () == lval_internalvar); | |
2352 | result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn); | |
2353 | gdb_assert (result); | |
2354 | ||
2355 | return ifn->name.c_str (); | |
2356 | } | |
2357 | ||
2358 | struct value * | |
2359 | call_internal_function (struct gdbarch *gdbarch, | |
2360 | const struct language_defn *language, | |
2361 | struct value *func, int argc, struct value **argv, | |
2362 | enum noside noside) | |
2363 | { | |
2364 | struct internal_function *ifn; | |
2365 | int result; | |
2366 | ||
2367 | gdb_assert (func->lval () == lval_internalvar); | |
2368 | result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn); | |
2369 | gdb_assert (result); | |
2370 | ||
2371 | return ifn->handler (gdbarch, language, ifn->cookie, argc, argv, noside); | |
2372 | } | |
2373 | ||
2374 | /* The 'function' command. This does nothing -- it is just a | |
2375 | placeholder to let "help function NAME" work. This is also used as | |
2376 | the implementation of the sub-command that is created when | |
2377 | registering an internal function. */ | |
2378 | static void | |
2379 | function_command (const char *command, int from_tty) | |
2380 | { | |
2381 | /* Do nothing. */ | |
2382 | } | |
2383 | ||
2384 | /* Helper function that does the work for add_internal_function. */ | |
2385 | ||
2386 | static struct cmd_list_element * | |
2387 | do_add_internal_function (const char *name, const char *doc, | |
2388 | internal_function_fn_noside handler, void *cookie) | |
2389 | { | |
2390 | set_internalvar_function (lookup_internalvar (name), | |
2391 | std::make_unique<internal_function> (name, handler, | |
2392 | cookie)); | |
2393 | ||
2394 | return add_cmd (name, no_class, function_command, doc, &functionlist); | |
2395 | } | |
2396 | ||
2397 | /* See value.h. */ | |
2398 | ||
2399 | void | |
2400 | add_internal_function (const char *name, const char *doc, | |
2401 | internal_function_fn_noside handler, void *cookie) | |
2402 | { | |
2403 | do_add_internal_function (name, doc, handler, cookie); | |
2404 | } | |
2405 | ||
2406 | /* By default, internal functions are assumed to return int. Return a value | |
2407 | with that type to reflect this. If this is not correct for a specific | |
2408 | internal function, it should use an internal_function_fn_noside handler to | |
2409 | bypass this default. */ | |
2410 | ||
2411 | static struct value * | |
2412 | internal_function_default_return_type (struct gdbarch *gdbarch) | |
2413 | { | |
2414 | return value::zero (builtin_type (gdbarch)->builtin_int, not_lval); | |
2415 | } | |
2416 | ||
2417 | /* See value.h. */ | |
2418 | ||
2419 | void | |
2420 | add_internal_function (const char *name, const char *doc, | |
2421 | internal_function_fn handler, void *cookie) | |
2422 | { | |
2423 | internal_function_fn_noside fn | |
2424 | = [=] (struct gdbarch *gdbarch, | |
2425 | const struct language_defn *language, | |
2426 | void *_cookie, | |
2427 | int argc, | |
2428 | struct value **argv, | |
2429 | enum noside noside) | |
2430 | { | |
2431 | if (noside == EVAL_AVOID_SIDE_EFFECTS) | |
2432 | return internal_function_default_return_type (gdbarch); | |
2433 | return handler (gdbarch, language, _cookie, argc, argv); | |
2434 | }; | |
2435 | ||
2436 | do_add_internal_function (name, doc, fn, cookie); | |
2437 | } | |
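/* A minimal sketch of registering a convenience function, kept out of
   the build with #if 0; the "$_argcount" name and the handler are
   illustrative only.  After registration, "print $_argcount (1, 2, 3)"
   calls the handler with ARGC == 3, and "help function _argcount"
   prints the doc string:  */
#if 0
static struct value *
argcount_handler (struct gdbarch *gdbarch,
		  const struct language_defn *language,
		  void *cookie, int argc, struct value **argv)
{
  /* Ignore the arguments themselves; just count them.  */
  return value_from_longest (builtin_type (gdbarch)->builtin_int, argc);
}

/* Typically done from an _initialize_* function.  */
add_internal_function ("_argcount", _("Return the number of arguments."),
		       argcount_handler, nullptr);
#endif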
2438 | ||
2439 | /* See value.h. */ | |
2440 | ||
2441 | void | |
2442 | add_internal_function (gdb::unique_xmalloc_ptr<char> &&name, | |
2443 | gdb::unique_xmalloc_ptr<char> &&doc, | |
2444 | internal_function_fn_noside handler, void *cookie) | |
2445 | { | |
2446 | struct cmd_list_element *cmd | |
2447 | = do_add_internal_function (name.get (), doc.get (), handler, cookie); | |
2448 | ||
2449 | /* Manually transfer the ownership of the doc and name strings to CMD by | |
2450 | setting the appropriate flags. */ | |
2451 | (void) doc.release (); | |
2452 | cmd->doc_allocated = 1; | |
2453 | (void) name.release (); | |
2454 | cmd->name_allocated = 1; | |
2455 | } | |
2456 | ||
2457 | /* See value.h. */ | |
2458 | ||
2459 | void | |
2460 | add_internal_function (gdb::unique_xmalloc_ptr<char> &&name, | |
2461 | gdb::unique_xmalloc_ptr<char> &&doc, | |
2462 | internal_function_fn handler, void *cookie) | |
2463 | { | |
2464 | internal_function_fn_noside fn | |
2465 | = [=] (struct gdbarch *gdbarch, | |
2466 | const struct language_defn *language, | |
2467 | void *_cookie, | |
2468 | int argc, | |
2469 | struct value **argv, | |
2470 | enum noside noside) | |
2471 | { | |
2472 | if (noside == EVAL_AVOID_SIDE_EFFECTS) | |
2473 | return internal_function_default_return_type (gdbarch); | |
2474 | return handler (gdbarch, language, _cookie, argc, argv); | |
2475 | }; | |
2476 | ||
2477 | add_internal_function (std::forward<gdb::unique_xmalloc_ptr<char>> (name), | |
2478 | std::forward<gdb::unique_xmalloc_ptr<char>> (doc), | |
2479 | fn, cookie); | |
2480 | } | |
2481 | ||
2482 | void | |
2483 | value::preserve (struct objfile *objfile, copied_types_hash_t &copied_types) | |
2484 | { | |
2485 | if (m_type->objfile_owner () == objfile) | |
2486 | m_type = copy_type_recursive (m_type, copied_types); | |
2487 | ||
2488 | if (m_enclosing_type->objfile_owner () == objfile) | |
2489 | m_enclosing_type = copy_type_recursive (m_enclosing_type, copied_types); | |
2490 | } | |
2491 | ||
2492 | /* Likewise for internal variable VAR. */ | |
2493 | ||
2494 | static void | |
2495 | preserve_one_internalvar (struct internalvar *var, struct objfile *objfile, | |
2496 | copied_types_hash_t &copied_types) | |
2497 | { | |
2498 | switch (var->kind) | |
2499 | { | |
2500 | case INTERNALVAR_INTEGER: | |
2501 | if (var->u.integer.type | |
2502 | && var->u.integer.type->objfile_owner () == objfile) | |
2503 | var->u.integer.type | |
2504 | = copy_type_recursive (var->u.integer.type, copied_types); | |
2505 | break; | |
2506 | ||
2507 | case INTERNALVAR_VALUE: | |
2508 | var->u.value->preserve (objfile, copied_types); | |
2509 | break; | |
2510 | } | |
2511 | } | |
2512 | ||
2513 | /* Make sure that all types and values referenced by VAROBJ are updated before | |
2514 | OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and | |
2515 | duplicates. */ | |
2516 | ||
2517 | static void | |
2518 | preserve_one_varobj (struct varobj *varobj, struct objfile *objfile, | |
2519 | copied_types_hash_t &copied_types) | |
2520 | { | |
2521 | if (varobj->type->is_objfile_owned () | |
2522 | && varobj->type->objfile_owner () == objfile) | |
2523 | { | |
2524 | varobj->type | |
2525 | = copy_type_recursive (varobj->type, copied_types); | |
2526 | } | |
2527 | ||
2528 | if (varobj->value != nullptr) | |
2529 | varobj->value->preserve (objfile, copied_types); | |
2530 | } | |
2531 | ||
2532 | /* Update the internal variables and value history when OBJFILE is | |
2533 | discarded; we must copy the types out of the objfile. New global types | |
2534 | will be created for every convenience variable which currently points to | |
2535 | this objfile's types, and the convenience variables will be adjusted to | |
2536 | use the new global types. */ | |
2537 | ||
2538 | void | |
2539 | preserve_values (struct objfile *objfile) | |
2540 | { | |
2541 | /* Table of types we have already copied, used to prevent cycles | |
2542 | and duplicates while OBJFILE's types are copied out. */ | |
2543 | copied_types_hash_t copied_types; | |
2544 | ||
2545 | for (const value_ref_ptr &item : value_history) | |
2546 | item->preserve (objfile, copied_types); | |
2547 | ||
2548 | for (auto &pair : internalvars) | |
2549 | preserve_one_internalvar (&pair.second, objfile, copied_types); | |
2550 | ||
2551 | /* For the remaining varobj, check that none has type owned by OBJFILE. */ | |
2552 | all_root_varobjs ([&copied_types, objfile] (struct varobj *varobj) | |
2553 | { | |
2554 | preserve_one_varobj (varobj, objfile, copied_types); | |
2555 | }); | |
2556 | ||
2557 | preserve_ext_lang_values (objfile, copied_types); | |
2558 | } | |
2559 | ||
2560 | static void | |
2561 | show_convenience (const char *ignore, int from_tty) | |
2562 | { | |
2563 | struct gdbarch *gdbarch = get_current_arch (); | |
2564 | int varseen = 0; | |
2565 | struct value_print_options opts; | |
2566 | ||
2567 | get_user_print_options (&opts); | |
2568 | for (auto &pair : internalvars) | |
2569 | { | |
2570 | internalvar &var = pair.second; | |
2571 | ||
2572 | if (!varseen) | |
2573 | { | |
2574 | varseen = 1; | |
2575 | } | |
2576 | gdb_printf (("$%s = "), var.name.c_str ()); | |
2577 | ||
2578 | try | |
2579 | { | |
2580 | struct value *val; | |
2581 | ||
2582 | val = value_of_internalvar (gdbarch, &var); | |
2583 | value_print (val, gdb_stdout, &opts); | |
2584 | } | |
2585 | catch (const gdb_exception_error &ex) | |
2586 | { | |
2587 | fprintf_styled (gdb_stdout, metadata_style.style (), | |
2588 | _("<error: %s>"), ex.what ()); | |
2589 | } | |
2590 | ||
2591 | gdb_printf (("\n")); | |
2592 | } | |
2593 | if (!varseen) | |
2594 | { | |
2595 | /* This text does not mention convenience functions on purpose. | |
2596 | The user can't create them except via Python, and if Python support | |
2597 | is installed this message will never be printed ($_streq will | |
2598 | exist). */ | |
2599 | gdb_printf (_("No debugger convenience variables now defined.\n" | |
2600 | "Convenience variables have " | |
2601 | "names starting with \"$\";\n" | |
2602 | "use \"%ps\" as in \"%ps\" to define them.\n"), | |
2603 | styled_string (command_style.style (), "set"), | |
2604 | styled_string (command_style.style (), "set $foo = 5")); | |
2605 | } | |
2606 | } | |
2607 | \f | |
2608 | ||
2609 | /* See value.h. */ | |
2610 | ||
2611 | struct value * | |
2612 | value::from_xmethod (xmethod_worker_up &&worker) | |
2613 | { | |
2614 | struct value *v; | |
2615 | ||
2616 | v = value::allocate (builtin_type (current_inferior ()->arch ())->xmethod); | |
2617 | v->m_lval = lval_xcallable; | |
2618 | v->m_location.xm_worker = worker.release (); | |
2619 | v->m_modifiable = false; | |
2620 | ||
2621 | return v; | |
2622 | } | |
2623 | ||
2624 | /* See value.h. */ | |
2625 | ||
2626 | struct type * | |
2627 | value::result_type_of_xmethod (gdb::array_view<value *> argv) | |
2628 | { | |
2629 | gdb_assert (type ()->code () == TYPE_CODE_XMETHOD | |
2630 | && m_lval == lval_xcallable && !argv.empty ()); | |
2631 | ||
2632 | return m_location.xm_worker->get_result_type (argv[0], argv.slice (1)); | |
2633 | } | |
2634 | ||
2635 | /* See value.h. */ | |
2636 | ||
2637 | struct value * | |
2638 | value::call_xmethod (gdb::array_view<value *> argv) | |
2639 | { | |
2640 | gdb_assert (type ()->code () == TYPE_CODE_XMETHOD | |
2641 | && m_lval == lval_xcallable && !argv.empty ()); | |
2642 | ||
2643 | return m_location.xm_worker->invoke (argv[0], argv.slice (1)); | |
2644 | } | |
2645 | \f | |
2646 | /* Extract a value as a C number (either long or double). | |
2647 | Knows how to convert fixed values to double, or | |
2648 | floating values to long. | |
2649 | Does not deallocate the value. */ | |
2650 | ||
2651 | LONGEST | |
2652 | value_as_long (struct value *val) | |
2653 | { | |
2654 | /* This coerces arrays and functions, which is necessary (e.g. | |
2655 | in disassemble_command). It also dereferences references, which | |
2656 | I suspect is the most logical thing to do. */ | |
2657 | val = coerce_array (val); | |
2658 | return unpack_long (val->type (), val->contents ().data ()); | |
2659 | } | |
2660 | ||
2661 | /* See value.h. */ | |
2662 | ||
2663 | gdb_mpz | |
2664 | value_as_mpz (struct value *val) | |
2665 | { | |
2666 | val = coerce_array (val); | |
2667 | struct type *type = check_typedef (val->type ()); | |
2668 | ||
2669 | switch (type->code ()) | |
2670 | { | |
2671 | case TYPE_CODE_ENUM: | |
2672 | case TYPE_CODE_BOOL: | |
2673 | case TYPE_CODE_INT: | |
2674 | case TYPE_CODE_CHAR: | |
2675 | case TYPE_CODE_RANGE: | |
2676 | break; | |
2677 | ||
2678 | default: | |
2679 | return gdb_mpz (value_as_long (val)); | |
2680 | } | |
2681 | ||
2682 | gdb_mpz result; | |
2683 | ||
2684 | gdb::array_view<const gdb_byte> valbytes = val->contents (); | |
2685 | enum bfd_endian byte_order = type_byte_order (type); | |
2686 | ||
2687 | /* Handle integers that are either not a multiple of the word size, | |
2688 | or that are stored at some bit offset. */ | |
2689 | unsigned bit_off = 0, bit_size = 0; | |
2690 | if (type->bit_size_differs_p ()) | |
2691 | { | |
2692 | bit_size = type->bit_size (); | |
2693 | if (bit_size == 0) | |
2694 | { | |
2695 | /* A zero-bit value is always zero; return RESULT unchanged. */ | |
2696 | return result; | |
2697 | } | |
2698 | ||
2699 | bit_off = type->bit_offset (); | |
2700 | ||
2701 | unsigned n_bytes = ((bit_off % 8) + bit_size + 7) / 8; | |
2702 | valbytes = valbytes.slice (bit_off / 8, n_bytes); | |
2703 | ||
2704 | if (byte_order == BFD_ENDIAN_BIG) | |
2705 | bit_off = (n_bytes * 8 - bit_off % 8 - bit_size); | |
2706 | else | |
2707 | bit_off %= 8; | |
2708 | } | |
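  /* For instance (little-endian): a 12-bit integer stored at bit
     offset 4 gives n_bytes = ((4 % 8) + 12 + 7) / 8 = 2, VALBYTES is
     narrowed to those two bytes, and BIT_OFF stays 4, so the read
     below is shifted right by 4 and masked down to 12 bits.  */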
2709 | ||
2710 | result.read (valbytes, byte_order, type->is_unsigned ()); | |
2711 | ||
2712 | /* Shift off any low bits, if needed. */ | |
2713 | if (bit_off != 0) | |
2714 | result >>= bit_off; | |
2715 | ||
2716 | /* Mask off any high bits, if needed. */ | |
2717 | if (bit_size) | |
2718 | result.mask (bit_size); | |
2719 | ||
2720 | /* Now handle any range bias. */ | |
2721 | if (type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0) | |
2722 | { | |
2723 | /* Unfortunately we have to box here, because LONGEST is | |
2724 | probably wider than long. */ | |
2725 | result += gdb_mpz (type->bounds ()->bias); | |
2726 | } | |
2727 | ||
2728 | return result; | |
2729 | } | |
2730 | ||
2731 | /* Extract a value as a C pointer. */ | |
2732 | ||
2733 | CORE_ADDR | |
2734 | value_as_address (struct value *val) | |
2735 | { | |
2736 | struct gdbarch *gdbarch = val->type ()->arch (); | |
2737 | ||
2738 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure | |
2739 | whether we want this to be true eventually. */ | |
2740 | #if 0 | |
2741 | /* gdbarch_addr_bits_remove is wrong if we are being called for a | |
2742 | non-address (e.g. argument to "signal", "info break", etc.), or | |
2743 | for pointers to char, in which the low bits *are* significant. */ | |
2744 | return gdbarch_addr_bits_remove (gdbarch, value_as_long (val)); | |
2745 | #else | |
2746 | ||
2747 | /* There are several targets (IA-64, PowerPC, and others) which | |
2748 | don't represent pointers to functions as simply the address of | |
2749 | the function's entry point. For example, on the IA-64, a | |
2750 | function pointer points to a two-word descriptor, generated by | |
2751 | the linker, which contains the function's entry point, and the | |
2752 | value the IA-64 "global pointer" register should have --- to | |
2753 | support position-independent code. The linker generates | |
2754 | descriptors only for those functions whose addresses are taken. | |
2755 | ||
2756 | On such targets, it's difficult for GDB to convert an arbitrary | |
2757 | function address into a function pointer; it has to either find | |
2758 | an existing descriptor for that function, or call malloc and | |
2759 | build its own. On some targets, it is impossible for GDB to | |
2760 | build a descriptor at all: the descriptor must contain a jump | |
2761 | instruction; data memory cannot be executed; and code memory | |
2762 | cannot be modified. | |
2763 | ||
2764 | Upon entry to this function, if VAL is a value of type `function' | |
2765 | (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then | |
2766 | val->address () is the address of the function. This is what | |
2767 | you'll get if you evaluate an expression like `main'. The call | |
2768 | to COERCE_ARRAY below actually does all the usual unary | |
2769 | conversions, which includes converting values of type `function' | |
2770 | to `pointer to function'. This is the challenging conversion | |
2771 | discussed above. Then, `unpack_pointer' will convert that pointer | |
2772 | back into an address. | |
2773 | ||
2774 | So, suppose the user types `disassemble foo' on an architecture | |
2775 | with a strange function pointer representation, on which GDB | |
2776 | cannot build its own descriptors, and suppose further that `foo' | |
2777 | has no linker-built descriptor. The address->pointer conversion | |
2778 | will signal an error and prevent the command from running, even | |
2779 | though the next step would have been to convert the pointer | |
2780 | directly back into the same address. | |
2781 | ||
2782 | The following shortcut avoids this whole mess. If VAL is a | |
2783 | function, just return its address directly. */ | |
2784 | if (val->type ()->code () == TYPE_CODE_FUNC | |
2785 | || val->type ()->code () == TYPE_CODE_METHOD) | |
2786 | return val->address (); | |
2787 | ||
2788 | val = coerce_array (val); | |
2789 | ||
2790 | /* Some architectures (e.g. Harvard), map instruction and data | |
2791 | addresses onto a single large unified address space. For | |
2792 | instance: An architecture may consider a large integer in the | |
2793 | range 0x10000000 .. 0x1000ffff to already represent a data | |
2794 | address (hence needing no pointer-to-address conversion) while | |
2795 | a small integer would still need an integer-to-pointer-to-address | |
2796 | conversion. Just assume such architectures handle all | |
2797 | integer conversions in a single function. */ | |
2798 | ||
2799 | /* JimB writes: | |
2800 | ||
2801 | I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we | |
2802 | must admonish GDB hackers to make sure its behavior matches the | |
2803 | compiler's, whenever possible. | |
2804 | ||
2805 | In general, I think GDB should evaluate expressions the same way | |
2806 | the compiler does. When the user copies an expression out of | |
2807 | their source code and hands it to a `print' command, they should | |
2808 | get the same value the compiler would have computed. Any | |
2809 | deviation from this rule can cause major confusion and annoyance, | |
2810 | and needs to be justified carefully. In other words, GDB doesn't | |
2811 | really have the freedom to do these conversions in clever and | |
2812 | useful ways. | |
2813 | ||
2814 | AndrewC pointed out that users aren't complaining about how GDB | |
2815 | casts integers to pointers; they are complaining that they can't | |
2816 | take an address from a disassembly listing and give it to `x/i'. | |
2817 | This is certainly important. | |
2818 | ||
2819 | Adding an architecture method like integer_to_address() certainly | |
2820 | makes it possible for GDB to "get it right" in all circumstances | |
2821 | --- the target has complete control over how things get done, so | |
2822 | people can Do The Right Thing for their target without breaking | |
2823 | anyone else. The standard doesn't specify how integers get | |
2824 | converted to pointers; usually, the ABI doesn't either, but | |
2825 | ABI-specific code is a more reasonable place to handle it. */ | |
2826 | ||
2827 | if (!val->type ()->is_pointer_or_reference () | |
2828 | && gdbarch_integer_to_address_p (gdbarch)) | |
2829 | return gdbarch_integer_to_address (gdbarch, val->type (), | |
2830 | val->contents ().data ()); | |
2831 | ||
2832 | return unpack_pointer (val->type (), val->contents ().data ()); | |
2833 | #endif | |
2834 | } | |
2835 | \f | |
2836 | /* Unpack raw data (copied from debuggee, target byte order) at VALADDR | |
2837 | as a long, or as a double, assuming the raw data is described | |
2838 | by type TYPE. Knows how to convert different sizes of values | |
2839 | and can convert between fixed and floating point. We don't assume | |
2840 | any alignment for the raw data. Return value is in host byte order. | |
2841 | ||
2842 | If you want functions and arrays to be coerced to pointers, and | |
2843 | references to be dereferenced, call value_as_long() instead. | |
2844 | ||
2845 | C++: It is assumed that the front-end has taken care of | |
2846 | all matters concerning pointers to members. A pointer | |
2847 | to member which reaches here is considered to be equivalent | |
2848 | to an INT (or some size). After all, it is only an offset. */ | |
2849 | ||
2850 | LONGEST | |
2851 | unpack_long (struct type *type, const gdb_byte *valaddr) | |
2852 | { | |
2853 | if (is_fixed_point_type (type)) | |
2854 | type = type->fixed_point_type_base_type (); | |
2855 | ||
2856 | enum bfd_endian byte_order = type_byte_order (type); | |
2857 | enum type_code code = type->code (); | |
2858 | int len = type->length (); | |
2859 | int nosign = type->is_unsigned (); | |
2860 | ||
2861 | switch (code) | |
2862 | { | |
2863 | case TYPE_CODE_TYPEDEF: | |
2864 | return unpack_long (check_typedef (type), valaddr); | |
2865 | case TYPE_CODE_ENUM: | |
2866 | case TYPE_CODE_FLAGS: | |
2867 | case TYPE_CODE_BOOL: | |
2868 | case TYPE_CODE_INT: | |
2869 | case TYPE_CODE_CHAR: | |
2870 | case TYPE_CODE_RANGE: | |
2871 | case TYPE_CODE_MEMBERPTR: | |
2872 | { | |
2873 | LONGEST result; | |
2874 | ||
2875 | if (type->bit_size_differs_p ()) | |
2876 | { | |
2877 | unsigned bit_off = type->bit_offset (); | |
2878 | unsigned bit_size = type->bit_size (); | |
2879 | if (bit_size == 0) | |
2880 | { | |
2881 | /* unpack_bits_as_long doesn't handle this case the | |
2882 | way we'd like, so handle it here. */ | |
2883 | result = 0; | |
2884 | } | |
2885 | else | |
2886 | result = unpack_bits_as_long (type, valaddr, bit_off, bit_size); | |
2887 | } | |
2888 | else | |
2889 | { | |
2890 | if (nosign) | |
2891 | result = extract_unsigned_integer (valaddr, len, byte_order); | |
2892 | else | |
2893 | result = extract_signed_integer (valaddr, len, byte_order); | |
2894 | } | |
2895 | if (code == TYPE_CODE_RANGE) | |
2896 | result += type->bounds ()->bias; | |
2897 | return result; | |
2898 | } | |
2899 | ||
2900 | case TYPE_CODE_FLT: | |
2901 | case TYPE_CODE_DECFLOAT: | |
2902 | return target_float_to_longest (valaddr, type); | |
2903 | ||
2904 | case TYPE_CODE_FIXED_POINT: | |
2905 | { | |
2906 | gdb_mpq vq; | |
2907 | vq.read_fixed_point (gdb::make_array_view (valaddr, len), | |
2908 | byte_order, nosign, | |
2909 | type->fixed_point_scaling_factor ()); | |
2910 | ||
2911 | gdb_mpz vz = vq.as_integer (); | |
2912 | return vz.as_integer<LONGEST> (); | |
2913 | } | |
2914 | ||
2915 | case TYPE_CODE_PTR: | |
2916 | case TYPE_CODE_REF: | |
2917 | case TYPE_CODE_RVALUE_REF: | |
2918 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure | |
2919 | whether we want this to be true eventually. */ | |
2920 | return extract_typed_address (valaddr, type); | |
2921 | ||
2922 | default: | |
2923 | error (_("Value can't be converted to integer.")); | |
2924 | } | |
2925 | } | |
2926 | ||
2927 | /* Unpack raw data (copied from debuggee, target byte order) at VALADDR | |
2928 | as a CORE_ADDR, assuming the raw data is described by type TYPE. | |
2929 | We don't assume any alignment for the raw data. Return value is in | |
2930 | host byte order. | |
2931 | ||
2932 | If you want functions and arrays to be coerced to pointers, and | |
2933 | references to be dereferenced, call value_as_address() instead. | |
2934 | ||
2935 | C++: It is assumed that the front-end has taken care of | |
2936 | all matters concerning pointers to members. A pointer | |
2937 | to member which reaches here is considered to be equivalent | |
2938 | to an INT (or some size). After all, it is only an offset. */ | |
2939 | ||
2940 | CORE_ADDR | |
2941 | unpack_pointer (struct type *type, const gdb_byte *valaddr) | |
2942 | { | |
2943 | /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure | |
2944 | whether we want this to be true eventually. */ | |
2945 | return unpack_long (type, valaddr); | |
2946 | } | |
2947 | ||
2948 | bool | |
2949 | is_floating_value (struct value *val) | |
2950 | { | |
2951 | struct type *type = check_typedef (val->type ()); | |
2952 | ||
2953 | if (is_floating_type (type)) | |
2954 | { | |
2955 | if (!target_float_is_valid (val->contents ().data (), type)) | |
2956 | error (_("Invalid floating value found in program.")); | |
2957 | return true; | |
2958 | } | |
2959 | ||
2960 | return false; | |
2961 | } | |
2962 | ||
2963 | \f | |
2964 | /* Get the value of the FIELDNO'th field (which must be static) of | |
2965 | TYPE. */ | |
2966 | ||
2967 | struct value * | |
2968 | value_static_field (struct type *type, int fieldno) | |
2969 | { | |
2970 | struct value *retval; | |
2971 | ||
2972 | switch (type->field (fieldno).loc_kind ()) | |
2973 | { | |
2974 | case FIELD_LOC_KIND_PHYSADDR: | |
2975 | retval = value_at_lazy (type->field (fieldno).type (), | |
2976 | type->field (fieldno).loc_physaddr ()); | |
2977 | break; | |
2978 | case FIELD_LOC_KIND_PHYSNAME: | |
2979 | { | |
2980 | const char *phys_name = type->field (fieldno).loc_physname (); | |
2981 | /* type->field (fieldno).name (); */ | |
2982 | struct block_symbol sym = lookup_symbol (phys_name, nullptr, | |
2983 | SEARCH_VAR_DOMAIN, nullptr); | |
2984 | ||
2985 | if (sym.symbol == NULL) | |
2986 | { | |
2987 | /* With some compilers, e.g. HP aCC, static data members are | |
2988 | reported as non-debuggable symbols. */ | |
2989 | bound_minimal_symbol msym | |
2990 | = lookup_minimal_symbol (current_program_space, phys_name); | |
2991 | struct type *field_type = type->field (fieldno).type (); | |
2992 | ||
2993 | if (!msym.minsym) | |
2994 | retval = value::allocate_optimized_out (field_type); | |
2995 | else | |
2996 | retval = value_at_lazy (field_type, msym.value_address ()); | |
2997 | } | |
2998 | else | |
2999 | retval = value_of_variable (sym.symbol, sym.block); | |
3000 | break; | |
3001 | } | |
3002 | default: | |
3003 | gdb_assert_not_reached ("unexpected field location kind"); | |
3004 | } | |
3005 | ||
3006 | return retval; | |
3007 | } | |
3008 | ||
3009 | /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE. | |
3010 | You have to be careful here, since the size of the data area for the value | |
3011 | is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger | |
3012 | than the old enclosing type, you have to allocate more space for the | |
3013 | data. */ | |
3014 | ||
3015 | void | |
3016 | value::set_enclosing_type (struct type *new_encl_type) | |
3017 | { | |
3018 | if (new_encl_type->length () > enclosing_type ()->length ()) | |
3019 | { | |
3020 | check_type_length_before_alloc (new_encl_type); | |
3021 | m_contents.reset ((gdb_byte *) xrealloc (m_contents.release (), | |
3022 | new_encl_type->length ())); | |
3023 | } | |
3024 | ||
3025 | m_enclosing_type = new_encl_type; | |
3026 | } | |
3027 | ||
3028 | /* See value.h. */ | |
3029 | ||
3030 | struct value * | |
3031 | value::primitive_field (LONGEST offset, int fieldno, struct type *arg_type) | |
3032 | { | |
3033 | struct value *v; | |
3034 | struct type *type; | |
3035 | int unit_size = gdbarch_addressable_memory_unit_size (arch ()); | |
3036 | ||
3037 | arg_type = check_typedef (arg_type); | |
3038 | type = arg_type->field (fieldno).type (); | |
3039 | ||
3040 | /* Call check_typedef on our type to make sure that, if TYPE | |
3041 | is a TYPE_CODE_TYPEDEF, its length is set to the length | |
3042 | of the target type instead of zero. However, we do not | |
3043 | replace the typedef type by the target type, because we want | |
3044 | to keep the typedef in order to be able to print the type | |
3045 | description correctly. */ | |
3046 | check_typedef (type); | |
3047 | ||
3048 | if (arg_type->field (fieldno).bitsize ()) | |
3049 | { | |
3050 | /* Handle packed fields. | |
3051 | ||
3052 | Create a new value for the bitfield, with bitpos and bitsize | |
3053 | set. If possible, arrange offset and bitpos so that we can | |
3054 | do a single aligned read of the size of the containing type. | |
3055 | Otherwise, adjust offset to the byte containing the first | |
3056 | bit. Assume that the address, offset, and embedded offset | |
3057 | are sufficiently aligned. */ | |
3058 | ||
3059 | LONGEST bitpos = arg_type->field (fieldno).loc_bitpos (); | |
3060 | LONGEST container_bitsize = type->length () * 8; | |
3061 | ||
3062 | v = value::allocate_lazy (type); | |
3063 | v->set_bitsize (arg_type->field (fieldno).bitsize ()); | |
3064 | if ((bitpos % container_bitsize) + v->bitsize () <= container_bitsize | |
3065 | && type->length () <= (int) sizeof (LONGEST)) | |
3066 | v->set_bitpos (bitpos % container_bitsize); | |
3067 | else | |
3068 | v->set_bitpos (bitpos % 8); | |
3069 | v->set_offset ((embedded_offset () | |
3070 | + offset | |
3071 | + (bitpos - v->bitpos ()) / 8)); | |
3072 | v->set_parent (this); | |
3073 | if (!lazy ()) | |
3074 | v->fetch_lazy (); | |
3075 | } | |
3076 | else if (fieldno < TYPE_N_BASECLASSES (arg_type)) | |
3077 | { | |
3078 | /* This field is actually a base subobject, so preserve the | |
3079 | entire object's contents for later references to virtual | |
3080 | bases, etc. */ | |
3081 | LONGEST boffset; | |
3082 | ||
3083 | /* Lazy register values with offsets are not supported. */ | |
3084 | if (this->lval () == lval_register && lazy ()) | |
3085 | fetch_lazy (); | |
3086 | ||
3087 | /* We special case virtual inheritance here because this | |
3088 | requires access to the contents, which we would rather avoid | |
3089 | for references to ordinary fields of unavailable values. */ | |
3090 | if (BASETYPE_VIA_VIRTUAL (arg_type, fieldno)) | |
3091 | boffset = baseclass_offset (arg_type, fieldno, | |
3092 | contents ().data (), | |
3093 | embedded_offset (), | |
3094 | address (), | |
3095 | this); | |
3096 | else | |
3097 | boffset = arg_type->field (fieldno).loc_bitpos () / 8; | |
3098 | ||
3099 | if (lazy ()) | |
3100 | v = value::allocate_lazy (enclosing_type ()); | |
3101 | else | |
3102 | { | |
3103 | v = value::allocate (enclosing_type ()); | |
3104 | contents_copy_raw (v, 0, 0, enclosing_type ()->length ()); | |
3105 | } | |
3106 | v->deprecated_set_type (type); | |
3107 | v->set_offset (this->offset ()); | |
3108 | v->set_embedded_offset (offset + embedded_offset () + boffset); | |
3109 | } | |
3110 | else if (NULL != TYPE_DATA_LOCATION (type)) | |
3111 | { | |
3112 | /* Field is a dynamic data member. */ | |
3113 | ||
3114 | gdb_assert (0 == offset); | |
3115 | /* We expect an already resolved data location. */ | |
3116 | gdb_assert (TYPE_DATA_LOCATION (type)->is_constant ()); | |
3117 | /* For dynamic data types, defer memory allocation | |
3118 | until we actually access the value. */ | |
3119 | v = value::allocate_lazy (type); | |
3120 | } | |
3121 | else | |
3122 | { | |
3123 | /* Plain old data member */ | |
3124 | offset += (arg_type->field (fieldno).loc_bitpos () | |
3125 | / (HOST_CHAR_BIT * unit_size)); | |
3126 | ||
3127 | /* Lazy register values with offsets are not supported. */ | |
3128 | if (this->lval () == lval_register && lazy ()) | |
3129 | fetch_lazy (); | |
3130 | ||
3131 | if (lazy ()) | |
3132 | v = value::allocate_lazy (type); | |
3133 | else | |
3134 | { | |
3135 | v = value::allocate (type); | |
3136 | contents_copy_raw (v, v->embedded_offset (), | |
3137 | embedded_offset () + offset, | |
3138 | type_length_units (type)); | |
3139 | } | |
3140 | v->set_offset (this->offset () + offset + embedded_offset ()); | |
3141 | } | |
3142 | v->set_component_location (this); | |
3143 | return v; | |
3144 | } | |
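/* For example, a 3-bit bitfield whose loc_bitpos is 37 within a 4-byte
   container satisfies (37 % 32) + 3 <= 32, so primitive_field above
   records bitpos 37 % 32 = 5 and advances the byte offset by
   (37 - 5) / 8 = 4, permitting a single aligned read of the whole
   containing type.  */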
3145 | ||
3146 | /* Given a value ARG1 of a struct or union type, | |
3147 | extract and return the value of one of its (non-static) fields. | |
3148 | FIELDNO says which field. */ | |
3149 | ||
3150 | struct value * | |
3151 | value_field (struct value *arg1, int fieldno) | |
3152 | { | |
3153 | return arg1->primitive_field (0, fieldno, arg1->type ()); | |
3154 | } | |
3155 | ||
3156 | /* Return a non-virtual function as a value. | |
3157 | F is the list of member functions which contains the desired method. | |
3158 | J is an index into F which provides the desired method. | |
3159 | ||
3160 | We only use the symbol for its address, so be happy with either a | |
3161 | full symbol or a minimal symbol. */ | |
3162 | ||
3163 | struct value * | |
3164 | value_fn_field (struct value **arg1p, struct fn_field *f, | |
3165 | int j, struct type *type, | |
3166 | LONGEST offset) | |
3167 | { | |
3168 | struct value *v; | |
3169 | struct type *ftype = TYPE_FN_FIELD_TYPE (f, j); | |
3170 | const char *physname = TYPE_FN_FIELD_PHYSNAME (f, j); | |
3171 | struct symbol *sym; | |
3172 | bound_minimal_symbol msym; | |
3173 | ||
3174 | sym = lookup_symbol (physname, nullptr, SEARCH_FUNCTION_DOMAIN, | |
3175 | nullptr).symbol; | |
3176 | if (sym == nullptr) | |
3177 | { | |
3178 | msym = lookup_minimal_symbol (current_program_space, physname); | |
3179 | if (msym.minsym == NULL) | |
3180 | return NULL; | |
3181 | } | |
3182 | ||
3183 | v = value::allocate (ftype); | |
3184 | v->set_lval (lval_memory); | |
3185 | if (sym) | |
3186 | { | |
3187 | v->set_address (sym->value_block ()->entry_pc ()); | |
3188 | } | |
3189 | else | |
3190 | { | |
3191 | /* The minimal symbol might point to a function descriptor; | |
3192 | resolve it to the actual code address instead. */ | |
3193 | struct objfile *objfile = msym.objfile; | |
3194 | struct gdbarch *gdbarch = objfile->arch (); | |
3195 | ||
3196 | v->set_address (gdbarch_convert_from_func_ptr_addr | |
3197 | (gdbarch, msym.value_address (), | |
3198 | current_inferior ()->top_target ())); | |
3199 | } | |
3200 | ||
3201 | if (arg1p) | |
3202 | { | |
3203 | if (type != (*arg1p)->type ()) | |
3204 | *arg1p = value_ind (value_cast (lookup_pointer_type (type), | |
3205 | value_addr (*arg1p))); | |
3206 | ||
3207 | /* Move the `this' pointer according to the offset. | |
3208 | (*arg1p)->offset () += offset; */ | |
3209 | } | |
3210 | ||
3211 | return v; | |
3212 | } | |
3213 | ||
3214 | \f | |
3215 | ||
3216 | /* See value.h. */ | |
3217 | ||
3218 | LONGEST | |
3219 | unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr, | |
3220 | LONGEST bitpos, LONGEST bitsize) | |
3221 | { | |
3222 | enum bfd_endian byte_order = type_byte_order (field_type); | |
3223 | ULONGEST val; | |
3224 | ULONGEST valmask; | |
3225 | int lsbcount; | |
3226 | LONGEST bytes_read; | |
3227 | LONGEST read_offset; | |
3228 | ||
3229 | /* Read the minimum number of bytes required; there may not be | |
3230 | enough bytes to read an entire ULONGEST. */ | |
3231 | field_type = check_typedef (field_type); | |
3232 | if (bitsize) | |
3233 | bytes_read = ((bitpos % 8) + bitsize + 7) / 8; | |
3234 | else | |
3235 | { | |
3236 | bytes_read = field_type->length (); | |
3237 | bitsize = 8 * bytes_read; | |
3238 | } | |
3239 | ||
3240 | read_offset = bitpos / 8; | |
3241 | ||
3242 | val = extract_unsigned_integer (valaddr + read_offset, | |
3243 | bytes_read, byte_order); | |
3244 | ||
3245 | /* Extract bits. See comment above. */ | |
3246 | ||
3247 | if (byte_order == BFD_ENDIAN_BIG) | |
3248 | lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize); | |
3249 | else | |
3250 | lsbcount = (bitpos % 8); | |
3251 | val >>= lsbcount; | |
3252 | ||
3253 | /* If the field does not entirely fill a LONGEST, then zero the sign bits. | |
3254 | If the field is signed, and is negative, then sign extend. */ | |
3255 | ||
3256 | if (bitsize < 8 * (int) sizeof (val)) | |
3257 | { | |
3258 | valmask = (((ULONGEST) 1) << bitsize) - 1; | |
3259 | val &= valmask; | |
3260 | if (!field_type->is_unsigned ()) | |
3261 | { | |
3262 | if (val & (valmask ^ (valmask >> 1))) | |
3263 | { | |
3264 | val |= ~valmask; | |
3265 | } | |
3266 | } | |
3267 | } | |
3268 | ||
3269 | if (field_type->code () == TYPE_CODE_RANGE) | |
3270 | val += field_type->bounds ()->bias; | |
3271 | ||
3272 | return val; | |
3273 | } | |
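| /* Worked example (illustrative only): for a little-endian `int' field | |
| with BITPOS == 11 and BITSIZE == 5, the steps above compute | |
| ||
| bytes_read = ((11 % 8) + 5 + 7) / 8 = 1 | |
| read_offset = 11 / 8 = 1 (read the single byte VALADDR[1]) | |
| lsbcount = 11 % 8 = 3 (shift the field down three bits) | |
| valmask = (1 << 5) - 1 = 0x1f | |
| ||
| and, for a signed field, the sign bit tested before extension is | |
| valmask ^ (valmask >> 1) == 0x10. */ | |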
3274 | ||
3275 | /* Unpack a field FIELDNO of the specified TYPE, from the object at | |
3276 | VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of | |
3277 | VAL, which must not be NULL. See | |
3278 | unpack_bits_as_long for more details. */ | |
3279 | ||
3280 | int | |
3281 | unpack_value_field_as_long (struct type *type, const gdb_byte *valaddr, | |
3282 | LONGEST embedded_offset, int fieldno, | |
3283 | const struct value *val, LONGEST *result) | |
3284 | { | |
3285 | int bitpos = type->field (fieldno).loc_bitpos (); | |
3286 | int bitsize = type->field (fieldno).bitsize (); | |
3287 | struct type *field_type = type->field (fieldno).type (); | |
3288 | int bit_offset; | |
3289 | ||
3290 | gdb_assert (val != NULL); | |
3291 | ||
3292 | bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos; | |
3293 | if (val->bits_any_optimized_out (bit_offset, bitsize) | |
3294 | || !val->bits_available (bit_offset, bitsize)) | |
3295 | return 0; | |
3296 | ||
3297 | *result = unpack_bits_as_long (field_type, valaddr + embedded_offset, | |
3298 | bitpos, bitsize); | |
3299 | return 1; | |
3300 | } | |
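| /* Illustrative sketch, not part of GDB: print field FIELDNO of VAL, | |
| silently skipping it when the underlying bits are unavailable or | |
| optimized out, i.e. when unpack_value_field_as_long returns 0. */ | |
| ||
| static void | |
| example_print_field (struct type *type, const gdb_byte *valaddr, | |
| int fieldno, const struct value *val) | |
| { | |
| LONGEST field_value; | |
| ||
| if (unpack_value_field_as_long (type, valaddr, 0, fieldno, val, | |
| &field_value)) | |
| gdb_printf ("%s\n", plongest (field_value)); | |
| } | |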
3301 | ||
3302 | /* See value.h. */ | |
3303 | ||
3304 | LONGEST | |
3305 | unpack_field_as_long (const gdb_byte *valaddr, struct field *field) | |
3306 | { | |
3307 | int bitpos = field->loc_bitpos (); | |
3308 | int bitsize = field->bitsize (); | |
3309 | struct type *field_type = field->type (); | |
3310 | ||
3311 | return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize); | |
3312 | } | |
3313 | ||
3314 | /* See value.h. */ | |
3315 | ||
3316 | LONGEST | |
3317 | unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno) | |
3318 | { | |
3319 | return unpack_field_as_long (valaddr, &type->field (fieldno)); | |
3320 | } | |
3321 | ||
3322 | /* See value.h. */ | |
3323 | ||
3324 | void | |
3325 | value::unpack_bitfield (struct value *dest_val, | |
3326 | LONGEST bitpos, LONGEST bitsize, | |
3327 | const gdb_byte *valaddr, LONGEST embedded_offset) | |
3328 | const | |
3329 | { | |
3330 | enum bfd_endian byte_order; | |
3331 | int src_bit_offset; | |
3332 | int dst_bit_offset; | |
3333 | struct type *field_type = dest_val->type (); | |
3334 | ||
3335 | byte_order = type_byte_order (field_type); | |
3336 | ||
3337 | /* First, unpack and sign extend the bitfield as if it were wholly | |
3338 | valid. Optimized out/unavailable bits are read as zero, but | |
3339 | that's OK, as they'll end up marked below. If the VAL is | |
3340 | wholly-invalid we may have skipped allocating its contents, | |
3341 | though. See value::allocate_optimized_out. */ | |
3342 | if (valaddr != NULL) | |
3343 | { | |
3344 | LONGEST num; | |
3345 | ||
3346 | num = unpack_bits_as_long (field_type, valaddr + embedded_offset, | |
3347 | bitpos, bitsize); | |
3348 | store_signed_integer (dest_val->contents_raw ().data (), | |
3349 | field_type->length (), byte_order, num); | |
3350 | } | |
3351 | ||
3352 | /* Now copy the optimized out / unavailability ranges to the right | |
3353 | bits. */ | |
3354 | src_bit_offset = embedded_offset * TARGET_CHAR_BIT + bitpos; | |
3355 | if (byte_order == BFD_ENDIAN_BIG) | |
3356 | dst_bit_offset = field_type->length () * TARGET_CHAR_BIT - bitsize; | |
3357 | else | |
3358 | dst_bit_offset = 0; | |
3359 | ranges_copy_adjusted (dest_val, dst_bit_offset, src_bit_offset, bitsize); | |
3360 | } | |
3361 | ||
3362 | /* Return a new value with type TYPE, which is the FIELDNO'th field of | |
3363 | the object at VALADDR + EMBEDDED_OFFSET. VALADDR points to the | |
3364 | contents of VAL. If the parts of VAL's contents needed to extract | |
3365 | the bitfield are unavailable or optimized out, the new value is | |
3366 | correspondingly marked unavailable/optimized out. */ | |
3367 | ||
3368 | struct value * | |
3369 | value_field_bitfield (struct type *type, int fieldno, | |
3370 | const gdb_byte *valaddr, | |
3371 | LONGEST embedded_offset, const struct value *val) | |
3372 | { | |
3373 | int bitpos = type->field (fieldno).loc_bitpos (); | |
3374 | int bitsize = type->field (fieldno).bitsize (); | |
3375 | struct value *res_val = value::allocate (type->field (fieldno).type ()); | |
3376 | ||
3377 | val->unpack_bitfield (res_val, bitpos, bitsize, valaddr, embedded_offset); | |
3378 | ||
3379 | return res_val; | |
3380 | } | |
3381 | ||
3382 | /* Modify the value of a bitfield. ADDR points to a block of memory in | |
3383 | target byte order; the bitfield starts in the byte pointed to. FIELDVAL | |
3384 | is the desired value of the field, in host byte order. BITPOS and BITSIZE | |
3385 | indicate which bits (in target bit order) comprise the bitfield. | |
3386 | Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and | |
3387 | 0 <= BITPOS, where lbits is the size of a LONGEST in bits. */ | |
3388 | ||
3389 | void | |
3390 | modify_field (struct type *type, gdb_byte *addr, | |
3391 | LONGEST fieldval, LONGEST bitpos, LONGEST bitsize) | |
3392 | { | |
3393 | enum bfd_endian byte_order = type_byte_order (type); | |
3394 | ULONGEST oword; | |
3395 | ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize); | |
3396 | LONGEST bytesize; | |
3397 | ||
3398 | /* Normalize BITPOS. */ | |
3399 | addr += bitpos / 8; | |
3400 | bitpos %= 8; | |
3401 | ||
3402 | /* If a negative fieldval fits in the field in question, chop | |
3403 | off the sign extension bits. */ | |
3404 | if ((~fieldval & ~(mask >> 1)) == 0) | |
3405 | fieldval &= mask; | |
3406 | ||
3407 | /* Warn if value is too big to fit in the field in question. */ | |
3408 | if (0 != (fieldval & ~mask)) | |
3409 | { | |
3410 | /* FIXME: would like to include fieldval in the message, but | |
3411 | we don't have a sprintf_longest. */ | |
3412 | warning (_("Value does not fit in %s bits."), plongest (bitsize)); | |
3413 | ||
3414 | /* Truncate it, otherwise adjoining fields may be corrupted. */ | |
3415 | fieldval &= mask; | |
3416 | } | |
3417 | ||
3418 | /* Ensure no bytes outside of the modified ones get accessed, as that | |
3419 | may cause false valgrind reports. */ | |
3420 | ||
3421 | bytesize = (bitpos + bitsize + 7) / 8; | |
3422 | oword = extract_unsigned_integer (addr, bytesize, byte_order); | |
3423 | ||
3424 | /* Shifting for bit field depends on endianness of the target machine. */ | |
3425 | if (byte_order == BFD_ENDIAN_BIG) | |
3426 | bitpos = bytesize * 8 - bitpos - bitsize; | |
3427 | ||
3428 | oword &= ~(mask << bitpos); | |
3429 | oword |= fieldval << bitpos; | |
3430 | ||
3431 | store_unsigned_integer (addr, bytesize, byte_order, oword); | |
3432 | } | |
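| /* Illustrative sketch (BUF and INT_TYPE are hypothetical): store the | |
| value 9 into a 5-bit field starting at bit 11 of BUF, assuming | |
| INT_TYPE is a little-endian integer type. Internally, modify_field | |
| advances BUF by one byte, reduces BITPOS to 3, computes MASK == 0x1f, | |
| and reads and rewrites only the single byte containing the field. */ | |
| ||
| static void | |
| example_modify_field (struct type *int_type, gdb_byte *buf) | |
| { | |
| modify_field (int_type, buf, 9, 11, 5); | |
| } | |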
3433 | \f | |
3434 | /* Pack NUM into BUF using a target format of TYPE. */ | |
3435 | ||
3436 | void | |
3437 | pack_long (gdb_byte *buf, struct type *type, LONGEST num) | |
3438 | { | |
3439 | enum bfd_endian byte_order = type_byte_order (type); | |
3440 | LONGEST len; | |
3441 | ||
3442 | type = check_typedef (type); | |
3443 | len = type->length (); | |
3444 | ||
3445 | switch (type->code ()) | |
3446 | { | |
3447 | case TYPE_CODE_RANGE: | |
3448 | num -= type->bounds ()->bias; | |
3449 | [[fallthrough]]; | |
3450 | case TYPE_CODE_INT: | |
3451 | case TYPE_CODE_CHAR: | |
3452 | case TYPE_CODE_ENUM: | |
3453 | case TYPE_CODE_FLAGS: | |
3454 | case TYPE_CODE_BOOL: | |
3455 | case TYPE_CODE_MEMBERPTR: | |
3456 | if (type->bit_size_differs_p ()) | |
3457 | { | |
3458 | unsigned bit_off = type->bit_offset (); | |
3459 | unsigned bit_size = type->bit_size (); | |
3460 | num &= ((ULONGEST) 1 << bit_size) - 1; | |
3461 | num <<= bit_off; | |
3462 | } | |
3463 | store_signed_integer (buf, len, byte_order, num); | |
3464 | break; | |
3465 | ||
3466 | case TYPE_CODE_REF: | |
3467 | case TYPE_CODE_RVALUE_REF: | |
3468 | case TYPE_CODE_PTR: | |
3469 | store_typed_address (buf, type, (CORE_ADDR) num); | |
3470 | break; | |
3471 | ||
3472 | case TYPE_CODE_FLT: | |
3473 | case TYPE_CODE_DECFLOAT: | |
3474 | target_float_from_longest (buf, type, num); | |
3475 | break; | |
3476 | ||
3477 | default: | |
3478 | error (_("Unexpected type (%d) encountered for integer constant."), | |
3479 | type->code ()); | |
3480 | } | |
3481 | } | |
3482 | ||
3483 | ||
3484 | /* Pack unsigned NUM into BUF using a target format of TYPE. */ | |
3485 | ||
3486 | static void | |
3487 | pack_unsigned_long (gdb_byte *buf, struct type *type, ULONGEST num) | |
3488 | { | |
3489 | LONGEST len; | |
3490 | enum bfd_endian byte_order; | |
3491 | ||
3492 | type = check_typedef (type); | |
3493 | len = type->length (); | |
3494 | byte_order = type_byte_order (type); | |
3495 | ||
3496 | switch (type->code ()) | |
3497 | { | |
3498 | case TYPE_CODE_INT: | |
3499 | case TYPE_CODE_CHAR: | |
3500 | case TYPE_CODE_ENUM: | |
3501 | case TYPE_CODE_FLAGS: | |
3502 | case TYPE_CODE_BOOL: | |
3503 | case TYPE_CODE_RANGE: | |
3504 | case TYPE_CODE_MEMBERPTR: | |
3505 | if (type->bit_size_differs_p ()) | |
3506 | { | |
3507 | unsigned bit_off = type->bit_offset (); | |
3508 | unsigned bit_size = type->bit_size (); | |
3509 | num &= ((ULONGEST) 1 << bit_size) - 1; | |
3510 | num <<= bit_off; | |
3511 | } | |
3512 | store_unsigned_integer (buf, len, byte_order, num); | |
3513 | break; | |
3514 | ||
3515 | case TYPE_CODE_REF: | |
3516 | case TYPE_CODE_RVALUE_REF: | |
3517 | case TYPE_CODE_PTR: | |
3518 | store_typed_address (buf, type, (CORE_ADDR) num); | |
3519 | break; | |
3520 | ||
3521 | case TYPE_CODE_FLT: | |
3522 | case TYPE_CODE_DECFLOAT: | |
3523 | target_float_from_ulongest (buf, type, num); | |
3524 | break; | |
3525 | ||
3526 | default: | |
3527 | error (_("Unexpected type (%d) encountered " | |
3528 | "for unsigned integer constant."), | |
3529 | type->code ()); | |
3530 | } | |
3531 | } | |
3532 | ||
3533 | /* See value.h. */ | |
3534 | ||
3535 | struct value * | |
3536 | value::zero (struct type *type, enum lval_type lv) | |
3537 | { | |
3538 | struct value *val = value::allocate_lazy (type); | |
3539 | ||
3540 | val->set_lval (lv == lval_computed ? not_lval : lv); | |
3541 | val->m_is_zero = true; | |
3542 | return val; | |
3543 | } | |
3544 | ||
3545 | /* Convert C numbers into newly allocated values. */ | |
3546 | ||
3547 | struct value * | |
3548 | value_from_longest (struct type *type, LONGEST num) | |
3549 | { | |
3550 | struct value *val = value::allocate (type); | |
3551 | ||
3552 | pack_long (val->contents_raw ().data (), type, num); | |
3553 | return val; | |
3554 | } | |
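| /* A short usage sketch: build the target-format representation of the | |
| integer constant 42. pack_long stores the bytes in the target's byte | |
| order, so the result can be written to the inferior or compared | |
| against memory contents directly. */ | |
| ||
| static struct value * | |
| example_int_constant (struct gdbarch *gdbarch) | |
| { | |
| return value_from_longest (builtin_type (gdbarch)->builtin_int, 42); | |
| } | |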
3555 | ||
3556 | ||
3557 | /* Convert C unsigned numbers into newly allocated values. */ | |
3558 | ||
3559 | struct value * | |
3560 | value_from_ulongest (struct type *type, ULONGEST num) | |
3561 | { | |
3562 | struct value *val = value::allocate (type); | |
3563 | ||
3564 | pack_unsigned_long (val->contents_raw ().data (), type, num); | |
3565 | ||
3566 | return val; | |
3567 | } | |
3568 | ||
3569 | /* See value.h. */ | |
3570 | ||
3571 | struct value * | |
3572 | value_from_mpz (struct type *type, const gdb_mpz &v) | |
3573 | { | |
3574 | struct type *real_type = check_typedef (type); | |
3575 | ||
3576 | const gdb_mpz *val = &v; | |
3577 | gdb_mpz storage; | |
3578 | if (real_type->code () == TYPE_CODE_RANGE && type->bounds ()->bias != 0) | |
3579 | { | |
3580 | storage = *val; | |
3581 | val = &storage; | |
3582 | storage -= type->bounds ()->bias; | |
3583 | } | |
3584 | ||
3585 | if (type->bit_size_differs_p ()) | |
3586 | { | |
3587 | unsigned bit_off = type->bit_offset (); | |
3588 | unsigned bit_size = type->bit_size (); | |
3589 | ||
3590 | if (val != &storage) | |
3591 | { | |
3592 | storage = *val; | |
3593 | val = &storage; | |
3594 | } | |
3595 | ||
3596 | storage.mask (bit_size); | |
3597 | storage <<= bit_off; | |
3598 | } | |
3599 | ||
3600 | struct value *result = value::allocate (type); | |
3601 | val->truncate (result->contents_raw (), type_byte_order (type), | |
3602 | type->is_unsigned ()); | |
3603 | return result; | |
3604 | } | |
3605 | ||
3606 | /* Create a value representing a pointer of type TYPE to the address | |
3607 | ADDR. */ | |
3608 | ||
3609 | struct value * | |
3610 | value_from_pointer (struct type *type, CORE_ADDR addr) | |
3611 | { | |
3612 | struct value *val = value::allocate (type); | |
3613 | ||
3614 | store_typed_address (val->contents_raw ().data (), | |
3615 | check_typedef (type), addr); | |
3616 | return val; | |
3617 | } | |
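| /* Illustrative sketch: wrap the inferior address ADDR as a value of | |
| type `char *'. store_typed_address takes care of any | |
| architecture-specific pointer representation. */ | |
| ||
| static struct value * | |
| example_char_pointer (struct gdbarch *gdbarch, CORE_ADDR addr) | |
| { | |
| struct type *char_ptr | |
| = lookup_pointer_type (builtin_type (gdbarch)->builtin_char); | |
| ||
| return value_from_pointer (char_ptr, addr); | |
| } | |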
3618 | ||
3619 | /* Create and return a value object of TYPE containing the value D. The | |
3620 | TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once | |
3621 | it is converted to target format. */ | |
3622 | ||
3623 | struct value * | |
3624 | value_from_host_double (struct type *type, double d) | |
3625 | { | |
3626 | struct value *value = value::allocate (type); | |
3627 | gdb_assert (type->code () == TYPE_CODE_FLT); | |
3628 | target_float_from_host_double (value->contents_raw ().data (), | |
3629 | value->type (), d); | |
3630 | return value; | |
3631 | } | |
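| /* Usage sketch: the host double 3.5 is converted into the target | |
| format of the builtin `double' type. */ | |
| ||
| static struct value * | |
| example_host_double (struct gdbarch *gdbarch) | |
| { | |
| return value_from_host_double (builtin_type (gdbarch)->builtin_double, | |
| 3.5); | |
| } | |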
3632 | ||
3633 | /* Create a value of type TYPE whose contents come from VALADDR, if it | |
3634 | is non-null, and whose memory address (in the inferior) is | |
3635 | ADDRESS. The type of the created value may differ from the passed | |
3636 | type TYPE. Make sure to retrieve the value's new type after this call. | |
3637 | Note that TYPE is not passed through resolve_dynamic_type; this is | |
3638 | a special API intended for use only by Ada. */ | |
3639 | ||
3640 | struct value * | |
3641 | value_from_contents_and_address_unresolved (struct type *type, | |
3642 | const gdb_byte *valaddr, | |
3643 | CORE_ADDR address) | |
3644 | { | |
3645 | struct value *v; | |
3646 | ||
3647 | if (valaddr == NULL) | |
3648 | v = value::allocate_lazy (type); | |
3649 | else | |
3650 | v = value_from_contents (type, valaddr); | |
3651 | v->set_lval (lval_memory); | |
3652 | v->set_address (address); | |
3653 | return v; | |
3654 | } | |
3655 | ||
3656 | /* Create a value of type TYPE whose contents come from VALADDR, if it | |
3657 | is non-null, and whose memory address (in the inferior) is | |
3658 | ADDRESS. The type of the created value may differ from the passed | |
3659 | type TYPE. Make sure to retrieve the value's new type after this call. */ | |
3660 | ||
3661 | struct value * | |
3662 | value_from_contents_and_address (struct type *type, | |
3663 | const gdb_byte *valaddr, | |
3664 | CORE_ADDR address, | |
3665 | const frame_info_ptr &frame) | |
3666 | { | |
3667 | gdb::array_view<const gdb_byte> view; | |
3668 | if (valaddr != nullptr) | |
3669 | view = gdb::make_array_view (valaddr, type->length ()); | |
3670 | struct type *resolved_type = resolve_dynamic_type (type, view, address, | |
3671 | &frame); | |
3672 | struct type *resolved_type_no_typedef = check_typedef (resolved_type); | |
3673 | ||
3674 | struct value *v; | |
3675 | if (resolved_type_no_typedef->code () == TYPE_CODE_ARRAY | |
3676 | && resolved_type_no_typedef->bound_optimized_out ()) | |
3677 | { | |
3678 | /* Resolution found that the bounds are optimized out. In this | |
3679 | case, mark the array itself as optimized-out. */ | |
3680 | v = value::allocate_optimized_out (resolved_type); | |
3681 | } | |
3682 | else if (valaddr == nullptr) | |
3683 | v = value::allocate_lazy (resolved_type); | |
3684 | else | |
3685 | v = value_from_contents (resolved_type, valaddr); | |
3686 | if (TYPE_DATA_LOCATION (resolved_type_no_typedef) != NULL | |
3687 | && TYPE_DATA_LOCATION (resolved_type_no_typedef)->is_constant ()) | |
3688 | address = TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef); | |
3689 | v->set_lval (lval_memory); | |
3690 | v->set_address (address); | |
3691 | return v; | |
3692 | } | |
3693 | ||
3694 | /* Create a value of type TYPE holding the contents CONTENTS. | |
3695 | The new value is `not_lval'. */ | |
3696 | ||
3697 | struct value * | |
3698 | value_from_contents (struct type *type, const gdb_byte *contents) | |
3699 | { | |
3700 | struct value *result; | |
3701 | ||
3702 | result = value::allocate (type); | |
3703 | memcpy (result->contents_raw ().data (), contents, type->length ()); | |
3704 | return result; | |
3705 | } | |
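| /* Usage sketch: turn a host buffer holding a target-format `int' into | |
| a `not_lval' value. CONTENTS must supply at least the type's length | |
| in bytes. */ | |
| ||
| static struct value * | |
| example_from_bytes (struct gdbarch *gdbarch, const gdb_byte *contents) | |
| { | |
| return value_from_contents (builtin_type (gdbarch)->builtin_int, | |
| contents); | |
| } | |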
3706 | ||
3707 | /* Extract a value from the value history. Input will be of the form | |
3708 | $digits or $$digits. See block comment above 'write_dollar_variable' | |
3709 | for details. */ | |
3710 | ||
3711 | struct value * | |
3712 | value_from_history_ref (const char *h, const char **endp) | |
3713 | { | |
3714 | int index, len; | |
3715 | ||
3716 | if (h[0] == '$') | |
3717 | len = 1; | |
3718 | else | |
3719 | return NULL; | |
3720 | ||
3721 | if (h[1] == '$') | |
3722 | len = 2; | |
3723 | ||
3724 | /* Find length of numeral string. */ | |
3725 | for (; isdigit (h[len]); len++) | |
3726 | ; | |
3727 | ||
3728 | /* Make sure numeral string is not part of an identifier. */ | |
3729 | if (h[len] == '_' || isalpha (h[len])) | |
3730 | return NULL; | |
3731 | ||
3732 | /* Now collect the index value. */ | |
3733 | if (h[1] == '$') | |
3734 | { | |
3735 | if (len == 2) | |
3736 | { | |
3737 | /* For some bizarre reason, "$$" is equivalent to "$$1", | |
3738 | rather than to "$$0" as it ought to be! */ | |
3739 | index = -1; | |
3740 | *endp += len; | |
3741 | } | |
3742 | else | |
3743 | { | |
3744 | char *local_end; | |
3745 | ||
3746 | index = -strtol (&h[2], &local_end, 10); | |
3747 | *endp = local_end; | |
3748 | } | |
3749 | } | |
3750 | else | |
3751 | { | |
3752 | if (len == 1) | |
3753 | { | |
3754 | /* "$" is equivalent to "$0". */ | |
3755 | index = 0; | |
3756 | *endp += len; | |
3757 | } | |
3758 | else | |
3759 | { | |
3760 | char *local_end; | |
3761 | ||
3762 | index = strtol (&h[1], &local_end, 10); | |
3763 | *endp = local_end; | |
3764 | } | |
3765 | } | |
3766 | ||
3767 | return access_value_history (index); | |
3768 | } | |
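| /* For illustration, the parse above maps history references to | |
| access_value_history indices as follows: | |
| ||
| "$" -> 0 (the last value) | |
| "$3" -> 3 (absolute item number 3) | |
| "$$" -> -1 (one item back, per the quirk noted above) | |
| "$$4" -> -4 (four items back) | |
| ||
| and in each case *ENDP is advanced past the characters consumed. */ | |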
3769 | ||
3770 | /* Get the component value (offset by OFFSET bytes) of a struct or | |
3771 | union WHOLE. Component's type is TYPE. */ | |
3772 | ||
3773 | struct value * | |
3774 | value_from_component (struct value *whole, struct type *type, LONGEST offset) | |
3775 | { | |
3776 | struct value *v; | |
3777 | ||
3778 | if (whole->lval () == lval_memory && whole->lazy ()) | |
3779 | v = value::allocate_lazy (type); | |
3780 | else | |
3781 | { | |
3782 | v = value::allocate (type); | |
3783 | whole->contents_copy (v, v->embedded_offset (), | |
3784 | whole->embedded_offset () + offset, | |
3785 | type_length_units (type)); | |
3786 | } | |
3787 | v->set_offset (whole->offset () + offset + whole->embedded_offset ()); | |
3788 | v->set_component_location (whole); | |
3789 | ||
3790 | return v; | |
3791 | } | |
3792 | ||
3793 | /* See value.h. */ | |
3794 | ||
3795 | struct value * | |
3796 | value::from_component_bitsize (struct type *type, | |
3797 | LONGEST bit_offset, LONGEST bit_length) | |
3798 | { | |
3799 | gdb_assert (!lazy ()); | |
3800 | ||
3801 | /* Preserve lvalue-ness if possible. This is needed to avoid | |
3802 | array-printing failures (including crashes) when printing Ada | |
3803 | arrays in programs compiled with -fgnat-encodings=all. */ | |
3804 | if ((bit_offset % TARGET_CHAR_BIT) == 0 | |
3805 | && (bit_length % TARGET_CHAR_BIT) == 0 | |
3806 | && bit_length == TARGET_CHAR_BIT * type->length ()) | |
3807 | return value_from_component (this, type, bit_offset / TARGET_CHAR_BIT); | |
3808 | ||
3809 | struct value *v = value::allocate (type); | |
3810 | ||
3811 | LONGEST dst_offset = TARGET_CHAR_BIT * v->embedded_offset (); | |
3812 | if (is_scalar_type (type) && type_byte_order (type) == BFD_ENDIAN_BIG) | |
3813 | dst_offset += TARGET_CHAR_BIT * type->length () - bit_length; | |
3814 | ||
3815 | contents_copy_raw_bitwise (v, dst_offset, | |
3816 | TARGET_CHAR_BIT | |
3817 | * embedded_offset () | |
3818 | + bit_offset, | |
3819 | bit_length); | |
3820 | return v; | |
3821 | } | |
3822 | ||
3823 | struct value * | |
3824 | coerce_ref_if_computed (const struct value *arg) | |
3825 | { | |
3826 | const struct lval_funcs *funcs; | |
3827 | ||
3828 | if (!TYPE_IS_REFERENCE (check_typedef (arg->type ()))) | |
3829 | return NULL; | |
3830 | ||
3831 | if (arg->lval () != lval_computed) | |
3832 | return NULL; | |
3833 | ||
3834 | funcs = arg->computed_funcs (); | |
3835 | if (funcs->coerce_ref == NULL) | |
3836 | return NULL; | |
3837 | ||
3838 | return funcs->coerce_ref (arg); | |
3839 | } | |
3840 | ||
3841 | /* Look at value.h for description. */ | |
3842 | ||
3843 | struct value * | |
3844 | readjust_indirect_value_type (struct value *value, struct type *enc_type, | |
3845 | const struct type *original_type, | |
3846 | struct value *original_value, | |
3847 | CORE_ADDR original_value_address) | |
3848 | { | |
3849 | gdb_assert (original_type->is_pointer_or_reference ()); | |
3850 | ||
3851 | struct type *original_target_type = original_type->target_type (); | |
3852 | gdb::array_view<const gdb_byte> view; | |
3853 | struct type *resolved_original_target_type | |
3854 | = resolve_dynamic_type (original_target_type, view, | |
3855 | original_value_address); | |
3856 | ||
3857 | /* Re-adjust type. */ | |
3858 | value->deprecated_set_type (resolved_original_target_type); | |
3859 | ||
3860 | /* Add embedding info. */ | |
3861 | value->set_enclosing_type (enc_type); | |
3862 | value->set_embedded_offset (original_value->pointed_to_offset ()); | |
3863 | ||
3864 | /* We may be pointing to an object of some derived type. */ | |
3865 | return value_full_object (value, NULL, 0, 0, 0); | |
3866 | } | |
3867 | ||
3868 | struct value * | |
3869 | coerce_ref (struct value *arg) | |
3870 | { | |
3871 | struct type *value_type_arg_tmp = check_typedef (arg->type ()); | |
3872 | struct value *retval; | |
3873 | struct type *enc_type; | |
3874 | ||
3875 | retval = coerce_ref_if_computed (arg); | |
3876 | if (retval) | |
3877 | return retval; | |
3878 | ||
3879 | if (!TYPE_IS_REFERENCE (value_type_arg_tmp)) | |
3880 | return arg; | |
3881 | ||
3882 | enc_type = check_typedef (arg->enclosing_type ()); | |
3883 | enc_type = enc_type->target_type (); | |
3884 | ||
3885 | CORE_ADDR addr = unpack_pointer (arg->type (), arg->contents ().data ()); | |
3886 | retval = value_at_lazy (enc_type, addr); | |
3887 | enc_type = retval->type (); | |
3888 | return readjust_indirect_value_type (retval, enc_type, value_type_arg_tmp, | |
3889 | arg, addr); | |
3890 | } | |
3891 | ||
3892 | struct value * | |
3893 | coerce_array (struct value *arg) | |
3894 | { | |
3895 | struct type *type; | |
3896 | ||
3897 | arg = coerce_ref (arg); | |
3898 | type = check_typedef (arg->type ()); | |
3899 | ||
3900 | switch (type->code ()) | |
3901 | { | |
3902 | case TYPE_CODE_ARRAY: | |
3903 | if (!type->is_vector () && current_language->c_style_arrays_p ()) | |
3904 | arg = value_coerce_array (arg); | |
3905 | break; | |
3906 | case TYPE_CODE_FUNC: | |
3907 | arg = value_coerce_function (arg); | |
3908 | break; | |
3909 | } | |
3910 | return arg; | |
3911 | } | |
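| /* Illustrative sketch: in a C-style language, passing a value of type | |
| `int[4]' through coerce_array yields a value of type `int *' pointing | |
| at the first element, mirroring C's array-to-pointer decay. Vector | |
| types and non-C-style languages are left untouched. */ | |
| ||
| static struct value * | |
| example_decay (struct value *array_val) | |
| { | |
| return coerce_array (array_val); | |
| } | |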
3912 | \f | |
3913 | ||
3914 | /* Return the return value convention that will be used for the | |
3915 | specified type. */ | |
3916 | ||
3917 | enum return_value_convention | |
3918 | struct_return_convention (struct gdbarch *gdbarch, | |
3919 | struct value *function, struct type *value_type) | |
3920 | { | |
3921 | enum type_code code = value_type->code (); | |
3922 | ||
3923 | if (code == TYPE_CODE_ERROR) | |
3924 | error (_("Function return type unknown.")); | |
3925 | ||
3926 | /* Probe the architecture for the return-value convention. */ | |
3927 | return gdbarch_return_value_as_value (gdbarch, function, value_type, | |
3928 | NULL, NULL, NULL); | |
3929 | } | |
3930 | ||
3931 | /* Return true if the function returning the specified type is using | |
3932 | the convention of returning structures in memory (passing in the | |
3933 | address as a hidden first parameter). */ | |
3934 | ||
3935 | int | |
3936 | using_struct_return (struct gdbarch *gdbarch, | |
3937 | struct value *function, struct type *value_type) | |
3938 | { | |
3939 | if (value_type->code () == TYPE_CODE_VOID) | |
3940 | /* A void return value is never in memory. See also corresponding | |
3941 | code in "print_return_value". */ | |
3942 | return 0; | |
3943 | ||
3944 | return (struct_return_convention (gdbarch, function, value_type) | |
3945 | != RETURN_VALUE_REGISTER_CONVENTION); | |
3946 | } | |
3947 | ||
3948 | /* See value.h. */ | |
3949 | ||
3950 | void | |
3951 | value::fetch_lazy_bitfield () | |
3952 | { | |
3953 | gdb_assert (bitsize () != 0); | |
3954 | ||
3955 | /* To read a lazy bitfield, read the entire enclosing value. This | |
3956 | prevents reading the same block of (possibly volatile) memory once | |
3957 | per bitfield. It would be even better to read only the containing | |
3958 | word, but we have no way to record that just specific bits of a | |
3959 | value have been fetched. */ | |
3960 | struct value *parent = this->parent (); | |
3961 | ||
3962 | if (parent->lazy ()) | |
3963 | parent->fetch_lazy (); | |
3964 | ||
3965 | parent->unpack_bitfield (this, bitpos (), bitsize (), | |
3966 | parent->contents_for_printing ().data (), | |
3967 | offset ()); | |
3968 | } | |
3969 | ||
3970 | /* See value.h. */ | |
3971 | ||
3972 | void | |
3973 | value::fetch_lazy_memory () | |
3974 | { | |
3975 | gdb_assert (m_lval == lval_memory); | |
3976 | ||
3977 | CORE_ADDR addr = address (); | |
3978 | struct type *type = check_typedef (enclosing_type ()); | |
3979 | ||
3980 | /* Figure out how much we should copy from memory. Usually, this is just | |
3981 | the size of the type, but, for arrays, we might only be loading a | |
3982 | small part of the array (this is only done for very large arrays). */ | |
3983 | int len = 0; | |
3984 | if (m_limited_length > 0) | |
3985 | { | |
3986 | gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY); | |
3987 | len = m_limited_length; | |
3988 | } | |
3989 | else if (type->length () > 0) | |
3990 | len = type_length_units (type); | |
3991 | ||
3992 | gdb_assert (len >= 0); | |
3993 | ||
3994 | if (len > 0) | |
3995 | read_value_memory (this, 0, stack (), addr, | |
3996 | contents_all_raw ().data (), len); | |
3997 | ||
3998 | /* If only part of an array was loaded, mark the rest as unavailable. */ | |
3999 | if (m_limited_length > 0) | |
4000 | mark_bytes_unavailable (m_limited_length, | |
4001 | m_enclosing_type->length () - m_limited_length); | |
4002 | } | |
4003 | ||
4004 | /* See value.h. */ | |
4005 | ||
4006 | void | |
4007 | value::fetch_lazy_register () | |
4008 | { | |
4009 | struct type *type = check_typedef (this->type ()); | |
4010 | struct value *new_val = this; | |
4011 | ||
4012 | scoped_value_mark mark; | |
4013 | ||
4014 | /* Offsets are not supported here; lazy register values must | |
4015 | refer to the entire register. */ | |
4016 | gdb_assert (offset () == 0); | |
4017 | ||
4018 | while (new_val->lval () == lval_register && new_val->lazy ()) | |
4019 | { | |
4020 | frame_id next_frame_id = new_val->next_frame_id (); | |
4021 | frame_info_ptr next_frame = frame_find_by_id (next_frame_id); | |
4022 | gdb_assert (next_frame != NULL); | |
4023 | ||
4024 | int regnum = new_val->regnum (); | |
4025 | ||
4026 | /* Convertible register routines are used for multi-register | |
4027 | values and for interpretation in different types | |
4028 | (e.g. float or int from a double register). Lazy | |
4029 | register values should have the register's natural type, | |
4030 | so they do not apply. */ | |
4031 | gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame), | |
4032 | regnum, type)); | |
4033 | ||
4034 | new_val = frame_unwind_register_value (next_frame, regnum); | |
4035 | ||
4036 | /* If we get another lazy lval_register value, it means the | |
4037 | register is found by reading it from NEXT_FRAME's next frame. | |
4038 | frame_unwind_register_value should never return a value with | |
4039 | the frame id pointing to NEXT_FRAME. If it does, it means we | |
4040 | either have two consecutive frames with the same frame id | |
4041 | in the frame chain, or some code is trying to unwind | |
4042 | behind get_prev_frame's back (e.g., a frame unwind | |
4043 | sniffer trying to unwind), bypassing its validations. In | |
4044 | any case, it should always be an internal error to end up | |
4045 | in this situation. */ | |
4046 | if (new_val->lval () == lval_register | |
4047 | && new_val->lazy () | |
4048 | && new_val->next_frame_id () == next_frame_id) | |
4049 | internal_error (_("infinite loop while fetching a register")); | |
4050 | } | |
4051 | ||
4052 | /* If it's still lazy (for instance, a saved register on the | |
4053 | stack), fetch it. */ | |
4054 | if (new_val->lazy ()) | |
4055 | new_val->fetch_lazy (); | |
4056 | ||
4057 | /* Copy the contents and the unavailability/optimized-out | |
4058 | meta-data from NEW_VAL to VAL. */ | |
4059 | set_lazy (false); | |
4060 | new_val->contents_copy (this, embedded_offset (), | |
4061 | new_val->embedded_offset (), | |
4062 | type_length_units (type)); | |
4063 | ||
4064 | if (frame_debug) | |
4065 | { | |
4066 | frame_info_ptr frame = frame_find_by_id (this->next_frame_id ()); | |
4067 | frame = get_prev_frame_always (frame); | |
4068 | int regnum = this->regnum (); | |
4069 | gdbarch *gdbarch = get_frame_arch (frame); | |
4070 | ||
4071 | string_file debug_file; | |
4072 | gdb_printf (&debug_file, | |
4073 | "(frame=%d, regnum=%d(%s), ...) ", | |
4074 | frame_relative_level (frame), regnum, | |
4075 | user_reg_map_regnum_to_name (gdbarch, regnum)); | |
4076 | ||
4077 | gdb_printf (&debug_file, "->"); | |
4078 | if (new_val->optimized_out ()) | |
4079 | { | |
4080 | gdb_printf (&debug_file, " "); | |
4081 | val_print_optimized_out (new_val, &debug_file); | |
4082 | } | |
4083 | else | |
4084 | { | |
4085 | if (new_val->lval () == lval_register) | |
4086 | gdb_printf (&debug_file, " register=%d", new_val->regnum ()); | |
4087 | else if (new_val->lval () == lval_memory) | |
4088 | gdb_printf (&debug_file, " address=%s", | |
4089 | paddress (gdbarch, | |
4090 | new_val->address ())); | |
4091 | else | |
4092 | gdb_printf (&debug_file, " computed"); | |
4093 | ||
4094 | if (new_val->entirely_available ()) | |
4095 | { | |
4096 | int i; | |
4097 | gdb::array_view<const gdb_byte> buf = new_val->contents (); | |
4098 | ||
4099 | gdb_printf (&debug_file, " bytes="); | |
4100 | gdb_printf (&debug_file, "["); | |
4101 | for (i = 0; i < register_size (gdbarch, regnum); i++) | |
4102 | gdb_printf (&debug_file, "%02x", buf[i]); | |
4103 | gdb_printf (&debug_file, "]"); | |
4104 | } | |
4105 | else if (new_val->entirely_unavailable ()) | |
4106 | gdb_printf (&debug_file, " unavailable"); | |
4107 | else | |
4108 | gdb_printf (&debug_file, " partly unavailable"); | |
4109 | } | |
4110 | ||
4111 | frame_debug_printf ("%s", debug_file.c_str ()); | |
4112 | } | |
4113 | } | |
4114 | ||
4115 | /* See value.h. */ | |
4116 | ||
4117 | void | |
4118 | value::fetch_lazy () | |
4119 | { | |
4120 | gdb_assert (lazy ()); | |
4121 | allocate_contents (true); | |
4122 | /* A value is either lazy, or fully fetched. The | |
4123 | availability/validity is only established as we try to fetch a | |
4124 | value. */ | |
4125 | gdb_assert (m_optimized_out.empty ()); | |
4126 | gdb_assert (m_unavailable.empty ()); | |
4127 | if (m_is_zero) | |
4128 | { | |
4129 | /* Nothing. */ | |
4130 | } | |
4131 | else if (bitsize ()) | |
4132 | fetch_lazy_bitfield (); | |
4133 | else if (this->lval () == lval_memory) | |
4134 | fetch_lazy_memory (); | |
4135 | else if (this->lval () == lval_register) | |
4136 | fetch_lazy_register (); | |
4137 | else if (this->lval () == lval_computed | |
4138 | && computed_funcs ()->read != NULL) | |
4139 | computed_funcs ()->read (this); | |
4140 | else | |
4141 | internal_error (_("Unexpected lazy value type.")); | |
4142 | ||
4143 | set_lazy (false); | |
4144 | } | |
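| /* Illustrative sketch of the lazy-value life cycle: value_at_lazy only | |
| records the address, and the target read happens inside fetch_lazy | |
| the first time the contents are needed, here when value_as_long | |
| unpacks them. */ | |
| ||
| static LONGEST | |
| example_read_int (struct type *int_type, CORE_ADDR addr) | |
| { | |
| struct value *v = value_at_lazy (int_type, addr); | |
| ||
| return value_as_long (v); | |
| } | |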
4145 | ||
4146 | /* See value.h. */ | |
4147 | ||
4148 | value * | |
4149 | pseudo_from_raw_part (const frame_info_ptr &next_frame, int pseudo_reg_num, | |
4150 | int raw_reg_num, int raw_offset) | |
4151 | { | |
4152 | value *pseudo_reg_val | |
4153 | = value::allocate_register (next_frame, pseudo_reg_num); | |
4154 | value *raw_reg_val = value_of_register (raw_reg_num, next_frame); | |
4155 | raw_reg_val->contents_copy (pseudo_reg_val, 0, raw_offset, | |
4156 | pseudo_reg_val->type ()->length ()); | |
4157 | return pseudo_reg_val; | |
4158 | } | |
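| /* Illustrative sketch (register numbers are hypothetical): expose the | |
| low sixteen bits of 32-bit raw register 7 as pseudo register 42 by | |
| copying from offset 0 of the raw register's contents. On a | |
| big-endian target the low half would instead live at offset 2. */ | |
| ||
| static value * | |
| example_low_half_pseudo (const frame_info_ptr &next_frame) | |
| { | |
| return pseudo_from_raw_part (next_frame, 42, 7, 0); | |
| } | |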
4159 | ||
4160 | /* See value.h. */ | |
4161 | ||
4162 | void | |
4163 | pseudo_to_raw_part (const frame_info_ptr &next_frame, | |
4164 | gdb::array_view<const gdb_byte> pseudo_buf, | |
4165 | int raw_reg_num, int raw_offset) | |
4166 | { | |
4167 | int raw_reg_size | |
4168 | = register_size (frame_unwind_arch (next_frame), raw_reg_num); | |
4169 | ||
4170 | /* When overflowing a register, put_frame_register_bytes writes to the | |
4171 | subsequent registers. We don't want that behavior here, so make sure | |
4172 | the write is wholly within register RAW_REG_NUM. */ | |
4173 | gdb_assert (raw_offset + pseudo_buf.size () <= raw_reg_size); | |
4174 | put_frame_register_bytes (next_frame, raw_reg_num, raw_offset, pseudo_buf); | |
4175 | } | |
4176 | ||
4177 | /* See value.h. */ | |
4178 | ||
4179 | value * | |
4180 | pseudo_from_concat_raw (const frame_info_ptr &next_frame, int pseudo_reg_num, | |
4181 | int raw_reg_1_num, int raw_reg_2_num) | |
4182 | { | |
4183 | value *pseudo_reg_val | |
4184 | = value::allocate_register (next_frame, pseudo_reg_num); | |
4185 | int dst_offset = 0; | |
4186 | ||
4187 | value *raw_reg_1_val = value_of_register (raw_reg_1_num, next_frame); | |
4188 | raw_reg_1_val->contents_copy (pseudo_reg_val, dst_offset, 0, | |
4189 | raw_reg_1_val->type ()->length ()); | |
4190 | dst_offset += raw_reg_1_val->type ()->length (); | |
4191 | ||
4192 | value *raw_reg_2_val = value_of_register (raw_reg_2_num, next_frame); | |
4193 | raw_reg_2_val->contents_copy (pseudo_reg_val, dst_offset, 0, | |
4194 | raw_reg_2_val->type ()->length ()); | |
4195 | dst_offset += raw_reg_2_val->type ()->length (); | |
4196 | ||
4197 | gdb_assert (dst_offset == pseudo_reg_val->type ()->length ()); | |
4198 | ||
4199 | return pseudo_reg_val; | |
4200 | } | |
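| /* Illustrative sketch (register numbers are hypothetical): synthesize | |
| a 64-bit pseudo register 50 from two 32-bit raw registers 8 and 9, | |
| with register 8 supplying the first (lowest-offset) half of the | |
| pseudo register's contents. */ | |
| ||
| static value * | |
| example_concat_pseudo (const frame_info_ptr &next_frame) | |
| { | |
| return pseudo_from_concat_raw (next_frame, 50, 8, 9); | |
| } | |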
4201 | ||
4202 | /* See value.h. */ | |
4203 | ||
4204 | void | |
4205 | pseudo_to_concat_raw (const frame_info_ptr &next_frame, | |
4206 | gdb::array_view<const gdb_byte> pseudo_buf, | |
4207 | int raw_reg_1_num, int raw_reg_2_num) | |
4208 | { | |
4209 | int src_offset = 0; | |
4210 | gdbarch *arch = frame_unwind_arch (next_frame); | |
4211 | ||
4212 | int raw_reg_1_size = register_size (arch, raw_reg_1_num); | |
4213 | put_frame_register (next_frame, raw_reg_1_num, | |
4214 | pseudo_buf.slice (src_offset, raw_reg_1_size)); | |
4215 | src_offset += raw_reg_1_size; | |
4216 | ||
4217 | int raw_reg_2_size = register_size (arch, raw_reg_2_num); | |
4218 | put_frame_register (next_frame, raw_reg_2_num, | |
4219 | pseudo_buf.slice (src_offset, raw_reg_2_size)); | |
4220 | src_offset += raw_reg_2_size; | |
4221 | ||
4222 | gdb_assert (src_offset == pseudo_buf.size ()); | |
4223 | } | |
4224 | ||
4225 | /* See value.h. */ | |
4226 | ||
4227 | value * | |
4228 | pseudo_from_concat_raw (const frame_info_ptr &next_frame, int pseudo_reg_num, | |
4229 | int raw_reg_1_num, int raw_reg_2_num, | |
4230 | int raw_reg_3_num) | |
4231 | { | |
4232 | value *pseudo_reg_val | |
4233 | = value::allocate_register (next_frame, pseudo_reg_num); | |
4234 | int dst_offset = 0; | |
4235 | ||
4236 | value *raw_reg_1_val = value_of_register (raw_reg_1_num, next_frame); | |
4237 | raw_reg_1_val->contents_copy (pseudo_reg_val, dst_offset, 0, | |
4238 | raw_reg_1_val->type ()->length ()); | |
4239 | dst_offset += raw_reg_1_val->type ()->length (); | |
4240 | ||
4241 | value *raw_reg_2_val = value_of_register (raw_reg_2_num, next_frame); | |
4242 | raw_reg_2_val->contents_copy (pseudo_reg_val, dst_offset, 0, | |
4243 | raw_reg_2_val->type ()->length ()); | |
4244 | dst_offset += raw_reg_2_val->type ()->length (); | |
4245 | ||
4246 | value *raw_reg_3_val = value_of_register (raw_reg_3_num, next_frame); | |
4247 | raw_reg_3_val->contents_copy (pseudo_reg_val, dst_offset, 0, | |
4248 | raw_reg_3_val->type ()->length ()); | |
4249 | dst_offset += raw_reg_3_val->type ()->length (); | |
4250 | ||
4251 | gdb_assert (dst_offset == pseudo_reg_val->type ()->length ()); | |
4252 | ||
4253 | return pseudo_reg_val; | |
4254 | } | |
4255 | ||
4256 | /* See value.h. */ | |
4257 | ||
4258 | void | |
4259 | pseudo_to_concat_raw (const frame_info_ptr &next_frame, | |
4260 | gdb::array_view<const gdb_byte> pseudo_buf, | |
4261 | int raw_reg_1_num, int raw_reg_2_num, int raw_reg_3_num) | |
4262 | { | |
4263 | int src_offset = 0; | |
4264 | gdbarch *arch = frame_unwind_arch (next_frame); | |
4265 | ||
4266 | int raw_reg_1_size = register_size (arch, raw_reg_1_num); | |
4267 | put_frame_register (next_frame, raw_reg_1_num, | |
4268 | pseudo_buf.slice (src_offset, raw_reg_1_size)); | |
4269 | src_offset += raw_reg_1_size; | |
4270 | ||
4271 | int raw_reg_2_size = register_size (arch, raw_reg_2_num); | |
4272 | put_frame_register (next_frame, raw_reg_2_num, | |
4273 | pseudo_buf.slice (src_offset, raw_reg_2_size)); | |
4274 | src_offset += raw_reg_2_size; | |
4275 | ||
4276 | int raw_reg_3_size = register_size (arch, raw_reg_3_num); | |
4277 | put_frame_register (next_frame, raw_reg_3_num, | |
4278 | pseudo_buf.slice (src_offset, raw_reg_3_size)); | |
4279 | src_offset += raw_reg_3_size; | |
4280 | ||
4281 | gdb_assert (src_offset == pseudo_buf.size ()); | |
4282 | } | |
4283 | ||
4284 | /* Implementation of the convenience function $_isvoid. */ | |
4285 | ||
4286 | static struct value * | |
4287 | isvoid_internal_fn (struct gdbarch *gdbarch, | |
4288 | const struct language_defn *language, | |
4289 | void *cookie, int argc, struct value **argv) | |
4290 | { | |
4291 | int ret; | |
4292 | ||
4293 | if (argc != 1) | |
4294 | error (_("You must provide one argument for $_isvoid.")); | |
4295 | ||
4296 | ret = argv[0]->type ()->code () == TYPE_CODE_VOID; | |
4297 | ||
4298 | return value_from_longest (builtin_type (gdbarch)->builtin_int, ret); | |
4299 | } | |
4300 | ||
4301 | /* Implementation of the convenience function $_creal. Extracts the | |
4302 | real part from a complex number. */ | |
4303 | ||
4304 | static struct value * | |
4305 | creal_internal_fn (struct gdbarch *gdbarch, | |
4306 | const struct language_defn *language, | |
4307 | void *cookie, int argc, struct value **argv, | |
4308 | enum noside noside) | |
4309 | { | |
4310 | if (argc != 1) | |
4311 | error (_("You must provide one argument for $_creal.")); | |
4312 | ||
4313 | value *cval = argv[0]; | |
4314 | type *ctype = check_typedef (cval->type ()); | |
4315 | if (ctype->code () != TYPE_CODE_COMPLEX) | |
4316 | error (_("expected a complex number")); | |
4317 | if (noside == EVAL_AVOID_SIDE_EFFECTS) | |
4318 | return value::zero (ctype->target_type (), not_lval); | |
4319 | return value_real_part (cval); | |
4320 | } | |
4321 | ||
4322 | /* Implementation of the convenience function $_cimag. Extracts the | |
4323 | imaginary part from a complex number. */ | |
4324 | ||
4325 | static struct value * | |
4326 | cimag_internal_fn (struct gdbarch *gdbarch, | |
4327 | const struct language_defn *language, | |
4328 | void *cookie, int argc, | |
4329 | struct value **argv, enum noside noside) | |
4330 | { | |
4331 | if (argc != 1) | |
4332 | error (_("You must provide one argument for $_cimag.")); | |
4333 | ||
4334 | value *cval = argv[0]; | |
4335 | type *ctype = check_typedef (cval->type ()); | |
4336 | if (ctype->code () != TYPE_CODE_COMPLEX) | |
4337 | error (_("expected a complex number")); | |
4338 | if (noside == EVAL_AVOID_SIDE_EFFECTS) | |
4339 | return value::zero (ctype->target_type (), not_lval); | |
4340 | return value_imaginary_part (cval); | |
4341 | } | |
4342 | ||
4343 | #if GDB_SELF_TEST | |
4344 | namespace selftests | |
4345 | { | |
4346 | ||
4347 | /* Test the ranges_contain function. */ | |
4348 | ||
4349 | static void | |
4350 | test_ranges_contain () | |
4351 | { | |
4352 | std::vector<range> ranges; | |
4353 | range r; | |
4354 | ||
4355 | /* [10, 14] */ | |
4356 | r.offset = 10; | |
4357 | r.length = 5; | |
4358 | ranges.push_back (r); | |
4359 | ||
4360 | /* [20, 24] */ | |
4361 | r.offset = 20; | |
4362 | r.length = 5; | |
4363 | ranges.push_back (r); | |
4364 | ||
4365 | /* [2, 6] */ | |
4366 | SELF_CHECK (!ranges_contain (ranges, 2, 5)); | |
4367 | /* [9, 13] */ | |
4368 | SELF_CHECK (ranges_contain (ranges, 9, 5)); | |
4369 | /* [10, 11] */ | |
4370 | SELF_CHECK (ranges_contain (ranges, 10, 2)); | |
4371 | /* [10, 14] */ | |
4372 | SELF_CHECK (ranges_contain (ranges, 10, 5)); | |
4373 | /* [13, 18] */ | |
4374 | SELF_CHECK (ranges_contain (ranges, 13, 6)); | |
4375 | /* [14, 18] */ | |
4376 | SELF_CHECK (ranges_contain (ranges, 14, 5)); | |
4377 | /* [15, 18] */ | |
4378 | SELF_CHECK (!ranges_contain (ranges, 15, 4)); | |
4379 | /* [16, 19] */ | |
4380 | SELF_CHECK (!ranges_contain (ranges, 16, 4)); | |
4381 | /* [16, 21] */ | |
4382 | SELF_CHECK (ranges_contain (ranges, 16, 6)); | |
4383 | /* [21, 21] */ | |
4384 | SELF_CHECK (ranges_contain (ranges, 21, 1)); | |
4385 | /* [21, 25] */ | |
4386 | SELF_CHECK (ranges_contain (ranges, 21, 5)); | |
4387 | /* [26, 28] */ | |
4388 | SELF_CHECK (!ranges_contain (ranges, 26, 3)); | |
4389 | } | |
4390 | ||
4391 | /* Check that RANGES contains the same ranges as EXPECTED. */ | |
4392 | ||
4393 | static bool | |
4394 | check_ranges_vector (gdb::array_view<const range> ranges, | |
4395 | gdb::array_view<const range> expected) | |
4396 | { | |
4397 | return ranges == expected; | |
4398 | } | |
4399 | ||
4400 | /* Test the insert_into_bit_range_vector function. */ | |
4401 | ||
4402 | static void | |
4403 | test_insert_into_bit_range_vector () | |
4404 | { | |
4405 | std::vector<range> ranges; | |
4406 | ||
4407 | /* [10, 14] */ | |
4408 | { | |
4409 | insert_into_bit_range_vector (&ranges, 10, 5); | |
4410 | static const range expected[] = { | |
4411 | {10, 5} | |
4412 | }; | |
4413 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4414 | } | |
4415 | ||
4416 | /* [10, 14] */ | |
4417 | { | |
4418 | insert_into_bit_range_vector (&ranges, 11, 4); | |
4419 | static const range expected = {10, 5}; | |
4420 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4421 | } | |
4422 | ||
4423 | /* [10, 14] [20, 24] */ | |
4424 | { | |
4425 | insert_into_bit_range_vector (&ranges, 20, 5); | |
4426 | static const range expected[] = { | |
4427 | {10, 5}, | |
4428 | {20, 5}, | |
4429 | }; | |
4430 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4431 | } | |
4432 | ||
4433 | /* [10, 14] [17, 24] */ | |
4434 | { | |
4435 | insert_into_bit_range_vector (&ranges, 17, 5); | |
4436 | static const range expected[] = { | |
4437 | {10, 5}, | |
4438 | {17, 8}, | |
4439 | }; | |
4440 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4441 | } | |
4442 | ||
4443 | /* [2, 8] [10, 14] [17, 24] */ | |
4444 | { | |
4445 | insert_into_bit_range_vector (&ranges, 2, 7); | |
4446 | static const range expected[] = { | |
4447 | {2, 7}, | |
4448 | {10, 5}, | |
4449 | {17, 8}, | |
4450 | }; | |
4451 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4452 | } | |
4453 | ||
4454 | /* [2, 14] [17, 24] */ | |
4455 | { | |
4456 | insert_into_bit_range_vector (&ranges, 9, 1); | |
4457 | static const range expected[] = { | |
4458 | {2, 13}, | |
4459 | {17, 8}, | |
4460 | }; | |
4461 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4462 | } | |
4463 | ||
4464 | /* [2, 14] [17, 24] */ | |
4465 | { | |
4466 | insert_into_bit_range_vector (&ranges, 9, 1); | |
4467 | static const range expected[] = { | |
4468 | {2, 13}, | |
4469 | {17, 8}, | |
4470 | }; | |
4471 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4472 | } | |
4473 | ||
4474 | /* [2, 33] */ | |
4475 | { | |
4476 | insert_into_bit_range_vector (&ranges, 4, 30); | |
4477 | static const range expected = {2, 32}; | |
4478 | SELF_CHECK (check_ranges_vector (ranges, expected)); | |
4479 | } | |
4480 | } | |
4481 | ||
4482 | static void | |
4483 | test_value_copy () | |
4484 | { | |
4485 | type *type = builtin_type (current_inferior ()->arch ())->builtin_int; | |
4486 | ||
4487 | /* Verify that we can copy an entirely optimized out value, that may not have | |
4488 | its contents allocated. */ | |
4489 | value_ref_ptr val = release_value (value::allocate_optimized_out (type)); | |
4490 | value_ref_ptr copy = release_value (val->copy ()); | |
4491 | ||
4492 | SELF_CHECK (val->entirely_optimized_out ()); | |
4493 | SELF_CHECK (copy->entirely_optimized_out ()); | |
4494 | } | |
4495 | ||
4496 | } /* namespace selftests */ | |
4497 | #endif /* GDB_SELF_TEST */ | |
4498 | ||
4499 | INIT_GDB_FILE (values) | |
4500 | { | |
4501 | cmd_list_element *show_convenience_cmd | |
4502 | = add_cmd ("convenience", no_class, show_convenience, _("\ | |
4503 | Debugger convenience (\"$foo\") variables and functions.\n\ | |
4504 | Convenience variables are created when you assign them values;\n\ | |
4505 | thus, \"set $foo=1\" gives \"$foo\" the value 1. Values may be any type.\n\ | |
4506 | \n\ | |
4507 | A few convenience variables are given values automatically:\n\ | |
4508 | \"$_\"holds the last address examined with \"x\" or \"info lines\",\n\ | |
4509 | \"$__\" holds the contents of the last address examined with \"x\"." | |
4510 | #ifdef HAVE_PYTHON | |
4511 | "\n\n\ | |
4512 | Convenience functions are defined via the Python API." | |
4513 | #endif | |
4514 | ), &showlist); | |
4515 | add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist); | |
4516 | ||
4517 | add_cmd ("values", no_set_class, show_values, _("\ | |
4518 | Elements of value history around item number IDX (or last ten)."), | |
4519 | &showlist); | |
4520 | ||
4521 | add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\ | |
4522 | Initialize a convenience variable if necessary.\n\ | |
4523 | init-if-undefined VARIABLE = EXPRESSION\n\ | |
4524 | Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\ | |
4525 | exist or does not contain a value. The EXPRESSION is not evaluated if the\n\ | |
4526 | VARIABLE is already initialized.")); | |
4527 | ||
4528 | add_prefix_cmd ("function", no_class, function_command, _("\ | |
4529 | Placeholder command for showing help on convenience functions."), | |
4530 | &functionlist, 0, &cmdlist); | |
4531 | ||
4532 | add_internal_function ("_isvoid", _("\ | |
4533 | Check whether an expression is void.\n\ | |
4534 | Usage: $_isvoid (expression)\n\ | |
4535 | Return 1 if the expression is void, 0 otherwise."), | |
4536 | isvoid_internal_fn, NULL); | |
4537 | ||
4538 | add_internal_function ("_creal", _("\ | |
4539 | Extract the real part of a complex number.\n\ | |
4540 | Usage: $_creal (expression)\n\ | |
4541 | Return the real part of a complex number; the type of the result\n\ | |
4542 | depends on the type of the complex number."), | |
4543 | creal_internal_fn, NULL); | |
4544 | ||
4545 | add_internal_function ("_cimag", _("\ | |
4546 | Extract the imaginary part of a complex number.\n\ | |
4547 | Usage: $_cimag (expression)\n\ | |
4548 | Return the imaginary part of a complex number; the type of the result\n\ | |
4549 | depends on the type of the complex number."), | |
4550 | cimag_internal_fn, NULL); | |
4551 | ||
4552 | add_setshow_zuinteger_unlimited_cmd ("max-value-size", | |
4553 | class_support, &max_value_size, _("\ | |
4554 | Set the maximum size of a value gdb will load from the inferior."), _("\ | |
4555 | Show the maximum size of a value gdb will load from the inferior."), _("\ | |
4556 | Use this to control the maximum size, in bytes, of a value that gdb\n\ | |
4557 | will load from the inferior. Setting this value to 'unlimited'\n\ | |
4558 | disables checking.\n\ | |
4559 | Setting this does not invalidate already allocated values; it only\n\ | |
4560 | prevents future values larger than this size from being allocated."), | |
4561 | set_max_value_size, | |
4562 | show_max_value_size, | |
4563 | &setlist, &showlist); | |
4564 | set_show_commands vsize_limit | |
4565 | = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support, | |
4566 | &max_value_size, _("\ | |
4567 | Set the maximum number of bytes allowed in a variable-size object."), _("\ | |
4568 | Show the maximum number of bytes allowed in a variable-size object."), _("\ | |
4569 | Attempts to access an object whose size is not a compile-time constant\n\ | |
4570 | and exceeds this limit will cause an error."), | |
4571 | NULL, NULL, &setlist, &showlist); | |
4572 | deprecate_cmd (vsize_limit.set, "set max-value-size"); | |
4573 | ||
4574 | #if GDB_SELF_TEST | |
4575 | selftests::register_test ("ranges_contain", selftests::test_ranges_contain); | |
4576 | selftests::register_test ("insert_into_bit_range_vector", | |
4577 | selftests::test_insert_into_bit_range_vector); | |
4578 | selftests::register_test ("value_copy", selftests::test_value_copy); | |
4579 | #endif | |
4580 | ||
4581 | /* Destroy any values currently allocated in a final cleanup instead | |
4582 | of leaving it to global destructors, because that may be too | |
4583 | late. For example, the destructors of xmethod values call into | |
4584 | the Python runtime. */ | |
4585 | add_final_cleanup ([] () | |
4586 | { | |
4587 | all_values.clear (); | |
4588 | }); | |
4589 | } |