1 | /* Perform non-arithmetic operations on values, for GDB. | |
2 | ||
3 | Copyright (C) 1986-2024 Free Software Foundation, Inc. | |
4 | ||
5 | This file is part of GDB. | |
6 | ||
7 | This program is free software; you can redistribute it and/or modify | |
8 | it under the terms of the GNU General Public License as published by | |
9 | the Free Software Foundation; either version 3 of the License, or | |
10 | (at your option) any later version. | |
11 | ||
12 | This program is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
18 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ | |
19 | ||
20 | #include "event-top.h" | |
21 | #include "extract-store-integer.h" | |
22 | #include "symtab.h" | |
23 | #include "gdbtypes.h" | |
24 | #include "value.h" | |
25 | #include "frame.h" | |
26 | #include "inferior.h" | |
27 | #include "gdbcore.h" | |
28 | #include "target.h" | |
29 | #include "demangle.h" | |
30 | #include "language.h" | |
31 | #include "cli/cli-cmds.h" | |
32 | #include "regcache.h" | |
33 | #include "cp-abi.h" | |
34 | #include "block.h" | |
35 | #include "infcall.h" | |
36 | #include "dictionary.h" | |
37 | #include "cp-support.h" | |
38 | #include "target-float.h" | |
39 | #include "tracepoint.h" | |
40 | #include "observable.h" | |
41 | #include "objfiles.h" | |
42 | #include "extension.h" | |
43 | #include "gdbtypes.h" | |
44 | #include "gdbsupport/byte-vector.h" | |
45 | #include "typeprint.h" | |
46 | ||
/* Local functions.  */

/* Forward declarations.  The full contracts for these helpers are
   documented at their definitions, later in this file.  */

/* Compare a formal parameter list against actual argument values
   (helper for method/overload matching; see definition).  */
static int typecmp (bool staticp, bool varargs, int nargs,
		    struct field t1[], const gdb::array_view<value *> t2);

/* Search a struct/union value for a named field (see definition).  */
static struct value *search_struct_field (const char *, struct value *,
					  struct type *, int);

/* Search a struct/union type for a named method (see definition).  */
static struct value *search_struct_method (const char *, struct value **,
					   std::optional<gdb::array_view<value *>>,
					   LONGEST, int *, struct type *);

/* Overload-resolution helpers; they select the best ("champion")
   candidate for a set of arguments (see definitions).  */
static int find_oload_champ_namespace (gdb::array_view<value *> args,
				       const char *, const char *,
				       std::vector<symbol *> *oload_syms,
				       badness_vector *,
				       const int no_adl);

static int find_oload_champ_namespace_loop (gdb::array_view<value *> args,
					    const char *, const char *,
					    int, std::vector<symbol *> *oload_syms,
					    badness_vector *, int *,
					    const int no_adl);

static int find_oload_champ (gdb::array_view<value *> args,
			     size_t num_fns,
			     fn_field *methods,
			     xmethod_worker_up *xmethods,
			     symbol **functions,
			     badness_vector *oload_champ_bv);

static int oload_method_static_p (struct fn_field *, int);

/* How well an overload candidate matched the arguments; used by
   classify_oload_match (see definition).  */
enum oload_classification { STANDARD, NON_STANDARD, INCOMPATIBLE };

static enum oload_classification classify_oload_match
  (const badness_vector &, int, int);

static struct value *value_struct_elt_for_reference (struct type *,
						     int, struct type *,
						     const char *,
						     struct type *,
						     int, enum noside);

static struct value *value_namespace_elt (const struct type *,
					  const char *, int , enum noside);

static struct value *value_maybe_namespace_elt (const struct type *,
						const char *, int,
						enum noside);

static CORE_ADDR allocate_space_in_inferior (int);

static struct value *cast_into_complex (struct type *, struct value *);
102 | bool overload_resolution = false; | |
103 | static void | |
104 | show_overload_resolution (struct ui_file *file, int from_tty, | |
105 | struct cmd_list_element *c, | |
106 | const char *value) | |
107 | { | |
108 | gdb_printf (file, _("Overload resolution in evaluating " | |
109 | "C++ functions is %s.\n"), | |
110 | value); | |
111 | } | |
112 | ||
113 | /* Find the address of function name NAME in the inferior. If OBJF_P | |
114 | is non-NULL, *OBJF_P will be set to the OBJFILE where the function | |
115 | is defined. */ | |
116 | ||
117 | struct value * | |
118 | find_function_in_inferior (const char *name, struct objfile **objf_p) | |
119 | { | |
120 | struct block_symbol sym; | |
121 | ||
122 | sym = lookup_symbol (name, nullptr, SEARCH_TYPE_DOMAIN, nullptr); | |
123 | if (sym.symbol != NULL) | |
124 | { | |
125 | if (objf_p) | |
126 | *objf_p = sym.symbol->objfile (); | |
127 | ||
128 | return value_of_variable (sym.symbol, sym.block); | |
129 | } | |
130 | else | |
131 | { | |
132 | struct bound_minimal_symbol msymbol = | |
133 | lookup_bound_minimal_symbol (name); | |
134 | ||
135 | if (msymbol.minsym != NULL) | |
136 | { | |
137 | struct objfile *objfile = msymbol.objfile; | |
138 | struct gdbarch *gdbarch = objfile->arch (); | |
139 | ||
140 | struct type *type; | |
141 | CORE_ADDR maddr; | |
142 | type = lookup_pointer_type (builtin_type (gdbarch)->builtin_char); | |
143 | type = lookup_function_type (type); | |
144 | type = lookup_pointer_type (type); | |
145 | maddr = msymbol.value_address (); | |
146 | ||
147 | if (objf_p) | |
148 | *objf_p = objfile; | |
149 | ||
150 | return value_from_pointer (type, maddr); | |
151 | } | |
152 | else | |
153 | { | |
154 | if (!target_has_execution ()) | |
155 | error (_("evaluation of this expression " | |
156 | "requires the target program to be active")); | |
157 | else | |
158 | error (_("evaluation of this expression requires the " | |
159 | "program to have a function \"%s\"."), | |
160 | name); | |
161 | } | |
162 | } | |
163 | } | |
164 | ||
165 | /* Allocate NBYTES of space in the inferior using the inferior's | |
166 | malloc and return a value that is a pointer to the allocated | |
167 | space. */ | |
168 | ||
169 | struct value * | |
170 | value_allocate_space_in_inferior (int len) | |
171 | { | |
172 | struct objfile *objf; | |
173 | struct value *val = find_function_in_inferior ("malloc", &objf); | |
174 | struct gdbarch *gdbarch = objf->arch (); | |
175 | struct value *blocklen; | |
176 | ||
177 | blocklen = value_from_longest (builtin_type (gdbarch)->builtin_int, len); | |
178 | val = call_function_by_hand (val, NULL, blocklen); | |
179 | if (value_logical_not (val)) | |
180 | { | |
181 | if (!target_has_execution ()) | |
182 | error (_("No memory available to program now: " | |
183 | "you need to start the target first")); | |
184 | else | |
185 | error (_("No memory available to program: call to malloc failed")); | |
186 | } | |
187 | return val; | |
188 | } | |
189 | ||
190 | static CORE_ADDR | |
191 | allocate_space_in_inferior (int len) | |
192 | { | |
193 | return value_as_long (value_allocate_space_in_inferior (len)); | |
194 | } | |
195 | ||
/* Cast struct value VAL to type TYPE and return as a value.
   Both type and val must be of TYPE_CODE_STRUCT or TYPE_CODE_UNION
   for this to work.  Typedef to one of the codes is permitted.
   Returns NULL if the cast is neither an upcast nor a downcast.  */

static struct value *
value_cast_structs (struct type *type, struct value *v2)
{
  struct type *t1;
  struct type *t2;
  struct value *v;

  gdb_assert (type != NULL && v2 != NULL);

  t1 = check_typedef (type);
  t2 = check_typedef (v2->type ());

  /* Check preconditions.  */
  gdb_assert ((t1->code () == TYPE_CODE_STRUCT
	       || t1->code () == TYPE_CODE_UNION)
	      && !!"Precondition is that type is of STRUCT or UNION kind.");
  gdb_assert ((t2->code () == TYPE_CODE_STRUCT
	       || t2->code () == TYPE_CODE_UNION)
	      && !!"Precondition is that value is of STRUCT or UNION kind");

  /* If both types carry the same name, no up/down adjustment is
     needed; report "neither upcast nor downcast" to the caller.  */
  if (t1->name () != NULL
      && t2->name () != NULL
      && !strcmp (t1->name (), t2->name ()))
    return NULL;

  /* Upcasting: look in the type of the source to see if it contains the
     type of the target as a superclass.  If so, we'll need to
     offset the pointer rather than just change its type.  */
  if (t1->name () != NULL)
    {
      v = search_struct_field (t1->name (),
			       v2, t2, 1);
      if (v)
	return v;
    }

  /* Downcasting: look in the type of the target to see if it contains the
     type of the source as a superclass.  If so, we'll need to
     offset the pointer rather than just change its type.  */
  if (t2->name () != NULL)
    {
      /* Try downcasting using the run-time type of the value.  */
      int full, using_enc;
      LONGEST top;
      struct type *real_type;

      real_type = value_rtti_type (v2, &full, &top, &using_enc);
      if (real_type)
	{
	  /* Re-fetch the value as its full run-time type before
	     searching it for the T2 subobject.  */
	  v = value_full_object (v2, real_type, full, top, using_enc);
	  v = value_at_lazy (real_type, v->address ());
	  real_type = v->type ();

	  /* We might be trying to cast to the outermost enclosing
	     type, in which case search_struct_field won't work.  */
	  if (real_type->name () != NULL
	      && !strcmp (real_type->name (), t1->name ()))
	    return v;

	  v = search_struct_field (t2->name (), v, real_type, 1);
	  if (v)
	    return v;
	}

      /* Try downcasting using information from the destination type
	 T2.  This wouldn't work properly for classes with virtual
	 bases, but those were handled above.  */
      v = search_struct_field (t2->name (),
			       value::zero (t1, not_lval), t1, 1);
      if (v)
	{
	  /* Downcasting is possible (t1 is superclass of v2).  V is
	     the T2 subobject inside a dummy T1 at address zero, so
	     its "address" is the offset of T2 within T1; subtract it
	     from V2's location to find the enclosing T1 object.  */
	  CORE_ADDR addr2 = v2->address () + v2->embedded_offset ();

	  addr2 -= v->address () + v->embedded_offset ();
	  return value_at (type, addr2);
	}
    }

  return NULL;
}
282 | ||
283 | /* Cast one pointer or reference type to another. Both TYPE and | |
284 | the type of ARG2 should be pointer types, or else both should be | |
285 | reference types. If SUBCLASS_CHECK is non-zero, this will force a | |
286 | check to see whether TYPE is a superclass of ARG2's type. If | |
287 | SUBCLASS_CHECK is zero, then the subclass check is done only when | |
288 | ARG2 is itself non-zero. Returns the new pointer or reference. */ | |
289 | ||
290 | struct value * | |
291 | value_cast_pointers (struct type *type, struct value *arg2, | |
292 | int subclass_check) | |
293 | { | |
294 | struct type *type1 = check_typedef (type); | |
295 | struct type *type2 = check_typedef (arg2->type ()); | |
296 | struct type *t1 = check_typedef (type1->target_type ()); | |
297 | struct type *t2 = check_typedef (type2->target_type ()); | |
298 | ||
299 | if (t1->code () == TYPE_CODE_STRUCT | |
300 | && t2->code () == TYPE_CODE_STRUCT | |
301 | && (subclass_check || !value_logical_not (arg2))) | |
302 | { | |
303 | struct value *v2; | |
304 | ||
305 | if (TYPE_IS_REFERENCE (type2)) | |
306 | v2 = coerce_ref (arg2); | |
307 | else | |
308 | v2 = value_ind (arg2); | |
309 | gdb_assert (check_typedef (v2->type ())->code () | |
310 | == TYPE_CODE_STRUCT && !!"Why did coercion fail?"); | |
311 | v2 = value_cast_structs (t1, v2); | |
312 | /* At this point we have what we can have, un-dereference if needed. */ | |
313 | if (v2) | |
314 | { | |
315 | struct value *v = value_addr (v2); | |
316 | ||
317 | v->deprecated_set_type (type); | |
318 | return v; | |
319 | } | |
320 | } | |
321 | ||
322 | /* No superclass found, just change the pointer type. */ | |
323 | arg2 = arg2->copy (); | |
324 | arg2->deprecated_set_type (type); | |
325 | arg2->set_enclosing_type (type); | |
326 | arg2->set_pointed_to_offset (0); /* pai: chk_val */ | |
327 | return arg2; | |
328 | } | |
329 | ||
330 | /* See value.h. */ | |
331 | ||
332 | gdb_mpq | |
333 | value_to_gdb_mpq (struct value *value) | |
334 | { | |
335 | struct type *type = check_typedef (value->type ()); | |
336 | ||
337 | gdb_mpq result; | |
338 | if (is_floating_type (type)) | |
339 | result = target_float_to_host_double (value->contents ().data (), type); | |
340 | else | |
341 | { | |
342 | gdb_assert (is_integral_type (type) | |
343 | || is_fixed_point_type (type)); | |
344 | ||
345 | gdb_mpz vz; | |
346 | vz.read (value->contents (), type_byte_order (type), | |
347 | type->is_unsigned ()); | |
348 | result = vz; | |
349 | ||
350 | if (is_fixed_point_type (type)) | |
351 | result *= type->fixed_point_scaling_factor (); | |
352 | } | |
353 | ||
354 | return result; | |
355 | } | |
356 | ||
357 | /* Assuming that TO_TYPE is a fixed point type, return a value | |
358 | corresponding to the cast of FROM_VAL to that type. */ | |
359 | ||
360 | static struct value * | |
361 | value_cast_to_fixed_point (struct type *to_type, struct value *from_val) | |
362 | { | |
363 | struct type *from_type = from_val->type (); | |
364 | ||
365 | if (from_type == to_type) | |
366 | return from_val; | |
367 | ||
368 | if (!is_floating_type (from_type) | |
369 | && !is_integral_type (from_type) | |
370 | && !is_fixed_point_type (from_type)) | |
371 | error (_("Invalid conversion from type %s to fixed point type %s"), | |
372 | from_type->name (), to_type->name ()); | |
373 | ||
374 | gdb_mpq vq = value_to_gdb_mpq (from_val); | |
375 | ||
376 | /* Divide that value by the scaling factor to obtain the unscaled | |
377 | value, first in rational form, and then in integer form. */ | |
378 | ||
379 | vq /= to_type->fixed_point_scaling_factor (); | |
380 | gdb_mpz unscaled = vq.get_rounded (); | |
381 | ||
382 | /* Finally, create the result value, and pack the unscaled value | |
383 | in it. */ | |
384 | struct value *result = value::allocate (to_type); | |
385 | unscaled.write (result->contents_raw (), | |
386 | type_byte_order (to_type), | |
387 | to_type->is_unsigned ()); | |
388 | ||
389 | return result; | |
390 | } | |
391 | ||
/* Cast value ARG2 to type TYPE and return as a value.
   More general than a C cast: accepts any two types of the same length,
   and if ARG2 is an lvalue it can be cast into anything at all.  */
/* In C++, casts may change pointer or object representations.  */

struct value *
value_cast (struct type *type, struct value *arg2)
{
  enum type_code code1;
  enum type_code code2;
  int scalar;
  struct type *type2;

  /* Set when casting to a boolean type; the scalar result is then
     normalized to 0/1 before packing.  */
  int convert_to_boolean = 0;

  /* TYPE might be equal in meaning to the existing type of ARG2, but for
     many reasons, might be a different type object (e.g. TYPE might be a
     gdbarch owned type, while ARG2->type () could be an objfile owned
     type).

     In this case we want to preserve the LVAL of ARG2 as this allows the
     resulting value to be used in more places.  We do this by calling
     VALUE_COPY if appropriate.  */
  if (types_deeply_equal (make_unqualified_type (arg2->type ()),
			  make_unqualified_type (type)))
    {
      /* If the types are exactly equal then we can avoid creating a new
	 value completely.  */
      if (arg2->type () != type)
	{
	  arg2 = arg2->copy ();
	  arg2->deprecated_set_type (type);
	}
      return arg2;
    }

  if (is_fixed_point_type (type))
    return value_cast_to_fixed_point (type, arg2);

  /* Check if we are casting struct reference to struct reference.  */
  if (TYPE_IS_REFERENCE (check_typedef (type)))
    {
      /* We dereference type; then we recurse and finally
	 we generate value of the given reference.  Nothing wrong with
	 that.  */
      struct type *t1 = check_typedef (type);
      struct type *dereftype = check_typedef (t1->target_type ());
      struct value *val = value_cast (dereftype, arg2);

      return value_ref (val, t1->code ());
    }

  if (TYPE_IS_REFERENCE (check_typedef (arg2->type ())))
    /* We deref the value and then do the cast.  */
    return value_cast (type, coerce_ref (arg2));

  /* Strip typedefs / resolve stubs in order to get at the type's
     code/length, but remember the original type, to use as the
     resulting type of the cast, in case it was a typedef.  */
  struct type *to_type = type;

  type = check_typedef (type);
  code1 = type->code ();
  arg2 = coerce_ref (arg2);
  type2 = check_typedef (arg2->type ());

  /* You can't cast to a reference type.  See value_cast_pointers
     instead.  */
  gdb_assert (!TYPE_IS_REFERENCE (type));

  /* A cast to an undetermined-length array_type, such as
     (TYPE [])OBJECT, is treated like a cast to (TYPE [N])OBJECT,
     where N is sizeof(OBJECT)/sizeof(TYPE).  */
  if (code1 == TYPE_CODE_ARRAY)
    {
      struct type *element_type = type->target_type ();
      unsigned element_length = check_typedef (element_type)->length ();

      if (element_length > 0 && type->bounds ()->high.kind () == PROP_UNDEFINED)
	{
	  struct type *range_type = type->index_type ();
	  int val_length = type2->length ();
	  LONGEST low_bound, high_bound, new_length;

	  if (!get_discrete_bounds (range_type, &low_bound, &high_bound))
	    low_bound = 0, high_bound = 0;
	  new_length = val_length / element_length;
	  if (val_length % element_length != 0)
	    warning (_("array element type size does not "
		       "divide object size in cast"));
	  /* FIXME-type-allocation: need a way to free this type when
	     we are done with it.  */
	  type_allocator alloc (range_type->target_type ());
	  range_type = create_static_range_type (alloc,
						 range_type->target_type (),
						 low_bound,
						 new_length + low_bound - 1);
	  /* Re-type ARG2 in place as the sized array.  */
	  arg2->deprecated_set_type (create_array_type (alloc,
							element_type,
							range_type));
	  return arg2;
	}
    }

  /* Decay arrays and functions to pointers, as in the source
     language, before classifying the source type.  */
  if (current_language->c_style_arrays_p ()
      && type2->code () == TYPE_CODE_ARRAY
      && !type2->is_vector ())
    arg2 = value_coerce_array (arg2);

  if (type2->code () == TYPE_CODE_FUNC)
    arg2 = value_coerce_function (arg2);

  type2 = check_typedef (arg2->type ());
  code2 = type2->code ();

  if (code1 == TYPE_CODE_COMPLEX)
    return cast_into_complex (to_type, arg2);
  if (code1 == TYPE_CODE_BOOL)
    {
      code1 = TYPE_CODE_INT;
      convert_to_boolean = 1;
    }
  if (code1 == TYPE_CODE_CHAR)
    code1 = TYPE_CODE_INT;
  if (code2 == TYPE_CODE_BOOL || code2 == TYPE_CODE_CHAR)
    code2 = TYPE_CODE_INT;

  /* Is the source a simple scalar?  */
  scalar = (code2 == TYPE_CODE_INT || code2 == TYPE_CODE_FLT
	    || code2 == TYPE_CODE_DECFLOAT || code2 == TYPE_CODE_ENUM
	    || code2 == TYPE_CODE_RANGE
	    || is_fixed_point_type (type2));

  /* Struct-to-struct: try an up/downcast adjustment first.  */
  if ((code1 == TYPE_CODE_STRUCT || code1 == TYPE_CODE_UNION)
      && (code2 == TYPE_CODE_STRUCT || code2 == TYPE_CODE_UNION)
      && type->name () != 0)
    {
      struct value *v = value_cast_structs (to_type, arg2);

      if (v)
	return v;
    }

  if (is_floating_type (type) && scalar)
    {
      if (is_floating_value (arg2))
	{
	  struct value *v = value::allocate (to_type);
	  target_float_convert (arg2->contents ().data (), type2,
				v->contents_raw ().data (), type);
	  return v;
	}
      else if (is_fixed_point_type (type2))
	{
	  gdb_mpq fp_val;

	  fp_val.read_fixed_point (arg2->contents (),
				   type_byte_order (type2),
				   type2->is_unsigned (),
				   type2->fixed_point_scaling_factor ());

	  struct value *v = value::allocate (to_type);
	  target_float_from_host_double (v->contents_raw ().data (),
					 to_type, fp_val.as_double ());
	  return v;
	}

      /* The only option left is an integral type.  */
      if (type2->is_unsigned ())
	return value_from_ulongest (to_type, value_as_long (arg2));
      else
	return value_from_longest (to_type, value_as_long (arg2));
    }
  else if ((code1 == TYPE_CODE_INT || code1 == TYPE_CODE_ENUM
	    || code1 == TYPE_CODE_RANGE)
	   && (scalar || code2 == TYPE_CODE_PTR
	       || code2 == TYPE_CODE_MEMBERPTR))
    {
      gdb_mpz longest;

      /* When we cast pointers to integers, we mustn't use
	 gdbarch_pointer_to_address to find the address the pointer
	 represents, as value_as_long would.  GDB should evaluate
	 expressions just as the compiler would --- and the compiler
	 sees a cast as a simple reinterpretation of the pointer's
	 bits.  */
      if (code2 == TYPE_CODE_PTR)
	longest = extract_unsigned_integer (arg2->contents (),
					    type_byte_order (type2));
      else
	longest = value_as_mpz (arg2);
      if (convert_to_boolean)
	longest = bool (longest);

      return value_from_mpz (to_type, longest);
    }
  else if (code1 == TYPE_CODE_PTR && (code2 == TYPE_CODE_INT
				      || code2 == TYPE_CODE_ENUM
				      || code2 == TYPE_CODE_RANGE))
    {
      /* type->length () is the length of a pointer, but we really
	 want the length of an address! -- we are really dealing with
	 addresses (i.e., gdb representations) not pointers (i.e.,
	 target representations) here.

	 This allows things like "print *(int *)0x01000234" to work
	 without printing a misleading message -- which would
	 otherwise occur when dealing with a target having two byte
	 pointers and four byte addresses.  */

      int addr_bit = gdbarch_addr_bit (type2->arch ());
      gdb_mpz longest = value_as_mpz (arg2);

      /* Warn if the value does not fit in an address.  */
      gdb_mpz addr_val = gdb_mpz (1) << addr_bit;
      if (longest >= addr_val || longest <= -addr_val)
	warning (_("value truncated"));

      return value_from_mpz (to_type, longest);
    }
  else if (code1 == TYPE_CODE_METHODPTR && code2 == TYPE_CODE_INT
	   && value_as_long (arg2) == 0)
    {
      /* Build a null pointer-to-method in the ABI's representation.  */
      struct value *result = value::allocate (to_type);

      cplus_make_method_ptr (to_type,
			     result->contents_writeable ().data (), 0, 0);
      return result;
    }
  else if (code1 == TYPE_CODE_MEMBERPTR && code2 == TYPE_CODE_INT
	   && value_as_long (arg2) == 0)
    {
      /* The Itanium C++ ABI represents NULL pointers to members as
	 minus one, instead of biasing the normal case.  */
      return value_from_longest (to_type, -1);
    }
  else if (code1 == TYPE_CODE_ARRAY && type->is_vector ()
	   && code2 == TYPE_CODE_ARRAY && type2->is_vector ()
	   && type->length () != type2->length ())
    error (_("Cannot convert between vector values of different sizes"));
  else if (code1 == TYPE_CODE_ARRAY && type->is_vector () && scalar
	   && type->length () != type2->length ())
    error (_("can only cast scalar to vector of same size"));
  else if (code1 == TYPE_CODE_VOID)
    {
      return value::zero (to_type, not_lval);
    }
  else if (type->length () == type2->length ())
    {
      /* Same-size reinterpretation; pointers get the full
	 pointer-cast treatment.  */
      if (code1 == TYPE_CODE_PTR && code2 == TYPE_CODE_PTR)
	return value_cast_pointers (to_type, arg2, 0);

      arg2 = arg2->copy ();
      arg2->deprecated_set_type (to_type);
      arg2->set_enclosing_type (to_type);
      arg2->set_pointed_to_offset (0);	/* pai: chk_val */
      return arg2;
    }
  else if (arg2->lval () == lval_memory)
    /* Different sizes, but the source lives in memory: re-read it
       with the new type.  */
    return value_at_lazy (to_type, arg2->address ());
  else
    {
      if (current_language->la_language == language_ada)
	error (_("Invalid type conversion."));
      error (_("Invalid cast."));
    }
}
657 | ||
658 | /* The C++ reinterpret_cast operator. */ | |
659 | ||
660 | struct value * | |
661 | value_reinterpret_cast (struct type *type, struct value *arg) | |
662 | { | |
663 | struct value *result; | |
664 | struct type *real_type = check_typedef (type); | |
665 | struct type *arg_type, *dest_type; | |
666 | int is_ref = 0; | |
667 | enum type_code dest_code, arg_code; | |
668 | ||
669 | /* Do reference, function, and array conversion. */ | |
670 | arg = coerce_array (arg); | |
671 | ||
672 | /* Attempt to preserve the type the user asked for. */ | |
673 | dest_type = type; | |
674 | ||
675 | /* If we are casting to a reference type, transform | |
676 | reinterpret_cast<T&[&]>(V) to *reinterpret_cast<T*>(&V). */ | |
677 | if (TYPE_IS_REFERENCE (real_type)) | |
678 | { | |
679 | is_ref = 1; | |
680 | arg = value_addr (arg); | |
681 | dest_type = lookup_pointer_type (dest_type->target_type ()); | |
682 | real_type = lookup_pointer_type (real_type); | |
683 | } | |
684 | ||
685 | arg_type = arg->type (); | |
686 | ||
687 | dest_code = real_type->code (); | |
688 | arg_code = arg_type->code (); | |
689 | ||
690 | /* We can convert pointer types, or any pointer type to int, or int | |
691 | type to pointer. */ | |
692 | if ((dest_code == TYPE_CODE_PTR && arg_code == TYPE_CODE_INT) | |
693 | || (dest_code == TYPE_CODE_INT && arg_code == TYPE_CODE_PTR) | |
694 | || (dest_code == TYPE_CODE_METHODPTR && arg_code == TYPE_CODE_INT) | |
695 | || (dest_code == TYPE_CODE_INT && arg_code == TYPE_CODE_METHODPTR) | |
696 | || (dest_code == TYPE_CODE_MEMBERPTR && arg_code == TYPE_CODE_INT) | |
697 | || (dest_code == TYPE_CODE_INT && arg_code == TYPE_CODE_MEMBERPTR) | |
698 | || (dest_code == arg_code | |
699 | && (dest_code == TYPE_CODE_METHODPTR | |
700 | || dest_code == TYPE_CODE_MEMBERPTR))) | |
701 | result = value_cast (dest_type, arg); | |
702 | else if (dest_code == TYPE_CODE_PTR && arg_code == TYPE_CODE_PTR) | |
703 | { | |
704 | /* Don't do any up- or downcasting. */ | |
705 | result = arg->copy (); | |
706 | result->deprecated_set_type (dest_type); | |
707 | result->set_enclosing_type (dest_type); | |
708 | result->set_pointed_to_offset (0); | |
709 | } | |
710 | else | |
711 | error (_("Invalid reinterpret_cast")); | |
712 | ||
713 | if (is_ref) | |
714 | result = value_cast (type, value_ref (value_ind (result), | |
715 | type->code ())); | |
716 | ||
717 | return result; | |
718 | } | |
719 | ||
/* A helper for value_dynamic_cast.  This implements the first of two
   runtime checks: we iterate over all the base classes of the value's
   class which are equal to the desired class; if only one of these
   holds the value, then it is the answer.

   DESIRED_TYPE is the target class of the cast; VALADDR,
   EMBEDDED_OFFSET, ADDRESS and VAL describe the object whose bases
   are scanned; SEARCH_TYPE is the class currently being examined;
   ARG_ADDR and ARG_TYPE give the location and declared type of the
   original cast argument.  On success *RESULT holds the chosen
   subobject.  Returns the number of candidate subobjects found; the
   cast is unambiguous only when this is exactly 1.  */

static int
dynamic_cast_check_1 (struct type *desired_type,
		      const gdb_byte *valaddr,
		      LONGEST embedded_offset,
		      CORE_ADDR address,
		      struct value *val,
		      struct type *search_type,
		      CORE_ADDR arg_addr,
		      struct type *arg_type,
		      struct value **result)
{
  int i, result_count = 0;

  /* Stop early once two candidates are seen -- the cast is already
     ambiguous at that point.  */
  for (i = 0; i < TYPE_N_BASECLASSES (search_type) && result_count < 2; ++i)
    {
      LONGEST offset = baseclass_offset (search_type, i, valaddr,
					 embedded_offset,
					 address, val);

      if (class_types_same_p (desired_type, TYPE_BASECLASS (search_type, i)))
	{
	  /* Only count this base if its storage actually lies within
	     the original argument object.  */
	  if (address + embedded_offset + offset >= arg_addr
	      && address + embedded_offset + offset < arg_addr + arg_type->length ())
	    {
	      ++result_count;
	      if (!*result)
		*result = value_at_lazy (TYPE_BASECLASS (search_type, i),
					 address + embedded_offset + offset);
	    }
	}
      else
	/* Not the desired class; recurse into this base's own bases.  */
	result_count += dynamic_cast_check_1 (desired_type,
					      valaddr,
					      embedded_offset + offset,
					      address, val,
					      TYPE_BASECLASS (search_type, i),
					      arg_addr,
					      arg_type,
					      result);
    }

  return result_count;
}
768 | ||
/* A helper for value_dynamic_cast.  This implements the second of two
   runtime checks: we look for a unique public sibling class of the
   argument's declared class.

   Parameters are as for dynamic_cast_check_1, minus the ARG_ADDR /
   ARG_TYPE containment check.  On success *RESULT holds the chosen
   subobject; the return value is the number of candidates found
   (unambiguous only when 1).  */

static int
dynamic_cast_check_2 (struct type *desired_type,
		      const gdb_byte *valaddr,
		      LONGEST embedded_offset,
		      CORE_ADDR address,
		      struct value *val,
		      struct type *search_type,
		      struct value **result)
{
  int i, result_count = 0;

  /* Stop early once two candidates are seen -- already ambiguous.  */
  for (i = 0; i < TYPE_N_BASECLASSES (search_type) && result_count < 2; ++i)
    {
      LONGEST offset;

      /* Only public bases participate in this check.  */
      if (! BASETYPE_VIA_PUBLIC (search_type, i))
	continue;

      offset = baseclass_offset (search_type, i, valaddr, embedded_offset,
				 address, val);
      if (class_types_same_p (desired_type, TYPE_BASECLASS (search_type, i)))
	{
	  ++result_count;
	  if (*result == NULL)
	    *result = value_at_lazy (TYPE_BASECLASS (search_type, i),
				     address + embedded_offset + offset);
	}
      else
	result_count += dynamic_cast_check_2 (desired_type,
					      valaddr,
					      embedded_offset + offset,
					      address, val,
					      TYPE_BASECLASS (search_type, i),
					      result);
    }

  return result_count;
}
811 | ||
/* The C++ dynamic_cast operator.  Cast ARG to TYPE, which must be a
   pointer or reference to class (or pointer to void), following the
   object's runtime (RTTI) type per C++ [expr.dynamic.cast] (5.2.7).
   A failed pointer cast yields a zero value; a failed reference cast
   errors out.  */

struct value *
value_dynamic_cast (struct type *type, struct value *arg)
{
  int full, using_enc;
  LONGEST top;
  struct type *resolved_type = check_typedef (type);
  struct type *arg_type = check_typedef (arg->type ());
  struct type *class_type, *rtti_type;
  struct value *result, *tem, *original_arg = arg;
  CORE_ADDR addr;
  int is_ref = TYPE_IS_REFERENCE (resolved_type);

  /* Validate the destination type: must be pointer-to-class,
     reference-to-class, or pointer-to-void.  */
  if (resolved_type->code () != TYPE_CODE_PTR
      && !TYPE_IS_REFERENCE (resolved_type))
    error (_("Argument to dynamic_cast must be a pointer or reference type"));
  if (resolved_type->target_type ()->code () != TYPE_CODE_VOID
      && resolved_type->target_type ()->code () != TYPE_CODE_STRUCT)
    error (_("Argument to dynamic_cast must be pointer to class or `void *'"));

  class_type = check_typedef (resolved_type->target_type ());
  if (resolved_type->code () == TYPE_CODE_PTR)
    {
      /* For a pointer cast the source must be a pointer-to-class, or
	 a literal zero (null pointer constant).  */
      if (arg_type->code () != TYPE_CODE_PTR
	  && ! (arg_type->code () == TYPE_CODE_INT
		&& value_as_long (arg) == 0))
	error (_("Argument to dynamic_cast does not have pointer type"));
      if (arg_type->code () == TYPE_CODE_PTR)
	{
	  arg_type = check_typedef (arg_type->target_type ());
	  if (arg_type->code () != TYPE_CODE_STRUCT)
	    error (_("Argument to dynamic_cast does "
		     "not have pointer to class type"));
	}

      /* Handle NULL pointers.  */
      if (value_as_long (arg) == 0)
	return value::zero (type, not_lval);

      /* Work on the pointed-to object from here on.  */
      arg = value_ind (arg);
    }
  else
    {
      if (arg_type->code () != TYPE_CODE_STRUCT)
	error (_("Argument to dynamic_cast does not have class type"));
    }

  /* If the classes are the same, just return the argument.  */
  if (class_types_same_p (class_type, arg_type))
    return value_cast (type, original_arg);

  /* If the target type is a unique base class of the argument's
     declared type, just cast it.  */
  if (is_ancestor (class_type, arg_type))
    {
      if (is_unique_ancestor (class_type, arg))
	return value_cast (type, original_arg);
      error (_("Ambiguous dynamic_cast"));
    }

  /* Downcasts and cross-casts need the dynamic type of the object.  */
  rtti_type = value_rtti_type (arg, &full, &top, &using_enc);
  if (! rtti_type)
    error (_("Couldn't determine value's most derived type for dynamic_cast"));

  /* Compute the most derived object's address.  */
  addr = arg->address ();
  if (full)
    {
      /* Done.  */
    }
  else if (using_enc)
    addr += top;
  else
    addr += top + arg->embedded_offset ();

  /* dynamic_cast<void *> means to return a pointer to the
     most-derived object.  */
  if (resolved_type->code () == TYPE_CODE_PTR
      && resolved_type->target_type ()->code () == TYPE_CODE_VOID)
    return value_at_lazy (type, addr);

  /* Fetch the most-derived object and rebuild the result type around
     its (possibly dynamic) type.  */
  tem = value_at (resolved_type->target_type (), addr);
  type = (is_ref
	  ? lookup_reference_type (tem->type (), resolved_type->code ())
	  : lookup_pointer_type (tem->type ()));

  /* The first dynamic check specified in 5.2.7.  */
  if (is_public_ancestor (arg_type, resolved_type->target_type ()))
    {
      if (class_types_same_p (rtti_type, resolved_type->target_type ()))
	return (is_ref
		? value_ref (tem, resolved_type->code ())
		: value_addr (tem));
      result = NULL;
      if (dynamic_cast_check_1 (resolved_type->target_type (),
				tem->contents_for_printing ().data (),
				tem->embedded_offset (),
				tem->address (), tem,
				rtti_type, addr,
				arg_type,
				&result) == 1)
	return value_cast (type,
			   is_ref
			   ? value_ref (result, resolved_type->code ())
			   : value_addr (result));
    }

  /* The second dynamic check specified in 5.2.7.  */
  result = NULL;
  if (is_public_ancestor (arg_type, rtti_type)
      && dynamic_cast_check_2 (resolved_type->target_type (),
			       tem->contents_for_printing ().data (),
			       tem->embedded_offset (),
			       tem->address (), tem,
			       rtti_type, &result) == 1)
    return value_cast (type,
		       is_ref
		       ? value_ref (result, resolved_type->code ())
		       : value_addr (result));

  /* Failure: a pointer cast yields a null pointer, a reference cast
     is an error (the inferior would have thrown std::bad_cast).  */
  if (resolved_type->code () == TYPE_CODE_PTR)
    return value::zero (type, not_lval);

  error (_("dynamic_cast failed"));
}
938 | ||
939 | /* Create a not_lval value of numeric type TYPE that is one, and return it. */ | |
940 | ||
941 | struct value * | |
942 | value_one (struct type *type) | |
943 | { | |
944 | struct type *type1 = check_typedef (type); | |
945 | struct value *val; | |
946 | ||
947 | if (is_integral_type (type1) || is_floating_type (type1)) | |
948 | { | |
949 | val = value_from_longest (type, (LONGEST) 1); | |
950 | } | |
951 | else if (type1->code () == TYPE_CODE_ARRAY && type1->is_vector ()) | |
952 | { | |
953 | struct type *eltype = check_typedef (type1->target_type ()); | |
954 | int i; | |
955 | LONGEST low_bound, high_bound; | |
956 | ||
957 | if (!get_array_bounds (type1, &low_bound, &high_bound)) | |
958 | error (_("Could not determine the vector bounds")); | |
959 | ||
960 | val = value::allocate (type); | |
961 | gdb::array_view<gdb_byte> val_contents = val->contents_writeable (); | |
962 | int elt_len = eltype->length (); | |
963 | ||
964 | for (i = 0; i < high_bound - low_bound + 1; i++) | |
965 | { | |
966 | value *tmp = value_one (eltype); | |
967 | copy (tmp->contents_all (), | |
968 | val_contents.slice (i * elt_len, elt_len)); | |
969 | } | |
970 | } | |
971 | else | |
972 | { | |
973 | error (_("Not a numeric type.")); | |
974 | } | |
975 | ||
976 | /* value_one result is never used for assignments to. */ | |
977 | gdb_assert (val->lval () == not_lval); | |
978 | ||
979 | return val; | |
980 | } | |
981 | ||
982 | /* Helper function for value_at, value_at_lazy, and value_at_lazy_stack. | |
983 | The type of the created value may differ from the passed type TYPE. | |
984 | Make sure to retrieve the returned values's new type after this call | |
985 | e.g. in case the type is a variable length array. */ | |
986 | ||
987 | static struct value * | |
988 | get_value_at (struct type *type, CORE_ADDR addr, const frame_info_ptr &frame, | |
989 | int lazy) | |
990 | { | |
991 | struct value *val; | |
992 | ||
993 | if (check_typedef (type)->code () == TYPE_CODE_VOID) | |
994 | error (_("Attempt to dereference a generic pointer.")); | |
995 | ||
996 | val = value_from_contents_and_address (type, NULL, addr, frame); | |
997 | ||
998 | if (!lazy) | |
999 | val->fetch_lazy (); | |
1000 | ||
1001 | return val; | |
1002 | } | |
1003 | ||
1004 | /* Return a value with type TYPE located at ADDR. | |
1005 | ||
1006 | Call value_at only if the data needs to be fetched immediately; | |
1007 | if we can be 'lazy' and defer the fetch, perhaps indefinitely, call | |
1008 | value_at_lazy instead. value_at_lazy simply records the address of | |
1009 | the data and sets the lazy-evaluation-required flag. The lazy flag | |
1010 | is tested in the value_contents macro, which is used if and when | |
1011 | the contents are actually required. The type of the created value | |
1012 | may differ from the passed type TYPE. Make sure to retrieve the | |
1013 | returned values's new type after this call e.g. in case the type | |
1014 | is a variable length array. | |
1015 | ||
1016 | Note: value_at does *NOT* handle embedded offsets; perform such | |
1017 | adjustments before or after calling it. */ | |
1018 | ||
1019 | struct value * | |
1020 | value_at (struct type *type, CORE_ADDR addr) | |
1021 | { | |
1022 | return get_value_at (type, addr, nullptr, 0); | |
1023 | } | |
1024 | ||
1025 | /* See value.h. */ | |
1026 | ||
1027 | struct value * | |
1028 | value_at_non_lval (struct type *type, CORE_ADDR addr) | |
1029 | { | |
1030 | struct value *result = value_at (type, addr); | |
1031 | result->set_lval (not_lval); | |
1032 | return result; | |
1033 | } | |
1034 | ||
1035 | /* Return a lazy value with type TYPE located at ADDR (cf. value_at). | |
1036 | The type of the created value may differ from the passed type TYPE. | |
1037 | Make sure to retrieve the returned values's new type after this call | |
1038 | e.g. in case the type is a variable length array. */ | |
1039 | ||
1040 | struct value * | |
1041 | value_at_lazy (struct type *type, CORE_ADDR addr, const frame_info_ptr &frame) | |
1042 | { | |
1043 | return get_value_at (type, addr, frame, 1); | |
1044 | } | |
1045 | ||
/* Read LENGTH addressable memory units at MEMADDR into BUFFER, on
   behalf of value VAL.  BIT_OFFSET is the bit position within VAL at
   which the data being read starts; it is used to mark the correct
   bits of VAL unavailable if the target reports part of the range as
   unavailable.  When STACK is true, read via the stack-memory target
   object instead of plain memory.  */

void
read_value_memory (struct value *val, LONGEST bit_offset,
		   bool stack, CORE_ADDR memaddr,
		   gdb_byte *buffer, size_t length)
{
  ULONGEST xfered_total = 0;
  struct gdbarch *arch = val->arch ();
  int unit_size = gdbarch_addressable_memory_unit_size (arch);
  enum target_object object;

  object = stack ? TARGET_OBJECT_STACK_MEMORY : TARGET_OBJECT_MEMORY;

  /* target_xfer_partial may transfer fewer units than requested, so
     keep issuing requests until the whole range is read or an error
     is reported.  */
  while (xfered_total < length)
    {
      enum target_xfer_status status;
      ULONGEST xfered_partial;

      status = target_xfer_partial (current_inferior ()->top_target (),
				    object, NULL,
				    buffer + xfered_total * unit_size, NULL,
				    memaddr + xfered_total,
				    length - xfered_total,
				    &xfered_partial);

      if (status == TARGET_XFER_OK)
	/* nothing */;
      else if (status == TARGET_XFER_UNAVAILABLE)
	/* Part of the range cannot be retrieved; rather than erroring
	   out, record which bits of VAL are unavailable and keep
	   going.  */
	val->mark_bits_unavailable ((xfered_total * HOST_CHAR_BIT
				     + bit_offset),
				    xfered_partial * HOST_CHAR_BIT);
      else if (status == TARGET_XFER_EOF)
	memory_error (TARGET_XFER_E_IO, memaddr + xfered_total);
      else
	memory_error (status, memaddr + xfered_total);

      /* Advance past whatever was transferred (or marked unavailable)
	 in this iteration.  */
      xfered_total += xfered_partial;
      QUIT;	/* Let the user interrupt a long read.  */
    }
}
1085 | ||
/* Store the contents of FROMVAL into the location of TOVAL.
   Return a new value with the location of TOVAL and contents of FROMVAL.
   Errors out if TOVAL is not a modifiable lvalue.  The returned value
   is never lazy, even if TOVAL was.  */

struct value *
value_assign (struct value *toval, struct value *fromval)
{
  struct type *type;
  struct value *val;
  struct frame_id old_frame;

  if (!toval->deprecated_modifiable ())
    error (_("Left operand of assignment is not a modifiable lvalue."));

  toval = coerce_ref (toval);

  /* Convert the source value to the destination's type -- except for
     internal variables, which can hold a value of any type.  */
  type = toval->type ();
  if (toval->lval () != lval_internalvar)
    fromval = value_cast (type, fromval);
  else
    {
      /* Coerce arrays and functions to pointers, except for arrays
	 which only live in GDB's storage.  */
      if (!value_must_coerce_to_target (fromval))
	fromval = coerce_array (fromval);
    }

  type = check_typedef (type);

  /* Since modifying a register can trash the frame chain, and
     modifying memory can trash the frame cache, we save the old frame
     and then restore the new frame afterwards.  */
  old_frame = get_frame_id (deprecated_safe_get_selected_frame ());

  switch (toval->lval ())
    {
    case lval_internalvar:
      set_internalvar (VALUE_INTERNALVAR (toval), fromval);
      return value_of_internalvar (type->arch (),
				   VALUE_INTERNALVAR (toval));

    case lval_internalvar_component:
      {
	LONGEST offset = toval->offset ();

	/* Are we dealing with a bitfield?

	   It is important to mention that `toval->parent ()' is
	   non-NULL iff `toval->bitsize ()' is non-zero.  */
	if (toval->bitsize ())
	  {
	    /* VALUE_INTERNALVAR below refers to the parent value, while
	       the offset is relative to this parent value.  */
	    gdb_assert (toval->parent ()->parent () == NULL);
	    offset += toval->parent ()->offset ();
	  }

	set_internalvar_component (VALUE_INTERNALVAR (toval),
				   offset,
				   toval->bitpos (),
				   toval->bitsize (),
				   fromval);
      }
      break;

    case lval_memory:
      {
	const gdb_byte *dest_buffer;
	CORE_ADDR changed_addr;
	int changed_len;
	gdb_byte buffer[sizeof (LONGEST)];

	if (toval->bitsize ())
	  {
	    struct value *parent = toval->parent ();

	    /* Bitfield write: read-modify-write the covering bytes.  */
	    changed_addr = parent->address () + toval->offset ();
	    changed_len = (toval->bitpos ()
			   + toval->bitsize ()
			   + HOST_CHAR_BIT - 1)
	      / HOST_CHAR_BIT;

	    /* If we can read-modify-write exactly the size of the
	       containing type (e.g. short or int) then do so.  This
	       is safer for volatile bitfields mapped to hardware
	       registers.  */
	    if (changed_len < type->length ()
		&& type->length () <= (int) sizeof (LONGEST)
		&& ((LONGEST) changed_addr % type->length ()) == 0)
	      changed_len = type->length ();

	    if (changed_len > (int) sizeof (LONGEST))
	      error (_("Can't handle bitfields which "
		       "don't fit in a %d bit word."),
		     (int) sizeof (LONGEST) * HOST_CHAR_BIT);

	    read_memory (changed_addr, buffer, changed_len);
	    modify_field (type, buffer, value_as_long (fromval),
			  toval->bitpos (), toval->bitsize ());
	    dest_buffer = buffer;
	  }
	else
	  {
	    /* Plain write: copy the source contents verbatim.  */
	    changed_addr = toval->address ();
	    changed_len = type_length_units (type);
	    dest_buffer = fromval->contents ().data ();
	  }

	write_memory_with_notification (changed_addr, dest_buffer, changed_len);
      }
      break;

    case lval_register:
      {
	frame_info_ptr next_frame = frame_find_by_id (toval->next_frame_id ());
	int value_reg = toval->regnum ();

	if (next_frame == nullptr)
	  error (_("Value being assigned to is no longer active."));

	gdbarch *gdbarch = frame_unwind_arch (next_frame);

	if (toval->bitsize ())
	  {
	    struct value *parent = toval->parent ();
	    LONGEST offset = parent->offset () + toval->offset ();
	    size_t changed_len;
	    gdb_byte buffer[sizeof (LONGEST)];
	    int optim, unavail;

	    /* Bitfield in a register: read-modify-write the covering
	       register bytes.  */
	    changed_len = (toval->bitpos ()
			   + toval->bitsize ()
			   + HOST_CHAR_BIT - 1)
	      / HOST_CHAR_BIT;

	    if (changed_len > sizeof (LONGEST))
	      error (_("Can't handle bitfields which "
		       "don't fit in a %d bit word."),
		     (int) sizeof (LONGEST) * HOST_CHAR_BIT);

	    if (!get_frame_register_bytes (next_frame, value_reg, offset,
					   { buffer, changed_len }, &optim,
					   &unavail))
	      {
		if (optim)
		  throw_error (OPTIMIZED_OUT_ERROR,
			       _("value has been optimized out"));
		if (unavail)
		  throw_error (NOT_AVAILABLE_ERROR,
			       _("value is not available"));
	      }

	    modify_field (type, buffer, value_as_long (fromval),
			  toval->bitpos (), toval->bitsize ());

	    put_frame_register_bytes (next_frame, value_reg, offset,
				      { buffer, changed_len });
	  }
	else
	  {
	    if (gdbarch_convert_register_p (gdbarch, toval->regnum (), type))
	      {
		/* If TOVAL is a special machine register requiring
		   conversion of program values to a special raw
		   format.  */
		gdbarch_value_to_register (gdbarch,
					   get_prev_frame_always (next_frame),
					   toval->regnum (), type,
					   fromval->contents ().data ());
	      }
	    else
	      put_frame_register_bytes (next_frame, value_reg,
					toval->offset (),
					fromval->contents ());
	  }

	/* Tell anyone who cares (e.g. UIs displaying registers) that
	   this register changed.  */
	gdb::observers::register_changed.notify
	  (get_prev_frame_always (next_frame), value_reg);
	break;
      }

    case lval_computed:
      {
	const struct lval_funcs *funcs = toval->computed_funcs ();

	/* A computed lvalue without a write method is not assignable;
	   fall through to the error below.  */
	if (funcs->write != NULL)
	  {
	    funcs->write (toval, fromval);
	    break;
	  }
      }
      [[fallthrough]];

    default:
      error (_("Left operand of assignment is not an lvalue."));
    }

  /* Assigning to the stack pointer, frame pointer, and other
     (architecture and calling convention specific) registers may
     cause the frame cache and regcache to be out of date.  Assigning to memory
     also can.  We just do this on all assignments to registers or
     memory, for simplicity's sake; I doubt the slowdown matters.  */
  switch (toval->lval ())
    {
    case lval_memory:
    case lval_register:
    case lval_computed:

      gdb::observers::target_changed.notify
	(current_inferior ()->top_target ());

      /* Having destroyed the frame cache, restore the selected
	 frame.  */

      /* FIXME: cagney/2002-11-02: There has to be a better way of
	 doing this.  Instead of constantly saving/restoring the
	 frame.  Why not create a get_selected_frame() function that,
	 having saved the selected frame's ID can automatically
	 re-find the previously selected frame automatically.  */

      {
	frame_info_ptr fi = frame_find_by_id (old_frame);

	if (fi != NULL)
	  select_frame (fi);
      }

      break;
    default:
      break;
    }

  /* If the field does not entirely fill a LONGEST, then zero the sign
     bits.  If the field is signed, and is negative, then sign
     extend.  */
  if ((toval->bitsize () > 0)
      && (toval->bitsize () < 8 * (int) sizeof (LONGEST)))
    {
      LONGEST fieldval = value_as_long (fromval);
      LONGEST valmask = (((ULONGEST) 1) << toval->bitsize ()) - 1;

      fieldval &= valmask;
      if (!type->is_unsigned ()
	  && (fieldval & (valmask ^ (valmask >> 1))))
	fieldval |= ~valmask;

      fromval = value_from_longest (type, fieldval);
    }

  /* The return value is a copy of TOVAL so it shares its location
     information, but its contents are updated from FROMVAL.  This
     implies the returned value is not lazy, even if TOVAL was.  */
  val = toval->copy ();
  val->set_lazy (false);
  copy (fromval->contents (), val->contents_raw ());

  /* We copy over the enclosing type and pointed-to offset from FROMVAL
     in the case of pointer types.  For object types, the enclosing type
     and embedded offset must *not* be copied: the target object referred
     to by TOVAL retains its original dynamic type after assignment.  */
  if (type->code () == TYPE_CODE_PTR)
    {
      val->set_enclosing_type (fromval->enclosing_type ());
      val->set_pointed_to_offset (fromval->pointed_to_offset ());
    }

  return val;
}
1353 | ||
1354 | /* Extend a value ARG1 to COUNT repetitions of its type. */ | |
1355 | ||
1356 | struct value * | |
1357 | value_repeat (struct value *arg1, int count) | |
1358 | { | |
1359 | struct value *val; | |
1360 | ||
1361 | arg1 = coerce_ref (arg1); | |
1362 | ||
1363 | if (arg1->lval () != lval_memory) | |
1364 | error (_("Only values in memory can be extended with '@'.")); | |
1365 | if (count < 1) | |
1366 | error (_("Invalid number %d of repetitions."), count); | |
1367 | ||
1368 | val = allocate_repeat_value (arg1->enclosing_type (), count); | |
1369 | ||
1370 | val->set_lval (lval_memory); | |
1371 | val->set_address (arg1->address ()); | |
1372 | ||
1373 | read_value_memory (val, 0, val->stack (), val->address (), | |
1374 | val->contents_all_raw ().data (), | |
1375 | type_length_units (val->enclosing_type ())); | |
1376 | ||
1377 | return val; | |
1378 | } | |
1379 | ||
1380 | struct value * | |
1381 | value_of_variable (struct symbol *var, const struct block *b) | |
1382 | { | |
1383 | frame_info_ptr frame = NULL; | |
1384 | ||
1385 | if (symbol_read_needs_frame (var)) | |
1386 | frame = get_selected_frame (_("No frame selected.")); | |
1387 | ||
1388 | return read_var_value (var, b, frame); | |
1389 | } | |
1390 | ||
/* Return a pointer value holding the address of variable VAR, looked
   up in block B.  Errors out if VAR lives in a register or is
   otherwise not addressable.  */

struct value *
address_of_variable (struct symbol *var, const struct block *b)
{
  struct type *type = var->type ();
  struct value *val;

  /* Evaluate it first; if the result is a memory address, we're fine.
     Lazy evaluation pays off here.  */

  val = value_of_variable (var, b);
  type = val->type ();

  if ((val->lval () == lval_memory && val->lazy ())
      || type->code () == TYPE_CODE_FUNC)
    {
      CORE_ADDR addr = val->address ();

      return value_from_pointer (lookup_pointer_type (type), addr);
    }

  /* Not a memory address; check what the problem was.  */
  switch (val->lval ())
    {
    case lval_register:
      {
	const char *regname;

	frame_info_ptr frame = frame_find_by_id (val->next_frame_id ());
	gdb_assert (frame != nullptr);

	regname
	  = gdbarch_register_name (get_frame_arch (frame), val->regnum ());
	gdb_assert (regname != nullptr && *regname != '\0');

	/* Name the register in the error so the user knows where the
	   variable actually lives.  */
	error (_("Address requested for identifier "
		 "\"%s\" which is in register $%s"),
	       var->print_name (), regname);
	break;
      }

    default:
      error (_("Can't take address of \"%s\" which isn't an lvalue."),
	     var->print_name ());
      break;
    }

  /* Not reached: both switch branches error out.  */
  return val;
}
1439 | ||
1440 | /* See value.h. */ | |
1441 | ||
1442 | bool | |
1443 | value_must_coerce_to_target (struct value *val) | |
1444 | { | |
1445 | struct type *valtype; | |
1446 | ||
1447 | /* The only lval kinds which do not live in target memory. */ | |
1448 | if (val->lval () != not_lval | |
1449 | && val->lval () != lval_internalvar | |
1450 | && val->lval () != lval_xcallable) | |
1451 | return false; | |
1452 | ||
1453 | valtype = check_typedef (val->type ()); | |
1454 | ||
1455 | switch (valtype->code ()) | |
1456 | { | |
1457 | case TYPE_CODE_ARRAY: | |
1458 | return valtype->is_vector () ? 0 : 1; | |
1459 | case TYPE_CODE_STRING: | |
1460 | return true; | |
1461 | default: | |
1462 | return false; | |
1463 | } | |
1464 | } | |
1465 | ||
1466 | /* Make sure that VAL lives in target memory if it's supposed to. For | |
1467 | instance, strings are constructed as character arrays in GDB's | |
1468 | storage, and this function copies them to the target. */ | |
1469 | ||
1470 | struct value * | |
1471 | value_coerce_to_target (struct value *val) | |
1472 | { | |
1473 | LONGEST length; | |
1474 | CORE_ADDR addr; | |
1475 | ||
1476 | if (!value_must_coerce_to_target (val)) | |
1477 | return val; | |
1478 | ||
1479 | length = check_typedef (val->type ())->length (); | |
1480 | addr = allocate_space_in_inferior (length); | |
1481 | write_memory (addr, val->contents ().data (), length); | |
1482 | return value_at_lazy (val->type (), addr); | |
1483 | } | |
1484 | ||
1485 | /* Given a value which is an array, return a value which is a pointer | |
1486 | to its first element, regardless of whether or not the array has a | |
1487 | nonzero lower bound. | |
1488 | ||
1489 | FIXME: A previous comment here indicated that this routine should | |
1490 | be substracting the array's lower bound. It's not clear to me that | |
1491 | this is correct. Given an array subscripting operation, it would | |
1492 | certainly work to do the adjustment here, essentially computing: | |
1493 | ||
1494 | (&array[0] - (lowerbound * sizeof array[0])) + (index * sizeof array[0]) | |
1495 | ||
1496 | However I believe a more appropriate and logical place to account | |
1497 | for the lower bound is to do so in value_subscript, essentially | |
1498 | computing: | |
1499 | ||
1500 | (&array[0] + ((index - lowerbound) * sizeof array[0])) | |
1501 | ||
1502 | As further evidence consider what would happen with operations | |
1503 | other than array subscripting, where the caller would get back a | |
1504 | value that had an address somewhere before the actual first element | |
1505 | of the array, and the information about the lower bound would be | |
1506 | lost because of the coercion to pointer type. */ | |
1507 | ||
1508 | struct value * | |
1509 | value_coerce_array (struct value *arg1) | |
1510 | { | |
1511 | struct type *type = check_typedef (arg1->type ()); | |
1512 | ||
1513 | /* If the user tries to do something requiring a pointer with an | |
1514 | array that has not yet been pushed to the target, then this would | |
1515 | be a good time to do so. */ | |
1516 | arg1 = value_coerce_to_target (arg1); | |
1517 | ||
1518 | if (arg1->lval () != lval_memory) | |
1519 | error (_("Attempt to take address of value not located in memory.")); | |
1520 | ||
1521 | return value_from_pointer (lookup_pointer_type (type->target_type ()), | |
1522 | arg1->address ()); | |
1523 | } | |
1524 | ||
1525 | /* Given a value which is a function, return a value which is a pointer | |
1526 | to it. */ | |
1527 | ||
1528 | struct value * | |
1529 | value_coerce_function (struct value *arg1) | |
1530 | { | |
1531 | struct value *retval; | |
1532 | ||
1533 | if (arg1->lval () != lval_memory) | |
1534 | error (_("Attempt to take address of value not located in memory.")); | |
1535 | ||
1536 | retval = value_from_pointer (lookup_pointer_type (arg1->type ()), | |
1537 | arg1->address ()); | |
1538 | return retval; | |
1539 | } | |
1540 | ||
1541 | /* Return a pointer value for the object for which ARG1 is the | |
1542 | contents. */ | |
1543 | ||
struct value *
value_addr (struct value *arg1)
{
  struct value *arg2;
  struct type *type = check_typedef (arg1->type ());

  if (TYPE_IS_REFERENCE (type))
    {
      /* A reference whose bits are synthetic has no storage of its
	 own; dereference it and take the address of the referenced
	 object instead.  */
      if (arg1->bits_synthetic_pointer (arg1->embedded_offset (),
					TARGET_CHAR_BIT * type->length ()))
	arg1 = coerce_ref (arg1);
      else
	{
	  /* Copy the value, but change the type from (T&) to (T*).  We
	     keep the same location information, which is efficient, and
	     allows &(&X) to get the location containing the reference.
	     Do the same to its enclosing type for consistency.  */
	  struct type *type_ptr
	    = lookup_pointer_type (type->target_type ());
	  struct type *enclosing_type
	    = check_typedef (arg1->enclosing_type ());
	  struct type *enclosing_type_ptr
	    = lookup_pointer_type (enclosing_type->target_type ());

	  arg2 = arg1->copy ();
	  arg2->deprecated_set_type (type_ptr);
	  arg2->set_enclosing_type (enclosing_type_ptr);

	  return arg2;
	}
    }
  if (type->code () == TYPE_CODE_FUNC)
    return value_coerce_function (arg1);

  /* If this is an array that has not yet been pushed to the target,
     then this would be a good time to force it to memory.  */
  arg1 = value_coerce_to_target (arg1);

  if (arg1->lval () != lval_memory)
    error (_("Attempt to take address of value not located in memory."));

  /* Get target memory address.  */
  arg2 = value_from_pointer (lookup_pointer_type (arg1->type ()),
			     (arg1->address ()
			      + arg1->embedded_offset ()));

  /* This may be a pointer to a base subobject; so remember the
     full derived object's type ...  */
  arg2->set_enclosing_type (lookup_pointer_type (arg1->enclosing_type ()));
  /* ... and also the relative position of the subobject in the full
     object.  */
  arg2->set_pointed_to_offset (arg1->embedded_offset ());
  return arg2;
}
1598 | ||
1599 | /* Return a reference value for the object for which ARG1 is the | |
1600 | contents. */ | |
1601 | ||
1602 | struct value * | |
1603 | value_ref (struct value *arg1, enum type_code refcode) | |
1604 | { | |
1605 | struct value *arg2; | |
1606 | struct type *type = check_typedef (arg1->type ()); | |
1607 | ||
1608 | gdb_assert (refcode == TYPE_CODE_REF || refcode == TYPE_CODE_RVALUE_REF); | |
1609 | ||
1610 | if ((type->code () == TYPE_CODE_REF | |
1611 | || type->code () == TYPE_CODE_RVALUE_REF) | |
1612 | && type->code () == refcode) | |
1613 | return arg1; | |
1614 | ||
1615 | arg2 = value_addr (arg1); | |
1616 | arg2->deprecated_set_type (lookup_reference_type (type, refcode)); | |
1617 | return arg2; | |
1618 | } | |
1619 | ||
1620 | /* Given a value of a pointer type, apply the C unary * operator to | |
1621 | it. */ | |
1622 | ||
struct value *
value_ind (struct value *arg1)
{
  struct type *base_type;
  struct value *arg2;

  arg1 = coerce_array (arg1);

  base_type = check_typedef (arg1->type ());

  /* A computed lvalue may provide its own indirection method; prefer
     it, falling back to the generic code below when it declines by
     returning NULL.  */
  if (arg1->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = arg1->computed_funcs ();

      if (funcs->indirect)
	{
	  struct value *result = funcs->indirect (arg1);

	  if (result)
	    return result;
	}
    }

  if (base_type->code () == TYPE_CODE_PTR)
    {
      struct type *enc_type;

      /* We may be pointing to something embedded in a larger object.
	 Get the real type of the enclosing object.  */
      enc_type = check_typedef (arg1->enclosing_type ());
      enc_type = enc_type->target_type ();

      CORE_ADDR base_addr;
      if (check_typedef (enc_type)->code () == TYPE_CODE_FUNC
	  || check_typedef (enc_type)->code () == TYPE_CODE_METHOD)
	{
	  /* For functions, go through find_function_addr, which knows
	     how to handle function descriptors.  */
	  base_addr = find_function_addr (arg1, NULL);
	}
      else
	{
	  /* Retrieve the enclosing object pointed to.  */
	  base_addr = (value_as_address (arg1)
		       - arg1->pointed_to_offset ());
	}
      /* Read the enclosing object lazily, then adjust the result's
	 type to match what the pointer's target type implies.  */
      arg2 = value_at_lazy (enc_type, base_addr);
      enc_type = arg2->type ();
      return readjust_indirect_value_type (arg2, enc_type, base_type,
					   arg1, base_addr);
    }

  error (_("Attempt to take contents of a non-pointer value."));
}
1677 | \f | |
1678 | /* Create a value for an array by allocating space in GDB, copying the | |
1679 | data into that space, and then setting up an array value. | |
1680 | ||
1681 | The array bounds are set from LOWBOUND and the size of ELEMVEC, and | |
1682 | the array is populated from the values passed in ELEMVEC. | |
1683 | ||
1684 | The element type of the array is inherited from the type of the | |
1685 | first element, and all elements must have the same size (though we | |
1686 | don't currently enforce any restriction on their types). */ | |
1687 | ||
1688 | struct value * | |
1689 | value_array (int lowbound, gdb::array_view<struct value *> elemvec) | |
1690 | { | |
1691 | int idx; | |
1692 | ULONGEST typelength; | |
1693 | struct value *val; | |
1694 | struct type *arraytype; | |
1695 | ||
1696 | /* Validate that the bounds are reasonable and that each of the | |
1697 | elements have the same size. */ | |
1698 | ||
1699 | typelength = type_length_units (elemvec[0]->enclosing_type ()); | |
1700 | for (struct value *other : elemvec.slice (1)) | |
1701 | { | |
1702 | if (type_length_units (other->enclosing_type ()) != typelength) | |
1703 | { | |
1704 | error (_("array elements must all be the same size")); | |
1705 | } | |
1706 | } | |
1707 | ||
1708 | arraytype = lookup_array_range_type (elemvec[0]->enclosing_type (), | |
1709 | lowbound, | |
1710 | lowbound + elemvec.size () - 1); | |
1711 | ||
1712 | if (!current_language->c_style_arrays_p ()) | |
1713 | { | |
1714 | val = value::allocate (arraytype); | |
1715 | for (idx = 0; idx < elemvec.size (); idx++) | |
1716 | elemvec[idx]->contents_copy (val, idx * typelength, 0, typelength); | |
1717 | return val; | |
1718 | } | |
1719 | ||
1720 | /* Allocate space to store the array, and then initialize it by | |
1721 | copying in each element. */ | |
1722 | ||
1723 | val = value::allocate (arraytype); | |
1724 | for (idx = 0; idx < elemvec.size (); idx++) | |
1725 | elemvec[idx]->contents_copy (val, idx * typelength, 0, typelength); | |
1726 | return val; | |
1727 | } | |
1728 | ||
1729 | /* See value.h. */ | |
1730 | ||
1731 | struct value * | |
1732 | value_cstring (const gdb_byte *ptr, ssize_t count, struct type *char_type) | |
1733 | { | |
1734 | struct value *val; | |
1735 | int lowbound = current_language->string_lower_bound (); | |
1736 | ssize_t highbound = count + 1; | |
1737 | struct type *stringtype | |
1738 | = lookup_array_range_type (char_type, lowbound, highbound + lowbound - 1); | |
1739 | ||
1740 | val = value::allocate (stringtype); | |
1741 | ssize_t len = count * char_type->length (); | |
1742 | memcpy (val->contents_raw ().data (), ptr, len); | |
1743 | /* Write the terminating null-character. */ | |
1744 | memset (val->contents_raw ().data () + len, 0, char_type->length ()); | |
1745 | return val; | |
1746 | } | |
1747 | ||
1748 | /* See value.h. */ | |
1749 | ||
1750 | struct value * | |
1751 | value_string (const gdb_byte *ptr, ssize_t count, struct type *char_type) | |
1752 | { | |
1753 | struct value *val; | |
1754 | int lowbound = current_language->string_lower_bound (); | |
1755 | ssize_t highbound = count; | |
1756 | struct type *stringtype | |
1757 | = lookup_string_range_type (char_type, lowbound, highbound + lowbound - 1); | |
1758 | ||
1759 | val = value::allocate (stringtype); | |
1760 | ssize_t len = count * char_type->length (); | |
1761 | memcpy (val->contents_raw ().data (), ptr, len); | |
1762 | return val; | |
1763 | } | |
1764 | ||
1765 | \f | |
1766 | /* See if we can pass arguments in T2 to a function which takes arguments | |
1767 | of types T1. T1 is a list of NARGS arguments, and T2 is an array_view | |
1768 | of the values we're trying to pass. If some arguments need coercion of | |
1769 | some sort, then the coerced values are written into T2. Return value is | |
1770 | 0 if the arguments could be matched, or the position at which they | |
1771 | differ if not. | |
1772 | ||
1773 | STATICP is nonzero if the T1 argument list came from a static | |
1774 | member function. T2 must still include the ``this'' pointer, but | |
1775 | it will be skipped. | |
1776 | ||
1777 | For non-static member functions, we ignore the first argument, | |
1778 | which is the type of the instance variable. This is because we | |
1779 | want to handle calls with objects from derived classes. This is | |
1780 | not entirely correct: we should actually check to make sure that a | |
1781 | requested operation is type secure, shouldn't we? FIXME. */ | |
1782 | ||
static int
typecmp (bool staticp, bool varargs, int nargs,
	 struct field t1[], gdb::array_view<value *> t2)
{
  int i;

  /* Skip ``this'' argument if applicable.  T2 will always include
     THIS.  */
  if (staticp)
    t2 = t2.slice (1);

  /* Walk the formal parameters until we run out or hit the
     TYPE_CODE_VOID terminator.  */
  for (i = 0;
       (i < nargs) && t1[i].type ()->code () != TYPE_CODE_VOID;
       i++)
    {
      struct type *tt1, *tt2;

      /* Ran out of actual arguments before the formals were
	 exhausted: report a mismatch at this (1-based) position.  */
      if (i == t2.size ())
	return i + 1;

      tt1 = check_typedef (t1[i].type ());
      tt2 = check_typedef (t2[i]->type ());

      if (TYPE_IS_REFERENCE (tt1)
	  /* We should be doing hairy argument matching, as below.  */
	  && (check_typedef (tt1->target_type ())->code ()
	      == tt2->code ()))
	{
	  /* The formal is a reference whose target matches the actual:
	     coerce the actual so it can bind, writing the coerced
	     value back into T2 (this is the documented side effect of
	     this function).  */
	  if (tt2->code () == TYPE_CODE_ARRAY)
	    t2[i] = value_coerce_array (t2[i]);
	  else
	    t2[i] = value_ref (t2[i], tt1->code ());
	  continue;
	}

      /* djb - 20000715 - Until the new type structure is in the
	 place, and we can attempt things like implicit conversions,
	 we need to do this so you can take something like a map<const
	 char *>, and properly access map["hello"], because the
	 argument to [] will be a reference to a pointer to a char,
	 and the argument will be a pointer to a char.  */
      while (TYPE_IS_REFERENCE (tt1) || tt1->code () == TYPE_CODE_PTR)
	{
	  tt1 = check_typedef ( tt1->target_type () );
	}
      while (tt2->code () == TYPE_CODE_ARRAY
	     || tt2->code () == TYPE_CODE_PTR
	     || TYPE_IS_REFERENCE (tt2))
	{
	  tt2 = check_typedef (tt2->target_type ());
	}
      /* Same underlying type code after stripping all indirection:
	 treat as a match.  */
      if (tt1->code () == tt2->code ())
	continue;
      /* Array to pointer is a `trivial conversion' according to the
	 ARM.  */

      /* We should be doing much hairier argument matching (see
	 section 13.2 of the ARM), but as a quick kludge, just check
	 for the same type code.  */
      if (t1[i].type ()->code () != t2[i]->type ()->code ())
	return i + 1;
    }
  /* All formals matched; succeed only if the actuals are also
     exhausted, or the function accepts varargs.  */
  if (varargs || i == t2.size ())
    return 0;
  return i + 1;
}
1849 | ||
1850 | /* Helper class for search_struct_field that keeps track of found | |
1851 | results and possibly throws an exception if the search yields | |
1852 | ambiguous results. See search_struct_field for description of | |
1853 | LOOKING_FOR_BASECLASS. */ | |
1854 | ||
struct struct_field_searcher
{
  /* A found field.  */
  struct found_field
  {
    /* Path to the structure where the field was found.  E.g. for
       field 'x' defined in class B inherited by class A, the path
       holds A then B.  */
    std::vector<struct type *> path;

    /* The field found.  */
    struct value *field_value;
  };

  /* See corresponding fields for description of parameters.  */
  struct_field_searcher (const char *name,
			 struct type *outermost_type,
			 bool looking_for_baseclass)
    : m_name (name),
      m_looking_for_baseclass (looking_for_baseclass),
      m_outermost_type (outermost_type)
  {
  }

  /* The search entry point.  If LOOKING_FOR_BASECLASS is true and the
     base class search yields ambiguous results, this throws an
     exception.  If LOOKING_FOR_BASECLASS is false, the found fields
     are accumulated and the caller (search_struct_field) takes care
     of throwing an error if the field search yields ambiguous
     results.  The latter is done that way so that the error message
     can include a list of all the found candidates.  */
  void search (struct value *arg, LONGEST offset, struct type *type);

  /* Accumulated field candidates; meaningful only when searching for
     fields (not baseclasses).  */
  const std::vector<found_field> &fields ()
  {
    return m_fields;
  }

  /* The found baseclass, or nullptr; meaningful only when searching
     for a baseclass.  */
  struct value *baseclass ()
  {
    return m_baseclass;
  }

private:
  /* Update results to include V, a found field/baseclass.  */
  void update_result (struct value *v, LONGEST boffset);

  /* The name of the field/baseclass we're searching for.  */
  const char *m_name;

  /* Whether we're looking for a baseclass, or a field.  */
  const bool m_looking_for_baseclass;

  /* The offset of the baseclass containing the field/baseclass we
     last recorded.  */
  LONGEST m_last_boffset = 0;

  /* If looking for a baseclass, then the result is stored here.  */
  struct value *m_baseclass = nullptr;

  /* When looking for fields, the found candidates are stored
     here.  */
  std::vector<found_field> m_fields;

  /* The type of the initial type passed to search_struct_field; this
     is used for error reporting when the lookup is ambiguous.  */
  struct type *m_outermost_type;

  /* The full path to the struct being inspected.  E.g. for field 'x'
     defined in class B inherited by class A, we have A and B pushed
     on the path.  */
  std::vector <struct type *> m_struct_path;
};
1926 | ||
1927 | void | |
1928 | struct_field_searcher::update_result (struct value *v, LONGEST boffset) | |
1929 | { | |
1930 | if (v != NULL) | |
1931 | { | |
1932 | if (m_looking_for_baseclass) | |
1933 | { | |
1934 | if (m_baseclass != nullptr | |
1935 | /* The result is not ambiguous if all the classes that are | |
1936 | found occupy the same space. */ | |
1937 | && m_last_boffset != boffset) | |
1938 | error (_("base class '%s' is ambiguous in type '%s'"), | |
1939 | m_name, TYPE_SAFE_NAME (m_outermost_type)); | |
1940 | ||
1941 | m_baseclass = v; | |
1942 | m_last_boffset = boffset; | |
1943 | } | |
1944 | else | |
1945 | { | |
1946 | /* The field is not ambiguous if it occupies the same | |
1947 | space. */ | |
1948 | if (m_fields.empty () || m_last_boffset != boffset) | |
1949 | m_fields.push_back ({m_struct_path, v}); | |
1950 | else | |
1951 | { | |
1952 | /*Fields can occupy the same space and have the same name (be | |
1953 | ambiguous). This can happen when fields in two different base | |
1954 | classes are marked [[no_unique_address]] and have the same name. | |
1955 | The C++ standard says that such fields can only occupy the same | |
1956 | space if they are of different type, but we don't rely on that in | |
1957 | the following code. */ | |
1958 | bool ambiguous = false, insert = true; | |
1959 | for (const found_field &field: m_fields) | |
1960 | { | |
1961 | if(field.path.back () != m_struct_path.back ()) | |
1962 | { | |
1963 | /* Same boffset points to members of different classes. | |
1964 | We have found an ambiguity and should record it. */ | |
1965 | ambiguous = true; | |
1966 | } | |
1967 | else | |
1968 | { | |
1969 | /* We don't need to insert this value again, because a | |
1970 | non-ambiguous path already leads to it. */ | |
1971 | insert = false; | |
1972 | break; | |
1973 | } | |
1974 | } | |
1975 | if (ambiguous && insert) | |
1976 | m_fields.push_back ({m_struct_path, v}); | |
1977 | } | |
1978 | } | |
1979 | } | |
1980 | } | |
1981 | ||
1982 | /* A helper for search_struct_field. This does all the work; most | |
1983 | arguments are as passed to search_struct_field. */ | |
1984 | ||
void
struct_field_searcher::search (struct value *arg1, LONGEST offset,
			       struct type *type)
{
  int i;
  int nbases;

  /* Record TYPE on the path used for ambiguity reporting; popped
     automatically on every exit from this frame.  */
  m_struct_path.push_back (type);
  SCOPE_EXIT { m_struct_path.pop_back (); };

  type = check_typedef (type);
  nbases = TYPE_N_BASECLASSES (type);

  /* First pass: the non-baseclass fields of TYPE itself (fields are
     stored after the baseclasses, hence the i >= nbases bound).
     Scanned in reverse declaration order.  */
  if (!m_looking_for_baseclass)
    for (i = type->num_fields () - 1; i >= nbases; i--)
      {
	const char *t_field_name = type->field (i).name ();

	if (t_field_name && (strcmp_iw (t_field_name, m_name) == 0))
	  {
	    struct value *v;

	    if (type->field (i).is_static ())
	      v = value_static_field (type, i);
	    else
	      v = arg1->primitive_field (offset, i, type);

	    /* A direct hit in this struct ends the search of this
	       subtree; baseclasses are not consulted.  */
	    update_result (v, offset);
	    return;
	  }

	/* An empty field name marks an anonymous struct/union member;
	   descend into it looking for M_NAME.  */
	if (t_field_name
	    && t_field_name[0] == '\0')
	  {
	    struct type *field_type = type->field (i).type ();

	    if (field_type->code () == TYPE_CODE_UNION
		|| field_type->code () == TYPE_CODE_STRUCT)
	      {
		/* Look for a match through the fields of an anonymous
		   union, or anonymous struct.  C++ provides anonymous
		   unions.

		   In the GNU Chill (now deleted from GDB)
		   implementation of variant record types, each
		   <alternative field> has an (anonymous) union type,
		   each member of the union represents a <variant
		   alternative>.  Each <variant alternative> is
		   represented as a struct, with a member for each
		   <variant field>.  */

		LONGEST new_offset = offset;

		/* This is pretty gross.  In G++, the offset in an
		   anonymous union is relative to the beginning of the
		   enclosing struct.  In the GNU Chill (now deleted
		   from GDB) implementation of variant records, the
		   bitpos is zero in an anonymous union field, so we
		   have to add the offset of the union here.  */
		if (field_type->code () == TYPE_CODE_STRUCT
		    || (field_type->num_fields () > 0
			&& field_type->field (0).loc_bitpos () == 0))
		  new_offset += type->field (i).loc_bitpos () / 8;

		search (arg1, new_offset, field_type);
	      }
	  }
      }

  /* Second pass: recurse into (or, when M_LOOKING_FOR_BASECLASS,
     match against) each baseclass.  */
  for (i = 0; i < nbases; i++)
    {
      struct value *v = NULL;
      struct type *basetype = check_typedef (TYPE_BASECLASS (type, i));
      /* If we are looking for baseclasses, this is what we get when
	 we hit them.  But it could happen that the base part's member
	 name is not yet filled in.  */
      int found_baseclass = (m_looking_for_baseclass
			     && TYPE_BASECLASS_NAME (type, i) != NULL
			     && (strcmp_iw (m_name, basetype->name ()) == 0));
      LONGEST boffset = arg1->embedded_offset () + offset;

      if (BASETYPE_VIA_VIRTUAL (type, i))
	{
	  struct value *v2;

	  boffset = baseclass_offset (type, i,
				      arg1->contents_for_printing ().data (),
				      arg1->embedded_offset () + offset,
				      arg1->address (),
				      arg1);

	  /* The virtual base class pointer might have been clobbered
	     by the user program.  Make sure that it still points to a
	     valid memory location.  */

	  boffset += arg1->embedded_offset () + offset;
	  if (boffset < 0
	      || boffset >= arg1->enclosing_type ()->length ())
	    {
	      CORE_ADDR base_addr;

	      /* The base lies outside ARG1's buffer: read it straight
		 from target memory instead.  */
	      base_addr = arg1->address () + boffset;
	      v2 = value_at_lazy (basetype, base_addr);
	      if (target_read_memory (base_addr,
				      v2->contents_raw ().data (),
				      v2->type ()->length ()) != 0)
		error (_("virtual baseclass botch"));
	    }
	  else
	    {
	      /* The base lies within ARG1's buffer: view ARG1's
		 contents as the base class at offset BOFFSET.  */
	      v2 = arg1->copy ();
	      v2->deprecated_set_type (basetype);
	      v2->set_embedded_offset (boffset);
	    }

	  if (found_baseclass)
	    v = v2;
	  else
	    search (v2, 0, TYPE_BASECLASS (type, i));
	}
      else if (found_baseclass)
	v = arg1->primitive_field (offset, i, type);
      else
	{
	  search (arg1, offset + TYPE_BASECLASS_BITPOS (type, i) / 8,
		  basetype);
	}

      update_result (v, boffset);
    }
}
2116 | ||
2117 | /* Helper function used by value_struct_elt to recurse through | |
2118 | baseclasses. Look for a field NAME in ARG1. Search in it assuming | |
2119 | it has (class) type TYPE. If found, return value, else return NULL. | |
2120 | ||
2121 | If LOOKING_FOR_BASECLASS, then instead of looking for struct | |
2122 | fields, look for a baseclass named NAME. */ | |
2123 | ||
2124 | static struct value * | |
2125 | search_struct_field (const char *name, struct value *arg1, | |
2126 | struct type *type, int looking_for_baseclass) | |
2127 | { | |
2128 | struct_field_searcher searcher (name, type, looking_for_baseclass); | |
2129 | ||
2130 | searcher.search (arg1, 0, type); | |
2131 | ||
2132 | if (!looking_for_baseclass) | |
2133 | { | |
2134 | const auto &fields = searcher.fields (); | |
2135 | ||
2136 | if (fields.empty ()) | |
2137 | return nullptr; | |
2138 | else if (fields.size () == 1) | |
2139 | return fields[0].field_value; | |
2140 | else | |
2141 | { | |
2142 | std::string candidates; | |
2143 | ||
2144 | for (auto &&candidate : fields) | |
2145 | { | |
2146 | gdb_assert (!candidate.path.empty ()); | |
2147 | ||
2148 | struct type *field_type = candidate.field_value->type (); | |
2149 | struct type *struct_type = candidate.path.back (); | |
2150 | ||
2151 | std::string path; | |
2152 | bool first = true; | |
2153 | for (struct type *t : candidate.path) | |
2154 | { | |
2155 | if (first) | |
2156 | first = false; | |
2157 | else | |
2158 | path += " -> "; | |
2159 | path += t->name (); | |
2160 | } | |
2161 | ||
2162 | candidates += string_printf ("\n '%s %s::%s' (%s)", | |
2163 | TYPE_SAFE_NAME (field_type), | |
2164 | TYPE_SAFE_NAME (struct_type), | |
2165 | name, | |
2166 | path.c_str ()); | |
2167 | } | |
2168 | ||
2169 | error (_("Request for member '%s' is ambiguous in type '%s'." | |
2170 | " Candidates are:%s"), | |
2171 | name, TYPE_SAFE_NAME (type), | |
2172 | candidates.c_str ()); | |
2173 | } | |
2174 | } | |
2175 | else | |
2176 | return searcher.baseclass (); | |
2177 | } | |
2178 | ||
2179 | /* Helper function used by value_struct_elt to recurse through | |
2180 | baseclasses. Look for a field NAME in ARG1. Adjust the address of | |
2181 | ARG1 by OFFSET bytes, and search in it assuming it has (class) type | |
2182 | TYPE. | |
2183 | ||
2184 | ARGS is an optional array of argument values used to help finding NAME. | |
2185 | The contents of ARGS can be adjusted if type coercion is required in | |
2186 | order to find a matching NAME. | |
2187 | ||
2188 | If found, return value, else if name matched and args not return | |
2189 | (value) -1, else return NULL. */ | |
2190 | ||
static struct value *
search_struct_method (const char *name, struct value **arg1p,
		      std::optional<gdb::array_view<value *>> args,
		      LONGEST offset, int *static_memfuncp,
		      struct type *type)
{
  int i;
  struct value *v;
  int name_matched = 0;

  type = check_typedef (type);
  /* First, look through the method lists of TYPE itself, in reverse
     declaration order.  */
  for (i = TYPE_NFN_FIELDS (type) - 1; i >= 0; i--)
    {
      const char *t_field_name = TYPE_FN_FIELDLIST_NAME (type, i);

      if (t_field_name && (strcmp_iw (t_field_name, name) == 0))
	{
	  int j = TYPE_FN_FIELDLIST_LENGTH (type, i) - 1;
	  struct fn_field *f = TYPE_FN_FIELDLIST1 (type, i);

	  name_matched = 1;
	  check_stub_method_group (type, i);
	  if (j > 0 && !args.has_value ())
	    error (_("cannot resolve overloaded method "
		     "`%s': no arguments supplied"), name);
	  else if (j == 0 && !args.has_value ())
	    {
	      /* Exactly one overload and no argument list: take it
		 unconditionally.  */
	      v = value_fn_field (arg1p, f, j, type, offset);
	      if (v != NULL)
		return v;
	    }
	  else
	    /* Several overloads and an argument list: try each
	       overload against ARGS (typecmp may coerce values in
	       ARGS as a side effect).  */
	    while (j >= 0)
	      {
		gdb_assert (args.has_value ());
		if (!typecmp (TYPE_FN_FIELD_STATIC_P (f, j),
			      TYPE_FN_FIELD_TYPE (f, j)->has_varargs (),
			      TYPE_FN_FIELD_TYPE (f, j)->num_fields (),
			      TYPE_FN_FIELD_ARGS (f, j), *args))
		  {
		    if (TYPE_FN_FIELD_VIRTUAL_P (f, j))
		      return value_virtual_fn_field (arg1p, f, j,
						     type, offset);
		    if (TYPE_FN_FIELD_STATIC_P (f, j)
			&& static_memfuncp)
		      *static_memfuncp = 1;
		    v = value_fn_field (arg1p, f, j, type, offset);
		    if (v != NULL)
		      return v;
		  }
		j--;
	      }
	}
    }

  /* Not found directly in TYPE: recurse into each baseclass.  */
  for (i = TYPE_N_BASECLASSES (type) - 1; i >= 0; i--)
    {
      LONGEST base_offset;
      LONGEST this_offset;

      if (BASETYPE_VIA_VIRTUAL (type, i))
	{
	  struct type *baseclass = check_typedef (TYPE_BASECLASS (type, i));
	  struct value *base_val;
	  const gdb_byte *base_valaddr;

	  /* The virtual base class pointer might have been
	     clobbered by the user program.  Make sure that it
	     still points to a valid memory location.  */

	  if (offset < 0 || offset >= type->length ())
	    {
	      CORE_ADDR address;

	      /* OFFSET is outside this object's contents buffer:
		 fetch the base part directly from target memory.  */
	      gdb::byte_vector tmp (baseclass->length ());
	      address = (*arg1p)->address ();

	      if (target_read_memory (address + offset,
				      tmp.data (), baseclass->length ()) != 0)
		error (_("virtual baseclass botch"));

	      base_val = value_from_contents_and_address (baseclass,
							  tmp.data (),
							  address + offset);
	      base_valaddr = base_val->contents_for_printing ().data ();
	      this_offset = 0;
	    }
	  else
	    {
	      base_val = *arg1p;
	      base_valaddr = (*arg1p)->contents_for_printing ().data ();
	      this_offset = offset;
	    }

	  base_offset = baseclass_offset (type, i, base_valaddr,
					  this_offset, base_val->address (),
					  base_val);
	}
      else
	{
	  base_offset = TYPE_BASECLASS_BITPOS (type, i) / 8;
	}
      v = search_struct_method (name, arg1p, args, base_offset + offset,
				static_memfuncp, TYPE_BASECLASS (type, i));
      /* (struct value *) -1 is the sentinel for "name matched but the
	 arguments did not"; note it and keep looking so another base
	 can still provide a full match.  */
      if (v == (struct value *) - 1)
	{
	  name_matched = 1;
	}
      else if (v)
	{
	  /* FIXME-bothner:  Why is this commented out?  Why is it here?  */
	  /* *arg1p = arg1_tmp; */
	  return v;
	}
    }
  if (name_matched)
    return (struct value *) - 1;
  else
    return NULL;
}
2311 | ||
2312 | /* Given *ARGP, a value of type (pointer to a)* structure/union, | |
2313 | extract the component named NAME from the ultimate target | |
2314 | structure/union and return it as a value with its appropriate type. | |
2315 | ERR is used in the error message if *ARGP's type is wrong. | |
2316 | ||
2317 | C++: ARGS is a list of argument types to aid in the selection of | |
2318 | an appropriate method. Also, handle derived types. | |
2319 | ||
2320 | STATIC_MEMFUNCP, if non-NULL, points to a caller-supplied location | |
2321 | where the truthvalue of whether the function that was resolved was | |
2322 | a static member function or not is stored. | |
2323 | ||
2324 | ERR is an error message to be printed in case the field is not | |
2325 | found. */ | |
2326 | ||
struct value *
value_struct_elt (struct value **argp,
		  std::optional<gdb::array_view<value *>> args,
		  const char *name, int *static_memfuncp, const char *err)
{
  struct type *t;
  struct value *v;

  *argp = coerce_array (*argp);

  t = check_typedef ((*argp)->type ());

  /* Follow pointers until we get to a non-pointer.  */

  while (t->is_pointer_or_reference ())
    {
      *argp = value_ind (*argp);
      /* Don't coerce fn pointer to fn and then back again!  */
      if (check_typedef ((*argp)->type ())->code () != TYPE_CODE_FUNC)
	*argp = coerce_array (*argp);
      t = check_typedef ((*argp)->type ());
    }

  if (t->code () != TYPE_CODE_STRUCT
      && t->code () != TYPE_CODE_UNION)
    error (_("Attempt to extract a component of a value that is not a %s."),
	   err);

  /* Assume it's not, unless we see that it is.  */
  if (static_memfuncp)
    *static_memfuncp = 0;

  if (!args.has_value ())
    {
      /* if there are no arguments ...do this...  */

      /* Try as a field first, because if we succeed, there is less
	 work to be done.  */
      v = search_struct_field (name, *argp, t, 0);
      if (v)
	return v;

      if (current_language->la_language == language_fortran)
	{
	  /* If it is not a field it is the type name of an inherited
	     structure.  */
	  v = search_struct_field (name, *argp, t, 1);
	  if (v)
	    return v;
	}

      /* C++: If it was not found as a data field, then try to
	 return it as a pointer to a method.  */
      v = search_struct_method (name, argp, args, 0,
				static_memfuncp, t);

      /* search_struct_method returns (struct value *) -1 when the
	 method name matched but arguments were required to pick an
	 overload.  */
      if (v == (struct value *) - 1)
	error (_("Cannot take address of method %s."), name);
      else if (v == 0)
	{
	  if (TYPE_NFN_FIELDS (t))
	    error (_("There is no member or method named %s."), name);
	  else
	    error (_("There is no member named %s."), name);
	}
      return v;
    }

  /* An argument list was supplied: look for a matching method
     first.  */
  v = search_struct_method (name, argp, args, 0,
			    static_memfuncp, t);

  if (v == (struct value *) - 1)
    {
      error (_("One of the arguments you tried to pass to %s could not "
	       "be converted to what the function wants."), name);
    }
  else if (v == 0)
    {
      /* See if user tried to invoke data as function.  If so, hand it
	 back.  If it's not callable (i.e., a pointer to function),
	 gdb should give an error.  */
      v = search_struct_field (name, *argp, t, 0);
      /* If we found an ordinary field, then it is not a method call.
	 So, treat it as if it were a static member function.  */
      if (v && static_memfuncp)
	*static_memfuncp = 1;
    }

  if (!v)
    throw_error (NOT_FOUND_ERROR,
		 _("Structure has no component named %s."), name);
  return v;
}
2420 | ||
2421 | /* Given *ARGP, a value of type structure or union, or a pointer/reference | |
2422 | to a structure or union, extract and return its component (field) of | |
2423 | type FTYPE at the specified BITPOS. | |
2424 | Throw an exception on error. */ | |
2425 | ||
2426 | struct value * | |
2427 | value_struct_elt_bitpos (struct value **argp, int bitpos, struct type *ftype, | |
2428 | const char *err) | |
2429 | { | |
2430 | struct type *t; | |
2431 | int i; | |
2432 | ||
2433 | *argp = coerce_array (*argp); | |
2434 | ||
2435 | t = check_typedef ((*argp)->type ()); | |
2436 | ||
2437 | while (t->is_pointer_or_reference ()) | |
2438 | { | |
2439 | *argp = value_ind (*argp); | |
2440 | if (check_typedef ((*argp)->type ())->code () != TYPE_CODE_FUNC) | |
2441 | *argp = coerce_array (*argp); | |
2442 | t = check_typedef ((*argp)->type ()); | |
2443 | } | |
2444 | ||
2445 | if (t->code () != TYPE_CODE_STRUCT | |
2446 | && t->code () != TYPE_CODE_UNION) | |
2447 | error (_("Attempt to extract a component of a value that is not a %s."), | |
2448 | err); | |
2449 | ||
2450 | for (i = TYPE_N_BASECLASSES (t); i < t->num_fields (); i++) | |
2451 | { | |
2452 | if (!t->field (i).is_static () | |
2453 | && bitpos == t->field (i).loc_bitpos () | |
2454 | && types_equal (ftype, t->field (i).type ())) | |
2455 | return (*argp)->primitive_field (0, i, t); | |
2456 | } | |
2457 | ||
2458 | error (_("No field with matching bitpos and type.")); | |
2459 | ||
2460 | /* Never hit. */ | |
2461 | return NULL; | |
2462 | } | |
2463 | ||
2464 | /* Search through the methods of an object (and its bases) to find a | |
2465 | specified method. Return a reference to the fn_field list METHODS of | |
2466 | overloaded instances defined in the source language. If available | |
2467 | and matching, a vector of matching xmethods defined in extension | |
2468 | languages are also returned in XMETHODS. | |
2469 | ||
2470 | Helper function for value_find_oload_list. | |
2471 | ARGP is a pointer to a pointer to a value (the object). | |
2472 | METHOD is a string containing the method name. | |
2473 | OFFSET is the offset within the value. | |
2474 | TYPE is the assumed type of the object. | |
2475 | METHODS is a pointer to the matching overloaded instances defined | |
2476 | in the source language. Since this is a recursive function, | |
2477 | *METHODS should be set to NULL when calling this function. | |
2478 | NUM_FNS is the number of overloaded instances. *NUM_FNS should be set to | |
2479 | 0 when calling this function. | |
2480 | XMETHODS is the vector of matching xmethod workers. *XMETHODS | |
2481 | should also be set to NULL when calling this function. | |
2482 | BASETYPE is set to the actual type of the subobject where the | |
2483 | method is found. | |
2484 | BOFFSET is the offset of the base subobject where the method is found. */ | |
2485 | ||
static void
find_method_list (struct value **argp, const char *method,
		  LONGEST offset, struct type *type,
		  gdb::array_view<fn_field> *methods,
		  std::vector<xmethod_worker_up> *xmethods,
		  struct type **basetype, LONGEST *boffset)
{
  int i;
  struct fn_field *f = NULL;

  gdb_assert (methods != NULL && xmethods != NULL);
  type = check_typedef (type);

  /* First check in object itself.
     This function is called recursively to search through base classes.
     If there is a source method match found at some stage, then we need not
     look for source methods in consequent recursive calls.  */
  if (methods->empty ())
    {
      for (i = TYPE_NFN_FIELDS (type) - 1; i >= 0; i--)
	{
	  /* pai: FIXME What about operators and type conversions?  */
	  const char *fn_field_name = TYPE_FN_FIELDLIST_NAME (type, i);

	  if (fn_field_name && (strcmp_iw (fn_field_name, method) == 0))
	    {
	      int len = TYPE_FN_FIELDLIST_LENGTH (type, i);
	      f = TYPE_FN_FIELDLIST1 (type, i);
	      /* Report the whole overload set, plus where it was
		 found, through the out-parameters.  */
	      *methods = gdb::make_array_view (f, len);

	      *basetype = type;
	      *boffset = offset;

	      /* Resolve any stub methods.  */
	      check_stub_method_group (type, i);

	      break;
	    }
	}
    }

  /* Unlike source methods, xmethods can be accumulated over successive
     recursive calls.  In other words, an xmethod named 'm' in a class
     will not hide an xmethod named 'm' in its base class(es).  We want
     it to be this way because xmethods are after all convenience functions
     and hence there is no point restricting them with something like method
     hiding.  Moreover, if hiding is done for xmethods as well, then we will
     have to provide a mechanism to un-hide (like the 'using' construct).  */
  get_matching_xmethod_workers (type, method, xmethods);

  /* If source methods are not found in current class, look for them in the
     base classes.  We also have to go through the base classes to gather
     extension methods.  */
  for (i = TYPE_N_BASECLASSES (type) - 1; i >= 0; i--)
    {
      LONGEST base_offset;

      if (BASETYPE_VIA_VIRTUAL (type, i))
	{
	  /* Virtual base: compute the runtime offset from the
	     object's contents.  */
	  base_offset = baseclass_offset (type, i,
					  (*argp)->contents_for_printing ().data (),
					  (*argp)->offset () + offset,
					  (*argp)->address (), *argp);
	}
      else /* Non-virtual base, simply use bit position from debug
	      info.  */
	{
	  base_offset = TYPE_BASECLASS_BITPOS (type, i) / 8;
	}

      find_method_list (argp, method, base_offset + offset,
			TYPE_BASECLASS (type, i), methods,
			xmethods, basetype, boffset);
    }
}
2561 | ||
2562 | /* Return the list of overloaded methods of a specified name. The methods | |
2563 | could be those GDB finds in the binary, or xmethod. Methods found in | |
2564 | the binary are returned in METHODS, and xmethods are returned in | |
2565 | XMETHODS. | |
2566 | ||
2567 | ARGP is a pointer to a pointer to a value (the object). | |
2568 | METHOD is the method name. | |
2569 | OFFSET is the offset within the value contents. | |
2570 | METHODS is the list of matching overloaded instances defined in | |
2571 | the source language. | |
2572 | XMETHODS is the vector of matching xmethod workers defined in | |
2573 | extension languages. | |
2574 | BASETYPE is set to the type of the base subobject that defines the | |
2575 | method. | |
2576 | BOFFSET is the offset of the base subobject which defines the method. */ | |
2577 | ||
2578 | static void | |
2579 | value_find_oload_method_list (struct value **argp, const char *method, | |
2580 | LONGEST offset, | |
2581 | gdb::array_view<fn_field> *methods, | |
2582 | std::vector<xmethod_worker_up> *xmethods, | |
2583 | struct type **basetype, LONGEST *boffset) | |
2584 | { | |
2585 | struct type *t; | |
2586 | ||
2587 | t = check_typedef ((*argp)->type ()); | |
2588 | ||
2589 | /* Code snarfed from value_struct_elt. */ | |
2590 | while (t->is_pointer_or_reference ()) | |
2591 | { | |
2592 | *argp = value_ind (*argp); | |
2593 | /* Don't coerce fn pointer to fn and then back again! */ | |
2594 | if (check_typedef ((*argp)->type ())->code () != TYPE_CODE_FUNC) | |
2595 | *argp = coerce_array (*argp); | |
2596 | t = check_typedef ((*argp)->type ()); | |
2597 | } | |
2598 | ||
2599 | if (t->code () != TYPE_CODE_STRUCT | |
2600 | && t->code () != TYPE_CODE_UNION) | |
2601 | error (_("Attempt to extract a component of a " | |
2602 | "value that is not a struct or union")); | |
2603 | ||
2604 | gdb_assert (methods != NULL && xmethods != NULL); | |
2605 | ||
2606 | /* Clear the lists. */ | |
2607 | *methods = {}; | |
2608 | xmethods->clear (); | |
2609 | ||
2610 | find_method_list (argp, method, 0, t, methods, xmethods, | |
2611 | basetype, boffset); | |
2612 | } | |
2613 | ||
2614 | /* Helper function for find_overload_match. If no matches were | |
2615 | found, this function may generate a hint for the user that some | |
2616 | of the relevant types are incomplete, so GDB can't evaluate | |
2617 | type relationships to properly evaluate overloads. | |
2618 | ||
2619 | If no incomplete types are present, an empty string is returned. */ | |
2620 | static std::string | |
2621 | incomplete_type_hint (gdb::array_view<value *> args) | |
2622 | { | |
2623 | int incomplete_types = 0; | |
2624 | std::string incomplete_arg_names; | |
2625 | for (const struct value *arg : args) | |
2626 | { | |
2627 | struct type *t = arg->type (); | |
2628 | while (t->code () == TYPE_CODE_PTR) | |
2629 | t = t->target_type (); | |
2630 | if (t->is_stub ()) | |
2631 | { | |
2632 | string_file buffer; | |
2633 | if (incomplete_types > 0) | |
2634 | incomplete_arg_names += ", "; | |
2635 | ||
2636 | current_language->print_type (arg->type (), "", &buffer, | |
2637 | -1, 0, &type_print_raw_options); | |
2638 | ||
2639 | incomplete_types++; | |
2640 | incomplete_arg_names += buffer.string (); | |
2641 | } | |
2642 | } | |
2643 | std::string hint; | |
2644 | if (incomplete_types > 1) | |
2645 | hint = string_printf (_("\nThe types: '%s' aren't fully known to GDB." | |
2646 | " Please cast them directly to the desired" | |
2647 | " typed in the function call."), | |
2648 | incomplete_arg_names.c_str ()); | |
2649 | else if (incomplete_types == 1) | |
2650 | hint = string_printf (_("\nThe type: '%s' isn't fully known to GDB." | |
2651 | " Please cast it directly to the desired" | |
2652 | " typed in the function call."), | |
2653 | incomplete_arg_names.c_str ()); | |
2654 | return hint; | |
2655 | } | |
2656 | ||
/* Given an array of arguments (ARGS) (which includes an entry for
   "this" in the case of C++ methods), the NAME of a function, and
   whether it's a method or not (METHOD), find the best function that
   matches on the argument types according to the overload resolution
   rules.

   METHOD can be one of three values:
     NON_METHOD for non-member functions.
     METHOD: for member functions.
     BOTH: used for overload resolution of operators where the
       candidates are expected to be either member or non member
       functions.  In this case the first argument ARGTYPES
       (representing 'this') is expected to be a reference to the
       target object, and will be dereferenced when attempting the
       non-member search.

   In the case of class methods, the parameter OBJ is an object value
   in which to search for overloaded methods.

   In the case of non-method functions, the parameter FSYM is a symbol
   corresponding to one of the overloaded functions.

   Return value is an integer: 0 -> good match, 10 -> debugger applied
   non-standard coercions, 100 -> incompatible.

   If a method is being searched for, VALP will hold the value.
   If a non-method is being searched for, SYMP will hold the symbol
   for it.

   If a method is being searched for, and it is a static method,
   then STATICP will point to a non-zero value.

   If NO_ADL argument dependent lookup is disabled.  This is used to prevent
   ADL overload candidates when performing overload resolution for a fully
   qualified name.

   If NOSIDE is EVAL_AVOID_SIDE_EFFECTS, then OBJP's memory cannot be
   read while picking the best overload match (it may be all zeroes and thus
   not have a vtable pointer), in which case skip virtual function lookup.
   This is ok as typically EVAL_AVOID_SIDE_EFFECTS is only used to determine
   the result type.

   Note: This function does *not* check the value of
   overload_resolution.  Caller must check it to see whether overload
   resolution is permitted.  */

int
find_overload_match (gdb::array_view<value *> args,
		     const char *name, enum oload_search_type method,
		     struct value **objp, struct symbol *fsym,
		     struct value **valp, struct symbol **symp,
		     int *staticp, const int no_adl,
		     const enum noside noside)
{
  struct value *obj = (objp ? *objp : NULL);
  struct type *obj_type = obj ? obj->type () : NULL;
  /* Index of best overloaded function.  */
  int func_oload_champ = -1;
  int method_oload_champ = -1;
  /* Source-language methods and extension-language xmethods are
     ranked separately, then the two champions are compared.  */
  int src_method_oload_champ = -1;
  int ext_method_oload_champ = -1;

  /* The measure for the current best match.  */
  badness_vector method_badness;
  badness_vector func_badness;
  badness_vector ext_method_badness;
  badness_vector src_method_badness;

  /* TEMP is the object actually searched; value_find_oload_method_list
     may strip pointers/references from it.  */
  struct value *temp = obj;
  /* For methods, the list of overloaded methods.  */
  gdb::array_view<fn_field> methods;
  /* For non-methods, the list of overloaded function symbols.  */
  std::vector<symbol *> functions;
  /* For xmethods, the vector of xmethod workers.  */
  std::vector<xmethod_worker_up> xmethods;
  struct type *basetype = NULL;
  LONGEST boffset;

  const char *obj_type_name = NULL;
  const char *func_name = NULL;
  gdb::unique_xmalloc_ptr<char> temp_func;
  enum oload_classification match_quality;
  enum oload_classification method_match_quality = INCOMPATIBLE;
  enum oload_classification src_method_match_quality = INCOMPATIBLE;
  enum oload_classification ext_method_match_quality = INCOMPATIBLE;
  enum oload_classification func_match_quality = INCOMPATIBLE;

  /* Get the list of overloaded methods or functions.  */
  if (method == METHOD || method == BOTH)
    {
      gdb_assert (obj);

      /* OBJ may be a pointer value rather than the object itself.  */
      obj = coerce_ref (obj);
      while (check_typedef (obj->type ())->code () == TYPE_CODE_PTR)
	obj = coerce_ref (value_ind (obj));
      obj_type_name = obj->type ()->name ();

      /* First check whether this is a data member, e.g. a pointer to
	 a function.  */
      if (check_typedef (obj->type ())->code () == TYPE_CODE_STRUCT)
	{
	  *valp = search_struct_field (name, obj,
				       check_typedef (obj->type ()), 0);
	  if (*valp)
	    {
	      *staticp = 1;
	      return 0;
	    }
	}

      /* Retrieve the list of methods with the name NAME.  */
      value_find_oload_method_list (&temp, name, 0, &methods,
				    &xmethods, &basetype, &boffset);
      /* If this is a method only search, and no methods were found
	 the search has failed.  */
      if (method == METHOD && methods.empty () && xmethods.empty ())
	error (_("Couldn't find method %s%s%s"),
	       obj_type_name,
	       (obj_type_name && *obj_type_name) ? "::" : "",
	       name);
      /* If we are dealing with stub method types, they should have
	 been resolved by find_method_list via
	 value_find_oload_method_list above.  */
      if (!methods.empty ())
	{
	  gdb_assert (TYPE_SELF_TYPE (methods[0].type) != NULL);

	  src_method_oload_champ
	    = find_oload_champ (args,
				methods.size (),
				methods.data (), NULL, NULL,
				&src_method_badness);

	  src_method_match_quality = classify_oload_match
	    (src_method_badness, args.size (),
	     oload_method_static_p (methods.data (), src_method_oload_champ));
	}

      if (!xmethods.empty ())
	{
	  ext_method_oload_champ
	    = find_oload_champ (args,
				xmethods.size (),
				NULL, xmethods.data (), NULL,
				&ext_method_badness);
	  ext_method_match_quality = classify_oload_match (ext_method_badness,
							   args.size (), 0);
	}

      /* Both a source method and an xmethod matched: pick the overall
	 method champion by comparing their badness vectors.  */
      if (src_method_oload_champ >= 0 && ext_method_oload_champ >= 0)
	{
	  switch (compare_badness (ext_method_badness, src_method_badness))
	    {
	    case 0: /* Src method and xmethod are equally good.  */
	      /* If src method and xmethod are equally good, then
		 xmethod should be the winner.  Hence, fall through to the
		 case where a xmethod is better than the source
		 method, except when the xmethod match quality is
		 non-standard.  */
	      [[fallthrough]];
	    case 1: /* Src method and ext method are incompatible.  */
	      /* If ext method match is not standard, then let source method
		 win.  Otherwise, fallthrough to let xmethod win.  */
	      if (ext_method_match_quality != STANDARD)
		{
		  method_oload_champ = src_method_oload_champ;
		  method_badness = src_method_badness;
		  ext_method_oload_champ = -1;
		  method_match_quality = src_method_match_quality;
		  break;
		}
	      [[fallthrough]];
	    case 2: /* Ext method is champion.  */
	      method_oload_champ = ext_method_oload_champ;
	      method_badness = ext_method_badness;
	      src_method_oload_champ = -1;
	      method_match_quality = ext_method_match_quality;
	      break;
	    case 3: /* Src method is champion.  */
	      method_oload_champ = src_method_oload_champ;
	      method_badness = src_method_badness;
	      ext_method_oload_champ = -1;
	      method_match_quality = src_method_match_quality;
	      break;
	    default:
	      gdb_assert_not_reached ("Unexpected overload comparison "
				      "result");
	      break;
	    }
	}
      else if (src_method_oload_champ >= 0)
	{
	  method_oload_champ = src_method_oload_champ;
	  method_badness = src_method_badness;
	  method_match_quality = src_method_match_quality;
	}
      else if (ext_method_oload_champ >= 0)
	{
	  method_oload_champ = ext_method_oload_champ;
	  method_badness = ext_method_badness;
	  method_match_quality = ext_method_match_quality;
	}
    }

  if (method == NON_METHOD || method == BOTH)
    {
      const char *qualified_name = NULL;

      /* If the overload match is being searched for both as a method
	 and non member function, the first argument must now be
	 dereferenced.  */
      if (method == BOTH)
	args[0] = value_ind (args[0]);

      if (fsym)
	{
	  qualified_name = fsym->natural_name ();

	  /* If we have a function with a C++ name, try to extract just
	     the function part.  Do not try this for non-functions (e.g.
	     function pointers).  */
	  if (qualified_name
	      && (check_typedef (fsym->type ())->code ()
		  == TYPE_CODE_FUNC))
	    {
	      temp_func = cp_func_name (qualified_name);

	      /* If cp_func_name did not remove anything, the name of the
		 symbol did not include scope or argument types - it was
		 probably a C-style function.  */
	      if (temp_func != nullptr)
		{
		  if (strcmp (temp_func.get (), qualified_name) == 0)
		    func_name = NULL;
		  else
		    func_name = temp_func.get ();
		}
	    }
	}
      else
	{
	  func_name = name;
	  qualified_name = name;
	}

      /* If there was no C++ name, this must be a C-style function or
	 not a function at all.  Just return the same symbol.  Do the
	 same if cp_func_name fails for some reason.  */
      if (func_name == NULL)
	{
	  *symp = fsym;
	  return 0;
	}

      func_oload_champ = find_oload_champ_namespace (args,
						     func_name,
						     qualified_name,
						     &functions,
						     &func_badness,
						     no_adl);

      if (func_oload_champ >= 0)
	func_match_quality = classify_oload_match (func_badness,
						   args.size (), 0);
    }

  /* Did we find a match ?  */
  if (method_oload_champ == -1 && func_oload_champ == -1)
    throw_error (NOT_FOUND_ERROR,
		 _("No symbol \"%s\" in current context."),
		 name);

  /* If we have found both a method match and a function
     match, find out which one is better, and calculate match
     quality.  */
  if (method_oload_champ >= 0 && func_oload_champ >= 0)
    {
      switch (compare_badness (func_badness, method_badness))
	{
	case 0: /* Top two contenders are equally good.  */
	  /* FIXME: GDB does not support the general ambiguous case.
	     All candidates should be collected and presented the
	     user.  */
	  error (_("Ambiguous overload resolution"));
	  break;
	case 1: /* Incomparable top contenders.  */
	  /* This is an error incompatible candidates
	     should not have been proposed.  */
	  error (_("Internal error: incompatible "
		   "overload candidates proposed"));
	  break;
	case 2: /* Function champion.  */
	  method_oload_champ = -1;
	  match_quality = func_match_quality;
	  break;
	case 3: /* Method champion.  */
	  func_oload_champ = -1;
	  match_quality = method_match_quality;
	  break;
	default:
	  error (_("Internal error: unexpected overload comparison result"));
	  break;
	}
    }
  else
    {
      /* We have either a method match or a function match.  */
      if (method_oload_champ >= 0)
	match_quality = method_match_quality;
      else
	match_quality = func_match_quality;
    }

  /* Report the verdict: hard error for an incompatible champion, a
     warning when non-standard coercions were needed.  */
  if (match_quality == INCOMPATIBLE)
    {
      std::string hint = incomplete_type_hint (args);
      if (method == METHOD)
	error (_("Cannot resolve method %s%s%s to any overloaded instance%s"),
	       obj_type_name,
	       (obj_type_name && *obj_type_name) ? "::" : "",
	       name, hint.c_str ());
      else
	error (_("Cannot resolve function %s to any overloaded instance%s"),
	       func_name, hint.c_str ());
    }
  else if (match_quality == NON_STANDARD)
    {
      if (method == METHOD)
	warning (_("Using non-standard conversion to match "
		   "method %s%s%s to supplied arguments"),
		 obj_type_name,
		 (obj_type_name && *obj_type_name) ? "::" : "",
		 name);
      else
	warning (_("Using non-standard conversion to match "
		   "function %s to supplied arguments"),
		 func_name);
    }

  if (staticp != NULL)
    *staticp = oload_method_static_p (methods.data (), method_oload_champ);

  /* Build the result: a value for a method champion, a symbol for a
     function champion.  */
  if (method_oload_champ >= 0)
    {
      if (src_method_oload_champ >= 0)
	{
	  if (TYPE_FN_FIELD_VIRTUAL_P (methods, method_oload_champ)
	      && noside != EVAL_AVOID_SIDE_EFFECTS)
	    {
	      *valp = value_virtual_fn_field (&temp, methods.data (),
					      method_oload_champ, basetype,
					      boffset);
	    }
	  else
	    *valp = value_fn_field (&temp, methods.data (),
				    method_oload_champ, basetype, boffset);
	}
      else
	*valp = value::from_xmethod
	  (std::move (xmethods[ext_method_oload_champ]));
    }
  else
    *symp = functions[func_oload_champ];

  if (objp)
    {
      /* Hand back the (possibly stripped) object, re-taking its
	 address if the caller originally passed a pointer/reference.  */
      struct type *temp_type = check_typedef (temp->type ());
      struct type *objtype = check_typedef (obj_type);

      if (temp_type->code () != TYPE_CODE_PTR
	  && objtype->is_pointer_or_reference ())
	{
	  temp = value_addr (temp);
	}
      *objp = temp;
    }

  switch (match_quality)
    {
    case INCOMPATIBLE:
      return 100;
    case NON_STANDARD:
      return 10;
    default: /* STANDARD */
      return 0;
    }
}
3045 | ||
3046 | /* Find the best overload match, searching for FUNC_NAME in namespaces | |
3047 | contained in QUALIFIED_NAME until it either finds a good match or | |
3048 | runs out of namespaces. It stores the overloaded functions in | |
3049 | *OLOAD_SYMS, and the badness vector in *OLOAD_CHAMP_BV. If NO_ADL, | |
3050 | argument dependent lookup is not performed. */ | |
3051 | ||
3052 | static int | |
3053 | find_oload_champ_namespace (gdb::array_view<value *> args, | |
3054 | const char *func_name, | |
3055 | const char *qualified_name, | |
3056 | std::vector<symbol *> *oload_syms, | |
3057 | badness_vector *oload_champ_bv, | |
3058 | const int no_adl) | |
3059 | { | |
3060 | int oload_champ; | |
3061 | ||
3062 | find_oload_champ_namespace_loop (args, | |
3063 | func_name, | |
3064 | qualified_name, 0, | |
3065 | oload_syms, oload_champ_bv, | |
3066 | &oload_champ, | |
3067 | no_adl); | |
3068 | ||
3069 | return oload_champ; | |
3070 | } | |
3071 | ||
3072 | /* Helper function for find_oload_champ_namespace; NAMESPACE_LEN is | |
3073 | how deep we've looked for namespaces, and the champ is stored in | |
3074 | OLOAD_CHAMP. The return value is 1 if the champ is a good one, 0 | |
3075 | if it isn't. Other arguments are the same as in | |
3076 | find_oload_champ_namespace. */ | |
3077 | ||
3078 | static int | |
3079 | find_oload_champ_namespace_loop (gdb::array_view<value *> args, | |
3080 | const char *func_name, | |
3081 | const char *qualified_name, | |
3082 | int namespace_len, | |
3083 | std::vector<symbol *> *oload_syms, | |
3084 | badness_vector *oload_champ_bv, | |
3085 | int *oload_champ, | |
3086 | const int no_adl) | |
3087 | { | |
3088 | int next_namespace_len = namespace_len; | |
3089 | int searched_deeper = 0; | |
3090 | int new_oload_champ; | |
3091 | char *new_namespace; | |
3092 | ||
3093 | if (next_namespace_len != 0) | |
3094 | { | |
3095 | gdb_assert (qualified_name[next_namespace_len] == ':'); | |
3096 | next_namespace_len += 2; | |
3097 | } | |
3098 | next_namespace_len += | |
3099 | cp_find_first_component (qualified_name + next_namespace_len); | |
3100 | ||
3101 | /* First, see if we have a deeper namespace we can search in. | |
3102 | If we get a good match there, use it. */ | |
3103 | ||
3104 | if (qualified_name[next_namespace_len] == ':') | |
3105 | { | |
3106 | searched_deeper = 1; | |
3107 | ||
3108 | if (find_oload_champ_namespace_loop (args, | |
3109 | func_name, qualified_name, | |
3110 | next_namespace_len, | |
3111 | oload_syms, oload_champ_bv, | |
3112 | oload_champ, no_adl)) | |
3113 | { | |
3114 | return 1; | |
3115 | } | |
3116 | }; | |
3117 | ||
3118 | /* If we reach here, either we're in the deepest namespace or we | |
3119 | didn't find a good match in a deeper namespace. But, in the | |
3120 | latter case, we still have a bad match in a deeper namespace; | |
3121 | note that we might not find any match at all in the current | |
3122 | namespace. (There's always a match in the deepest namespace, | |
3123 | because this overload mechanism only gets called if there's a | |
3124 | function symbol to start off with.) */ | |
3125 | ||
3126 | new_namespace = (char *) alloca (namespace_len + 1); | |
3127 | strncpy (new_namespace, qualified_name, namespace_len); | |
3128 | new_namespace[namespace_len] = '\0'; | |
3129 | ||
3130 | std::vector<symbol *> new_oload_syms | |
3131 | = make_symbol_overload_list (func_name, new_namespace); | |
3132 | ||
3133 | /* If we have reached the deepest level perform argument | |
3134 | determined lookup. */ | |
3135 | if (!searched_deeper && !no_adl) | |
3136 | { | |
3137 | int ix; | |
3138 | struct type **arg_types; | |
3139 | ||
3140 | /* Prepare list of argument types for overload resolution. */ | |
3141 | arg_types = (struct type **) | |
3142 | alloca (args.size () * (sizeof (struct type *))); | |
3143 | for (ix = 0; ix < args.size (); ix++) | |
3144 | arg_types[ix] = args[ix]->type (); | |
3145 | add_symbol_overload_list_adl ({arg_types, args.size ()}, func_name, | |
3146 | &new_oload_syms); | |
3147 | } | |
3148 | ||
3149 | badness_vector new_oload_champ_bv; | |
3150 | new_oload_champ = find_oload_champ (args, | |
3151 | new_oload_syms.size (), | |
3152 | NULL, NULL, new_oload_syms.data (), | |
3153 | &new_oload_champ_bv); | |
3154 | ||
3155 | /* Case 1: We found a good match. Free earlier matches (if any), | |
3156 | and return it. Case 2: We didn't find a good match, but we're | |
3157 | not the deepest function. Then go with the bad match that the | |
3158 | deeper function found. Case 3: We found a bad match, and we're | |
3159 | the deepest function. Then return what we found, even though | |
3160 | it's a bad match. */ | |
3161 | ||
3162 | if (new_oload_champ != -1 | |
3163 | && classify_oload_match (new_oload_champ_bv, args.size (), 0) == STANDARD) | |
3164 | { | |
3165 | *oload_syms = std::move (new_oload_syms); | |
3166 | *oload_champ = new_oload_champ; | |
3167 | *oload_champ_bv = std::move (new_oload_champ_bv); | |
3168 | return 1; | |
3169 | } | |
3170 | else if (searched_deeper) | |
3171 | { | |
3172 | return 0; | |
3173 | } | |
3174 | else | |
3175 | { | |
3176 | *oload_syms = std::move (new_oload_syms); | |
3177 | *oload_champ = new_oload_champ; | |
3178 | *oload_champ_bv = std::move (new_oload_champ_bv); | |
3179 | return 0; | |
3180 | } | |
3181 | } | |
3182 | ||
/* Look for a function to take ARGS.  Find the best match from among
   the overloaded methods or functions given by METHODS or FUNCTIONS
   or XMETHODS, respectively.  One, and only one of METHODS, FUNCTIONS
   and XMETHODS can be non-NULL.

   NUM_FNS is the length of the array pointed at by METHODS, FUNCTIONS
   or XMETHODS, whichever is non-NULL.

   Return the index of the best match; store an indication of the
   quality of the match in OLOAD_CHAMP_BV.  Returns -1 when NUM_FNS
   is zero.  */

static int
find_oload_champ (gdb::array_view<value *> args,
		  size_t num_fns,
		  fn_field *methods,
		  xmethod_worker_up *xmethods,
		  symbol **functions,
		  badness_vector *oload_champ_bv)
{
  /* A measure of how good an overloaded instance is.  */
  badness_vector bv;
  /* Index of best overloaded function.  */
  int oload_champ = -1;
  /* Current ambiguity state for overload resolution.  */
  int oload_ambiguous = 0;
  /* 0 => no ambiguity, 1 => two good funcs, 2 => incomparable funcs.  */

  /* A champion can be found among methods alone, or among functions
     alone, or in xmethods alone, but not in more than one of these
     groups.  */
  gdb_assert ((methods != NULL) + (functions != NULL) + (xmethods != NULL)
	      == 1);

  /* Consider each candidate in turn.  */
  for (size_t ix = 0; ix < num_fns; ix++)
    {
      int jj;
      int static_offset = 0;
      bool varargs = false;
      std::vector<type *> parm_types;

      /* Collect the candidate's parameter types; the source differs
	 per candidate kind.  */
      if (xmethods != NULL)
	parm_types = xmethods[ix]->get_arg_types ();
      else
	{
	  size_t nparms;

	  if (methods != NULL)
	    {
	      nparms = TYPE_FN_FIELD_TYPE (methods, ix)->num_fields ();
	      static_offset = oload_method_static_p (methods, ix);
	      varargs = TYPE_FN_FIELD_TYPE (methods, ix)->has_varargs ();
	    }
	  else
	    {
	      nparms = functions[ix]->type ()->num_fields ();
	      varargs = functions[ix]->type ()->has_varargs ();
	    }

	  parm_types.reserve (nparms);
	  for (jj = 0; jj < nparms; jj++)
	    {
	      type *t = (methods != NULL
			 ? (TYPE_FN_FIELD_ARGS (methods, ix)[jj].type ())
			 : functions[ix]->type ()->field (jj).type ());
	      parm_types.push_back (t);
	    }
	}

      /* Compare parameter types to supplied argument types.  Skip
	 THIS for static methods.  */
      bv = rank_function (parm_types,
			  args.slice (static_offset),
			  varargs);

      if (overload_debug)
	{
	  if (methods != NULL)
	    gdb_printf (gdb_stderr,
			"Overloaded method instance %s, # of parms %d\n",
			methods[ix].physname, (int) parm_types.size ());
	  else if (xmethods != NULL)
	    gdb_printf (gdb_stderr,
			"Xmethod worker, # of parms %d\n",
			(int) parm_types.size ());
	  else
	    gdb_printf (gdb_stderr,
			"Overloaded function instance "
			"%s # of parms %d\n",
			functions[ix]->demangled_name (),
			(int) parm_types.size ());

	  gdb_printf (gdb_stderr,
		      "...Badness of length : {%d, %d}\n",
		      bv[0].rank, bv[0].subrank);

	  for (jj = 1; jj < bv.size (); jj++)
	    gdb_printf (gdb_stderr,
			"...Badness of arg %d : {%d, %d}\n",
			jj, bv[jj].rank, bv[jj].subrank);
	}

      /* The first candidate becomes the champion unconditionally;
	 NOTE: BV is moved from here, so it must not be reused before
	 the next loop iteration reassigns it.  */
      if (oload_champ_bv->empty ())
	{
	  *oload_champ_bv = std::move (bv);
	  oload_champ = 0;
	}
      else /* See whether current candidate is better or worse than
	      previous best.  */
	switch (compare_badness (bv, *oload_champ_bv))
	  {
	  case 0: /* Top two contenders are equally good.  */
	    oload_ambiguous = 1;
	    break;
	  case 1: /* Incomparable top contenders.  */
	    oload_ambiguous = 2;
	    break;
	  case 2: /* New champion, record details.  */
	    *oload_champ_bv = std::move (bv);
	    oload_ambiguous = 0;
	    oload_champ = ix;
	    break;
	  case 3: /* Current champion remains; nothing to record.  */
	  default:
	    break;
	  }
      if (overload_debug)
	gdb_printf (gdb_stderr, "Overload resolution "
		    "champion is %d, ambiguous? %d\n",
		    oload_champ, oload_ambiguous);
    }

  return oload_champ;
}
3317 | ||
3318 | /* Return 1 if we're looking at a static method, 0 if we're looking at | |
3319 | a non-static method or a function that isn't a method. */ | |
3320 | ||
3321 | static int | |
3322 | oload_method_static_p (struct fn_field *fns_ptr, int index) | |
3323 | { | |
3324 | if (fns_ptr && index >= 0 && TYPE_FN_FIELD_STATIC_P (fns_ptr, index)) | |
3325 | return 1; | |
3326 | else | |
3327 | return 0; | |
3328 | } | |
3329 | ||
3330 | /* Check how good an overload match OLOAD_CHAMP_BV represents. */ | |
3331 | ||
3332 | static enum oload_classification | |
3333 | classify_oload_match (const badness_vector &oload_champ_bv, | |
3334 | int nargs, | |
3335 | int static_offset) | |
3336 | { | |
3337 | int ix; | |
3338 | enum oload_classification worst = STANDARD; | |
3339 | ||
3340 | for (ix = 1; ix <= nargs - static_offset; ix++) | |
3341 | { | |
3342 | /* If this conversion is as bad as INCOMPATIBLE_TYPE_BADNESS | |
3343 | or worse return INCOMPATIBLE. */ | |
3344 | if (compare_ranks (oload_champ_bv[ix], | |
3345 | INCOMPATIBLE_TYPE_BADNESS) <= 0) | |
3346 | return INCOMPATIBLE; /* Truly mismatched types. */ | |
3347 | /* Otherwise If this conversion is as bad as | |
3348 | NS_POINTER_CONVERSION_BADNESS or worse return NON_STANDARD. */ | |
3349 | else if (compare_ranks (oload_champ_bv[ix], | |
3350 | NS_POINTER_CONVERSION_BADNESS) <= 0) | |
3351 | worst = NON_STANDARD; /* Non-standard type conversions | |
3352 | needed. */ | |
3353 | } | |
3354 | ||
3355 | /* If no INCOMPATIBLE classification was found, return the worst one | |
3356 | that was found (if any). */ | |
3357 | return worst; | |
3358 | } | |
3359 | ||
3360 | /* C++: return 1 is NAME is a legitimate name for the destructor of | |
3361 | type TYPE. If TYPE does not have a destructor, or if NAME is | |
3362 | inappropriate for TYPE, an error is signaled. Parameter TYPE should not yet | |
3363 | have CHECK_TYPEDEF applied, this function will apply it itself. */ | |
3364 | ||
3365 | int | |
3366 | destructor_name_p (const char *name, struct type *type) | |
3367 | { | |
3368 | if (name[0] == '~') | |
3369 | { | |
3370 | const char *dname = type_name_or_error (type); | |
3371 | const char *cp = strchr (dname, '<'); | |
3372 | unsigned int len; | |
3373 | ||
3374 | /* Do not compare the template part for template classes. */ | |
3375 | if (cp == NULL) | |
3376 | len = strlen (dname); | |
3377 | else | |
3378 | len = cp - dname; | |
3379 | if (strlen (name + 1) != len || strncmp (dname, name + 1, len) != 0) | |
3380 | error (_("name of destructor must equal name of class")); | |
3381 | else | |
3382 | return 1; | |
3383 | } | |
3384 | return 0; | |
3385 | } | |
3386 | ||
3387 | /* Find an enum constant named NAME in TYPE. TYPE must be an "enum | |
3388 | class". If the name is found, return a value representing it; | |
3389 | otherwise throw an exception. */ | |
3390 | ||
3391 | static struct value * | |
3392 | enum_constant_from_type (struct type *type, const char *name) | |
3393 | { | |
3394 | int i; | |
3395 | int name_len = strlen (name); | |
3396 | ||
3397 | gdb_assert (type->code () == TYPE_CODE_ENUM | |
3398 | && type->is_declared_class ()); | |
3399 | ||
3400 | for (i = TYPE_N_BASECLASSES (type); i < type->num_fields (); ++i) | |
3401 | { | |
3402 | const char *fname = type->field (i).name (); | |
3403 | int len; | |
3404 | ||
3405 | if (type->field (i).loc_kind () != FIELD_LOC_KIND_ENUMVAL | |
3406 | || fname == NULL) | |
3407 | continue; | |
3408 | ||
3409 | /* Look for the trailing "::NAME", since enum class constant | |
3410 | names are qualified here. */ | |
3411 | len = strlen (fname); | |
3412 | if (len + 2 >= name_len | |
3413 | && fname[len - name_len - 2] == ':' | |
3414 | && fname[len - name_len - 1] == ':' | |
3415 | && strcmp (&fname[len - name_len], name) == 0) | |
3416 | return value_from_longest (type, type->field (i).loc_enumval ()); | |
3417 | } | |
3418 | ||
3419 | error (_("no constant named \"%s\" in enum \"%s\""), | |
3420 | name, type->name ()); | |
3421 | } | |
3422 | ||
3423 | /* C++: Given an aggregate type CURTYPE, and a member name NAME, | |
3424 | return the appropriate member (or the address of the member, if | |
3425 | WANT_ADDRESS). This function is used to resolve user expressions | |
3426 | of the form "DOMAIN::NAME". For more details on what happens, see | |
3427 | the comment before value_struct_elt_for_reference. */ | |
3428 | ||
3429 | struct value * | |
3430 | value_aggregate_elt (struct type *curtype, const char *name, | |
3431 | struct type *expect_type, int want_address, | |
3432 | enum noside noside) | |
3433 | { | |
3434 | switch (curtype->code ()) | |
3435 | { | |
3436 | case TYPE_CODE_STRUCT: | |
3437 | case TYPE_CODE_UNION: | |
3438 | return value_struct_elt_for_reference (curtype, 0, curtype, | |
3439 | name, expect_type, | |
3440 | want_address, noside); | |
3441 | case TYPE_CODE_NAMESPACE: | |
3442 | return value_namespace_elt (curtype, name, | |
3443 | want_address, noside); | |
3444 | ||
3445 | case TYPE_CODE_ENUM: | |
3446 | return enum_constant_from_type (curtype, name); | |
3447 | ||
3448 | default: | |
3449 | internal_error (_("non-aggregate type in value_aggregate_elt")); | |
3450 | } | |
3451 | } | |
3452 | ||
3453 | /* Compares the two method/function types T1 and T2 for "equality" | |
3454 | with respect to the methods' parameters. If the types of the | |
3455 | two parameter lists are the same, returns 1; 0 otherwise. This | |
3456 | comparison may ignore any artificial parameters in T1 if | |
3457 | SKIP_ARTIFICIAL is non-zero. This function will ALWAYS skip | |
3458 | the first artificial parameter in T1, assumed to be a 'this' pointer. | |
3459 | ||
3460 | The type T2 is expected to have come from make_params (in eval.c). */ | |
3461 | ||
3462 | static int | |
3463 | compare_parameters (struct type *t1, struct type *t2, int skip_artificial) | |
3464 | { | |
3465 | int start = 0; | |
3466 | ||
3467 | if (t1->num_fields () > 0 && t1->field (0).is_artificial ()) | |
3468 | ++start; | |
3469 | ||
3470 | /* If skipping artificial fields, find the first real field | |
3471 | in T1. */ | |
3472 | if (skip_artificial) | |
3473 | { | |
3474 | while (start < t1->num_fields () | |
3475 | && t1->field (start).is_artificial ()) | |
3476 | ++start; | |
3477 | } | |
3478 | ||
3479 | /* Now compare parameters. */ | |
3480 | ||
3481 | /* Special case: a method taking void. T1 will contain no | |
3482 | non-artificial fields, and T2 will contain TYPE_CODE_VOID. */ | |
3483 | if ((t1->num_fields () - start) == 0 && t2->num_fields () == 1 | |
3484 | && t2->field (0).type ()->code () == TYPE_CODE_VOID) | |
3485 | return 1; | |
3486 | ||
3487 | if ((t1->num_fields () - start) == t2->num_fields ()) | |
3488 | { | |
3489 | int i; | |
3490 | ||
3491 | for (i = 0; i < t2->num_fields (); ++i) | |
3492 | { | |
3493 | if (compare_ranks (rank_one_type (t1->field (start + i).type (), | |
3494 | t2->field (i).type (), NULL), | |
3495 | EXACT_MATCH_BADNESS) != 0) | |
3496 | return 0; | |
3497 | } | |
3498 | ||
3499 | return 1; | |
3500 | } | |
3501 | ||
3502 | return 0; | |
3503 | } | |
3504 | ||
/* C++: Given an aggregate type VT, and a class type CLS, search
   recursively for CLS using value V; If found, store the offset
   which is either fetched from the virtual base pointer if CLS
   is virtual or accumulated offset of its parent classes if
   CLS is non-virtual in *BOFFS, set ISVIRT to indicate if CLS
   is virtual, and return true.  If not found, return false.  */

static bool
get_baseclass_offset (struct type *vt, struct type *cls,
		      struct value *v, int *boffs, bool *isvirt)
{
  /* Walk every direct base class of VT.  */
  for (int i = 0; i < TYPE_N_BASECLASSES (vt); i++)
    {
      struct type *t = vt->field (i).type ();
      if (types_equal (t, cls))
	{
	  if (BASETYPE_VIA_VIRTUAL (vt, i))
	    {
	      /* A virtual base's offset must be read out of the object
		 itself, via baseclass_offset.  */
	      const gdb_byte *adr = v->contents_for_printing ().data ();
	      *boffs = baseclass_offset (vt, i, adr, v->offset (),
					 value_as_long (v), v);
	      *isvirt = true;
	    }
	  else
	    *isvirt = false;
	  return true;
	}

      /* Not a direct base; recurse into this base class.  */
      if (get_baseclass_offset (check_typedef (t), cls, v, boffs, isvirt))
	{
	  /* Only accumulate this level's offset when CLS was found as
	     a non-virtual base; a virtual base's offset is already
	     complete.  */
	  if (*isvirt == false)	/* Add non-virtual base offset.  */
	    {
	      const gdb_byte *adr = v->contents_for_printing ().data ();
	      *boffs += baseclass_offset (vt, i, adr, v->offset (),
					  value_as_long (v), v);
	    }
	  return true;
	}
    }

  return false;
}
3547 | ||
/* C++: Given an aggregate type CURTYPE, and a member name NAME,
   return the address of this member as a "pointer to member" type.
   If INTYPE is non-null, then it will be the type of the member we
   are looking for.  This will help us resolve "pointers to member
   functions".  This function is used to resolve user expressions of
   the form "DOMAIN::NAME".  OFFSET is the accumulated byte offset of
   CURTYPE within DOMAIN as the search descends through base classes;
   WANT_ADDRESS and NOSIDE control whether an address or value is
   produced and whether side effects are permitted.  */

static struct value *
value_struct_elt_for_reference (struct type *domain, int offset,
				struct type *curtype, const char *name,
				struct type *intype,
				int want_address,
				enum noside noside)
{
  struct type *t = check_typedef (curtype);
  int i;
  struct value *result;

  if (t->code () != TYPE_CODE_STRUCT
      && t->code () != TYPE_CODE_UNION)
    error (_("Internal error: non-aggregate type "
	     "to value_struct_elt_for_reference"));

  /* First pass: look for NAME among the data fields of T itself
     (base classes are handled by the recursion at the bottom).  */
  for (i = t->num_fields () - 1; i >= TYPE_N_BASECLASSES (t); i--)
    {
      const char *t_field_name = t->field (i).name ();

      if (t_field_name && strcmp (t_field_name, name) == 0)
	{
	  if (t->field (i).is_static ())
	    {
	      struct value *v = value_static_field (t, i);
	      if (want_address)
		v = value_addr (v);
	      return v;
	    }
	  /* A pointer-to-member cannot designate part of a byte.  */
	  if (t->field (i).is_packed ())
	    error (_("pointers to bitfield members not allowed"));

	  if (want_address)
	    /* A member pointer is represented as the member's byte
	       offset within the class.  */
	    return value_from_longest
	      (lookup_memberptr_type (t->field (i).type (), domain),
	       offset + (LONGEST) (t->field (i).loc_bitpos () >> 3));
	  else if (noside != EVAL_NORMAL)
	    return value::allocate (t->field (i).type ());
	  else
	    {
	      /* Try to evaluate NAME as a qualified name with implicit
		 this pointer.  In this case, attempt to return the
		 equivalent to `this->*(&TYPE::NAME)'.  */
	      struct value *v = value_of_this_silent (current_language);
	      if (v != NULL)
		{
		  struct value *ptr, *this_v = v;
		  long mem_offset;
		  struct type *type, *tmp;

		  ptr = value_aggregate_elt (domain, name, NULL, 1, noside);
		  type = check_typedef (ptr->type ());
		  gdb_assert (type != NULL
			      && type->code () == TYPE_CODE_MEMBERPTR);
		  tmp = lookup_pointer_type (TYPE_SELF_TYPE (type));
		  v = value_cast_pointers (tmp, v, 1);
		  mem_offset = value_as_long (ptr);
		  if (domain != curtype)
		    {
		      /* Find class offset of type CURTYPE from either its
			 parent type DOMAIN or the type of implied this.  */
		      int boff = 0;
		      bool isvirt = false;
		      if (get_baseclass_offset (domain, curtype, v, &boff,
						&isvirt))
			mem_offset += boff;
		      else
			{
			  struct type *p = check_typedef (this_v->type ());
			  p = check_typedef (p->target_type ());
			  if (get_baseclass_offset (p, curtype, this_v,
						    &boff, &isvirt))
			    mem_offset += boff;
			}
		    }
		  /* Compute `this' + member offset and dereference.  */
		  tmp = lookup_pointer_type (type->target_type ());
		  result = value_from_pointer (tmp,
					       value_as_long (v) + mem_offset);
		  return value_ind (result);
		}

	      error (_("Cannot reference non-static field \"%s\""), name);
	    }
	}
    }

  /* C++: If it was not found as a data field, then try to return it
     as a pointer to a method.  */

  /* Perform all necessary dereferencing.  */
  while (intype && intype->code () == TYPE_CODE_PTR)
    intype = intype->target_type ();

  /* Second pass: look for NAME among T's member functions.  */
  for (i = TYPE_NFN_FIELDS (t) - 1; i >= 0; --i)
    {
      const char *t_field_name = TYPE_FN_FIELDLIST_NAME (t, i);

      if (t_field_name && strcmp (t_field_name, name) == 0)
	{
	  int j;
	  int len = TYPE_FN_FIELDLIST_LENGTH (t, i);
	  struct fn_field *f = TYPE_FN_FIELDLIST1 (t, i);

	  check_stub_method_group (t, i);

	  if (intype)
	    {
	      /* The caller supplied an explicit type instantiation;
		 find the overload whose qualifiers and parameter list
		 match it exactly.  */
	      for (j = 0; j < len; ++j)
		{
		  if (TYPE_CONST (intype) != TYPE_FN_FIELD_CONST (f, j))
		    continue;
		  if (TYPE_VOLATILE (intype) != TYPE_FN_FIELD_VOLATILE (f, j))
		    continue;

		  if (compare_parameters (TYPE_FN_FIELD_TYPE (f, j), intype, 0)
		      || compare_parameters (TYPE_FN_FIELD_TYPE (f, j),
					     intype, 1))
		    break;
		}

	      if (j == len)
		error (_("no member function matches "
			 "that type instantiation"));
	    }
	  else
	    {
	      int ii;

	      j = -1;
	      for (ii = 0; ii < len; ++ii)
		{
		  /* Skip artificial methods.  This is necessary if,
		     for example, the user wants to "print
		     subclass::subclass" with only one user-defined
		     constructor.  There is no ambiguity in this case.
		     We are careful here to allow artificial methods
		     if they are the unique result.  */
		  if (TYPE_FN_FIELD_ARTIFICIAL (f, ii))
		    {
		      if (j == -1)
			j = ii;
		      continue;
		    }

		  /* Desired method is ambiguous if more than one
		     method is defined.  */
		  if (j != -1 && !TYPE_FN_FIELD_ARTIFICIAL (f, j))
		    error (_("non-unique member `%s' requires "
			     "type instantiation"), name);

		  j = ii;
		}

	      if (j == -1)
		error (_("no matching member function"));
	    }

	  if (TYPE_FN_FIELD_STATIC_P (f, j))
	    {
	      /* Static member function: resolve it through its mangled
		 (physical) name as an ordinary function symbol.  */
	      struct symbol *s =
		lookup_symbol (TYPE_FN_FIELD_PHYSNAME (f, j),
			       0, SEARCH_FUNCTION_DOMAIN, 0).symbol;

	      if (s == NULL)
		return NULL;

	      if (want_address)
		return value_addr (read_var_value (s, 0, 0));
	      else
		return read_var_value (s, 0, 0);
	    }

	  if (TYPE_FN_FIELD_VIRTUAL_P (f, j))
	    {
	      if (want_address)
		{
		  /* Virtual method: the method pointer encodes the
		     vtable offset rather than a code address.  */
		  result = value::allocate
		    (lookup_methodptr_type (TYPE_FN_FIELD_TYPE (f, j)));
		  cplus_make_method_ptr (result->type (),
					 result->contents_writeable ().data (),
					 TYPE_FN_FIELD_VOFFSET (f, j), 1);
		}
	      else if (noside == EVAL_AVOID_SIDE_EFFECTS)
		return value::allocate (TYPE_FN_FIELD_TYPE (f, j));
	      else
		error (_("Cannot reference virtual member function \"%s\""),
		       name);
	    }
	  else
	    {
	      /* Non-virtual method: use the function's address.  */
	      struct symbol *s =
		lookup_symbol (TYPE_FN_FIELD_PHYSNAME (f, j),
			       0, SEARCH_FUNCTION_DOMAIN, 0).symbol;

	      if (s == NULL)
		return NULL;

	      struct value *v = read_var_value (s, 0, 0);
	      if (!want_address)
		result = v;
	      else
		{
		  result = value::allocate (lookup_methodptr_type (TYPE_FN_FIELD_TYPE (f, j)));
		  cplus_make_method_ptr (result->type (),
					 result->contents_writeable ().data (),
					 v->address (), 0);
		}
	    }
	  return result;
	}
    }
  /* Third pass: recurse into base classes, accumulating each base's
     byte offset (virtual bases contribute no static offset here).  */
  for (i = TYPE_N_BASECLASSES (t) - 1; i >= 0; i--)
    {
      struct value *v;
      int base_offset;

      if (BASETYPE_VIA_VIRTUAL (t, i))
	base_offset = 0;
      else
	base_offset = TYPE_BASECLASS_BITPOS (t, i) / 8;
      v = value_struct_elt_for_reference (domain,
					  offset + base_offset,
					  TYPE_BASECLASS (t, i),
					  name, intype,
					  want_address, noside);
      if (v)
	return v;
    }

  /* As a last chance, pretend that CURTYPE is a namespace, and look
     it up that way; this (frequently) works for types nested inside
     classes.  */

  return value_maybe_namespace_elt (curtype, name,
				    want_address, noside);
}
3791 | ||
3792 | /* C++: Return the member NAME of the namespace given by the type | |
3793 | CURTYPE. */ | |
3794 | ||
3795 | static struct value * | |
3796 | value_namespace_elt (const struct type *curtype, | |
3797 | const char *name, int want_address, | |
3798 | enum noside noside) | |
3799 | { | |
3800 | struct value *retval = value_maybe_namespace_elt (curtype, name, | |
3801 | want_address, | |
3802 | noside); | |
3803 | ||
3804 | if (retval == NULL) | |
3805 | error (_("No symbol \"%s\" in namespace \"%s\"."), | |
3806 | name, curtype->name ()); | |
3807 | ||
3808 | return retval; | |
3809 | } | |
3810 | ||
3811 | /* A helper function used by value_namespace_elt and | |
3812 | value_struct_elt_for_reference. It looks up NAME inside the | |
3813 | context CURTYPE; this works if CURTYPE is a namespace or if CURTYPE | |
3814 | is a class and NAME refers to a type in CURTYPE itself (as opposed | |
3815 | to, say, some base class of CURTYPE). */ | |
3816 | ||
3817 | static struct value * | |
3818 | value_maybe_namespace_elt (const struct type *curtype, | |
3819 | const char *name, int want_address, | |
3820 | enum noside noside) | |
3821 | { | |
3822 | const char *namespace_name = curtype->name (); | |
3823 | struct block_symbol sym; | |
3824 | struct value *result; | |
3825 | ||
3826 | sym = cp_lookup_symbol_namespace (namespace_name, name, | |
3827 | get_selected_block (0), SEARCH_VFT); | |
3828 | ||
3829 | if (sym.symbol == NULL) | |
3830 | return NULL; | |
3831 | else if ((noside == EVAL_AVOID_SIDE_EFFECTS) | |
3832 | && (sym.symbol->aclass () == LOC_TYPEDEF)) | |
3833 | result = value::allocate (sym.symbol->type ()); | |
3834 | else | |
3835 | result = value_of_variable (sym.symbol, sym.block); | |
3836 | ||
3837 | if (want_address) | |
3838 | result = value_addr (result); | |
3839 | ||
3840 | return result; | |
3841 | } | |
3842 | ||
/* Given a pointer or a reference value V, find its real (RTTI) type.

   Other parameters FULL, TOP, USING_ENC as with value_rtti_type()
   and refer to the values computed for the object pointed to.  */

struct type *
value_rtti_indirect_type (struct value *v, int *full,
			  LONGEST *top, int *using_enc)
{
  struct value *target = NULL;
  struct type *type, *real_type, *target_type;

  type = v->type ();
  type = check_typedef (type);
  if (TYPE_IS_REFERENCE (type))
    target = coerce_ref (v);
  else if (type->code () == TYPE_CODE_PTR)
    {

      try
	{
	  target = value_ind (v);
	}
      catch (const gdb_exception_error &except)
	{
	  if (except.error == MEMORY_ERROR)
	    {
	      /* value_ind threw a memory error. The pointer is NULL or
		 contains an uninitialized value: we can't determine any
		 type.  */
	      return NULL;
	    }
	  /* Any other error is not ours to handle; re-throw it.  */
	  throw;
	}
    }
  else
    /* V is neither a pointer nor a reference; no RTTI to find.  */
    return NULL;

  real_type = value_rtti_type (target, full, top, using_enc);

  if (real_type)
    {
      /* Copy qualifiers to the referenced object.  */
      target_type = target->type ();
      real_type = make_cv_type (TYPE_CONST (target_type),
				TYPE_VOLATILE (target_type), real_type, NULL);
      /* Re-wrap the dynamic type in the same kind of indirection
	 (reference or pointer) that V had.  */
      if (TYPE_IS_REFERENCE (type))
	real_type = lookup_reference_type (real_type, type->code ());
      else if (type->code () == TYPE_CODE_PTR)
	real_type = lookup_pointer_type (real_type);
      else
	internal_error (_("Unexpected value type."));

      /* Copy qualifiers to the pointer/reference.  */
      real_type = make_cv_type (TYPE_CONST (type), TYPE_VOLATILE (type),
				real_type, NULL);
    }

  return real_type;
}
3903 | ||
/* Given a value pointed to by ARGP, check its real run-time type, and
   if that is different from the enclosing type, create a new value
   using the real run-time type as the enclosing type (and of the same
   type as ARGP) and return it, with the embedded offset adjusted to
   be the correct offset to the enclosed object.  RTYPE is the type,
   and XFULL, XTOP, and XUSING_ENC are the other parameters, computed
   by value_rtti_type().  If these are available, they can be supplied
   and a second call to value_rtti_type() is avoided.  (Pass RTYPE ==
   NULL if they're not available.  */

struct value *
value_full_object (struct value *argp,
		   struct type *rtype,
		   int xfull, int xtop,
		   int xusing_enc)
{
  struct type *real_type;
  int full = 0;
  LONGEST top = -1;
  int using_enc = 0;
  struct value *new_val;

  /* Use the caller-supplied RTTI results when available to avoid
     recomputing them.  */
  if (rtype)
    {
      real_type = rtype;
      full = xfull;
      top = xtop;
      using_enc = xusing_enc;
    }
  else
    real_type = value_rtti_type (argp, &full, &top, &using_enc);

  /* If no RTTI data, or if object is already complete, do nothing.  */
  if (!real_type || real_type == argp->enclosing_type ())
    return argp;

  /* In a destructor we might see a real type that is a superclass of
     the object's type.  In this case it is better to leave the object
     as-is.  */
  if (full
      && real_type->length () < argp->enclosing_type ()->length ())
    return argp;

  /* If we have the full object, but for some reason the enclosing
     type is wrong, set it.  */
  /* pai: FIXME -- sounds iffy */
  if (full)
    {
      argp = argp->copy ();
      argp->set_enclosing_type (real_type);
      return argp;
    }

  /* Check if object is in memory.  */
  if (argp->lval () != lval_memory)
    {
      warning (_("Couldn't retrieve complete object of RTTI "
		 "type %s; object may be in register(s)."),
	       real_type->name ());

      return argp;
    }

  /* All other cases -- retrieve the complete object.  */
  /* Go back by the computed top_offset from the beginning of the
     object, adjusting for the embedded offset of argp if that's what
     value_rtti_type used for its computation.  */
  new_val = value_at_lazy (real_type, argp->address () - top +
			   (using_enc ? 0 : argp->embedded_offset ()));
  /* Keep ARGP's declared type; only the enclosing object grows.  */
  new_val->deprecated_set_type (argp->type ());
  new_val->set_embedded_offset ((using_enc
				 ? top + argp->embedded_offset ()
				 : top));
  return new_val;
}
3979 | ||
3980 | ||
3981 | /* Return the value of the local variable, if one exists. Throw error | |
3982 | otherwise, such as if the request is made in an inappropriate context. */ | |
3983 | ||
3984 | struct value * | |
3985 | value_of_this (const struct language_defn *lang) | |
3986 | { | |
3987 | struct block_symbol sym; | |
3988 | const struct block *b; | |
3989 | frame_info_ptr frame; | |
3990 | ||
3991 | if (lang->name_of_this () == NULL) | |
3992 | error (_("no `this' in current language")); | |
3993 | ||
3994 | frame = get_selected_frame (_("no frame selected")); | |
3995 | ||
3996 | b = get_frame_block (frame, NULL); | |
3997 | ||
3998 | sym = lookup_language_this (lang, b); | |
3999 | if (sym.symbol == NULL) | |
4000 | error (_("current stack frame does not contain a variable named `%s'"), | |
4001 | lang->name_of_this ()); | |
4002 | ||
4003 | return read_var_value (sym.symbol, sym.block, frame); | |
4004 | } | |
4005 | ||
4006 | /* Return the value of the local variable, if one exists. Return NULL | |
4007 | otherwise. Never throw error. */ | |
4008 | ||
4009 | struct value * | |
4010 | value_of_this_silent (const struct language_defn *lang) | |
4011 | { | |
4012 | struct value *ret = NULL; | |
4013 | ||
4014 | try | |
4015 | { | |
4016 | ret = value_of_this (lang); | |
4017 | } | |
4018 | catch (const gdb_exception_error &except) | |
4019 | { | |
4020 | } | |
4021 | ||
4022 | return ret; | |
4023 | } | |
4024 | ||
/* Create a slice (sub-string, sub-array) of ARRAY, that is LENGTH
   elements long, starting at LOWBOUND.  The result has the same lower
   bound as the original ARRAY.  Errors out if ARRAY is not an array or
   string, or if the requested range falls outside its bounds.  */

struct value *
value_slice (struct value *array, int lowbound, int length)
{
  struct type *slice_range_type, *slice_type, *range_type;
  LONGEST lowerbound, upperbound;
  struct value *slice;
  struct type *array_type;

  array_type = check_typedef (array->type ());
  if (array_type->code () != TYPE_CODE_ARRAY
      && array_type->code () != TYPE_CODE_STRING)
    error (_("cannot take slice of non-array"));

  if (type_not_allocated (array_type))
    error (_("array not allocated"));
  if (type_not_associated (array_type))
    error (_("array not associated"));

  range_type = array_type->index_type ();
  if (!get_discrete_bounds (range_type, &lowerbound, &upperbound))
    error (_("slice from bad array or bitstring"));

  /* Validate [LOWBOUND, LOWBOUND + LENGTH - 1] against the array's
     declared bounds.  */
  if (lowbound < lowerbound || length < 0
      || lowbound + length - 1 > upperbound)
    error (_("slice out of range"));

  /* FIXME-type-allocation: need a way to free this type when we are
     done with it.  */
  type_allocator alloc (range_type->target_type ());
  slice_range_type = create_static_range_type (alloc,
					       range_type->target_type (),
					       lowbound,
					       lowbound + length - 1);

  {
    struct type *element_type = array_type->target_type ();
    /* Byte offset of the slice's first element within ARRAY.  */
    LONGEST offset
      = (lowbound - lowerbound) * check_typedef (element_type)->length ();

    slice_type = create_array_type (alloc,
				    element_type,
				    slice_range_type);
    /* Preserve TYPE_CODE_STRING vs TYPE_CODE_ARRAY.  */
    slice_type->set_code (array_type->code ());

    /* If ARGP is a lazy memory value, keep the slice lazy too;
       otherwise copy the relevant bytes now.  */
    if (array->lval () == lval_memory && array->lazy ())
      slice = value::allocate_lazy (slice_type);
    else
      {
	slice = value::allocate (slice_type);
	array->contents_copy (slice, 0, offset,
			      type_length_units (slice_type));
      }

    slice->set_component_location (array);
    slice->set_offset (array->offset () + offset);
  }

  return slice;
}
4088 | ||
4089 | /* See value.h. */ | |
4090 | ||
4091 | struct value * | |
4092 | value_literal_complex (struct value *arg1, | |
4093 | struct value *arg2, | |
4094 | struct type *type) | |
4095 | { | |
4096 | struct value *val; | |
4097 | struct type *real_type = type->target_type (); | |
4098 | ||
4099 | val = value::allocate (type); | |
4100 | arg1 = value_cast (real_type, arg1); | |
4101 | arg2 = value_cast (real_type, arg2); | |
4102 | ||
4103 | int len = real_type->length (); | |
4104 | ||
4105 | copy (arg1->contents (), | |
4106 | val->contents_raw ().slice (0, len)); | |
4107 | copy (arg2->contents (), | |
4108 | val->contents_raw ().slice (len, len)); | |
4109 | ||
4110 | return val; | |
4111 | } | |
4112 | ||
4113 | /* See value.h. */ | |
4114 | ||
4115 | struct value * | |
4116 | value_real_part (struct value *value) | |
4117 | { | |
4118 | struct type *type = check_typedef (value->type ()); | |
4119 | struct type *ttype = type->target_type (); | |
4120 | ||
4121 | gdb_assert (type->code () == TYPE_CODE_COMPLEX); | |
4122 | return value_from_component (value, ttype, 0); | |
4123 | } | |
4124 | ||
4125 | /* See value.h. */ | |
4126 | ||
4127 | struct value * | |
4128 | value_imaginary_part (struct value *value) | |
4129 | { | |
4130 | struct type *type = check_typedef (value->type ()); | |
4131 | struct type *ttype = type->target_type (); | |
4132 | ||
4133 | gdb_assert (type->code () == TYPE_CODE_COMPLEX); | |
4134 | return value_from_component (value, ttype, | |
4135 | check_typedef (ttype)->length ()); | |
4136 | } | |
4137 | ||
4138 | /* Cast a value into the appropriate complex data type. */ | |
4139 | ||
4140 | static struct value * | |
4141 | cast_into_complex (struct type *type, struct value *val) | |
4142 | { | |
4143 | struct type *real_type = type->target_type (); | |
4144 | ||
4145 | if (val->type ()->code () == TYPE_CODE_COMPLEX) | |
4146 | { | |
4147 | struct type *val_real_type = val->type ()->target_type (); | |
4148 | struct value *re_val = value::allocate (val_real_type); | |
4149 | struct value *im_val = value::allocate (val_real_type); | |
4150 | int len = val_real_type->length (); | |
4151 | ||
4152 | copy (val->contents ().slice (0, len), | |
4153 | re_val->contents_raw ()); | |
4154 | copy (val->contents ().slice (len, len), | |
4155 | im_val->contents_raw ()); | |
4156 | ||
4157 | return value_literal_complex (re_val, im_val, type); | |
4158 | } | |
4159 | else if (val->type ()->code () == TYPE_CODE_FLT | |
4160 | || val->type ()->code () == TYPE_CODE_INT) | |
4161 | return value_literal_complex (val, | |
4162 | value::zero (real_type, not_lval), | |
4163 | type); | |
4164 | else | |
4165 | error (_("cannot cast non-number to complex")); | |
4166 | } | |
4167 | ||
/* Module initializer: register the "set/show overload-resolution"
   boolean command and enable overload resolution by default.  */
void _initialize_valops ();
void
_initialize_valops ()
{
  add_setshow_boolean_cmd ("overload-resolution", class_support,
			   &overload_resolution, _("\
Set overload resolution in evaluating C++ functions."), _("\
Show overload resolution in evaluating C++ functions."),
			   NULL, NULL,
			   show_overload_resolution,
			   &setlist, &showlist);
  /* Overload resolution is on by default.  */
  overload_resolution = 1;
}