/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987-2013 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "flags.h"
#include "convert.h"
#include "diagnostic-core.h"
#include "target.h"
#include "langhooks.h"

/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */

tree
convert_to_pointer (tree type, tree expr)
{
  location_t loc = EXPR_LOCATION (expr);
  if (TREE_TYPE (expr) == type)
    return expr;

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
        /* If the pointers point to different address spaces, conversion needs
           to be done via an ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR.  */
        addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
        addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));

        if (to_as == from_as)
          return fold_build1_loc (loc, NOP_EXPR, type, expr);
        else
          return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
      }

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      {
        /* If the input precision differs from the target pointer type
           precision, first convert the input expression to an integer type of
           the target precision.  Some targets, e.g. VMS, need several pointer
           sizes to coexist so the latter isn't necessarily POINTER_SIZE.  */
        unsigned int pprec = TYPE_PRECISION (type);
        unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));

        if (eprec != pprec)
          expr = fold_build1_loc (loc, NOP_EXPR,
                                  lang_hooks.types.type_for_size (pprec, 0),
                                  expr);
      }

      return fold_build1_loc (loc, CONVERT_EXPR, type, expr);

    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}
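
/* Illustrative note (not from the original sources; the widths are
   assumptions chosen only for this example): on a target where pointers
   are 64 bits wide and `int' is 32 bits, converting

       int i = ...;
       char *p = (char *) i;

   reaches the INTEGER_TYPE case above, so the value is first widened to a
   64-bit integer type and only then converted to the pointer type.  */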


/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, fixed-point, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present in runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
        CASE_MATHFN (COSH)
        CASE_MATHFN (EXP)
        CASE_MATHFN (EXP10)
        CASE_MATHFN (EXP2)
        CASE_MATHFN (EXPM1)
        CASE_MATHFN (GAMMA)
        CASE_MATHFN (J0)
        CASE_MATHFN (J1)
        CASE_MATHFN (LGAMMA)
        CASE_MATHFN (POW10)
        CASE_MATHFN (SINH)
        CASE_MATHFN (TGAMMA)
        CASE_MATHFN (Y0)
        CASE_MATHFN (Y1)
          /* The above functions may set errno differently with float
             input or output so this transformation is not safe with
             -fmath-errno.  */
          if (flag_errno_math)
            break;
        CASE_MATHFN (ACOS)
        CASE_MATHFN (ACOSH)
        CASE_MATHFN (ASIN)
        CASE_MATHFN (ASINH)
        CASE_MATHFN (ATAN)
        CASE_MATHFN (ATANH)
        CASE_MATHFN (CBRT)
        CASE_MATHFN (COS)
        CASE_MATHFN (ERF)
        CASE_MATHFN (ERFC)
        CASE_MATHFN (LOG)
        CASE_MATHFN (LOG10)
        CASE_MATHFN (LOG2)
        CASE_MATHFN (LOG1P)
        CASE_MATHFN (SIN)
        CASE_MATHFN (TAN)
        CASE_MATHFN (TANH)
          /* For the above functions this conversion is not safe without
             unsafe math optimizations.  */
          if (!flag_unsafe_math_optimizations)
            break;
        CASE_MATHFN (SQRT)
        CASE_MATHFN (FABS)
        CASE_MATHFN (LOGB)
#undef CASE_MATHFN
            {
              tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
              tree newtype = type;

              /* We have (outertype)sqrt((innertype)x).  Choose the wider of
                 the two types as the safe type for the operation.  */
              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
                newtype = TREE_TYPE (arg0);

              /* We are considering converting

                   (T1) sqrtT2 ((T2) exprT3)
                 to
                   (T1) sqrtT4 ((T4) exprT3),

                 where T1 is TYPE, T2 is ITYPE, T3 is TREE_TYPE (ARG0),
                 and T4 is NEWTYPE.  All of these types are floating-point
                 types.  T4 (NEWTYPE) should be narrower than T2 (ITYPE).
                 The conversion is safe only if P1 >= P2*2+2, where P1 and
                 P2 are the precisions of T2 and T4.  See the following URL
                 for a reference:
                 http://stackoverflow.com/questions/9235456/determining-
                 floating-point-square-root
              */
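
              /* Worked example (illustrative; the formats are assumptions,
                 not taken from the original sources): with IEEE binary32 and
                 binary64, narrowing (float) sqrt ((double) f), where F is a
                 float, has P1 = 53 and P2 = 24, and 53 >= 2*24 + 2, so the
                 narrowed sqrtf gives the same correctly rounded result.  On
                 a target whose long double is the 80-bit x87 format
                 (P1 = 64), narrowing sqrtl to double (P2 = 53) fails the
                 test, since 64 < 2*53 + 2, and is rejected below.  */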
              if ((fcode == BUILT_IN_SQRT || fcode == BUILT_IN_SQRTL)
                  && !flag_unsafe_math_optimizations)
                {
                  /* The following conversion is unsafe even if the precision
                     condition below is satisfied:

                     (float) sqrtl ((long double) double_val) -> (float) sqrt (double_val)
                  */
                  if (TYPE_MODE (type) != TYPE_MODE (newtype))
                    break;

                  int p1 = REAL_MODE_FORMAT (TYPE_MODE (itype))->p;
                  int p2 = REAL_MODE_FORMAT (TYPE_MODE (newtype))->p;
                  if (p1 < p2 * 2 + 2)
                    break;
                }

              /* Be careful about integer to fp conversions.
                 These may overflow still.  */
              if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                  && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                      || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
                {
                  tree fn = mathfn_built_in (newtype, fcode);

                  if (fn)
                    {
                      tree arg = fold (convert_to_real (newtype, arg0));
                      expr = build_call_expr (fn, 1, arg);
                      if (newtype == type)
                        return expr;
                    }
                }
            }
        default:
          break;
        }
    }
  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));

          /* Make sure (type)arg0 is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
        }
    }
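
  /* Illustrative example (not from the original sources): for a double d,
     the expression (double) floorl ((long double) d) satisfies the checks
     above and is rewritten as floor (d); the stripped argument is already a
     double, so the cast to TYPE is an extension and no double rounding can
     occur.  */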

  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is safe for
           round-to-nearest rounding mode when the inner type is float.  */
        case ABS_EXPR:
        case NEGATE_EXPR:
          if (!flag_rounding_math
              && FLOAT_TYPE_P (itype)
              && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
            return build1 (TREE_CODE (expr), type,
                           fold (convert_to_real (type,
                                                  TREE_OPERAND (expr, 0))));
          break;
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
        case PLUS_EXPR:
        case MINUS_EXPR:
        case MULT_EXPR:
        case RDIV_EXPR:
          {
            tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
            tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && FLOAT_TYPE_P (TREE_TYPE (arg1))
                && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
              {
                tree newtype = type;

                if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
                    || TYPE_MODE (type) == SDmode)
                  newtype = dfloat32_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
                    || TYPE_MODE (type) == DDmode)
                  newtype = dfloat64_type_node;
                if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                    || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
                    || TYPE_MODE (type) == TDmode)
                  newtype = dfloat128_type_node;
                if (newtype == dfloat32_type_node
                    || newtype == dfloat64_type_node
                    || newtype == dfloat128_type_node)
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                    break;
                  }

                if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg0);
                if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                  newtype = TREE_TYPE (arg1);
                /* Sometimes this transformation is safe (cannot
                   change results through affecting double rounding
                   cases) and sometimes it is not.  If NEWTYPE is
                   wider than TYPE, e.g. (float)((long double)double
                   + (long double)double) converted to
                   (float)(double + double), the transformation is
                   unsafe regardless of the details of the types
                   involved; double rounding can arise if the result
                   of NEWTYPE arithmetic is a NEWTYPE value half way
                   between two representable TYPE values but the
                   exact value is sufficiently different (in the
                   right direction) for this difference to be
                   visible in ITYPE arithmetic.  If NEWTYPE is the
                   same as TYPE, however, the transformation may be
                   safe depending on the types involved: it is safe
                   if the ITYPE has strictly more than twice as many
                   mantissa bits as TYPE, can represent infinities
                   and NaNs if the TYPE can, and has sufficient
                   exponent range for the product or ratio of two
                   values representable in the TYPE to be within the
                   range of normal values of ITYPE.  */
                if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                    && (flag_unsafe_math_optimizations
                        || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
                            && real_can_shorten_arithmetic (TYPE_MODE (itype),
                                                            TYPE_MODE (type))
                            && !excess_precision_type (newtype))))
                  {
                    expr = build2 (TREE_CODE (expr), newtype,
                                   fold (convert_to_real (newtype, arg0)),
                                   fold (convert_to_real (newtype, arg1)));
                    if (newtype == type)
                      return expr;
                  }
              }
          }
          break;
        default:
          break;
      }
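
  /* Illustrative note (assuming IEEE binary32/binary64; not from the
     original sources): for float operands f1 and f2, the expression
     (float)((double) f1 + (double) f2) can be computed directly in float,
     because double carries strictly more than twice the mantissa bits of
     float, so real_can_shorten_arithmetic allows the shortening above even
     without -funsafe-math-optimizations, provided float arithmetic is not
     carried out in excess precision.  */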

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}

/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float,
   fixed-point or vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */

tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = element_precision (intype);
  unsigned int outprec = element_precision (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!targetm.libc_has_function (function_c99_misc))
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!targetm.libc_has_function (function_c99_misc))
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          /* Only convert in ISO C99 mode.  */
          if (!targetm.libc_has_function (function_c99_misc))
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          /* Only convert in ISO C99 mode.  */
          if (!targetm.libc_has_function (function_c99_misc))
            break;
          if (outprec < TYPE_PRECISION (integer_type_node)
              || (outprec == TYPE_PRECISION (integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
          else if (outprec == TYPE_PRECISION (long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
          return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }
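
  /* Illustrative examples (not from the original sources): with a C99
     runtime, (long) round (d) for a double d is rewritten via the
     BUILT_IN_LROUND entry point into lround (d); a narrower result such as
     (int) ceil (d) uses the BUILT_IN_ICEIL entry point, and on targets
     where int is narrower than long that case is only attempted under
     -funsafe-math-optimizations (see the guard above).  */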

  /* Convert (int)logb(d) -> ilogb(d).  */
  if (optimize
      && flag_unsafe_math_optimizations
      && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
      && integer_type_node
      && (outprec > TYPE_PRECISION (integer_type_node)
          || (outprec == TYPE_PRECISION (integer_type_node)
              && !TYPE_UNSIGNED (type))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_LOGB):
          fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
          break;

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }
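
  /* A note on the heavy guarding above (my reading, not from the original
     sources): logb returns a floating-point result while ilogb returns an
     int, and the two disagree for zero, infinities and NaNs and may differ
     in errno and exception behaviour, so the rewrite is only attempted when
     the fast-math style flags say those cases do not matter.  */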

  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first, and from
         there widen/truncate to the required type.  Some targets support the
         coexistence of multiple valid pointer sizes, so fetch the one we need
         from the type.  */
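      /* For example (illustrative assumption of 64-bit pointers and 32-bit
         int): (int) p is lowered as a conversion of P to a 64-bit unsigned
         integer followed by the truncation to int performed by fold_convert
         below.  */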
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size
                            (TYPE_PRECISION (intype), 0),
                          expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case OFFSET_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          return fold_build1 (code, type, expr);
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));

      /* Here detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.
         It is desirable in these cases:
         1) when truncating down to full-word from a larger size
         2) when truncating takes no work.
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */
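
      /* For example (illustrative, with 32-bit int and 64-bit long):
         (int) ((long) a + (long) b) has the same low-order 32 bits as
         (int) a + (int) b, so the addition can be distributed; the code
         below performs the narrowed addition in an unsigned type when
         needed so that it cannot introduce signed-overflow undefinedness.  */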

      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */

                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;

        case TRUNC_DIV_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs and it has the same signedness.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1)))
                /* Do not change the sign of the division.  */
                && (TYPE_UNSIGNED (TREE_TYPE (expr))
                    == TYPE_UNSIGNED (TREE_TYPE (arg0)))
                /* Either require unsigned division or a division by
                   a constant that is not -1.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    || (TREE_CODE (arg1) == INTEGER_CST
                        && !integer_all_onesp (arg1))))
              goto trunc1;
            break;
          }
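          /* Why the -1 test above matters (illustrative): in
             (int) ((long long) a / divisor) with a == INT_MIN and
             divisor == -1, the wide division is well defined and truncates
             back to INT_MIN, but the narrowed int division would overflow
             (and traps on some targets), so the distribution is only done
             when the division is unsigned or the divisor is a constant
             other than -1.  */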

        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If signedness of arg0 and arg1 don't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }

        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Do not try to narrow operands of pointer subtraction;
               that will interfere with other folding.  */
            if (ex_form == MINUS_EXPR
                && CONVERT_EXPR_P (arg0)
                && CONVERT_EXPR_P (arg1)
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
                && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
              break;

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
                           type in case the operation in outprec precision
                           could overflow.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
                                 > outprec)
                                || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
                                    > outprec))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR
                                || ex_form == MULT_EXPR)))
                      typex = unsigned_type_for (typex);
                    else
                      typex = signed_type_for (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;

        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex = unsigned_type_for (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }
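          /* For example (illustrative): truncating (int) (- (long) l)
             performs the negation as - (unsigned int) l and then converts
             back to int; doing it in the unsigned type keeps the low-order
             bits correct while avoiding signed-overflow undefinedness when
             the narrowed operand happens to be INT_MIN.  */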

        case NOP_EXPR:
          /* Don't introduce a
             "can't convert between vector values of different size" error.  */
          if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
              && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
                  != GET_MODE_SIZE (TYPE_MODE (type))))
            break;
          /* If truncating after truncating, might as well do all at once.
             If truncating after extending, we may get rid of wasted work.  */
          return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));

        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional, and it never loses.  A COND_EXPR may have a
             throw as one operand, which then has void type.  Just leave
             void operands as they are.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
                              ? TREE_OPERAND (expr, 1)
                              : convert (type, TREE_OPERAND (expr, 1)),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
                              ? TREE_OPERAND (expr, 2)
                              : convert (type, TREE_OPERAND (expr, 2)));

        default:
          break;
        }

      /* When parsing long initializers, we might end up with a lot of casts.
         Shortcut this.  */
      if (TREE_CODE (expr) == INTEGER_CST)
        return fold_convert (type, expr);
      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}

/* Convert EXPR to the complex type TYPE in the usual ways.  */

tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}
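
/* Illustrative note (not from the original sources): converting a scalar,
   e.g. (_Complex double) d for a double D, builds COMPLEX_EXPR <d, 0.0>;
   when a complex operand is not itself a COMPLEX_EXPR it is wrapped in
   save_expr above so that it is evaluated only once for the REALPART_EXPR
   and IMAGPART_EXPR pair.  */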

/* Convert EXPR to the vector type TYPE in the usual ways.  */

tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can%'t convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can%'t convert value to a vector");
      return error_mark_node;
    }
}
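
/* Illustrative note (not from the original sources): the VIEW_CONVERT_EXPR
   built above reinterprets the bits rather than converting element values,
   so e.g. a 128-bit vector of four 32-bit ints can be viewed as a vector of
   sixteen chars, or a 128-bit integer can be viewed as a vector of the same
   size; conversions that would change the overall size are rejected.  */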

/* Convert EXPR to some fixed-point type TYPE.

   EXPR must be fixed-point, float, integer, or enumeral;
   in other cases error is called.  */

tree
convert_to_fixed (tree type, tree expr)
{
  if (integer_zerop (expr))
    {
      tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
      return fixed_zero_node;
    }
  else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
    {
      tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
      return fixed_one_node;
    }
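  /* A note on the guard above (my reading, not from the original sources):
     the constant 1 is special-cased only for _Accum-family modes because a
     _Fract type has no integer bits and cannot represent the value 1
     exactly, so FCONST1 only exists for the accumulator modes.  */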

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    default:
      error ("aggregate value used where a fixed-point was expected");
      return error_mark_node;
    }
}