/* Utility routines for data type conversion for GCC.
   Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* These routines are somewhat language-independent utility functions
   intended to be called by the language-specific convert () functions.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "flags.h"
#include "convert.h"
#include "toplev.h"
#include "langhooks.h"
#include "real.h"
#include "fixed-value.h"

/* Convert EXPR to some pointer or reference type TYPE.
   EXPR must be pointer, reference, integer, enumeral, or literal zero;
   in other cases error is called.  */
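
/* Illustrative example (added editorial comment, not in the original
   source): a cast such as

       char *p = (char *) some_int_value;

   reaches this function with an INTEGER_TYPE operand.  If the operand
   is not exactly POINTER_SIZE bits wide, it is first converted to a
   pointer-sized integer and only then reinterpreted as TYPE.  */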

tree
convert_to_pointer (tree type, tree expr)
{
  if (TREE_TYPE (expr) == type)
    return expr;

  /* Propagate overflow to the NULL pointer.  */
  if (integer_zerop (expr))
    return force_fit_type_double (type, 0, 0, 0, TREE_OVERFLOW (expr));

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      return fold_build1 (NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      if (TYPE_PRECISION (TREE_TYPE (expr)) != POINTER_SIZE)
        expr = fold_build1 (NOP_EXPR,
                            lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                            expr);
      return fold_build1 (CONVERT_EXPR, type, expr);


    default:
      error ("cannot convert to a pointer type");
      return convert_to_pointer (type, integer_zero_node);
    }
}

/* Avoid any floating point extensions from EXP.  */
tree
strip_float_extensions (tree exp)
{
  tree sub, expt, subt;

  /* For a floating point constant, look up the narrowest type that can hold
     it properly and handle it like (type)(narrowest_type)constant.
     This way we can optimize for instance a=a*2.0 where "a" is float
     but 2.0 is a double constant.  */
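  /* Illustrative example (added editorial comment, not in the original
     source): in

         float a;
         a = a * 2.0;

     the constant 2.0 is a REAL_CST of type double, but its value is
     exactly representable as a float, so it is rebuilt below as a float
     constant and the multiplication can stay in single precision.  */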
  if (TREE_CODE (exp) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (TREE_TYPE (exp)))
    {
      REAL_VALUE_TYPE orig;
      tree type = NULL;

      orig = TREE_REAL_CST (exp);
      if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node)
          && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
        type = float_type_node;
      else if (TYPE_PRECISION (TREE_TYPE (exp))
               > TYPE_PRECISION (double_type_node)
               && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
        type = double_type_node;
      if (type)
        return build_real (type, real_value_truncate (TYPE_MODE (type), orig));
    }

  if (!CONVERT_EXPR_P (exp))
    return exp;

  sub = TREE_OPERAND (exp, 0);
  subt = TREE_TYPE (sub);
  expt = TREE_TYPE (exp);

  if (!FLOAT_TYPE_P (subt))
    return exp;

  if (DECIMAL_FLOAT_TYPE_P (expt) != DECIMAL_FLOAT_TYPE_P (subt))
    return exp;

  if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt))
    return exp;

  return strip_float_extensions (sub);
}


/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, fixed-point, integer, or enumeral;
   in other cases error is called.  */
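
/* Illustrative example (added editorial comment, not in the original
   source): this function handles conversions such as

       double d;
       int i;
       float f = d;   (REAL_TYPE -> REAL_TYPE)
       float g = i;   (INTEGER_TYPE -> REAL_TYPE, built as a FLOAT_EXPR)

   and, when optimizing, also shortens math calls, e.g. rewriting
   (float) sqrt ((double) f) to use sqrtf when that is known to be
   safe.  */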

tree
convert_to_real (tree type, tree expr)
{
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);

  /* Disable until we figure out how to decide whether the functions are
     present at runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
          || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
        {
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
        CASE_MATHFN (COSH)
        CASE_MATHFN (EXP)
        CASE_MATHFN (EXP10)
        CASE_MATHFN (EXP2)
        CASE_MATHFN (EXPM1)
        CASE_MATHFN (GAMMA)
        CASE_MATHFN (J0)
        CASE_MATHFN (J1)
        CASE_MATHFN (LGAMMA)
        CASE_MATHFN (POW10)
        CASE_MATHFN (SINH)
        CASE_MATHFN (TGAMMA)
        CASE_MATHFN (Y0)
        CASE_MATHFN (Y1)
          /* The above functions may set errno differently with float
             input or output so this transformation is not safe with
             -fmath-errno.  */
          if (flag_errno_math)
            break;
        CASE_MATHFN (ACOS)
        CASE_MATHFN (ACOSH)
        CASE_MATHFN (ASIN)
        CASE_MATHFN (ASINH)
        CASE_MATHFN (ATAN)
        CASE_MATHFN (ATANH)
        CASE_MATHFN (CBRT)
        CASE_MATHFN (COS)
        CASE_MATHFN (ERF)
        CASE_MATHFN (ERFC)
        CASE_MATHFN (FABS)
        CASE_MATHFN (LOG)
        CASE_MATHFN (LOG10)
        CASE_MATHFN (LOG2)
        CASE_MATHFN (LOG1P)
        CASE_MATHFN (LOGB)
        CASE_MATHFN (SIN)
        CASE_MATHFN (SQRT)
        CASE_MATHFN (TAN)
        CASE_MATHFN (TANH)
#undef CASE_MATHFN
          {
            tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
            tree newtype = type;

            /* We have (outertype)sqrt((innertype)x).  Choose the wider mode
               of the two as the safe type for the operation.  */
            if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
              newtype = TREE_TYPE (arg0);

            /* Be careful about integer to fp conversions.
               These may still overflow.  */
            if (FLOAT_TYPE_P (TREE_TYPE (arg0))
                && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
                    || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
              {
                tree fn = mathfn_built_in (newtype, fcode);

                if (fn)
                  {
                    tree arg = fold (convert_to_real (newtype, arg0));
                    expr = build_call_expr (fn, 1, arg);
                    if (newtype == type)
                      return expr;
                  }
              }
          }
        default:
          break;
        }
    }
  if (optimize
      && (((fcode == BUILT_IN_FLOORL
            || fcode == BUILT_IN_CEILL
            || fcode == BUILT_IN_ROUNDL
            || fcode == BUILT_IN_RINTL
            || fcode == BUILT_IN_TRUNCL
            || fcode == BUILT_IN_NEARBYINTL)
           && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
               || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
          || ((fcode == BUILT_IN_FLOOR
               || fcode == BUILT_IN_CEIL
               || fcode == BUILT_IN_ROUND
               || fcode == BUILT_IN_RINT
               || fcode == BUILT_IN_TRUNC
               || fcode == BUILT_IN_NEARBYINT)
              && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
    {
      tree fn = mathfn_built_in (type, fcode);

      if (fn)
        {
          tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));

          /* Make sure (type)arg0 is an extension, otherwise we could end up
             changing (float)floor(double d) into floorf((float)d), which is
             incorrect because (float)d uses round-to-nearest and can round
             up to the next integer.  */
          if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
            return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
        }
    }

  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
        /* Convert (float)-x into -(float)x.  This is safe for
           round-to-nearest rounding mode.  */
      case ABS_EXPR:
      case NEGATE_EXPR:
        if (!flag_rounding_math
            && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
          return build1 (TREE_CODE (expr), type,
                         fold (convert_to_real (type,
                                                TREE_OPERAND (expr, 0))));
        break;
        /* Convert (outertype)((innertype0)a+(innertype1)b)
           into ((newtype)a+(newtype)b) where newtype
           is the widest mode from all of these.  */
      case PLUS_EXPR:
      case MINUS_EXPR:
      case MULT_EXPR:
      case RDIV_EXPR:
        {
          tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
          tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

          if (FLOAT_TYPE_P (TREE_TYPE (arg0))
              && FLOAT_TYPE_P (TREE_TYPE (arg1))
              && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
            {
              tree newtype = type;

              if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
                  || TYPE_MODE (type) == SDmode)
                newtype = dfloat32_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
                  || TYPE_MODE (type) == DDmode)
                newtype = dfloat64_type_node;
              if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
                  || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
                  || TYPE_MODE (type) == TDmode)
                newtype = dfloat128_type_node;
              if (newtype == dfloat32_type_node
                  || newtype == dfloat64_type_node
                  || newtype == dfloat128_type_node)
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                  break;
                }

              if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg0);
              if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
                newtype = TREE_TYPE (arg1);
              /* Sometimes this transformation is safe (cannot
                 change results through affecting double rounding
                 cases) and sometimes it is not.  If NEWTYPE is
                 wider than TYPE, e.g. (float)((long double)double
                 + (long double)double) converted to
                 (float)(double + double), the transformation is
                 unsafe regardless of the details of the types
                 involved; double rounding can arise if the result
                 of NEWTYPE arithmetic is a NEWTYPE value half way
                 between two representable TYPE values but the
                 exact value is sufficiently different (in the
                 right direction) for this difference to be
                 visible in ITYPE arithmetic.  If NEWTYPE is the
                 same as TYPE, however, the transformation may be
                 safe depending on the types involved: it is safe
                 if the ITYPE has strictly more than twice as many
                 mantissa bits as TYPE, can represent infinities
                 and NaNs if the TYPE can, and has sufficient
                 exponent range for the product or ratio of two
                 values representable in the TYPE to be within the
                 range of normal values of ITYPE.  */
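              /* Worked example (added editorial comment, not in the original
                 source): for float operands promoted to double, as in
                 (float) ((double) f1 * (double) f2), performing the
                 multiplication directly in float fits the conditions above,
                 since double's 53-bit significand is more than twice float's
                 24 bits; this is the kind of case real_can_shorten_arithmetic
                 is consulted about below.  Shortening long double arithmetic
                 whose result is only converted to float down to double is,
                 by contrast, the unsafe widening case described above.  */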
              if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
                  && (flag_unsafe_math_optimizations
                      || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
                          && real_can_shorten_arithmetic (TYPE_MODE (itype),
                                                          TYPE_MODE (type))
                          && !excess_precision_type (newtype))))
                {
                  expr = build2 (TREE_CODE (expr), newtype,
                                 fold (convert_to_real (newtype, arg0)),
                                 fold (convert_to_real (newtype, arg1)));
                  if (newtype == type)
                    return expr;
                }
            }
        }
        break;
    default:
        break;
      }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
         results and neither type is a decimal float.  */
      return build1 ((flag_float_store
                      || DECIMAL_FLOAT_TYPE_P (type)
                      || DECIMAL_FLOAT_TYPE_P (itype))
                     ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real (type, integer_zero_node);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real (type, integer_zero_node);
    }
}

/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float,
   fixed-point or vector; in other cases error is called.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */
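
/* Illustrative example (added editorial comment, not in the original
   source): conversions such as

       double d;
       long l = d;    (REAL_TYPE -> INTEGER_TYPE, a FIX_TRUNC_EXPR)
       int i = l;     (wider integer -> narrower integer)

   come through here, and when optimizing, calls like (long) round (d)
   may be rewritten below to lround (d).  */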

tree
convert_to_integer (tree type, tree expr)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = TYPE_PRECISION (intype);
  unsigned int outprec = TYPE_PRECISION (type);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
          || (long_integer_type_node
              && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_CEIL):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
          break;

        CASE_FLT_FN (BUILT_IN_FLOOR):
          /* Only convert in ISO C99 mode.  */
          if (!TARGET_C99_FUNCTIONS)
            break;
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
          break;

        CASE_FLT_FN (BUILT_IN_ROUND):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
          break;

        CASE_FLT_FN (BUILT_IN_NEARBYINT):
          /* Only convert nearbyint* if we can ignore math exceptions.  */
          if (flag_trapping_math)
            break;
          /* ... Fall through ...  */
        CASE_FLT_FN (BUILT_IN_RINT):
          if (outprec < TYPE_PRECISION (long_integer_type_node)
              || (outprec == TYPE_PRECISION (long_integer_type_node)
                  && !TYPE_UNSIGNED (type)))
            fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
          else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
                   && !TYPE_UNSIGNED (type))
            fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
          break;

        CASE_FLT_FN (BUILT_IN_TRUNC):
          return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }

  /* Convert (int)logb(d) -> ilogb(d).  */
  if (optimize
      && flag_unsafe_math_optimizations
      && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
      && integer_type_node
      && (outprec > TYPE_PRECISION (integer_type_node)
          || (outprec == TYPE_PRECISION (integer_type_node)
              && !TYPE_UNSIGNED (type))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
        {
        CASE_FLT_FN (BUILT_IN_LOGB):
          fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
          break;

        default:
          break;
        }

      if (fn)
        {
          tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
          return convert_to_integer (type, newexpr);
        }
    }

  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
        return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first,
         and from there widen/truncate to the required type.  */
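      /* Illustrative example (added editorial comment, not in the original
         source): on an LP64 target, a cast such as (int) ptr is lowered by
         first converting ptr to the pointer-sized integer type returned by
         type_for_size (POINTER_SIZE, 0) and then truncating that value
         to int.  */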
      expr = fold_build1 (CONVERT_EXPR,
                          lang_hooks.types.type_for_size (POINTER_SIZE, 0),
                          expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case OFFSET_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
         change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
        {
          expr = copy_node (expr);
          TREE_TYPE (expr) = type;
          return expr;
        }

      /* If we are widening the type, put in an explicit conversion.
         Similarly if we are not changing the width.  After this, we know
         we are truncating EXPR.  */

      else if (outprec >= inprec)
        {
          enum tree_code code;
          tree tem;

          /* If the precision of the EXPR's type is K bits and the
             destination mode has more bits, and the sign is changing,
             it is not safe to use a NOP_EXPR.  For example, suppose
             that EXPR's type is a 3-bit unsigned integer type, the
             TYPE is a 3-bit signed integer type, and the machine mode
             for the types is 8-bit QImode.  In that case, the
             conversion necessitates an explicit sign-extension.  In
             the signed-to-unsigned case the high-order bits have to
             be cleared.  */
          if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
              && (TYPE_PRECISION (TREE_TYPE (expr))
                  != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr)))))
            code = CONVERT_EXPR;
          else
            code = NOP_EXPR;

          tem = fold_unary (code, type, expr);
          if (tem)
            return tem;

          tem = build1 (code, type, expr);
          TREE_NO_WARNING (tem) = 1;
          return tem;
        }

      /* If TYPE is an enumeral type or a type with a precision less
         than the number of bits in its mode, do the conversion to the
         type corresponding to its mode, then do a nop conversion
         to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
               || outprec != GET_MODE_BITSIZE (TYPE_MODE (type)))
        return build1 (NOP_EXPR, type,
                       convert (lang_hooks.types.type_for_mode
                                (TYPE_MODE (type), TYPE_UNSIGNED (type)),
                                expr));

      /* Here we detect when we can distribute the truncation down past some
         arithmetic.  For example, if adding two longs and converting to an
         int, we can equally well convert both to ints and then add.
         For the operations handled here, such truncation distribution
         is always safe.
         It is desirable in these cases:
         1) when truncating down to full-word from a larger size;
         2) when truncating takes no work;
         3) when at least one operand of the arithmetic has been extended
         (as by C's default conversions).  In this case we need two conversions
         if we do the arithmetic as already requested, so we might as well
         truncate both and then combine.  Perhaps that way we need only one.

         Note that in general we cannot do the arithmetic in a type
         shorter than the desired result of conversion, even if the operands
         are both extended from a shorter type, because they might overflow
         if combined in that type.  The exceptions to this--the times when
         two narrow values can be combined in their narrow type even to
         make a wider result--are handled by "shorten" in build_binary_op.  */
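
      /* Illustrative example (added editorial comment, not in the original
         source): with 64-bit long and 32-bit int,

             long a, b;
             int i = a + b;

         can be computed as a 32-bit addition of the truncated operands,
         since the low 32 bits of the sum are the same either way; the
         trunc1 code below performs the narrow arithmetic in an unsigned
         type when needed so that no signed overflow is introduced.  */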

      switch (ex_form)
        {
        case RSHIFT_EXPR:
          /* We can pass truncation down through right shifting
             when the shift count is a nonpositive constant.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
            goto trunc1;
          break;

        case LSHIFT_EXPR:
          /* We can pass truncation down through left shifting
             when the shift count is a nonnegative constant and
             the target type is unsigned.  */
          if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
              && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
              && TYPE_UNSIGNED (type)
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
            {
              /* If shift count is less than the width of the truncated type,
                 really shift.  */
              if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
                /* In this case, shifting is like multiplication.  */
                goto trunc1;
              else
                {
                  /* If it is >= that width, result is zero.
                     Handling this with trunc1 would give the wrong result:
                     (int) ((long long) a << 32) is well defined (as 0)
                     but (int) a << 32 is undefined and would get a
                     warning.  */

                  tree t = build_int_cst (type, 0);

                  /* If the original expression had side-effects, we must
                     preserve it.  */
                  if (TREE_SIDE_EFFECTS (expr))
                    return build2 (COMPOUND_EXPR, type, expr, t);
                  else
                    return t;
                }
            }
          break;

        case MAX_EXPR:
        case MIN_EXPR:
        case MULT_EXPR:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            /* Don't distribute unless the output precision is at least as big
               as the actual inputs.  Otherwise, the comparison of the
               truncated values will be wrong.  */
            if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
                && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
                /* If the signedness of arg0 and arg1 doesn't match,
                   we can't necessarily find a type to compare them in.  */
                && (TYPE_UNSIGNED (TREE_TYPE (arg0))
                    == TYPE_UNSIGNED (TREE_TYPE (arg1))))
              goto trunc1;
            break;
          }

        case PLUS_EXPR:
        case MINUS_EXPR:
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
        trunc1:
          {
            tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
            tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

            if (outprec >= BITS_PER_WORD
                || TRULY_NOOP_TRUNCATION (outprec, inprec)
                || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
                || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
              {
                /* Do the arithmetic in type TYPEX,
                   then convert result to TYPE.  */
                tree typex = type;

                /* Can't do arithmetic in enumeral types
                   so use an integer type that will hold the values.  */
                if (TREE_CODE (typex) == ENUMERAL_TYPE)
                  typex = lang_hooks.types.type_for_size
                    (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));

                /* But now perhaps TYPEX is as wide as INPREC.
                   In that case, do nothing special here.
                   (Otherwise we would recurse infinitely in convert.)  */
                if (TYPE_PRECISION (typex) != inprec)
                  {
                    /* Don't do unsigned arithmetic where signed was wanted,
                       or vice versa.
                       Exception: if both of the original operands were
                       unsigned then we can safely do the work as unsigned.
                       Exception: shift operations take their type solely
                       from the first argument.
                       Exception: the LSHIFT_EXPR case above requires that
                       we perform this operation unsigned lest we produce
                       signed-overflow undefinedness.
                       And we may need to do it as unsigned
                       if we truncate to the original size.  */
                    if (TYPE_UNSIGNED (TREE_TYPE (expr))
                        || (TYPE_UNSIGNED (TREE_TYPE (arg0))
                            && (TYPE_UNSIGNED (TREE_TYPE (arg1))
                                || ex_form == LSHIFT_EXPR
                                || ex_form == RSHIFT_EXPR
                                || ex_form == LROTATE_EXPR
                                || ex_form == RROTATE_EXPR))
                        || ex_form == LSHIFT_EXPR
                        /* If we have !flag_wrapv, and either ARG0 or
                           ARG1 is of a signed type, we have to do
                           PLUS_EXPR or MINUS_EXPR in an unsigned
                           type.  Otherwise, we would introduce
                           signed-overflow undefinedness.  */
                        || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
                             || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
                            && (ex_form == PLUS_EXPR
                                || ex_form == MINUS_EXPR)))
                      typex = unsigned_type_for (typex);
                    else
                      typex = signed_type_for (typex);
                    return convert (type,
                                    fold_build2 (ex_form, typex,
                                                 convert (typex, arg0),
                                                 convert (typex, arg1)));
                  }
              }
          }
          break;

        case NEGATE_EXPR:
        case BIT_NOT_EXPR:
          /* This is not correct for ABS_EXPR,
             since we must test the sign before truncation.  */
          {
            tree typex;

            /* Don't do unsigned arithmetic where signed was wanted,
               or vice versa.  */
            if (TYPE_UNSIGNED (TREE_TYPE (expr)))
              typex = unsigned_type_for (type);
            else
              typex = signed_type_for (type);
            return convert (type,
                            fold_build1 (ex_form, typex,
                                         convert (typex,
                                                  TREE_OPERAND (expr, 0))));
          }

        case NOP_EXPR:
          /* Don't introduce a
             "can't convert between vector values of different size" error.  */
          if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
              && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
                  != GET_MODE_SIZE (TYPE_MODE (type))))
            break;
          /* If truncating after truncating, might as well do all at once.
             If truncating after extending, we may get rid of wasted work.  */
          return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));

        case COND_EXPR:
          /* It is sometimes worthwhile to push the narrowing down through
             the conditional; it never loses.  A COND_EXPR may have a throw
             as one operand, which then has void type.  Just leave void
             operands as they are.  */
          return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
                              ? TREE_OPERAND (expr, 1)
                              : convert (type, TREE_OPERAND (expr, 1)),
                              VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
                              ? TREE_OPERAND (expr, 2)
                              : convert (type, TREE_OPERAND (expr, 2)));

        default:
          break;
        }

      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      return build1 (FIX_TRUNC_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}

/* Convert EXPR to the complex type TYPE in the usual ways.  */
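
/* Illustrative example (added editorial comment, not in the original
   source): converting a scalar to a complex type, as in

       double d;
       _Complex double c = d;

   pairs the converted value with a zero imaginary part, while a
   complex-to-complex conversion converts the real and imaginary parts
   separately.  */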

tree
convert_to_complex (tree type, tree expr)
{
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
                     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
        tree elt_type = TREE_TYPE (TREE_TYPE (expr));

        if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
          return expr;
        else if (TREE_CODE (expr) == COMPLEX_EXPR)
          return fold_build2 (COMPLEX_EXPR, type,
                              convert (subtype, TREE_OPERAND (expr, 0)),
                              convert (subtype, TREE_OPERAND (expr, 1)));
        else
          {
            expr = save_expr (expr);
            return
              fold_build2 (COMPLEX_EXPR, type,
                           convert (subtype,
                                    fold_build1 (REALPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)),
                           convert (subtype,
                                    fold_build1 (IMAGPART_EXPR,
                                                 TREE_TYPE (TREE_TYPE (expr)),
                                                 expr)));
          }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex (type, integer_zero_node);
    }
}

/* Convert EXPR to the vector type TYPE in the usual ways.  */
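
/* Illustrative example (added editorial comment, not in the original
   source): with GCC's vector extension,

       typedef int v4si __attribute__ ((vector_size (16)));
       typedef short v8hi __attribute__ ((vector_size (16)));
       v4si a;
       v8hi b = (v8hi) a;

   is accepted because both types occupy 16 bytes and becomes a
   VIEW_CONVERT_EXPR; converting between vectors of different total
   size is rejected with an error below.  */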

tree
convert_to_vector (tree type, tree expr)
{
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case INTEGER_TYPE:
    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
        {
          error ("can't convert between vector values of different size");
          return error_mark_node;
        }
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("can't convert value to a vector");
      return error_mark_node;
    }
}

/* Convert EXPR to some fixed-point type TYPE.

   EXPR must be fixed-point, float, integer, or enumeral;
   in other cases error is called.  */
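
/* Illustrative example (added editorial comment, not in the original
   source): with the ISO/IEC TR 18037 fixed-point extension,

       _Accum a = 0;   (literal zero becomes a fixed-point zero constant)
       _Accum b = 1;   (literal one is special-cased for accum modes)
       _Accum c = d;   (other scalars go through FIXED_CONVERT_EXPR)

   where d might be a double, an int, or another fixed-point value.  */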

tree
convert_to_fixed (tree type, tree expr)
{
  if (integer_zerop (expr))
    {
      tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
      return fixed_zero_node;
    }
  else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
    {
      tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
      return fixed_one_node;
    }

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      return convert (type,
                      fold_build1 (REALPART_EXPR,
                                   TREE_TYPE (TREE_TYPE (expr)), expr));

    default:
      error ("aggregate value used where a fixed-point was expected");
      return error_mark_node;
    }
}