gcc/convert.c
1 /* Utility routines for data type conversion for GCC.
2 Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1997, 1998,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 /* These routines are somewhat language-independent utility functions
24 intended to be called by the language-specific convert () functions. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "tree.h"
31 #include "flags.h"
32 #include "convert.h"
33 #include "diagnostic-core.h"
34 #include "langhooks.h"
35
36 /* Convert EXPR to some pointer or reference type TYPE.
37 EXPR must be pointer, reference, integer, enumeral, or literal zero;
38 in other cases error is called. */
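/* For illustration, a hypothetical front-end call

     convert_to_pointer (ptr_type_node, integer_zero_node)

   yields a null pointer constant of type "void *", with any TREE_OVERFLOW
   flag on the source constant propagated, while a conversion between
   pointers into different named address spaces is expressed below with
   ADDR_SPACE_CONVERT_EXPR rather than NOP_EXPR.  */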
39
40 tree
41 convert_to_pointer (tree type, tree expr)
42 {
43 location_t loc = EXPR_LOCATION (expr);
44 if (TREE_TYPE (expr) == type)
45 return expr;
46
47 /* Propagate overflow to the NULL pointer. */
48 if (integer_zerop (expr))
49 return force_fit_type_double (type, double_int_zero, 0,
50 TREE_OVERFLOW (expr));
51
52 switch (TREE_CODE (TREE_TYPE (expr)))
53 {
54 case POINTER_TYPE:
55 case REFERENCE_TYPE:
56 {
57 /* If the pointers point to different address spaces, conversion needs
58 to be done via an ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR. */
59 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
60 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
61
62 if (to_as == from_as)
63 return fold_build1_loc (loc, NOP_EXPR, type, expr);
64 else
65 return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
66 }
67
68 case INTEGER_TYPE:
69 case ENUMERAL_TYPE:
70 case BOOLEAN_TYPE:
71 {
72 /* If the input precision differs from the target pointer type
73 precision, first convert the input expression to an integer type of
74 the target precision. Some targets, e.g. VMS, need several pointer
75 sizes to coexist so the latter isn't necessarily POINTER_SIZE. */
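/* Illustrative case: converting a 32-bit "int" to a 64-bit pointer type
   first converts the operand to the 64-bit integer type returned by
   type_for_size, so the widening step is explicit in the tree before the
   final CONVERT_EXPR to the pointer type.  */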
76 unsigned int pprec = TYPE_PRECISION (type);
77 unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));
78
79 if (eprec != pprec)
80 expr = fold_build1_loc (loc, NOP_EXPR,
81 lang_hooks.types.type_for_size (pprec, 0),
82 expr);
83 }
84
85 return fold_build1_loc (loc, CONVERT_EXPR, type, expr);
86
87 default:
88 error ("cannot convert to a pointer type");
89 return convert_to_pointer (type, integer_zero_node);
90 }
91 }
92
93 /* Strip any floating point extensions from EXP. */
94 tree
95 strip_float_extensions (tree exp)
96 {
97 tree sub, expt, subt;
98
99 /* For a floating point constant, look up the narrowest type that can
 100 represent it exactly and handle it like (type)(narrowest_type)constant.
 101 This way we can optimize, for instance, a=a*2.0 where "a" is float
 102 but 2.0 is a double constant. */
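/* Worked instance of the above (illustrative): in "float a; a = a * 2.0;"
   the REAL_CST 2.0 has type double but truncates exactly to float, as
   checked with exact_real_truncate below, so it is rebuilt as a float
   constant and the multiplication can then be carried out entirely in
   float.  */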
103 if (TREE_CODE (exp) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (TREE_TYPE (exp)))
104 {
105 REAL_VALUE_TYPE orig;
106 tree type = NULL;
107
108 orig = TREE_REAL_CST (exp);
109 if (TYPE_PRECISION (TREE_TYPE (exp)) > TYPE_PRECISION (float_type_node)
110 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
111 type = float_type_node;
112 else if (TYPE_PRECISION (TREE_TYPE (exp))
113 > TYPE_PRECISION (double_type_node)
114 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
115 type = double_type_node;
116 if (type)
117 return build_real (type, real_value_truncate (TYPE_MODE (type), orig));
118 }
119
120 if (!CONVERT_EXPR_P (exp))
121 return exp;
122
123 sub = TREE_OPERAND (exp, 0);
124 subt = TREE_TYPE (sub);
125 expt = TREE_TYPE (exp);
126
127 if (!FLOAT_TYPE_P (subt))
128 return exp;
129
130 if (DECIMAL_FLOAT_TYPE_P (expt) != DECIMAL_FLOAT_TYPE_P (subt))
131 return exp;
132
133 if (TYPE_PRECISION (subt) > TYPE_PRECISION (expt))
134 return exp;
135
136 return strip_float_extensions (sub);
137 }
138
139
140 /* Convert EXPR to some floating-point type TYPE.
141
142 EXPR must be float, fixed-point, integer, or enumeral;
143 in other cases error is called. */
144
145 tree
146 convert_to_real (tree type, tree expr)
147 {
148 enum built_in_function fcode = builtin_mathfn_code (expr);
149 tree itype = TREE_TYPE (expr);
150
151 /* Disable until we figure out how to decide whether the functions are
 152 present in the runtime library. */
 153 /* Convert (float)sqrt((double)x), where x is a float, into sqrtf(x). */
154 if (optimize
155 && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
156 || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
157 {
158 switch (fcode)
159 {
160 #define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
161 CASE_MATHFN (COSH)
162 CASE_MATHFN (EXP)
163 CASE_MATHFN (EXP10)
164 CASE_MATHFN (EXP2)
165 CASE_MATHFN (EXPM1)
166 CASE_MATHFN (GAMMA)
167 CASE_MATHFN (J0)
168 CASE_MATHFN (J1)
169 CASE_MATHFN (LGAMMA)
170 CASE_MATHFN (POW10)
171 CASE_MATHFN (SINH)
172 CASE_MATHFN (TGAMMA)
173 CASE_MATHFN (Y0)
174 CASE_MATHFN (Y1)
175 /* The above functions may set errno differently with float
176 input or output so this transformation is not safe with
177 -fmath-errno. */
178 if (flag_errno_math)
179 break;
180 CASE_MATHFN (ACOS)
181 CASE_MATHFN (ACOSH)
182 CASE_MATHFN (ASIN)
183 CASE_MATHFN (ASINH)
184 CASE_MATHFN (ATAN)
185 CASE_MATHFN (ATANH)
186 CASE_MATHFN (CBRT)
187 CASE_MATHFN (COS)
188 CASE_MATHFN (ERF)
189 CASE_MATHFN (ERFC)
190 CASE_MATHFN (FABS)
191 CASE_MATHFN (LOG)
192 CASE_MATHFN (LOG10)
193 CASE_MATHFN (LOG2)
194 CASE_MATHFN (LOG1P)
195 CASE_MATHFN (LOGB)
196 CASE_MATHFN (SIN)
197 CASE_MATHFN (SQRT)
198 CASE_MATHFN (TAN)
199 CASE_MATHFN (TANH)
200 #undef CASE_MATHFN
201 {
202 tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
203 tree newtype = type;
204
205 /* We have (outertype)sqrt((innertype)x). Choose the wider type of
 206 the two as the safe type for the operation. */
207 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
208 newtype = TREE_TYPE (arg0);
209
210 /* Be careful about integer to floating-point conversions.
 211 These may still overflow. */
212 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
213 && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
214 && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
215 || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
216 {
217 tree fn = mathfn_built_in (newtype, fcode);
218
219 if (fn)
220 {
221 tree arg = fold (convert_to_real (newtype, arg0));
222 expr = build_call_expr (fn, 1, arg);
223 if (newtype == type)
224 return expr;
225 }
226 }
227 }
228 default:
229 break;
230 }
231 }
232 if (optimize
233 && (((fcode == BUILT_IN_FLOORL
234 || fcode == BUILT_IN_CEILL
235 || fcode == BUILT_IN_ROUNDL
236 || fcode == BUILT_IN_RINTL
237 || fcode == BUILT_IN_TRUNCL
238 || fcode == BUILT_IN_NEARBYINTL)
239 && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
240 || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
241 || ((fcode == BUILT_IN_FLOOR
242 || fcode == BUILT_IN_CEIL
243 || fcode == BUILT_IN_ROUND
244 || fcode == BUILT_IN_RINT
245 || fcode == BUILT_IN_TRUNC
246 || fcode == BUILT_IN_NEARBYINT)
247 && (TYPE_MODE (type) == TYPE_MODE (float_type_node)))))
248 {
249 tree fn = mathfn_built_in (type, fcode);
250
251 if (fn)
252 {
253 tree arg = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
254
255 /* Make sure (type)arg0 is an extension, otherwise we could end up
256 changing (float)floor(double d) into floorf((float)d), which is
257 incorrect because (float)d uses round-to-nearest and can round
258 up to the next integer. */
259 if (TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (arg)))
260 return build_call_expr (fn, 1, fold (convert_to_real (type, arg)));
261 }
262 }
263
264 /* Propagate the cast into the operation. */
265 if (itype != type && FLOAT_TYPE_P (type))
266 switch (TREE_CODE (expr))
267 {
268 /* Convert (float)-x into -(float)x. This is safe for
269 round-to-nearest rounding mode. */
270 case ABS_EXPR:
271 case NEGATE_EXPR:
272 if (!flag_rounding_math
273 && TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (expr)))
274 return build1 (TREE_CODE (expr), type,
275 fold (convert_to_real (type,
276 TREE_OPERAND (expr, 0))));
277 break;
278 /* Convert (outertype)((innertype0)a+(innertype1)b)
279 into ((newtype)a+(newtype)b) where newtype
280 is the widest type among all of these. */
281 case PLUS_EXPR:
282 case MINUS_EXPR:
283 case MULT_EXPR:
284 case RDIV_EXPR:
285 {
286 tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
287 tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));
288
289 if (FLOAT_TYPE_P (TREE_TYPE (arg0))
290 && FLOAT_TYPE_P (TREE_TYPE (arg1))
291 && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
292 {
293 tree newtype = type;
294
295 if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
296 || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
297 || TYPE_MODE (type) == SDmode)
298 newtype = dfloat32_type_node;
299 if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
300 || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
301 || TYPE_MODE (type) == DDmode)
302 newtype = dfloat64_type_node;
303 if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
304 || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
305 || TYPE_MODE (type) == TDmode)
306 newtype = dfloat128_type_node;
307 if (newtype == dfloat32_type_node
308 || newtype == dfloat64_type_node
309 || newtype == dfloat128_type_node)
310 {
311 expr = build2 (TREE_CODE (expr), newtype,
312 fold (convert_to_real (newtype, arg0)),
313 fold (convert_to_real (newtype, arg1)));
314 if (newtype == type)
315 return expr;
316 break;
317 }
318
319 if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
320 newtype = TREE_TYPE (arg0);
321 if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
322 newtype = TREE_TYPE (arg1);
323 /* Sometimes this transformation is safe (cannot
324 change results through affecting double rounding
325 cases) and sometimes it is not. If NEWTYPE is
326 wider than TYPE, e.g. (float)((long double)double
327 + (long double)double) converted to
328 (float)(double + double), the transformation is
329 unsafe regardless of the details of the types
330 involved; double rounding can arise if the result
331 of NEWTYPE arithmetic is a NEWTYPE value half way
332 between two representable TYPE values but the
333 exact value is sufficiently different (in the
334 right direction) for this difference to be
335 visible in ITYPE arithmetic. If NEWTYPE is the
336 same as TYPE, however, the transformation may be
337 safe depending on the types involved: it is safe
338 if the ITYPE has strictly more than twice as many
339 mantissa bits as TYPE, can represent infinities
340 and NaNs if the TYPE can, and has sufficient
341 exponent range for the product or ratio of two
342 values representable in the TYPE to be within the
343 range of normal values of ITYPE. */
344 if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
345 && (flag_unsafe_math_optimizations
346 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
347 && real_can_shorten_arithmetic (TYPE_MODE (itype),
348 TYPE_MODE (type))
349 && !excess_precision_type (newtype))))
350 {
351 expr = build2 (TREE_CODE (expr), newtype,
352 fold (convert_to_real (newtype, arg0)),
353 fold (convert_to_real (newtype, arg1)));
354 if (newtype == type)
355 return expr;
356 }
357 }
358 }
359 break;
360 default:
361 break;
362 }
363
364 switch (TREE_CODE (TREE_TYPE (expr)))
365 {
366 case REAL_TYPE:
367 /* Ignore the conversion if we don't need to store intermediate
368 results and neither type is a decimal float. */
369 return build1 ((flag_float_store
370 || DECIMAL_FLOAT_TYPE_P (type)
371 || DECIMAL_FLOAT_TYPE_P (itype))
372 ? CONVERT_EXPR : NOP_EXPR, type, expr);
373
374 case INTEGER_TYPE:
375 case ENUMERAL_TYPE:
376 case BOOLEAN_TYPE:
377 return build1 (FLOAT_EXPR, type, expr);
378
379 case FIXED_POINT_TYPE:
380 return build1 (FIXED_CONVERT_EXPR, type, expr);
381
382 case COMPLEX_TYPE:
383 return convert (type,
384 fold_build1 (REALPART_EXPR,
385 TREE_TYPE (TREE_TYPE (expr)), expr));
386
387 case POINTER_TYPE:
388 case REFERENCE_TYPE:
389 error ("pointer value used where a floating point value was expected");
390 return convert_to_real (type, integer_zero_node);
391
392 default:
393 error ("aggregate value used where a float was expected");
394 return convert_to_real (type, integer_zero_node);
395 }
396 }
397
398 /* Convert EXPR to some integer (or enum) type TYPE.
399
400 EXPR must be pointer, integer, discrete (enum, char, or bool), float,
401 fixed-point or vector; in other cases error is called.
402
403 The result of this is always supposed to be a newly created tree node
404 not in use in any existing structure. */
405
406 tree
407 convert_to_integer (tree type, tree expr)
408 {
409 enum tree_code ex_form = TREE_CODE (expr);
410 tree intype = TREE_TYPE (expr);
411 unsigned int inprec = TYPE_PRECISION (intype);
412 unsigned int outprec = TYPE_PRECISION (type);
413
414 /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
415 be. Consider `enum E { a, b = (enum E) 3 };'. */
416 if (!COMPLETE_TYPE_P (type))
417 {
418 error ("conversion to incomplete type");
419 return error_mark_node;
420 }
421
422 /* Convert e.g. (long)round(d) -> lround(d). */
423 /* If we're converting to char, we may encounter differing behavior
424 between converting from double->char vs double->long->char.
425 We're in "undefined" territory but we prefer to be conservative,
426 so only proceed in "unsafe" math mode. */
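/* Illustrative consequence of the caveat above: for a double d whose value
   fits in "long" but not in "char", (char) d is undefined behavior in C,
   whereas (char) lround (d) first produces a "long" and then narrows it,
   which is at worst implementation-defined, so the two forms need not
   agree.  */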
427 if (optimize
428 && (flag_unsafe_math_optimizations
429 || (long_integer_type_node
430 && outprec >= TYPE_PRECISION (long_integer_type_node))))
431 {
432 tree s_expr = strip_float_extensions (expr);
433 tree s_intype = TREE_TYPE (s_expr);
434 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
435 tree fn = 0;
436
437 switch (fcode)
438 {
439 CASE_FLT_FN (BUILT_IN_CEIL):
440 /* Only convert in ISO C99 mode. */
441 if (!TARGET_C99_FUNCTIONS)
442 break;
443 if (outprec < TYPE_PRECISION (integer_type_node)
444 || (outprec == TYPE_PRECISION (integer_type_node)
445 && !TYPE_UNSIGNED (type)))
446 fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
447 else if (outprec == TYPE_PRECISION (long_integer_type_node)
448 && !TYPE_UNSIGNED (type))
449 fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
450 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
451 && !TYPE_UNSIGNED (type))
452 fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
453 break;
454
455 CASE_FLT_FN (BUILT_IN_FLOOR):
456 /* Only convert in ISO C99 mode. */
457 if (!TARGET_C99_FUNCTIONS)
458 break;
459 if (outprec < TYPE_PRECISION (integer_type_node)
460 || (outprec == TYPE_PRECISION (integer_type_node)
461 && !TYPE_UNSIGNED (type)))
462 fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
463 else if (outprec == TYPE_PRECISION (long_integer_type_node)
464 && !TYPE_UNSIGNED (type))
465 fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
466 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
467 && !TYPE_UNSIGNED (type))
468 fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
469 break;
470
471 CASE_FLT_FN (BUILT_IN_ROUND):
472 /* Only convert in ISO C99 mode. */
473 if (!TARGET_C99_FUNCTIONS)
474 break;
475 if (outprec < TYPE_PRECISION (integer_type_node)
476 || (outprec == TYPE_PRECISION (integer_type_node)
477 && !TYPE_UNSIGNED (type)))
478 fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
479 else if (outprec == TYPE_PRECISION (long_integer_type_node)
480 && !TYPE_UNSIGNED (type))
481 fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
482 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
483 && !TYPE_UNSIGNED (type))
484 fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
485 break;
486
487 CASE_FLT_FN (BUILT_IN_NEARBYINT):
488 /* Only convert nearbyint* if we can ignore math exceptions. */
489 if (flag_trapping_math)
490 break;
491 /* ... Fall through ... */
492 CASE_FLT_FN (BUILT_IN_RINT):
493 /* Only convert in ISO C99 mode. */
494 if (!TARGET_C99_FUNCTIONS)
495 break;
496 if (outprec < TYPE_PRECISION (integer_type_node)
497 || (outprec == TYPE_PRECISION (integer_type_node)
498 && !TYPE_UNSIGNED (type)))
499 fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
500 else if (outprec == TYPE_PRECISION (long_integer_type_node)
501 && !TYPE_UNSIGNED (type))
502 fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
503 else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
504 && !TYPE_UNSIGNED (type))
505 fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
506 break;
507
508 CASE_FLT_FN (BUILT_IN_TRUNC):
509 return convert_to_integer (type, CALL_EXPR_ARG (s_expr, 0));
510
511 default:
512 break;
513 }
514
515 if (fn)
516 {
517 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
518 return convert_to_integer (type, newexpr);
519 }
520 }
521
522 /* Convert (int)logb(d) -> ilogb(d). */
523 if (optimize
524 && flag_unsafe_math_optimizations
525 && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
526 && integer_type_node
527 && (outprec > TYPE_PRECISION (integer_type_node)
528 || (outprec == TYPE_PRECISION (integer_type_node)
529 && !TYPE_UNSIGNED (type))))
530 {
531 tree s_expr = strip_float_extensions (expr);
532 tree s_intype = TREE_TYPE (s_expr);
533 const enum built_in_function fcode = builtin_mathfn_code (s_expr);
534 tree fn = 0;
535
536 switch (fcode)
537 {
538 CASE_FLT_FN (BUILT_IN_LOGB):
539 fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
540 break;
541
542 default:
543 break;
544 }
545
546 if (fn)
547 {
548 tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
549 return convert_to_integer (type, newexpr);
550 }
551 }
552
553 switch (TREE_CODE (intype))
554 {
555 case POINTER_TYPE:
556 case REFERENCE_TYPE:
557 if (integer_zerop (expr))
558 return build_int_cst (type, 0);
559
560 /* Convert to an unsigned integer of the correct width first, and from
561 there widen/truncate to the required type. Some targets support the
562 coexistence of multiple valid pointer sizes, so fetch the one we need
563 from the type. */
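/* For example (illustrative): narrowing a 64-bit pointer to a 32-bit
   "int" first converts it to an integer type of the pointer's full
   64-bit precision, and fold_convert then truncates that value to the
   requested type.  */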
564 expr = fold_build1 (CONVERT_EXPR,
565 lang_hooks.types.type_for_size
566 (TYPE_PRECISION (intype), 0),
567 expr);
568 return fold_convert (type, expr);
569
570 case INTEGER_TYPE:
571 case ENUMERAL_TYPE:
572 case BOOLEAN_TYPE:
573 case OFFSET_TYPE:
574 /* If this is a comparison, whose result is just 0 or 1, we can
 575 change the type of the expression. */
576
577 if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
578 {
579 expr = copy_node (expr);
580 TREE_TYPE (expr) = type;
581 return expr;
582 }
583
584 /* If we are widening the type, put in an explicit conversion.
585 Similarly if we are not changing the width. After this, we know
586 we are truncating EXPR. */
587
588 else if (outprec >= inprec)
589 {
590 enum tree_code code;
591 tree tem;
592
593 /* If the precision of the EXPR's type is K bits and the
594 destination mode has more bits, and the sign is changing,
595 it is not safe to use a NOP_EXPR. For example, suppose
596 that EXPR's type is a 3-bit unsigned integer type, the
597 TYPE is a 3-bit signed integer type, and the machine mode
598 for the types is 8-bit QImode. In that case, the
599 conversion necessitates an explicit sign-extension. In
600 the signed-to-unsigned case the high-order bits have to
601 be cleared. */
602 if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
603 && (TYPE_PRECISION (TREE_TYPE (expr))
604 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
605 code = CONVERT_EXPR;
606 else
607 code = NOP_EXPR;
608
609 tem = fold_unary (code, type, expr);
610 if (tem)
611 return tem;
612
613 tem = build1 (code, type, expr);
614 TREE_NO_WARNING (tem) = 1;
615 return tem;
616 }
617
618 /* If TYPE is an enumeral type or a type with a precision less
619 than the number of bits in its mode, do the conversion to the
620 type corresponding to its mode, then do a nop conversion
621 to TYPE. */
622 else if (TREE_CODE (type) == ENUMERAL_TYPE
623 || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
624 return build1 (NOP_EXPR, type,
625 convert (lang_hooks.types.type_for_mode
626 (TYPE_MODE (type), TYPE_UNSIGNED (type)),
627 expr));
628
629 /* Here detect when we can distribute the truncation down past some
630 arithmetic. For example, if adding two longs and converting to an
631 int, we can equally well convert both to ints and then add.
632 For the operations handled here, such truncation distribution
633 is always safe.
634 It is desirable in these cases:
635 1) when truncating down to full-word from a larger size,
 636 2) when truncating takes no work,
637 3) when at least one operand of the arithmetic has been extended
638 (as by C's default conversions). In this case we need two conversions
639 if we do the arithmetic as already requested, so we might as well
640 truncate both and then combine. Perhaps that way we need only one.
641
642 Note that in general we cannot do the arithmetic in a type
643 shorter than the desired result of conversion, even if the operands
644 are both extended from a shorter type, because they might overflow
645 if combined in that type. The exceptions to this--the times when
646 two narrow values can be combined in their narrow type even to
647 make a wider result--are handled by "shorten" in build_binary_op. */
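/* Concrete instance of the above (illustrative): with "long la, lb;",
   the conversion (int) (la + lb) may be rewritten as (int) la + (int) lb
   computed in "int", since the low-order bits of a sum do not depend on
   the discarded high-order bits of its operands; the cases below check
   the corresponding conditions for each operator.  */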
648
649 switch (ex_form)
650 {
651 case RSHIFT_EXPR:
652 /* We can pass truncation down through right shifting
653 when the shift count is a nonpositive constant. */
654 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
655 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
656 goto trunc1;
657 break;
658
659 case LSHIFT_EXPR:
660 /* We can pass truncation down through left shifting
661 when the shift count is a nonnegative constant and
662 the target type is unsigned. */
663 if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
664 && tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
665 && TYPE_UNSIGNED (type)
666 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
667 {
668 /* If shift count is less than the width of the truncated type,
669 really shift. */
670 if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
671 /* In this case, shifting is like multiplication. */
672 goto trunc1;
673 else
674 {
675 /* If it is >= that width, result is zero.
676 Handling this with trunc1 would give the wrong result:
677 (int) ((long long) a << 32) is well defined (as 0)
678 but (int) a << 32 is undefined and would get a
679 warning. */
680
681 tree t = build_int_cst (type, 0);
682
683 /* If the original expression had side-effects, we must
684 preserve it. */
685 if (TREE_SIDE_EFFECTS (expr))
686 return build2 (COMPOUND_EXPR, type, expr, t);
687 else
688 return t;
689 }
690 }
691 break;
692
693 case TRUNC_DIV_EXPR:
694 {
695 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
696 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
697
698 /* Don't distribute unless the output precision is at least as big
699 as the actual inputs and it has the same signedness. */
700 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
701 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
702 /* If signedness of arg0 and arg1 don't match,
703 we can't necessarily find a type to compare them in. */
704 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
705 == TYPE_UNSIGNED (TREE_TYPE (arg1)))
706 /* Do not change the sign of the division. */
707 && (TYPE_UNSIGNED (TREE_TYPE (expr))
708 == TYPE_UNSIGNED (TREE_TYPE (arg0)))
709 /* Either require unsigned division or a division by
710 a constant that is not -1. */
711 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
712 || (TREE_CODE (arg1) == INTEGER_CST
713 && !integer_all_onesp (arg1))))
714 goto trunc1;
715 break;
716 }
717
718 case MAX_EXPR:
719 case MIN_EXPR:
720 case MULT_EXPR:
721 {
722 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
723 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
724
725 /* Don't distribute unless the output precision is at least as big
726 as the actual inputs. Otherwise, the comparison of the
727 truncated values will be wrong. */
728 if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
729 && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
730 /* If signedness of arg0 and arg1 don't match,
731 we can't necessarily find a type to compare them in. */
732 && (TYPE_UNSIGNED (TREE_TYPE (arg0))
733 == TYPE_UNSIGNED (TREE_TYPE (arg1))))
734 goto trunc1;
735 break;
736 }
737
738 case PLUS_EXPR:
739 case MINUS_EXPR:
740 case BIT_AND_EXPR:
741 case BIT_IOR_EXPR:
742 case BIT_XOR_EXPR:
743 trunc1:
744 {
745 tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
746 tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);
747
748 /* Do not try to narrow operands of pointer subtraction;
749 that will interfere with other folding. */
750 if (ex_form == MINUS_EXPR
751 && CONVERT_EXPR_P (arg0)
752 && CONVERT_EXPR_P (arg1)
753 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
754 && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
755 break;
756
757 if (outprec >= BITS_PER_WORD
758 || TRULY_NOOP_TRUNCATION (outprec, inprec)
759 || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
760 || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
761 {
762 /* Do the arithmetic in type TYPEX,
763 then convert result to TYPE. */
764 tree typex = type;
765
766 /* Can't do arithmetic in enumeral types
767 so use an integer type that will hold the values. */
768 if (TREE_CODE (typex) == ENUMERAL_TYPE)
769 typex = lang_hooks.types.type_for_size
770 (TYPE_PRECISION (typex), TYPE_UNSIGNED (typex));
771
772 /* But now perhaps TYPEX is as wide as INPREC.
773 In that case, do nothing special here.
774 (Otherwise we would recurse infinitely in convert.) */
775 if (TYPE_PRECISION (typex) != inprec)
776 {
777 /* Don't do unsigned arithmetic where signed was wanted,
778 or vice versa.
779 Exception: if both of the original operands were
780 unsigned then we can safely do the work as unsigned.
781 Exception: shift operations take their type solely
782 from the first argument.
783 Exception: the LSHIFT_EXPR case above requires that
784 we perform this operation unsigned lest we produce
785 signed-overflow undefinedness.
786 And we may need to do it as unsigned
787 if we truncate to the original size. */
788 if (TYPE_UNSIGNED (TREE_TYPE (expr))
789 || (TYPE_UNSIGNED (TREE_TYPE (arg0))
790 && (TYPE_UNSIGNED (TREE_TYPE (arg1))
791 || ex_form == LSHIFT_EXPR
792 || ex_form == RSHIFT_EXPR
793 || ex_form == LROTATE_EXPR
794 || ex_form == RROTATE_EXPR))
795 || ex_form == LSHIFT_EXPR
796 /* If we have !flag_wrapv, and either ARG0 or
797 ARG1 is of a signed type, we have to do
798 PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
799 type in case the operation in outprec precision
800 could overflow. Otherwise, we would introduce
801 signed-overflow undefinedness. */
802 || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
803 || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
804 && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
805 > outprec)
806 || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
807 > outprec))
808 && (ex_form == PLUS_EXPR
809 || ex_form == MINUS_EXPR
810 || ex_form == MULT_EXPR)))
811 typex = unsigned_type_for (typex);
812 else
813 typex = signed_type_for (typex);
814 return convert (type,
815 fold_build2 (ex_form, typex,
816 convert (typex, arg0),
817 convert (typex, arg1)));
818 }
819 }
820 }
821 break;
822
823 case NEGATE_EXPR:
824 case BIT_NOT_EXPR:
825 /* This is not correct for ABS_EXPR,
826 since we must test the sign before truncation. */
827 {
828 tree typex = unsigned_type_for (type);
829 return convert (type,
830 fold_build1 (ex_form, typex,
831 convert (typex,
832 TREE_OPERAND (expr, 0))));
833 }
834
835 case NOP_EXPR:
836 /* Don't introduce a
837 "can't convert between vector values of different size" error. */
838 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
839 && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_OPERAND (expr, 0))))
840 != GET_MODE_SIZE (TYPE_MODE (type))))
841 break;
842 /* If truncating after truncating, might as well do all at once.
843 If truncating after extending, we may get rid of wasted work. */
844 return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));
845
846 case COND_EXPR:
847 /* It is sometimes worthwhile to push the narrowing down through
848 the conditional, and doing so never loses anything. A COND_EXPR may have a throw
849 as one operand, which then has void type. Just leave void
850 operands as they are. */
851 return fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
852 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
853 ? TREE_OPERAND (expr, 1)
854 : convert (type, TREE_OPERAND (expr, 1)),
855 VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
856 ? TREE_OPERAND (expr, 2)
857 : convert (type, TREE_OPERAND (expr, 2)));
858
859 default:
860 break;
861 }
862
863 /* When parsing long initializers, we might end up with a lot of casts.
864 Shortcut this. */
865 if (TREE_CODE (expr) == INTEGER_CST)
866 return fold_convert (type, expr);
867 return build1 (CONVERT_EXPR, type, expr);
868
869 case REAL_TYPE:
870 return build1 (FIX_TRUNC_EXPR, type, expr);
871
872 case FIXED_POINT_TYPE:
873 return build1 (FIXED_CONVERT_EXPR, type, expr);
874
875 case COMPLEX_TYPE:
876 return convert (type,
877 fold_build1 (REALPART_EXPR,
878 TREE_TYPE (TREE_TYPE (expr)), expr));
879
880 case VECTOR_TYPE:
881 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
882 {
883 error ("can%'t convert between vector values of different size");
884 return error_mark_node;
885 }
886 return build1 (VIEW_CONVERT_EXPR, type, expr);
887
888 default:
889 error ("aggregate value used where an integer was expected");
890 return convert (type, integer_zero_node);
891 }
892 }
893
894 /* Convert EXPR to the complex type TYPE in the usual ways. */
895
896 tree
897 convert_to_complex (tree type, tree expr)
898 {
899 tree subtype = TREE_TYPE (type);
900
901 switch (TREE_CODE (TREE_TYPE (expr)))
902 {
903 case REAL_TYPE:
904 case FIXED_POINT_TYPE:
905 case INTEGER_TYPE:
906 case ENUMERAL_TYPE:
907 case BOOLEAN_TYPE:
908 return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
909 convert (subtype, integer_zero_node));
910
911 case COMPLEX_TYPE:
912 {
913 tree elt_type = TREE_TYPE (TREE_TYPE (expr));
914
915 if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
916 return expr;
917 else if (TREE_CODE (expr) == COMPLEX_EXPR)
918 return fold_build2 (COMPLEX_EXPR, type,
919 convert (subtype, TREE_OPERAND (expr, 0)),
920 convert (subtype, TREE_OPERAND (expr, 1)));
921 else
922 {
923 expr = save_expr (expr);
924 return
925 fold_build2 (COMPLEX_EXPR, type,
926 convert (subtype,
927 fold_build1 (REALPART_EXPR,
928 TREE_TYPE (TREE_TYPE (expr)),
929 expr)),
930 convert (subtype,
931 fold_build1 (IMAGPART_EXPR,
932 TREE_TYPE (TREE_TYPE (expr)),
933 expr)));
934 }
935 }
936
937 case POINTER_TYPE:
938 case REFERENCE_TYPE:
939 error ("pointer value used where a complex was expected");
940 return convert_to_complex (type, integer_zero_node);
941
942 default:
943 error ("aggregate value used where a complex was expected");
944 return convert_to_complex (type, integer_zero_node);
945 }
946 }
947
948 /* Convert EXPR to the vector type TYPE in the usual ways. */
949
950 tree
951 convert_to_vector (tree type, tree expr)
952 {
953 switch (TREE_CODE (TREE_TYPE (expr)))
954 {
955 case INTEGER_TYPE:
956 case VECTOR_TYPE:
957 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
958 {
959 error ("can%'t convert between vector values of different size");
960 return error_mark_node;
961 }
962 return build1 (VIEW_CONVERT_EXPR, type, expr);
963
964 default:
965 error ("can%'t convert value to a vector");
966 return error_mark_node;
967 }
968 }
969
970 /* Convert EXPR to some fixed-point type TYPE.
971
972 EXPR must be fixed-point, float, integer, or enumeral;
973 in other cases error is called. */
974
975 tree
976 convert_to_fixed (tree type, tree expr)
977 {
978 if (integer_zerop (expr))
979 {
980 tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
981 return fixed_zero_node;
982 }
983 else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
984 {
985 tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
986 return fixed_one_node;
987 }
988
989 switch (TREE_CODE (TREE_TYPE (expr)))
990 {
991 case FIXED_POINT_TYPE:
992 case INTEGER_TYPE:
993 case ENUMERAL_TYPE:
994 case BOOLEAN_TYPE:
995 case REAL_TYPE:
996 return build1 (FIXED_CONVERT_EXPR, type, expr);
997
998 case COMPLEX_TYPE:
999 return convert (type,
1000 fold_build1 (REALPART_EXPR,
1001 TREE_TYPE (TREE_TYPE (expr)), expr));
1002
1003 default:
1004 error ("aggregate value used where a fixed-point was expected");
1005 return error_mark_node;
1006 }
1007 }