/* Internal functions.
   Copyright (C) 2011-2016 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "internal-fn.h"
#include "stor-layout.h"
#include "dojump.h"
#include "expr.h"
#include "ubsan.h"
#include "recog.h"
#include "builtins.h"

/* The names of each internal function, indexed by function number.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};

/* The ECF_* flags of each internal function, indexed by function number.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};

/* Fnspec of each internal function, indexed by function number.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];

void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  internal_fn_fnspec_array[IFN_LAST] = 0;
}

/* Create static initializers for the information returned by
   direct_internal_fn.  */
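/* Each initializer below is { type0, type1, vectorizable }, mirroring
   the fields of direct_internal_fn_info (see internal-fn.h for the
   authoritative description): type0 and type1 select the types that
   determine the optab, given as a 0-based call argument number, with
   -1 standing for the return type and -2 for "no direct optab
   mapping".  */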
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }

const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};

/* ARRAY_TYPE is an array of vector modes.  Return the associated insn
   for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none.  */

static enum insn_code
get_multi_vector_move (tree array_type, convert_optab optab)
{
  machine_mode imode;
  machine_mode vmode;

  gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
  imode = TYPE_MODE (array_type);
  vmode = TYPE_MODE (TREE_TYPE (array_type));

  return convert_optab_handler (optab, imode, vmode);
}

/* Expand LOAD_LANES call STMT using optab OPTAB.  */

static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, mem;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (lhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  mem = expand_normal (rhs);

  gcc_assert (MEM_P (mem));
  PUT_MODE (mem, TYPE_MODE (type));

  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}

/* Expand STORE_LANES call STMT using optab OPTAB.  */

static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[2];
  tree type, lhs, rhs;
  rtx target, reg;

  lhs = gimple_call_lhs (stmt);
  rhs = gimple_call_arg (stmt, 0);
  type = TREE_TYPE (rhs);

  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  reg = expand_normal (rhs);

  gcc_assert (MEM_P (target));
  PUT_MODE (target, TYPE_MODE (type));

  create_fixed_operand (&ops[0], target);
  create_input_operand (&ops[1], reg, TYPE_MODE (type));
  expand_insn (get_multi_vector_move (type, optab), 2, ops);
}

static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* Helper function for expand_addsub_overflow.  Return 1
   if ARG interpreted as signed in its precision is known to be always
   positive or 2 if ARG is known to be always negative, or 3 if ARG may
   be positive or negative.  */
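/* For example, an SSA name of signed 8-bit type with a recorded value
   range of [0, 127] yields 1, a range of [-128, -1] yields 2, and
   missing or sign-straddling range information yields 3.  */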

static int
get_range_pos_neg (tree arg)
{
  if (arg == error_mark_node)
    return 3;

  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      wide_int w = wi::sext (arg, prec);
      if (wi::neg_p (w))
        return 2;
      else
        return 1;
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      /* Narrower value zero extended into wider type
         will always result in positive values.  */
      if (TYPE_UNSIGNED (TREE_TYPE (arg))
          && TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        return 1;
      prec = TYPE_PRECISION (TREE_TYPE (arg));
      if (++cnt > 30)
        return 3;
    }

  if (TREE_CODE (arg) != SSA_NAME)
    return 3;
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              if (TYPE_UNSIGNED (TREE_TYPE (t))
                  && TYPE_PRECISION (TREE_TYPE (t)) < prec)
                return 1;
              prec = TYPE_PRECISION (TREE_TYPE (t));
              arg = t;
              if (++cnt > 30)
                return 3;
              continue;
            }
        }
      return 3;
    }
  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
    {
      /* For unsigned values, the "positive" range comes
         below the "negative" range.  */
      if (!wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 2;
    }
  else
    {
      if (!wi::neg_p (wi::sext (arg_min, prec), SIGNED))
        return 1;
      if (wi::neg_p (wi::sext (arg_max, prec), SIGNED))
        return 2;
    }
  return 3;
}

/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
        {
          widest_int w = wi::to_widest (arg);
          w = wi::ext (w, prec, sign);
          p = wi::min_precision (w, sign);
        }
      else
        p = wi::min_precision (arg, sign);
      return MIN (p, prec);
    }
  while (CONVERT_EXPR_P (arg)
         && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
         && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
        {
          if (TYPE_UNSIGNED (TREE_TYPE (arg)))
            sign = UNSIGNED;
          else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
            return prec + (orig_sign != sign);
          prec = TYPE_PRECISION (TREE_TYPE (arg));
        }
      if (++cnt > 30)
        return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
          && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
        {
          tree t = gimple_assign_rhs1 (g);
          if (INTEGRAL_TYPE_P (TREE_TYPE (t))
              && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
            {
              arg = t;
              if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
                {
                  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
                    sign = UNSIGNED;
                  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
                    return prec + (orig_sign != sign);
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                }
              if (++cnt > 30)
                return prec + (orig_sign != sign);
              continue;
            }
        }
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}

/* Helper for expand_*_overflow.  Set the __imag__ part to true
   (1 except for signed:1 type, in which case store -1).  */

static void
expand_arith_set_overflow (tree lhs, rtx target)
{
  if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
      && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
    write_complex_part (target, constm1_rtx, true);
  else
    write_complex_part (target, const1_rtx, true);
}

/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
                                    machine_mode mode, rtx res)
{
  machine_mode tgtmode = GET_MODE_INNER (GET_MODE (target));
  rtx lres = res;
  if (tgtmode != mode)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
                               EQ, true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
  if (prec < tgtprec)
    {
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      res = lres;
      if (uns)
        {
          rtx mask
            = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
                                    tgtmode);
          lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
                                      true, OPTAB_LIB_WIDEN);
        }
      else
        {
          lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
                               NULL_RTX, 1);
          lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
                               NULL_RTX, 0);
        }
      do_compare_rtx_and_jump (res, lres,
                               EQ, true, tgtmode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  write_complex_part (target, lres, false);
}

/* Helper for expand_*_overflow.  Store RES into TARGET.  */

static void
expand_ubsan_result_store (rtx target, rtx res)
{
  if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
    /* If this is a scalar in a register that is stored in a wider mode
       than the declared mode, compute the result into its declared mode
       and then convert to the wider mode.  Our value is the computed
       expression.  */
    convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
  else
    emit_move_insn (target, res);
}

/* Add sub/add overflow checking to the statement STMT.
   CODE says whether the operation is +, or -.  */

static void
expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
                        tree arg0, tree arg1, bool unsr_p, bool uns0_p,
                        bool uns1_p, bool is_ubsan)
{
  rtx res, target = NULL_RTX;
  tree fn;
  rtx_code_label *done_label = gen_label_rtx ();
  rtx_code_label *do_error = gen_label_rtx ();
  do_pending_stack_adjust ();
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  int prec = GET_MODE_PRECISION (mode);
  rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
  bool do_xor = false;

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.

     s1 + s2 -> sr
        res = (S) ((U) s1 + (U) s2)
        ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
     s1 - s2 -> sr
        res = (S) ((U) s1 - (U) s2)
        ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
     u1 + u2 -> ur
        res = u1 + u2
        ovf = res < u1 (or jump on carry, but RTL opts will handle it)
     u1 - u2 -> ur
        res = u1 - u2
        ovf = res > u1 (or jump on carry, but RTL opts will handle it)
     s1 + u2 -> sr
        res = (S) ((U) s1 + u2)
        ovf = ((U) res ^ sgn) < u2
     s1 + u2 -> ur
        t1 = (S) (u2 ^ sgn)
        t2 = s1 + t1
        res = (U) t2 ^ sgn
        ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
     s1 - u2 -> sr
        res = (S) ((U) s1 - u2)
        ovf = u2 > ((U) s1 ^ sgn)
     s1 - u2 -> ur
        res = (U) s1 - u2
        ovf = s1 < 0 || u2 > (U) s1
     u1 - s2 -> sr
        res = u1 - (U) s2
        ovf = u1 >= ((U) s2 ^ sgn)
     u1 - s2 -> ur
        t1 = u1 ^ sgn
        t2 = t1 - (U) s2
        res = t2 ^ sgn
        ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
     s1 + s2 -> ur
        res = (U) s1 + (U) s2
        ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
     u1 + u2 -> sr
        res = (S) (u1 + u2)
        ovf = (U) res < u2 || res < 0
     u1 - u2 -> sr
        res = (S) (u1 - u2)
        ovf = u1 >= u2 ? res < 0 : res >= 0
     s1 - s2 -> ur
        res = (U) s1 - (U) s2
        ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)  */
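  /* For instance, in 8 bits: for s1 + s2 -> sr, 100 + 100 wraps to
     res = -56, and s2 >= 0 with res < s1 flags the overflow; for
     u1 - u2 -> ur, 3 - 5 wraps to res = 254, and res > u1 flags it.  */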

  if (code == PLUS_EXPR && uns0_p && !uns1_p)
    {
      /* PLUS_EXPR is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  /* u1 +- u2 -> ur */
  if (uns0_p && uns1_p && unsr_p)
    {
      insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
                                       : usubv4_optab, mode);
      if (icode != CODE_FOR_nothing)
        {
          struct expand_operand ops[4];
          rtx_insn *last = get_last_insn ();

          res = gen_reg_rtx (mode);
          create_output_operand (&ops[0], res, mode);
          create_input_operand (&ops[1], op0, mode);
          create_input_operand (&ops[2], op1, mode);
          create_fixed_operand (&ops[3], do_error);
          if (maybe_expand_insn (icode, 4, ops))
            {
              last = get_last_insn ();
              if (profile_status_for_fn (cfun) != PROFILE_ABSENT
                  && JUMP_P (last)
                  && any_condjump_p (last)
                  && !find_reg_note (last, REG_BR_PROB, 0))
                add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
              emit_jump (done_label);
              goto do_error_label;
            }

          delete_insns_since (last);
        }

      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = op0;
      /* For PLUS_EXPR, the operation is commutative, so we can pick
         operand to compare against.  For prec <= BITS_PER_WORD, I think
         preferring REG operand is better over CONST_INT, because
         the CONST_INT might enlarge the instruction or CSE would need
         to figure out we'd already loaded it into a register before.
         For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
         as then the multi-word comparison can be perhaps simplified.  */
      if (code == PLUS_EXPR
          && (prec <= BITS_PER_WORD
              ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
              : CONST_SCALAR_INT_P (op1)))
        tem = op1;
      do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
                               true, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab,
                              code == PLUS_EXPR ? res : op0, sgn,
                              NULL_RTX, false, OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 + u2 -> ur */
  if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op1, we have to avoid using the value range
         for the original argument.  */
      arg1 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* u1 - s2 -> ur */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
    {
      op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      /* As we've changed op0, we have to avoid using the value range
         for the original argument.  */
      arg0 = error_mark_node;
      do_xor = true;
      goto do_signed;
    }

  /* s1 - u2 -> ur */
  if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg0);
      if (pos_neg == 2)
        /* If ARG0 is known to be always negative, this is always overflow.  */
        emit_jump (do_error);
      else if (pos_neg == 3)
        /* If ARG0 is not known to be always positive, check at runtime.  */
        do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
                                 NULL, do_error, PROB_VERY_UNLIKELY);
      do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 - s2 -> sr */
  if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* u1 + u2 -> sr */
  if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, do_error, PROB_VERY_UNLIKELY);
      rtx tem = op1;
      /* The operation is commutative, so we can pick operand to compare
         against.  For prec <= BITS_PER_WORD, I think preferring REG operand
         is better over CONST_INT, because the CONST_INT might enlarge the
         instruction or CSE would need to figure out we'd already loaded it
         into a register before.  For prec > BITS_PER_WORD, I think CONST_INT
         might be more beneficial, as then the multi-word comparison can be
         perhaps simplified.  */
      if (prec <= BITS_PER_WORD
          ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
          : CONST_SCALAR_INT_P (op0))
        tem = op0;
      do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  /* s1 +- s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                          op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
      int pos_neg = get_range_pos_neg (arg1);
      if (code == PLUS_EXPR)
        {
          int pos_neg0 = get_range_pos_neg (arg0);
          if (pos_neg0 != 3 && pos_neg == 3)
            {
              std::swap (op0, op1);
              pos_neg = pos_neg0;
            }
        }
      rtx tem;
      if (pos_neg != 3)
        {
          tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
                              ? and_optab : ior_optab,
                              op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      else
        {
          rtx_code_label *do_ior_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op1, const0_rtx,
                                   code == MINUS_EXPR ? GE : LT, false, mode,
                                   NULL_RTX, NULL, do_ior_label,
                                   PROB_EVEN);
          tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          emit_jump (do_error);
          emit_label (do_ior_label);
          tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
        }
      goto do_error_label;
    }

  /* u1 - u2 -> sr */
  if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
                          OPTAB_LIB_WIDEN);
      rtx_code_label *op0_geu_op1 = gen_label_rtx ();
      do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
                               op0_geu_op1, PROB_EVEN);
      do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      emit_jump (do_error);
      emit_label (op0_geu_op1);
      do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
                               NULL, done_label, PROB_VERY_LIKELY);
      goto do_error_label;
    }

  gcc_assert (!uns0_p && !uns1_p && !unsr_p);

  /* s1 +- s2 -> sr */
 do_signed:
  {
    insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
                                     : subv4_optab, mode);
    if (icode != CODE_FOR_nothing)
      {
        struct expand_operand ops[4];
        rtx_insn *last = get_last_insn ();

        res = gen_reg_rtx (mode);
        create_output_operand (&ops[0], res, mode);
        create_input_operand (&ops[1], op0, mode);
        create_input_operand (&ops[2], op1, mode);
        create_fixed_operand (&ops[3], do_error);
        if (maybe_expand_insn (icode, 4, ops))
          {
            last = get_last_insn ();
            if (profile_status_for_fn (cfun) != PROFILE_ABSENT
                && JUMP_P (last)
                && any_condjump_p (last)
                && !find_reg_note (last, REG_BR_PROB, 0))
              add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
            emit_jump (done_label);
            goto do_error_label;
          }

        delete_insns_since (last);
      }

    rtx_code_label *sub_check = gen_label_rtx ();
    int pos_neg = 3;

    /* Compute the operation.  On RTL level, the addition is always
       unsigned.  */
    res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
                        op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);

    /* If we can prove one of the arguments (for MINUS_EXPR only
       the second operand, as subtraction is not commutative) is always
       non-negative or always negative, we can do just one comparison
       and conditional jump instead of 2 at runtime, 3 present in the
       emitted code.  If one of the arguments is CONST_INT, all we
       need is to make sure it is op1, then the first
       do_compare_rtx_and_jump will be just folded.  Otherwise try
       to use range info if available.  */
    if (code == PLUS_EXPR && CONST_INT_P (op0))
      std::swap (op0, op1);
    else if (CONST_INT_P (op1))
      ;
    else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
      {
        pos_neg = get_range_pos_neg (arg0);
        if (pos_neg != 3)
          std::swap (op0, op1);
      }
    if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
      pos_neg = get_range_pos_neg (arg1);

    /* If the op1 is negative, we have to use a different check.  */
    if (pos_neg == 3)
      do_compare_rtx_and_jump (op1, const0_rtx, LT, false, mode, NULL_RTX,
                               NULL, sub_check, PROB_EVEN);

    /* Compare the result of the operation with one of the operands.  */
    if (pos_neg & 1)
      do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? GE : LE,
                               false, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);

    /* If we get here, we have to print the error.  */
    if (pos_neg == 3)
      {
        emit_jump (do_error);
        emit_label (sub_check);
      }

    /* We have k = a + b for b < 0 here.  k <= a must hold.  */
    if (pos_neg & 2)
      do_compare_rtx_and_jump (res, op0, code == PLUS_EXPR ? LE : GE,
                               false, mode, NULL_RTX, NULL, done_label,
                               PROB_VERY_LIKELY);
  }

 do_error_label:
  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
                                         arg0, arg1);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        {
          if (do_xor)
            res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
                                OPTAB_LIB_WIDEN);

          expand_arith_overflow_result_store (lhs, target, mode, res);
        }
    }
}

/* Add negate overflow checking to the statement STMT.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation.  On RTL level, the addition is always
         unsigned.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
                               done_label, PROB_VERY_LIKELY);
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
                                         arg1, NULL_TREE);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
        expand_ubsan_result_store (target, res);
      else
        expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}

/* Add mul overflow checking to the statement STMT.  */

static void
expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
                     bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan)
{
  rtx res, op0, op1;
  tree fn, type;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;
  signop sign;
  enum insn_code icode;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  machine_mode mode = TYPE_MODE (TREE_TYPE (arg0));
  bool uns = unsr_p;
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
        write_complex_part (target, const0_rtx, true);
    }

  if (is_ubsan)
    gcc_assert (!unsr_p && !uns0_p && !uns1_p);

  /* We assume both operands and result have the same precision
     here (GET_MODE_BITSIZE (mode)), S stands for signed type
     with that precision, U for unsigned type with that precision,
     sgn for unsigned most significant bit in that precision.
     s1 is signed first operand, u1 is unsigned first operand,
     s2 is signed second operand, u2 is unsigned second operand,
     sr is signed result, ur is unsigned result and the following
     rules say how to compute result (which is always result of
     the operands as if both were unsigned, cast to the right
     signedness) and how to compute whether operation overflowed.
     main_ovf (false) stands for jump on signed multiplication
     overflow or the main algorithm with uns == false.
     main_ovf (true) stands for jump on unsigned multiplication
     overflow or the main algorithm with uns == true.

     s1 * s2 -> sr
        res = (S) ((U) s1 * (U) s2)
        ovf = main_ovf (false)
     u1 * u2 -> ur
        res = u1 * u2
        ovf = main_ovf (true)
     s1 * u2 -> ur
        res = (U) s1 * u2
        ovf = (s1 < 0 && u2) || main_ovf (true)
     u1 * u2 -> sr
        res = (S) (u1 * u2)
        ovf = res < 0 || main_ovf (true)
     s1 * u2 -> sr
        res = (S) ((U) s1 * u2)
        ovf = (S) u2 >= 0 ? main_ovf (false)
                          : (s1 != 0 && (s1 != -1 || u2 != (U) res))
     s1 * s2 -> ur
        t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
        t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
        res = t1 * t2
        ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true)  */
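  /* For instance, in 8 bits: for u1 * u2 -> ur, 16 * 16 = 256
     truncates to res = 0, and the non-zero high half of the double
     width product makes main_ovf (true) flag the overflow.  */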

  if (uns0_p && !uns1_p)
    {
      /* Multiplication is commutative, if operand signedness differs,
         canonicalize to the first operand being signed and second
         unsigned to simplify following code.  */
      std::swap (op0, op1);
      std::swap (arg0, arg1);
      uns0_p = false;
      uns1_p = true;
    }

  int pos_neg0 = get_range_pos_neg (arg0);
  int pos_neg1 = get_range_pos_neg (arg1);

  /* s1 * u2 -> ur */
  if (!uns0_p && uns1_p && unsr_p)
    {
      switch (pos_neg0)
        {
        case 1:
          /* If s1 is non-negative, just perform normal u1 * u2 -> ur.  */
          goto do_main;
        case 2:
          /* If s1 is negative, avoid the main code, just multiply and
             signal overflow if op1 is not 0.  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          rtx_code_label *do_main_label;
          do_main_label = gen_label_rtx ();
          do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          expand_arith_set_overflow (lhs, target);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* u1 * u2 -> sr */
  if (uns0_p && uns1_p && !unsr_p)
    {
      uns = true;
      /* Rest of handling of this case after res is computed.  */
      goto do_main;
    }

  /* s1 * u2 -> sr */
  if (!uns0_p && uns1_p && !unsr_p)
    {
      switch (pos_neg1)
        {
        case 1:
          goto do_main;
        case 2:
          /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
             avoid the main code, just multiply and signal overflow
             unless 0 * u2 or -1 * ((U) Smin).  */
          struct separate_ops ops;
          ops.code = MULT_EXPR;
          ops.type = TREE_TYPE (arg1);
          ops.op0 = make_tree (ops.type, op0);
          ops.op1 = make_tree (ops.type, op1);
          ops.op2 = NULL_TREE;
          ops.location = loc;
          res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
                                   NULL, do_error, PROB_VERY_UNLIKELY);
          int prec;
          prec = GET_MODE_PRECISION (mode);
          rtx sgn;
          sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
          do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
                                   NULL, done_label, PROB_VERY_LIKELY);
          goto do_error_label;
        case 3:
          /* Rest of handling of this case after res is computed.  */
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

  /* s1 * s2 -> ur */
  if (!uns0_p && !uns1_p && unsr_p)
    {
      rtx tem, tem2;
      switch (pos_neg0 | pos_neg1)
        {
        case 1: /* Both operands known to be non-negative.  */
          goto do_main;
        case 2: /* Both operands known to be negative.  */
          op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
          op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
          /* Avoid looking at arg0/arg1 ranges, as we've changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          goto do_main;
        case 3:
          if ((pos_neg0 ^ pos_neg1) == 3)
            {
              /* If one operand is known to be negative and the other
                 non-negative, this overflows always, unless the non-negative
                 one is 0.  Just do normal multiply and set overflow
                 unless one of the operands is 0.  */
              struct separate_ops ops;
              ops.code = MULT_EXPR;
              ops.type
                = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
                                                  1);
              ops.op0 = make_tree (ops.type, op0);
              ops.op1 = make_tree (ops.type, op1);
              ops.op2 = NULL_TREE;
              ops.location = loc;
              res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
              tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                                  OPTAB_LIB_WIDEN);
              do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
              goto do_error_label;
            }
          /* The general case, do all the needed comparisons at runtime.  */
          rtx_code_label *do_main_label, *after_negate_label;
          rtx rop0, rop1;
          rop0 = gen_reg_rtx (mode);
          rop1 = gen_reg_rtx (mode);
          emit_move_insn (rop0, op0);
          emit_move_insn (rop1, op1);
          op0 = rop0;
          op1 = rop1;
          do_main_label = gen_label_rtx ();
          after_negate_label = gen_label_rtx ();
          tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
                              OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, after_negate_label, PROB_VERY_LIKELY);
          /* Both arguments negative here, negate them and continue with
             normal unsigned overflow checking multiplication.  */
          emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
                                            NULL_RTX, false));
          emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
                                            NULL_RTX, false));
          /* Avoid looking at arg0/arg1 ranges, as we might have changed
             the arguments.  */
          arg0 = error_mark_node;
          arg1 = error_mark_node;
          emit_jump (do_main_label);
          emit_label (after_negate_label);
          tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
                               OPTAB_LIB_WIDEN);
          do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          /* One argument is negative here, the other positive.  This
             overflows always, unless one of the arguments is 0.  But
             if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
             is, thus we can keep do_main code oring in overflow as is.  */
          do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
                                   NULL, do_main_label, PROB_VERY_LIKELY);
          expand_arith_set_overflow (lhs, target);
          emit_label (do_main_label);
          goto do_main;
        default:
          gcc_unreachable ();
        }
    }

 do_main:
  type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
  sign = uns ? UNSIGNED : SIGNED;
  icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[4];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op0, mode);
      create_input_operand (&ops[2], op1, mode);
      create_fixed_operand (&ops[3], do_error);
      if (maybe_expand_insn (icode, 4, ops))
        {
          last = get_last_insn ();
          if (profile_status_for_fn (cfun) != PROFILE_ABSENT
              && JUMP_P (last)
              && any_condjump_p (last)
              && !find_reg_note (last, REG_BR_PROB, 0))
            add_int_reg_note (last, REG_BR_PROB, PROB_VERY_UNLIKELY);
          emit_jump (done_label);
        }
      else
        {
          delete_insns_since (last);
          icode = CODE_FOR_nothing;
        }
    }

  if (icode == CODE_FOR_nothing)
    {
      struct separate_ops ops;
      int prec = GET_MODE_PRECISION (mode);
      machine_mode hmode = mode_for_size (prec / 2, MODE_INT, 1);
      ops.op0 = make_tree (type, op0);
      ops.op1 = make_tree (type, op1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      if (GET_MODE_2XWIDER_MODE (mode) != VOIDmode
          && targetm.scalar_mode_supported_p (GET_MODE_2XWIDER_MODE (mode)))
        {
          machine_mode wmode = GET_MODE_2XWIDER_MODE (mode);
          ops.code = WIDEN_MULT_EXPR;
          ops.type
            = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);

          res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
          rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
                                     NULL_RTX, uns);
          hipart = gen_lowpart (mode, hipart);
          res = gen_lowpart (mode, res);
          if (uns)
            /* For the unsigned multiplication, there was overflow if
               HIPART is non-zero.  */
            do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
                                     NULL_RTX, NULL, done_label,
                                     PROB_VERY_LIKELY);
          else
            {
              rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
                                          NULL_RTX, 0);
              /* RES is low half of the double width result, HIPART
                 the high half.  There was overflow if
                 HIPART is different from RES < 0 ? -1 : 0.  */
              do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
                                       NULL_RTX, NULL, done_label,
                                       PROB_VERY_LIKELY);
            }
        }
      else if (hmode != BLKmode && 2 * GET_MODE_PRECISION (hmode) == prec)
        {
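          /* No usable double-width mode: split each operand into
             hprec-bit halves and rebuild the prec-bit product from
             hmode x hmode -> mode widening multiplications, checking
             whether the discarded high part proves overflow.  */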
          rtx_code_label *large_op0 = gen_label_rtx ();
          rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
          rtx_code_label *one_small_one_large = gen_label_rtx ();
          rtx_code_label *both_ops_large = gen_label_rtx ();
          rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
          rtx_code_label *do_overflow = gen_label_rtx ();
          rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();

          unsigned int hprec = GET_MODE_PRECISION (hmode);
          rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
                                      NULL_RTX, uns);
          hipart0 = gen_lowpart (hmode, hipart0);
          rtx lopart0 = gen_lowpart (hmode, op0);
          rtx signbit0 = const0_rtx;
          if (!uns)
            signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
                                     NULL_RTX, 0);
          rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
                                      NULL_RTX, uns);
          hipart1 = gen_lowpart (hmode, hipart1);
          rtx lopart1 = gen_lowpart (hmode, op1);
          rtx signbit1 = const0_rtx;
          if (!uns)
            signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
                                     NULL_RTX, 0);

          res = gen_reg_rtx (mode);

          /* True if op0 resp. op1 are known to be in the range of
             halfstype.  */
          bool op0_small_p = false;
          bool op1_small_p = false;
          /* True if op0 resp. op1 are known to have all zeros or all ones
             in the upper half of bits, but are not known to be
             op{0,1}_small_p.  */
          bool op0_medium_p = false;
          bool op1_medium_p = false;
          /* -1 if op{0,1} is known to be negative, 0 if it is known to be
             nonnegative, 1 if unknown.  */
          int op0_sign = 1;
          int op1_sign = 1;

          if (pos_neg0 == 1)
            op0_sign = 0;
          else if (pos_neg0 == 2)
            op0_sign = -1;
          if (pos_neg1 == 1)
            op1_sign = 0;
          else if (pos_neg1 == 2)
            op1_sign = -1;

          unsigned int mprec0 = prec;
          if (arg0 != error_mark_node)
            mprec0 = get_min_precision (arg0, sign);
          if (mprec0 <= hprec)
            op0_small_p = true;
          else if (!uns && mprec0 <= hprec + 1)
            op0_medium_p = true;
          unsigned int mprec1 = prec;
          if (arg1 != error_mark_node)
            mprec1 = get_min_precision (arg1, sign);
          if (mprec1 <= hprec)
            op1_small_p = true;
          else if (!uns && mprec1 <= hprec + 1)
            op1_medium_p = true;

          int smaller_sign = 1;
          int larger_sign = 1;
          if (op0_small_p)
            {
              smaller_sign = op0_sign;
              larger_sign = op1_sign;
            }
          else if (op1_small_p)
            {
              smaller_sign = op1_sign;
              larger_sign = op0_sign;
            }
          else if (op0_sign == op1_sign)
            {
              smaller_sign = op0_sign;
              larger_sign = op0_sign;
            }

          if (!op0_small_p)
            do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
                                     NULL_RTX, NULL, large_op0,
                                     PROB_UNLIKELY);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, small_op0_large_op1,
                                     PROB_UNLIKELY);

          /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
             hmode to mode, the multiplication will never overflow.  We can
             do just one hmode x hmode => mode widening multiplication.  */
          rtx lopart0s = lopart0, lopart1s = lopart1;
          if (GET_CODE (lopart0) == SUBREG)
            {
              lopart0s = shallow_copy_rtx (lopart0);
              SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
              SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          if (GET_CODE (lopart1) == SUBREG)
            {
              lopart1s = shallow_copy_rtx (lopart1);
              SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
              SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
            }
          tree halfstype = build_nonstandard_integer_type (hprec, uns);
          ops.op0 = make_tree (halfstype, lopart0s);
          ops.op1 = make_tree (halfstype, lopart1s);
          ops.code = WIDEN_MULT_EXPR;
          ops.type = type;
          rtx thisres
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (res, thisres);
          emit_jump (done_label);

          emit_label (small_op0_large_op1);

          /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op1 is not, just swap the arguments and handle it as op1
             sign/zero extended, op0 not.  */
          rtx larger = gen_reg_rtx (mode);
          rtx hipart = gen_reg_rtx (hmode);
          rtx lopart = gen_reg_rtx (hmode);
          emit_move_insn (larger, op1);
          emit_move_insn (hipart, hipart1);
          emit_move_insn (lopart, lopart0);
          emit_jump (one_small_one_large);

          emit_label (large_op0);

          if (!op1_small_p)
            do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
                                     NULL_RTX, NULL, both_ops_large,
                                     PROB_UNLIKELY);

          /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
             but op0 is not, prepare larger, hipart and lopart pseudos and
             handle it together with small_op0_large_op1.  */
          emit_move_insn (larger, op0);
          emit_move_insn (hipart, hipart0);
          emit_move_insn (lopart, lopart1);

          emit_label (one_small_one_large);

          /* lopart is the low part of the operand that is sign extended
             to mode, larger is the other operand, hipart is the
             high part of larger and lopart0 and lopart1 are the low parts
             of both operands.
             We perform lopart0 * lopart1 and lopart * hipart widening
             multiplications.  */
          tree halfutype = build_nonstandard_integer_type (hprec, 1);
          ops.op0 = make_tree (halfutype, lopart0);
          ops.op1 = make_tree (halfutype, lopart1);
          rtx lo0xlo1
            = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);

          ops.op0 = make_tree (halfutype, lopart);
          ops.op1 = make_tree (halfutype, hipart);
          rtx loxhi = gen_reg_rtx (mode);
          rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
          emit_move_insn (loxhi, tem);

          if (!uns)
            {
              /* if (hipart < 0) loxhi -= lopart << (bitsize / 2);  */
              if (larger_sign == 0)
                emit_jump (after_hipart_neg);
              else if (larger_sign != -1)
                do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_hipart_neg,
                                         PROB_EVEN);

              tem = convert_modes (mode, hmode, lopart, 1);
              tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
              tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_hipart_neg);

              /* if (lopart < 0) loxhi -= larger;  */
              if (smaller_sign == 0)
                emit_jump (after_lopart_neg);
              else if (smaller_sign != -1)
                do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
                                         NULL_RTX, NULL, after_lopart_neg,
                                         PROB_EVEN);

              tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
                                         1, OPTAB_DIRECT);
              emit_move_insn (loxhi, tem);

              emit_label (after_lopart_neg);
            }

          /* loxhi += (uns) lo0xlo1 >> (bitsize / 2);  */
          tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
          tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
                                     1, OPTAB_DIRECT);
          emit_move_insn (loxhi, tem);

          /* if (loxhi >> (bitsize / 2)
                 == (hmode) loxhi >> (bitsize / 2 - 1))  (if !uns)
             if (loxhi >> (bitsize / 2) == 0             (if uns).  */
          rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
                                          NULL_RTX, 0);
          hipartloxhi = gen_lowpart (hmode, hipartloxhi);
          rtx signbitloxhi = const0_rtx;
          if (!uns)
            signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
                                         gen_lowpart (hmode, loxhi),
                                         hprec - 1, NULL_RTX, 0);

          do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
                                   NULL_RTX, NULL, do_overflow,
                                   PROB_VERY_UNLIKELY);

          /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1;  */
          rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
                                           NULL_RTX, 1);
          tem = convert_modes (mode, hmode, gen_lowpart (hmode, lo0xlo1), 1);

          tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
                                     1, OPTAB_DIRECT);
          if (tem != res)
            emit_move_insn (res, tem);
          emit_jump (done_label);

          emit_label (both_ops_large);

          /* If both operands are large (not sign (!uns) or zero (uns)
             extended from hmode), then perform the full multiplication
             which will be the result of the operation.
             The only cases which don't overflow are, for signed
             multiplication, some cases where both hipart0 and hipart1
             are 0 or -1.  For unsigned multiplication when high parts
             are both non-zero this overflows always.  */
1582 ops.code = MULT_EXPR;
1583 ops.op0 = make_tree (type, op0);
1584 ops.op1 = make_tree (type, op1);
1585 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1586 emit_move_insn (res, tem);
1587
1588 if (!uns)
1589 {
1590 if (!op0_medium_p)
1591 {
1592 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
1593 NULL_RTX, 1, OPTAB_DIRECT);
1594 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1595 NULL_RTX, NULL, do_error,
1596 PROB_VERY_UNLIKELY);
1597 }
1598
1599 if (!op1_medium_p)
1600 {
1601 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
1602 NULL_RTX, 1, OPTAB_DIRECT);
1603 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1604 NULL_RTX, NULL, do_error,
1605 PROB_VERY_UNLIKELY);
1606 }
1607
1608 /* At this point hipart{0,1} are both in [-1, 0]. If they are
1609 the same, overflow happened if res is negative, if they are
1610 different, overflow happened if res is positive. */
1611 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
1612 emit_jump (hipart_different);
1613 else if (op0_sign == 1 || op1_sign == 1)
1614 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
1615 NULL_RTX, NULL, hipart_different,
1616 PROB_EVEN);
1617
1618 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode,
1619 NULL_RTX, NULL, do_error,
1620 PROB_VERY_UNLIKELY);
1621 emit_jump (done_label);
1622
1623 emit_label (hipart_different);
1624
1625 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
1626 NULL_RTX, NULL, do_error,
1627 PROB_VERY_UNLIKELY);
1628 emit_jump (done_label);
1629 }
1630
1631 emit_label (do_overflow);
1632
1633 /* Overflow, do full multiplication and fallthru into do_error. */
1634 ops.op0 = make_tree (type, op0);
1635 ops.op1 = make_tree (type, op1);
1636 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1637 emit_move_insn (res, tem);
1638 }
1639 else
1640 {
1641 gcc_assert (!is_ubsan);
1642 ops.code = MULT_EXPR;
1643 ops.type = type;
1644 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1645 emit_jump (done_label);
1646 }
1647 }
1648
1649 do_error_label:
1650 emit_label (do_error);
1651 if (is_ubsan)
1652 {
1653 /* Expand the ubsan builtin call. */
1654 push_temp_slots ();
1655 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
1656 arg0, arg1);
1657 expand_normal (fn);
1658 pop_temp_slots ();
1659 do_pending_stack_adjust ();
1660 }
1661 else if (lhs)
1662 expand_arith_set_overflow (lhs, target);
1663
1664 /* We're done. */
1665 emit_label (done_label);
1666
1667 /* u1 * u2 -> sr */
1668 if (uns0_p && uns1_p && !unsr_p)
1669 {
1670 rtx_code_label *all_done_label = gen_label_rtx ();
1671 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1672 NULL, all_done_label, PROB_VERY_LIKELY);
1673 expand_arith_set_overflow (lhs, target);
1674 emit_label (all_done_label);
1675 }
1676
1677 /* s1 * u2 -> sr */
1678 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
1679 {
1680 rtx_code_label *all_done_label = gen_label_rtx ();
1681 rtx_code_label *set_noovf = gen_label_rtx ();
1682 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
1683 NULL, all_done_label, PROB_VERY_LIKELY);
1684 expand_arith_set_overflow (lhs, target);
1685 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1686 NULL, set_noovf, PROB_VERY_LIKELY);
1687 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1688 NULL, all_done_label, PROB_VERY_UNLIKELY);
1689 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
1690 all_done_label, PROB_VERY_UNLIKELY);
1691 emit_label (set_noovf);
1692 write_complex_part (target, const0_rtx, true);
1693 emit_label (all_done_label);
1694 }
1695
1696 if (lhs)
1697 {
1698 if (is_ubsan)
1699 expand_ubsan_result_store (target, res);
1700 else
1701 expand_arith_overflow_result_store (lhs, target, mode, res);
1702 }
1703 }
1704
1705 /* Expand UBSAN_CHECK_ADD call STMT. */
1706
1707 static void
1708 expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
1709 {
1710 location_t loc = gimple_location (stmt);
1711 tree lhs = gimple_call_lhs (stmt);
1712 tree arg0 = gimple_call_arg (stmt, 0);
1713 tree arg1 = gimple_call_arg (stmt, 1);
1714 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
1715 false, false, false, true);
1716 }
1717
1718 /* Expand UBSAN_CHECK_SUB call STMT. */
1719
1720 static void
1721 expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
1722 {
1723 location_t loc = gimple_location (stmt);
1724 tree lhs = gimple_call_lhs (stmt);
1725 tree arg0 = gimple_call_arg (stmt, 0);
1726 tree arg1 = gimple_call_arg (stmt, 1);
1727 if (integer_zerop (arg0))
1728 expand_neg_overflow (loc, lhs, arg1, true);
1729 else
1730 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
1731 false, false, false, true);
1732 }
1733
1734 /* Expand UBSAN_CHECK_MUL call STMT. */
1735
1736 static void
1737 expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
1738 {
1739 location_t loc = gimple_location (stmt);
1740 tree lhs = gimple_call_lhs (stmt);
1741 tree arg0 = gimple_call_arg (stmt, 0);
1742 tree arg1 = gimple_call_arg (stmt, 1);
1743 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true);
1744 }
1745
1746 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
1747
1748 static void
1749 expand_arith_overflow (enum tree_code code, gimple *stmt)
1750 {
1751 tree lhs = gimple_call_lhs (stmt);
1752 if (lhs == NULL_TREE)
1753 return;
1754 tree arg0 = gimple_call_arg (stmt, 0);
1755 tree arg1 = gimple_call_arg (stmt, 1);
1756 tree type = TREE_TYPE (TREE_TYPE (lhs));
1757 int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
1758 int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
1759 int unsr_p = TYPE_UNSIGNED (type);
1760 int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
1761 int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
1762 int precres = TYPE_PRECISION (type);
1763 location_t loc = gimple_location (stmt);
1764 if (!uns0_p && get_range_pos_neg (arg0) == 1)
1765 uns0_p = true;
1766 if (!uns1_p && get_range_pos_neg (arg1) == 1)
1767 uns1_p = true;
1768 int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
1769 prec0 = MIN (prec0, pr);
1770 pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
1771 prec1 = MIN (prec1, pr);
1772
1773 /* If uns0_p && uns1_p, precop is the minimum precision
1774 of an unsigned type needed to hold the exact result;
1775 otherwise it is the minimum precision of a signed type
1776 needed to hold the exact result. */
1777 int precop;
1778 if (code == MULT_EXPR)
1779 precop = prec0 + prec1 + (uns0_p != uns1_p);
1780 else
1781 {
1782 if (uns0_p == uns1_p)
1783 precop = MAX (prec0, prec1) + 1;
1784 else if (uns0_p)
1785 precop = MAX (prec0 + 1, prec1) + 1;
1786 else
1787 precop = MAX (prec0, prec1 + 1) + 1;
1788 }
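/* Worked examples (added for illustration): multiplying two 16-bit
   unsigned operands needs precop = 16 + 16 = 32 unsigned bits for the
   exact product; adding a 32-bit unsigned to a 32-bit signed operand
   needs precop = MAX (32 + 1, 32) + 1 = 34 signed bits.  */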
1789 int orig_precres = precres;
1790
1791 do
1792 {
1793 if ((uns0_p && uns1_p)
1794 ? ((precop + !unsr_p) <= precres
1795 /* u1 - u2 -> ur can overflow, no matter what precision
1796 the result has. */
1797 && (code != MINUS_EXPR || !unsr_p))
1798 : (!unsr_p && precop <= precres))
1799 {
1800 /* The infinite-precision result will always fit into the result. */
1801 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
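/* Clear the overflow flag, i.e. the imaginary part of the complex result.  */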
1802 write_complex_part (target, const0_rtx, true);
1803 enum machine_mode mode = TYPE_MODE (type);
1804 struct separate_ops ops;
1805 ops.code = code;
1806 ops.type = type;
1807 ops.op0 = fold_convert_loc (loc, type, arg0);
1808 ops.op1 = fold_convert_loc (loc, type, arg1);
1809 ops.op2 = NULL_TREE;
1810 ops.location = loc;
1811 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1812 expand_arith_overflow_result_store (lhs, target, mode, tem);
1813 return;
1814 }
1815
1816 /* For sub-word operations, if the target doesn't have them, start
1817 with precres widening right away, otherwise do so only
1818 if the simplest cases can't be used. */
1819 if (WORD_REGISTER_OPERATIONS
1820 && orig_precres == precres
1821 && precres < BITS_PER_WORD)
1822 ;
1823 else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
1824 && prec1 <= precres)
1825 || ((!uns0_p || !uns1_p) && !unsr_p
1826 && prec0 + uns0_p <= precres
1827 && prec1 + uns1_p <= precres))
1828 {
1829 arg0 = fold_convert_loc (loc, type, arg0);
1830 arg1 = fold_convert_loc (loc, type, arg1);
1831 switch (code)
1832 {
1833 case MINUS_EXPR:
1834 if (integer_zerop (arg0) && !unsr_p)
1835 expand_neg_overflow (loc, lhs, arg1, false);
1836 /* FALLTHRU */
1837 case PLUS_EXPR:
1838 expand_addsub_overflow (loc, code, lhs, arg0, arg1,
1839 unsr_p, unsr_p, unsr_p, false);
1840 return;
1841 case MULT_EXPR:
1842 expand_mul_overflow (loc, lhs, arg0, arg1,
1843 unsr_p, unsr_p, unsr_p, false);
1844 return;
1845 default:
1846 gcc_unreachable ();
1847 }
1848 }
1849
1850 /* For sub-word operations, retry with a wider type first. */
1851 if (orig_precres == precres && precop <= BITS_PER_WORD)
1852 {
1853 int p = WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : precop;
1854 enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
1855 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
1856 uns0_p && uns1_p
1857 && unsr_p);
1858 p = TYPE_PRECISION (optype);
1859 if (p > precres)
1860 {
1861 precres = p;
1862 unsr_p = TYPE_UNSIGNED (optype);
1863 type = optype;
1864 continue;
1865 }
1866 }
1867
1868 if (prec0 <= precres && prec1 <= precres)
1869 {
1870 tree types[2];
1871 if (unsr_p)
1872 {
1873 types[0] = build_nonstandard_integer_type (precres, 0);
1874 types[1] = type;
1875 }
1876 else
1877 {
1878 types[0] = type;
1879 types[1] = build_nonstandard_integer_type (precres, 1);
1880 }
1881 arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
1882 arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
1883 if (code != MULT_EXPR)
1884 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
1885 uns0_p, uns1_p, false);
1886 else
1887 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
1888 uns0_p, uns1_p, false);
1889 return;
1890 }
1891
1892 /* Retry with a wider type. */
1893 if (orig_precres == precres)
1894 {
1895 int p = MAX (prec0, prec1);
1896 enum machine_mode m = smallest_mode_for_size (p, MODE_INT);
1897 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
1898 uns0_p && uns1_p
1899 && unsr_p);
1900 p = TYPE_PRECISION (optype);
1901 if (p > precres)
1902 {
1903 precres = p;
1904 unsr_p = TYPE_UNSIGNED (optype);
1905 type = optype;
1906 continue;
1907 }
1908 }
1909
1910 gcc_unreachable ();
1911 }
1912 while (1);
1913 }
1914
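/* Illustrative sketch (not from the original source): a call such as

     bool f (int a, long b, short *p)
     {
       return __builtin_add_overflow (a, b, p);
     }

   reaches expansion as an internal call ADD_OVERFLOW (a, b) returning
   a complex integer: the real part is the truncated result stored
   through P (extracted with REALPART_EXPR) and the imaginary part is
   the overflow flag returned by the builtin (IMAGPART_EXPR).
   expand_arith_overflow above chooses the cheapest expansion strategy
   based on the operand and result precisions.  */
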
1915 /* Expand ADD_OVERFLOW STMT. */
1916
1917 static void
1918 expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
1919 {
1920 expand_arith_overflow (PLUS_EXPR, stmt);
1921 }
1922
1923 /* Expand SUB_OVERFLOW STMT. */
1924
1925 static void
1926 expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
1927 {
1928 expand_arith_overflow (MINUS_EXPR, stmt);
1929 }
1930
1931 /* Expand MUL_OVERFLOW STMT. */
1932
1933 static void
1934 expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
1935 {
1936 expand_arith_overflow (MULT_EXPR, stmt);
1937 }
1938
1939 /* This call should get folded away in tree-vectorizer.c, so it should never reach expansion. */
1940
1941 static void
1942 expand_LOOP_VECTORIZED (internal_fn, gcall *)
1943 {
1944 gcc_unreachable ();
1945 }
1946
1947 /* Expand MASK_LOAD call STMT using optab OPTAB. */
1948
1949 static void
1950 expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
1951 {
1952 struct expand_operand ops[3];
1953 tree type, lhs, rhs, maskt, ptr;
1954 rtx mem, target, mask;
1955 unsigned align;
1956
1957 maskt = gimple_call_arg (stmt, 2);
1958 lhs = gimple_call_lhs (stmt);
1959 if (lhs == NULL_TREE)
1960 return;
1961 type = TREE_TYPE (lhs);
1962 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
1963 align = tree_to_shwi (gimple_call_arg (stmt, 1));
1964 if (TYPE_ALIGN (type) != align)
1965 type = build_aligned_type (type, align);
1966 rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);
1967
1968 mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1969 gcc_assert (MEM_P (mem));
1970 mask = expand_normal (maskt);
1971 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1972 create_output_operand (&ops[0], target, TYPE_MODE (type));
1973 create_fixed_operand (&ops[1], mem);
1974 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
1975 expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
1976 TYPE_MODE (TREE_TYPE (maskt))),
1977 3, ops);
1978 }
1979
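/* Note on the argument convention used above and in MASK_STORE below
   (as recovered from the code): argument 0 is the base address,
   argument 1 is an INTEGER_CST of pointer type whose value is the
   access alignment in bits and whose type carries the alias
   information, argument 2 is the vector mask, and for MASK_STORE
   argument 3 is the value to be stored.  */
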
1980 /* Expand MASK_STORE call STMT using optab OPTAB. */
1981
1982 static void
1983 expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
1984 {
1985 struct expand_operand ops[3];
1986 tree type, lhs, rhs, maskt, ptr;
1987 rtx mem, reg, mask;
1988 unsigned align;
1989
1990 maskt = gimple_call_arg (stmt, 2);
1991 rhs = gimple_call_arg (stmt, 3);
1992 type = TREE_TYPE (rhs);
1993 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
1994 align = tree_to_shwi (gimple_call_arg (stmt, 1));
1995 if (TYPE_ALIGN (type) != align)
1996 type = build_aligned_type (type, align);
1997 lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);
1998
1999 mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2000 gcc_assert (MEM_P (mem));
2001 mask = expand_normal (maskt);
2002 reg = expand_normal (rhs);
2003 create_fixed_operand (&ops[0], mem);
2004 create_input_operand (&ops[1], reg, TYPE_MODE (type));
2005 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2006 expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
2007 TYPE_MODE (TREE_TYPE (maskt))),
2008 3, ops);
2009 }
2010
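/* ABNORMAL_DISPATCHER only gives abnormal edges (e.g. from setjmp
   receivers and non-local goto handlers) a dispatcher block in the
   CFG; it deliberately expands to no RTL.  */
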
2011 static void
2012 expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
2013 {
2014 }
2015
2016 static void
2017 expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
2018 {
2019 /* If guessing was done, the hints should already have been stripped away. */
2020 gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
2021
2022 rtx target;
2023 tree lhs = gimple_call_lhs (stmt);
2024 if (lhs)
2025 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2026 else
2027 target = const0_rtx;
2028 rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
2029 if (lhs && val != target)
2030 emit_move_insn (target, val);
2031 }
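/* Illustrative note (not from the original source): when the hint is
   not consumed by branch prediction (e.g. at -O0), a call such as

     if (__builtin_expect (x != 0, 1)) ...

   survives to expansion, and the code above simply yields the first
   argument; the hint has no effect at the RTL level.  */
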
2032
2033 /* IFN_VA_ARG is supposed to be expanded during pass_stdarg, so this dummy
2034 function should never be called. */
2035
2036 static void
2037 expand_VA_ARG (internal_fn, gcall *)
2038 {
2039 gcc_unreachable ();
2040 }
2041
2042 /* Expand the IFN_UNIQUE function according to its first argument. */
2043
2044 static void
2045 expand_UNIQUE (internal_fn, gcall *stmt)
2046 {
2047 rtx pattern = NULL_RTX;
2048 enum ifn_unique_kind kind
2049 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));
2050
2051 switch (kind)
2052 {
2053 default:
2054 gcc_unreachable ();
2055
2056 case IFN_UNIQUE_UNSPEC:
2057 if (targetm.have_unique ())
2058 pattern = targetm.gen_unique ();
2059 break;
2060
2061 case IFN_UNIQUE_OACC_FORK:
2062 case IFN_UNIQUE_OACC_JOIN:
2063 if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
2064 {
2065 tree lhs = gimple_call_lhs (stmt);
2066 rtx target = const0_rtx;
2067
2068 if (lhs)
2069 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2070
2071 rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
2072 rtx axis = expand_normal (gimple_call_arg (stmt, 2));
2073
2074 if (kind == IFN_UNIQUE_OACC_FORK)
2075 pattern = targetm.gen_oacc_fork (target, data_dep, axis);
2076 else
2077 pattern = targetm.gen_oacc_join (target, data_dep, axis);
2078 }
2079 else
2080 gcc_unreachable ();
2081 break;
2082 }
2083
2084 if (pattern)
2085 emit_insn (pattern);
2086 }
2087
2088 /* The size of an OpenACC compute dimension. */
2089
2090 static void
2091 expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
2092 {
2093 tree lhs = gimple_call_lhs (stmt);
2094
2095 if (!lhs)
2096 return;
2097
2098 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2099 if (targetm.have_oacc_dim_size ())
2100 {
2101 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2102 VOIDmode, EXPAND_NORMAL);
2103 emit_insn (targetm.gen_oacc_dim_size (target, dim));
2104 }
2105 else
2106 emit_move_insn (target, GEN_INT (1));
2107 }
2108
2109 /* The position of an OpenACC execution engine along one compute axis. */
2110
2111 static void
2112 expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
2113 {
2114 tree lhs = gimple_call_lhs (stmt);
2115
2116 if (!lhs)
2117 return;
2118
2119 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2120 if (targetm.have_oacc_dim_pos ())
2121 {
2122 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2123 VOIDmode, EXPAND_NORMAL);
2124 emit_insn (targetm.gen_oacc_dim_pos (target, dim));
2125 }
2126 else
2127 emit_move_insn (target, const0_rtx);
2128 }
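/* Note: the fallbacks above encode single-threaded execution; when the
   target provides no oacc_dim_size / oacc_dim_pos insns, every compute
   dimension has size 1 and every position within it is 0.  */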
2129
2130 /* This is expanded by the oacc_device_lower pass. */
2131
2132 static void
2133 expand_GOACC_LOOP (internal_fn, gcall *)
2134 {
2135 gcc_unreachable ();
2136 }
2137
2138 /* This is expanded by the oacc_device_lower pass. */
2139
2140 static void
2141 expand_GOACC_REDUCTION (internal_fn, gcall *)
2142 {
2143 gcc_unreachable ();
2144 }
2145
2146 /* Set errno to EDOM. */
2147
2148 static void
2149 expand_SET_EDOM (internal_fn, gcall *)
2150 {
2151 #ifdef TARGET_EDOM
2152 #ifdef GEN_ERRNO_RTX
2153 rtx errno_rtx = GEN_ERRNO_RTX;
2154 #else
2155 rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
2156 #endif
2157 emit_move_insn (errno_rtx,
2158 gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
2159 #else
2160 gcc_unreachable ();
2161 #endif
2162 }
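/* Effectively, the above emits the equivalent of the C statement

     errno = EDOM;

   for targets that define TARGET_EDOM, honoring GEN_ERRNO_RTX when the
   target supplies a custom errno location.  */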
2163
2164 /* Expand atomic bit test and set. */
2165
2166 static void
2167 expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
2168 {
2169 expand_ifn_atomic_bit_test_and (call);
2170 }
2171
2172 /* Expand atomic bit test and complement. */
2173
2174 static void
2175 expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
2176 {
2177 expand_ifn_atomic_bit_test_and (call);
2178 }
2179
2180 /* Expand atomic bit test and reset. */
2181
2182 static void
2183 expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
2184 {
2185 expand_ifn_atomic_bit_test_and (call);
2186 }
2187
2188 /* Expand atomic compare and exchange. */
2189
2190 static void
2191 expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
2192 {
2193 expand_ifn_atomic_compare_exchange (call);
2194 }
2195
2196 /* Expand a call to FN using the operands in STMT. FN has a single
2197 output operand and NARGS input operands. */
2198
2199 static void
2200 expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
2201 unsigned int nargs)
2202 {
2203 expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);
2204
2205 tree_pair types = direct_internal_fn_types (fn, stmt);
2206 insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
2207
2208 tree lhs = gimple_call_lhs (stmt);
2209 tree lhs_type = TREE_TYPE (lhs);
2210 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2211 create_output_operand (&ops[0], lhs_rtx, insn_data[icode].operand[0].mode);
2212
2213 for (unsigned int i = 0; i < nargs; ++i)
2214 {
2215 tree rhs = gimple_call_arg (stmt, i);
2216 tree rhs_type = TREE_TYPE (rhs);
2217 rtx rhs_rtx = expand_normal (rhs);
2218 if (INTEGRAL_TYPE_P (rhs_type))
2219 create_convert_operand_from (&ops[i + 1], rhs_rtx,
2220 TYPE_MODE (rhs_type),
2221 TYPE_UNSIGNED (rhs_type));
2222 else
2223 create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
2224 }
2225
2226 expand_insn (icode, nargs + 1, ops);
2227 if (!rtx_equal_p (lhs_rtx, ops[0].value))
2228 {
2229 /* If the return value has an integral type, convert the instruction
2230 result to that type. This is useful for things that return an
2231 int regardless of the size of the input. If the instruction result
2232 is smaller than required, assume that it is signed.
2233
2234 If the return value has a nonintegral type, its mode must match
2235 the instruction result. */
2236 if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
2237 {
2238 /* If this is a scalar in a register that is stored in a wider
2239 mode than the declared mode, compute the result into its
2240 declared mode and then convert to the wider mode. */
2241 gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2242 rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
2243 convert_move (SUBREG_REG (lhs_rtx), tmp,
2244 SUBREG_PROMOTED_SIGN (lhs_rtx));
2245 }
2246 else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
2247 emit_move_insn (lhs_rtx, ops[0].value);
2248 else
2249 {
2250 gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
2251 convert_move (lhs_rtx, ops[0].value, 0);
2252 }
2253 }
2254 }
2255
2256 /* Expanders for optabs that can use expand_direct_optab_fn. */
2257
2258 #define expand_unary_optab_fn(FN, STMT, OPTAB) \
2259 expand_direct_optab_fn (FN, STMT, OPTAB, 1)
2260
2261 #define expand_binary_optab_fn(FN, STMT, OPTAB) \
2262 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
2263
2264 /* RETURN_TYPE and ARGS are a return type and argument list that are
2265 in principle compatible with FN (which satisfies direct_internal_fn_p).
2266 Return the types that should be used to determine whether the
2267 target supports FN. */
2268
2269 tree_pair
2270 direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
2271 {
2272 const direct_internal_fn_info &info = direct_internal_fn (fn);
2273 tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
2274 tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
2275 return tree_pair (type0, type1);
2276 }
2277
2278 /* CALL is a call whose return type and arguments are in principle
2279 compatible with FN (which satisfies direct_internal_fn_p). Return the
2280 types that should be used to determine whether the target supports FN. */
2281
2282 tree_pair
2283 direct_internal_fn_types (internal_fn fn, gcall *call)
2284 {
2285 const direct_internal_fn_info &info = direct_internal_fn (fn);
2286 tree op0 = (info.type0 < 0
2287 ? gimple_call_lhs (call)
2288 : gimple_call_arg (call, info.type0));
2289 tree op1 = (info.type1 < 0
2290 ? gimple_call_lhs (call)
2291 : gimple_call_arg (call, info.type1));
2292 return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
2293 }
2294
2295 /* Return true if OPTAB is supported for TYPES (whose modes should be
2296 the same) when the optimization type is OPT_TYPE. Used for simple
2297 direct optabs. */
2298
2299 static bool
2300 direct_optab_supported_p (direct_optab optab, tree_pair types,
2301 optimization_type opt_type)
2302 {
2303 machine_mode mode = TYPE_MODE (types.first);
2304 gcc_checking_assert (mode == TYPE_MODE (types.second));
2305 return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
2306 }
2307
2308 /* Return true if load/store lanes optab OPTAB is supported for
2309 array type TYPES.first when the optimization type is OPT_TYPE. */
2310
2311 static bool
2312 multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
2313 optimization_type opt_type)
2314 {
2315 gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
2316 machine_mode imode = TYPE_MODE (types.first);
2317 machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
2318 return (convert_optab_handler (optab, imode, vmode, opt_type)
2319 != CODE_FOR_nothing);
2320 }
2321
2322 #define direct_unary_optab_supported_p direct_optab_supported_p
2323 #define direct_binary_optab_supported_p direct_optab_supported_p
2324 #define direct_mask_load_optab_supported_p direct_optab_supported_p
2325 #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
2326 #define direct_mask_store_optab_supported_p direct_optab_supported_p
2327 #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
2328
2329 /* Return true if FN is supported for the types in TYPES when the
2330 optimization type is OPT_TYPE. The types are those associated with
2331 the "type0" and "type1" fields of FN's direct_internal_fn_info
2332 structure. */
2333
2334 bool
2335 direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
2336 optimization_type opt_type)
2337 {
2338 switch (fn)
2339 {
2340 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
2341 case IFN_##CODE: break;
2342 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
2343 case IFN_##CODE: \
2344 return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
2345 opt_type);
2346 #include "internal-fn.def"
2347
2348 case IFN_LAST:
2349 break;
2350 }
2351 gcc_unreachable ();
2352 }
2353
2354 /* Return true if FN is supported for type TYPE when the optimization
2355 type is OPT_TYPE. The caller knows that the "type0" and "type1"
2356 fields of FN's direct_internal_fn_info structure are the same. */
2357
2358 bool
2359 direct_internal_fn_supported_p (internal_fn fn, tree type,
2360 optimization_type opt_type)
2361 {
2362 const direct_internal_fn_info &info = direct_internal_fn (fn);
2363 gcc_checking_assert (info.type0 == info.type1);
2364 return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
2365 }
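/* Illustrative use (a sketch, not from the original source): a pass
   can test

     direct_internal_fn_supported_p (IFN_SQRT, type, OPTIMIZE_FOR_SPEED)

   before rewriting a sqrt builtin call into the internal function, so
   that the IFN is generated only when the target has a handler for the
   underlying optab.  */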
2366
2367 /* Return true if IFN_SET_EDOM is supported. */
2368
2369 bool
2370 set_edom_supported_p (void)
2371 {
2372 #ifdef TARGET_EDOM
2373 return true;
2374 #else
2375 return false;
2376 #endif
2377 }
2378
2379 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
2380 static void \
2381 expand_##CODE (internal_fn fn, gcall *stmt) \
2382 { \
2383 expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
2384 }
2385 #include "internal-fn.def"
2386
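/* Illustration of the macro above (hypothetical entry, not necessarily
   in internal-fn.def): a definition like

     DEF_INTERNAL_OPTAB_FN (RSQRT, ECF_CONST, rsqrt, unary)

   generates an expand_RSQRT that calls
   expand_unary_optab_fn (IFN_RSQRT, stmt, rsqrt_optab), i.e.
   expand_direct_optab_fn with a single input operand.  */
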
2387 /* Routines to expand each internal function, indexed by function number.
2388 Each routine has the prototype:
2389
2390 expand_<NAME> (internal_fn fn, gcall *stmt)
2391
2392 where FN is the function being expanded and STMT is the call statement. */
2393 static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
2394 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
2395 #include "internal-fn.def"
2396 0
2397 };
2398
2399 /* Expand STMT as though it were a call to internal function FN. */
2400
2401 void
2402 expand_internal_call (internal_fn fn, gcall *stmt)
2403 {
2404 internal_fn_expanders[fn] (fn, stmt);
2405 }
2406
2407 /* Expand STMT, which is a call to internal function FN. */
2408
2409 void
2410 expand_internal_call (gcall *stmt)
2411 {
2412 expand_internal_call (gimple_call_internal_fn (stmt), stmt);
2413 }
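
/* Note (a sketch of the call path, for orientation): during RTL
   expansion, cfgexpand.c routes GIMPLE calls for which
   gimple_call_internal_p is true to expand_internal_call above rather
   than emitting a normal call.  */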