1 /* Internal functions.
2 Copyright (C) 2011-2021 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "stringpool.h"
30 #include "tree-vrp.h"
31 #include "tree-ssanames.h"
32 #include "expmed.h"
33 #include "memmodel.h"
34 #include "optabs.h"
35 #include "emit-rtl.h"
36 #include "diagnostic-core.h"
37 #include "fold-const.h"
38 #include "internal-fn.h"
39 #include "stor-layout.h"
40 #include "dojump.h"
41 #include "expr.h"
42 #include "stringpool.h"
43 #include "attribs.h"
44 #include "asan.h"
45 #include "ubsan.h"
46 #include "recog.h"
47 #include "builtins.h"
48 #include "optabs-tree.h"
49 #include "gimple-ssa.h"
50 #include "tree-phinodes.h"
51 #include "ssa-iterators.h"
52 #include "explow.h"
53 #include "rtl-iter.h"
54
55 /* The names of each internal function, indexed by function number. */
56 const char *const internal_fn_name_array[] = {
57 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
58 #include "internal-fn.def"
59 "<invalid-fn>"
60 };
61
62 /* The ECF_* flags of each internal function, indexed by function number. */
63 const int internal_fn_flags_array[] = {
64 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
65 #include "internal-fn.def"
66 0
67 };
68
69 /* Return the internal function called NAME, or IFN_LAST if there's
70 no such function. */
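   For example, lookup_internal_fn ("GOMP_SIMT_LANE") yields
   IFN_GOMP_SIMT_LANE, whereas a string that names no internal
   function yields IFN_LAST.  */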
71
72 internal_fn
73 lookup_internal_fn (const char *name)
74 {
75 typedef hash_map<nofree_string_hash, internal_fn> name_to_fn_map_type;
76 static name_to_fn_map_type *name_to_fn_map;
77
78 if (!name_to_fn_map)
79 {
80 name_to_fn_map = new name_to_fn_map_type (IFN_LAST);
81 for (unsigned int i = 0; i < IFN_LAST; ++i)
82 name_to_fn_map->put (internal_fn_name (internal_fn (i)),
83 internal_fn (i));
84 }
85 internal_fn *entry = name_to_fn_map->get (name);
86 return entry ? *entry : IFN_LAST;
87 }
88
89 /* Fnspec of each internal function, indexed by function number. */
90 const_tree internal_fn_fnspec_array[IFN_LAST + 1];
91
92 void
93 init_internal_fns ()
94 {
95 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
96 if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
97 build_string ((int) sizeof (FNSPEC) - 1, FNSPEC ? FNSPEC : "");
98 #include "internal-fn.def"
99 internal_fn_fnspec_array[IFN_LAST] = 0;
100 }
101
102 /* Create static initializers for the information returned by
103 direct_internal_fn. */
104 #define not_direct { -2, -2, false }
105 #define mask_load_direct { -1, 2, false }
106 #define load_lanes_direct { -1, -1, false }
107 #define mask_load_lanes_direct { -1, -1, false }
108 #define gather_load_direct { 3, 1, false }
109 #define len_load_direct { -1, -1, false }
110 #define mask_store_direct { 3, 2, false }
111 #define store_lanes_direct { 0, 0, false }
112 #define mask_store_lanes_direct { 0, 0, false }
113 #define vec_cond_mask_direct { 1, 0, false }
114 #define vec_cond_direct { 2, 0, false }
115 #define scatter_store_direct { 3, 1, false }
116 #define len_store_direct { 3, 3, false }
117 #define vec_set_direct { 3, 3, false }
118 #define unary_direct { 0, 0, true }
119 #define binary_direct { 0, 0, true }
120 #define ternary_direct { 0, 0, true }
121 #define cond_unary_direct { 1, 1, true }
122 #define cond_binary_direct { 1, 1, true }
123 #define cond_ternary_direct { 1, 1, true }
124 #define while_direct { 0, 2, false }
125 #define fold_extract_direct { 2, 2, false }
126 #define fold_left_direct { 1, 1, false }
127 #define mask_fold_left_direct { 1, 1, false }
128 #define check_ptrs_direct { 0, 0, false }
129
130 const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
131 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
132 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
133 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
134 UNSIGNED_OPTAB, TYPE) TYPE##_direct,
135 #include "internal-fn.def"
136 not_direct
137 };
138
139 /* ARRAY_TYPE is an array of vector modes. Return the associated insn
140 for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none. */
141
142 static enum insn_code
143 get_multi_vector_move (tree array_type, convert_optab optab)
144 {
145 machine_mode imode;
146 machine_mode vmode;
147
148 gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
149 imode = TYPE_MODE (array_type);
150 vmode = TYPE_MODE (TREE_TYPE (array_type));
151
152 return convert_optab_handler (optab, imode, vmode);
153 }
154
155 /* Expand LOAD_LANES call STMT using optab OPTAB. */
156
157 static void
158 expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
159 {
160 class expand_operand ops[2];
161 tree type, lhs, rhs;
162 rtx target, mem;
163
164 lhs = gimple_call_lhs (stmt);
165 rhs = gimple_call_arg (stmt, 0);
166 type = TREE_TYPE (lhs);
167
168 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
169 mem = expand_normal (rhs);
170
171 gcc_assert (MEM_P (mem));
172 PUT_MODE (mem, TYPE_MODE (type));
173
174 create_output_operand (&ops[0], target, TYPE_MODE (type));
175 create_fixed_operand (&ops[1], mem);
176 expand_insn (get_multi_vector_move (type, optab), 2, ops);
177 if (!rtx_equal_p (target, ops[0].value))
178 emit_move_insn (target, ops[0].value);
179 }
180
181 /* Expand STORE_LANES call STMT using optab OPTAB. */
182
183 static void
184 expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
185 {
186 class expand_operand ops[2];
187 tree type, lhs, rhs;
188 rtx target, reg;
189
190 lhs = gimple_call_lhs (stmt);
191 rhs = gimple_call_arg (stmt, 0);
192 type = TREE_TYPE (rhs);
193
194 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
195 reg = expand_normal (rhs);
196
197 gcc_assert (MEM_P (target));
198 PUT_MODE (target, TYPE_MODE (type));
199
200 create_fixed_operand (&ops[0], target);
201 create_input_operand (&ops[1], reg, TYPE_MODE (type));
202 expand_insn (get_multi_vector_move (type, optab), 2, ops);
203 }
204
205 static void
206 expand_ANNOTATE (internal_fn, gcall *)
207 {
208 gcc_unreachable ();
209 }
210
 211 /* This should get expanded in the omp_device_lower pass.  */
212
213 static void
214 expand_GOMP_USE_SIMT (internal_fn, gcall *)
215 {
216 gcc_unreachable ();
217 }
218
 219 /* This should get expanded in the omp_device_lower pass.  */
220
221 static void
222 expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
223 {
224 gcc_unreachable ();
225 }
226
227 /* Allocate per-lane storage and begin non-uniform execution region. */
228
229 static void
230 expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
231 {
232 rtx target;
233 tree lhs = gimple_call_lhs (stmt);
234 if (lhs)
235 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
236 else
237 target = gen_reg_rtx (Pmode);
238 rtx size = expand_normal (gimple_call_arg (stmt, 0));
239 rtx align = expand_normal (gimple_call_arg (stmt, 1));
240 class expand_operand ops[3];
241 create_output_operand (&ops[0], target, Pmode);
242 create_input_operand (&ops[1], size, Pmode);
243 create_input_operand (&ops[2], align, Pmode);
244 gcc_assert (targetm.have_omp_simt_enter ());
245 expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
246 if (!rtx_equal_p (target, ops[0].value))
247 emit_move_insn (target, ops[0].value);
248 }
249
250 /* Deallocate per-lane storage and leave non-uniform execution region. */
251
252 static void
253 expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
254 {
255 gcc_checking_assert (!gimple_call_lhs (stmt));
256 rtx arg = expand_normal (gimple_call_arg (stmt, 0));
257 class expand_operand ops[1];
258 create_input_operand (&ops[0], arg, Pmode);
259 gcc_assert (targetm.have_omp_simt_exit ());
260 expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
261 }
262
263 /* Lane index on SIMT targets: thread index in the warp on NVPTX. On targets
 264    without SIMT execution this should be expanded in the omp_device_lower pass.  */
265
266 static void
267 expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
268 {
269 tree lhs = gimple_call_lhs (stmt);
270 if (!lhs)
271 return;
272
273 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
274 gcc_assert (targetm.have_omp_simt_lane ());
275 emit_insn (targetm.gen_omp_simt_lane (target));
276 }
277
 278 /* This should get expanded in the omp_device_lower pass.  */
279
280 static void
281 expand_GOMP_SIMT_VF (internal_fn, gcall *)
282 {
283 gcc_unreachable ();
284 }
285
286 /* Lane index of the first SIMT lane that supplies a non-zero argument.
287 This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
288 lane that executed the last iteration for handling OpenMP lastprivate. */
289
290 static void
291 expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
292 {
293 tree lhs = gimple_call_lhs (stmt);
294 if (!lhs)
295 return;
296
297 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
298 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
299 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
300 class expand_operand ops[2];
301 create_output_operand (&ops[0], target, mode);
302 create_input_operand (&ops[1], cond, mode);
303 gcc_assert (targetm.have_omp_simt_last_lane ());
304 expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
305 if (!rtx_equal_p (target, ops[0].value))
306 emit_move_insn (target, ops[0].value);
307 }
308
309 /* Non-transparent predicate used in SIMT lowering of OpenMP "ordered". */
310
311 static void
312 expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
313 {
314 tree lhs = gimple_call_lhs (stmt);
315 if (!lhs)
316 return;
317
318 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
319 rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
320 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
321 class expand_operand ops[2];
322 create_output_operand (&ops[0], target, mode);
323 create_input_operand (&ops[1], ctr, mode);
324 gcc_assert (targetm.have_omp_simt_ordered ());
325 expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
326 if (!rtx_equal_p (target, ops[0].value))
327 emit_move_insn (target, ops[0].value);
328 }
329
330 /* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
331 any lane supplies a non-zero argument. */
332
333 static void
334 expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
335 {
336 tree lhs = gimple_call_lhs (stmt);
337 if (!lhs)
338 return;
339
340 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
341 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
342 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
343 class expand_operand ops[2];
344 create_output_operand (&ops[0], target, mode);
345 create_input_operand (&ops[1], cond, mode);
346 gcc_assert (targetm.have_omp_simt_vote_any ());
347 expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
348 if (!rtx_equal_p (target, ops[0].value))
349 emit_move_insn (target, ops[0].value);
350 }
351
352 /* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
353 is destination lane index XOR given offset. */
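/* For instance, with offset 1 lanes 0/1, 2/3, ... swap values pairwise;
   with offset 2 lane 0 reads from lane 2, lane 1 from lane 3, lane 2
   from lane 0 and lane 3 from lane 1.  */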
354
355 static void
356 expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
357 {
358 tree lhs = gimple_call_lhs (stmt);
359 if (!lhs)
360 return;
361
362 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
363 rtx src = expand_normal (gimple_call_arg (stmt, 0));
364 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
365 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
366 class expand_operand ops[3];
367 create_output_operand (&ops[0], target, mode);
368 create_input_operand (&ops[1], src, mode);
369 create_input_operand (&ops[2], idx, SImode);
370 gcc_assert (targetm.have_omp_simt_xchg_bfly ());
371 expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
372 if (!rtx_equal_p (target, ops[0].value))
373 emit_move_insn (target, ops[0].value);
374 }
375
376 /* Exchange between SIMT lanes according to given source lane index. */
377
378 static void
379 expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
380 {
381 tree lhs = gimple_call_lhs (stmt);
382 if (!lhs)
383 return;
384
385 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
386 rtx src = expand_normal (gimple_call_arg (stmt, 0));
387 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
388 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
389 class expand_operand ops[3];
390 create_output_operand (&ops[0], target, mode);
391 create_input_operand (&ops[1], src, mode);
392 create_input_operand (&ops[2], idx, SImode);
393 gcc_assert (targetm.have_omp_simt_xchg_idx ());
394 expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
395 if (!rtx_equal_p (target, ops[0].value))
396 emit_move_insn (target, ops[0].value);
397 }
398
399 /* This should get expanded in adjust_simduid_builtins. */
400
401 static void
402 expand_GOMP_SIMD_LANE (internal_fn, gcall *)
403 {
404 gcc_unreachable ();
405 }
406
407 /* This should get expanded in adjust_simduid_builtins. */
408
409 static void
410 expand_GOMP_SIMD_VF (internal_fn, gcall *)
411 {
412 gcc_unreachable ();
413 }
414
415 /* This should get expanded in adjust_simduid_builtins. */
416
417 static void
418 expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
419 {
420 gcc_unreachable ();
421 }
422
423 /* This should get expanded in adjust_simduid_builtins. */
424
425 static void
426 expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
427 {
428 gcc_unreachable ();
429 }
430
431 /* This should get expanded in adjust_simduid_builtins. */
432
433 static void
434 expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
435 {
436 gcc_unreachable ();
437 }
438
439 /* This should get expanded in the sanopt pass. */
440
441 static void
442 expand_UBSAN_NULL (internal_fn, gcall *)
443 {
444 gcc_unreachable ();
445 }
446
447 /* This should get expanded in the sanopt pass. */
448
449 static void
450 expand_UBSAN_BOUNDS (internal_fn, gcall *)
451 {
452 gcc_unreachable ();
453 }
454
455 /* This should get expanded in the sanopt pass. */
456
457 static void
458 expand_UBSAN_VPTR (internal_fn, gcall *)
459 {
460 gcc_unreachable ();
461 }
462
463 /* This should get expanded in the sanopt pass. */
464
465 static void
466 expand_UBSAN_PTR (internal_fn, gcall *)
467 {
468 gcc_unreachable ();
469 }
470
471 /* This should get expanded in the sanopt pass. */
472
473 static void
474 expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
475 {
476 gcc_unreachable ();
477 }
478
479 /* This should get expanded in the sanopt pass. */
480
481 static void
482 expand_HWASAN_CHECK (internal_fn, gcall *)
483 {
484 gcc_unreachable ();
485 }
486
487 /* For hwasan stack tagging:
488 Clear tags on the dynamically allocated space.
489 For use after an object dynamically allocated on the stack goes out of
490 scope. */
491 static void
492 expand_HWASAN_ALLOCA_UNPOISON (internal_fn, gcall *gc)
493 {
494 gcc_assert (Pmode == ptr_mode);
495 tree restored_position = gimple_call_arg (gc, 0);
496 rtx restored_rtx = expand_expr (restored_position, NULL_RTX, VOIDmode,
497 EXPAND_NORMAL);
498 rtx func = init_one_libfunc ("__hwasan_tag_memory");
499 rtx off = expand_simple_binop (Pmode, MINUS, restored_rtx,
500 stack_pointer_rtx, NULL_RTX, 0,
501 OPTAB_WIDEN);
502 emit_library_call_value (func, NULL_RTX, LCT_NORMAL, VOIDmode,
503 virtual_stack_dynamic_rtx, Pmode,
504 HWASAN_STACK_BACKGROUND, QImode,
505 off, Pmode);
506 }
507
508 /* For hwasan stack tagging:
509 Return a tag to be used for a dynamic allocation. */
510 static void
511 expand_HWASAN_CHOOSE_TAG (internal_fn, gcall *gc)
512 {
513 tree tag = gimple_call_lhs (gc);
514 rtx target = expand_expr (tag, NULL_RTX, VOIDmode, EXPAND_NORMAL);
515 machine_mode mode = GET_MODE (target);
516 gcc_assert (mode == QImode);
517
518 rtx base_tag = targetm.memtag.extract_tag (hwasan_frame_base (), NULL_RTX);
519 gcc_assert (base_tag);
520 rtx tag_offset = gen_int_mode (hwasan_current_frame_tag (), QImode);
521 rtx chosen_tag = expand_simple_binop (QImode, PLUS, base_tag, tag_offset,
522 target, /* unsignedp = */1,
523 OPTAB_WIDEN);
524 chosen_tag = hwasan_truncate_to_tag_size (chosen_tag, target);
525
526 /* Really need to put the tag into the `target` RTX. */
527 if (chosen_tag != target)
528 {
529 rtx temp = chosen_tag;
530 gcc_assert (GET_MODE (chosen_tag) == mode);
531 emit_move_insn (target, temp);
532 }
533
534 hwasan_increment_frame_tag ();
535 }
536
537 /* For hwasan stack tagging:
538 Tag a region of space in the shadow stack according to the base pointer of
539 an object on the stack. N.b. the length provided in the internal call is
540 required to be aligned to HWASAN_TAG_GRANULE_SIZE. */
541 static void
542 expand_HWASAN_MARK (internal_fn, gcall *gc)
543 {
544 gcc_assert (ptr_mode == Pmode);
545 HOST_WIDE_INT flag = tree_to_shwi (gimple_call_arg (gc, 0));
546 bool is_poison = ((asan_mark_flags)flag) == ASAN_MARK_POISON;
547
548 tree base = gimple_call_arg (gc, 1);
549 gcc_checking_assert (TREE_CODE (base) == ADDR_EXPR);
550 rtx base_rtx = expand_normal (base);
551
552 rtx tag = is_poison ? HWASAN_STACK_BACKGROUND
553 : targetm.memtag.extract_tag (base_rtx, NULL_RTX);
554 rtx address = targetm.memtag.untagged_pointer (base_rtx, NULL_RTX);
555
556 tree len = gimple_call_arg (gc, 2);
557 rtx r_len = expand_normal (len);
558
559 rtx func = init_one_libfunc ("__hwasan_tag_memory");
560 emit_library_call (func, LCT_NORMAL, VOIDmode, address, Pmode,
561 tag, QImode, r_len, Pmode);
562 }
563
564 /* For hwasan stack tagging:
565 Store a tag into a pointer. */
566 static void
567 expand_HWASAN_SET_TAG (internal_fn, gcall *gc)
568 {
569 gcc_assert (ptr_mode == Pmode);
570 tree g_target = gimple_call_lhs (gc);
571 tree g_ptr = gimple_call_arg (gc, 0);
572 tree g_tag = gimple_call_arg (gc, 1);
573
574 rtx ptr = expand_normal (g_ptr);
575 rtx tag = expand_expr (g_tag, NULL_RTX, QImode, EXPAND_NORMAL);
576 rtx target = expand_normal (g_target);
577
578 rtx untagged = targetm.memtag.untagged_pointer (ptr, target);
579 rtx tagged_value = targetm.memtag.set_tag (untagged, tag, target);
580 if (tagged_value != target)
581 emit_move_insn (target, tagged_value);
582 }
583
584 /* This should get expanded in the sanopt pass. */
585
586 static void
587 expand_ASAN_CHECK (internal_fn, gcall *)
588 {
589 gcc_unreachable ();
590 }
591
592 /* This should get expanded in the sanopt pass. */
593
594 static void
595 expand_ASAN_MARK (internal_fn, gcall *)
596 {
597 gcc_unreachable ();
598 }
599
600 /* This should get expanded in the sanopt pass. */
601
602 static void
603 expand_ASAN_POISON (internal_fn, gcall *)
604 {
605 gcc_unreachable ();
606 }
607
608 /* This should get expanded in the sanopt pass. */
609
610 static void
611 expand_ASAN_POISON_USE (internal_fn, gcall *)
612 {
613 gcc_unreachable ();
614 }
615
616 /* This should get expanded in the tsan pass. */
617
618 static void
619 expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
620 {
621 gcc_unreachable ();
622 }
623
624 /* This should get expanded in the lower pass. */
625
626 static void
627 expand_FALLTHROUGH (internal_fn, gcall *call)
628 {
629 error_at (gimple_location (call),
630 "invalid use of attribute %<fallthrough%>");
631 }
632
633 /* Return minimum precision needed to represent all values
634 of ARG in SIGNed integral type. */
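/* For instance, for the INTEGER_CST 1000 this returns 10 when SIGN is
   UNSIGNED and 11 when SIGN is SIGNED, since 1000 needs 10 value bits
   plus a sign bit in the signed case.  */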
635
636 static int
637 get_min_precision (tree arg, signop sign)
638 {
639 int prec = TYPE_PRECISION (TREE_TYPE (arg));
640 int cnt = 0;
641 signop orig_sign = sign;
642 if (TREE_CODE (arg) == INTEGER_CST)
643 {
644 int p;
645 if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
646 {
647 widest_int w = wi::to_widest (arg);
648 w = wi::ext (w, prec, sign);
649 p = wi::min_precision (w, sign);
650 }
651 else
652 p = wi::min_precision (wi::to_wide (arg), sign);
653 return MIN (p, prec);
654 }
655 while (CONVERT_EXPR_P (arg)
656 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
657 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
658 {
659 arg = TREE_OPERAND (arg, 0);
660 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
661 {
662 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
663 sign = UNSIGNED;
664 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
665 return prec + (orig_sign != sign);
666 prec = TYPE_PRECISION (TREE_TYPE (arg));
667 }
668 if (++cnt > 30)
669 return prec + (orig_sign != sign);
670 }
671 if (CONVERT_EXPR_P (arg)
672 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
673 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) > prec)
674 {
675 /* We have e.g. (unsigned short) y_2 where int y_2 = (int) x_1(D);
676 If y_2's min precision is smaller than prec, return that. */
677 int oprec = get_min_precision (TREE_OPERAND (arg, 0), sign);
678 if (oprec < prec)
679 return oprec + (orig_sign != sign);
680 }
681 if (TREE_CODE (arg) != SSA_NAME)
682 return prec + (orig_sign != sign);
683 wide_int arg_min, arg_max;
684 while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
685 {
686 gimple *g = SSA_NAME_DEF_STMT (arg);
687 if (is_gimple_assign (g)
688 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
689 {
690 tree t = gimple_assign_rhs1 (g);
691 if (INTEGRAL_TYPE_P (TREE_TYPE (t))
692 && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
693 {
694 arg = t;
695 if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
696 {
697 if (TYPE_UNSIGNED (TREE_TYPE (arg)))
698 sign = UNSIGNED;
699 else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
700 return prec + (orig_sign != sign);
701 prec = TYPE_PRECISION (TREE_TYPE (arg));
702 }
703 if (++cnt > 30)
704 return prec + (orig_sign != sign);
705 continue;
706 }
707 }
708 return prec + (orig_sign != sign);
709 }
710 if (sign == TYPE_SIGN (TREE_TYPE (arg)))
711 {
712 int p1 = wi::min_precision (arg_min, sign);
713 int p2 = wi::min_precision (arg_max, sign);
714 p1 = MAX (p1, p2);
715 prec = MIN (prec, p1);
716 }
717 else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
718 {
719 int p = wi::min_precision (arg_max, UNSIGNED);
720 prec = MIN (prec, p);
721 }
722 return prec + (orig_sign != sign);
723 }
724
725 /* Helper for expand_*_overflow. Set the __imag__ part to true
726 (1 except for signed:1 type, in which case store -1). */
727
728 static void
729 expand_arith_set_overflow (tree lhs, rtx target)
730 {
731 if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
732 && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
733 write_complex_part (target, constm1_rtx, true);
734 else
735 write_complex_part (target, const1_rtx, true);
736 }
737
738 /* Helper for expand_*_overflow. Store RES into the __real__ part
739 of TARGET. If RES has larger MODE than __real__ part of TARGET,
740 set the __imag__ part to 1 if RES doesn't fit into it. Similarly
741 if LHS has smaller precision than its mode. */
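/* For instance, if LHS is a complex of signed char but RES was computed
   as 300 in SImode, truncating to QImode gives 44; since 44 != 300 the
   __imag__ (overflow) part is set before the truncated value is
   stored.  */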
742
743 static void
744 expand_arith_overflow_result_store (tree lhs, rtx target,
745 scalar_int_mode mode, rtx res)
746 {
747 scalar_int_mode tgtmode
748 = as_a <scalar_int_mode> (GET_MODE_INNER (GET_MODE (target)));
749 rtx lres = res;
750 if (tgtmode != mode)
751 {
752 rtx_code_label *done_label = gen_label_rtx ();
753 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
754 lres = convert_modes (tgtmode, mode, res, uns);
755 gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
756 do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
757 EQ, true, mode, NULL_RTX, NULL, done_label,
758 profile_probability::very_likely ());
759 expand_arith_set_overflow (lhs, target);
760 emit_label (done_label);
761 }
762 int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
763 int tgtprec = GET_MODE_PRECISION (tgtmode);
764 if (prec < tgtprec)
765 {
766 rtx_code_label *done_label = gen_label_rtx ();
767 int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
768 res = lres;
769 if (uns)
770 {
771 rtx mask
772 = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
773 tgtmode);
774 lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
775 true, OPTAB_LIB_WIDEN);
776 }
777 else
778 {
779 lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
780 NULL_RTX, 1);
781 lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
782 NULL_RTX, 0);
783 }
784 do_compare_rtx_and_jump (res, lres,
785 EQ, true, tgtmode, NULL_RTX, NULL, done_label,
786 profile_probability::very_likely ());
787 expand_arith_set_overflow (lhs, target);
788 emit_label (done_label);
789 }
790 write_complex_part (target, lres, false);
791 }
792
793 /* Helper for expand_*_overflow. Store RES into TARGET. */
794
795 static void
796 expand_ubsan_result_store (rtx target, rtx res)
797 {
798 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
799 /* If this is a scalar in a register that is stored in a wider mode
800 than the declared mode, compute the result into its declared mode
801 and then convert to the wider mode. Our value is the computed
802 expression. */
803 convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
804 else
805 emit_move_insn (target, res);
806 }
807
808 /* Add sub/add overflow checking to the statement STMT.
809 CODE says whether the operation is +, or -. */
810
811 void
812 expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
813 tree arg0, tree arg1, bool unsr_p, bool uns0_p,
814 bool uns1_p, bool is_ubsan, tree *datap)
815 {
816 rtx res, target = NULL_RTX;
817 tree fn;
818 rtx_code_label *done_label = gen_label_rtx ();
819 rtx_code_label *do_error = gen_label_rtx ();
820 do_pending_stack_adjust ();
821 rtx op0 = expand_normal (arg0);
822 rtx op1 = expand_normal (arg1);
823 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
824 int prec = GET_MODE_PRECISION (mode);
825 rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
826 bool do_xor = false;
827
828 if (is_ubsan)
829 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
830
831 if (lhs)
832 {
833 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
834 if (!is_ubsan)
835 write_complex_part (target, const0_rtx, true);
836 }
837
838 /* We assume both operands and result have the same precision
839 here (GET_MODE_BITSIZE (mode)), S stands for signed type
840 with that precision, U for unsigned type with that precision,
841 sgn for unsigned most significant bit in that precision.
842 s1 is signed first operand, u1 is unsigned first operand,
843 s2 is signed second operand, u2 is unsigned second operand,
844 sr is signed result, ur is unsigned result and the following
845 rules say how to compute result (which is always result of
846 the operands as if both were unsigned, cast to the right
847 signedness) and how to compute whether operation overflowed.
848
849 s1 + s2 -> sr
850 res = (S) ((U) s1 + (U) s2)
851 ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
852 s1 - s2 -> sr
853 res = (S) ((U) s1 - (U) s2)
 854      ovf = s2 < 0 ? res < s1 : res > s1 (or jump on overflow)
855 u1 + u2 -> ur
856 res = u1 + u2
857 ovf = res < u1 (or jump on carry, but RTL opts will handle it)
858 u1 - u2 -> ur
859 res = u1 - u2
860 ovf = res > u1 (or jump on carry, but RTL opts will handle it)
861 s1 + u2 -> sr
862 res = (S) ((U) s1 + u2)
863 ovf = ((U) res ^ sgn) < u2
864 s1 + u2 -> ur
865 t1 = (S) (u2 ^ sgn)
866 t2 = s1 + t1
867 res = (U) t2 ^ sgn
868 ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
869 s1 - u2 -> sr
870 res = (S) ((U) s1 - u2)
871 ovf = u2 > ((U) s1 ^ sgn)
872 s1 - u2 -> ur
873 res = (U) s1 - u2
874 ovf = s1 < 0 || u2 > (U) s1
875 u1 - s2 -> sr
876 res = u1 - (U) s2
877 ovf = u1 >= ((U) s2 ^ sgn)
878 u1 - s2 -> ur
879 t1 = u1 ^ sgn
880 t2 = t1 - (U) s2
881 res = t2 ^ sgn
882 ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
883 s1 + s2 -> ur
884 res = (U) s1 + (U) s2
 885      ovf = s2 < 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0)
886 u1 + u2 -> sr
887 res = (S) (u1 + u2)
888 ovf = (U) res < u2 || res < 0
889 u1 - u2 -> sr
890 res = (S) (u1 - u2)
891 ovf = u1 >= u2 ? res < 0 : res >= 0
892 s1 - s2 -> ur
893 res = (U) s1 - (U) s2
894 ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
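  /* As a concrete 8-bit illustration of the rules above: for
     u1 + u2 -> ur with u1 = 200, u2 = 100 the sum wraps to 44 and
     44 < 200, so "res < u1" reports the overflow; for s1 + s2 -> sr
     with s1 = 100, s2 = 50 the result wraps to -106, and since s2 >= 0
     and res < s1, overflow is reported there as well.  */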
895
896 if (code == PLUS_EXPR && uns0_p && !uns1_p)
897 {
898 /* PLUS_EXPR is commutative, if operand signedness differs,
899 canonicalize to the first operand being signed and second
900 unsigned to simplify following code. */
901 std::swap (op0, op1);
902 std::swap (arg0, arg1);
903 uns0_p = false;
904 uns1_p = true;
905 }
906
907 /* u1 +- u2 -> ur */
908 if (uns0_p && uns1_p && unsr_p)
909 {
910 insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
911 : usubv4_optab, mode);
912 if (icode != CODE_FOR_nothing)
913 {
914 class expand_operand ops[4];
915 rtx_insn *last = get_last_insn ();
916
917 res = gen_reg_rtx (mode);
918 create_output_operand (&ops[0], res, mode);
919 create_input_operand (&ops[1], op0, mode);
920 create_input_operand (&ops[2], op1, mode);
921 create_fixed_operand (&ops[3], do_error);
922 if (maybe_expand_insn (icode, 4, ops))
923 {
924 last = get_last_insn ();
925 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
926 && JUMP_P (last)
927 && any_condjump_p (last)
928 && !find_reg_note (last, REG_BR_PROB, 0))
929 add_reg_br_prob_note (last,
930 profile_probability::very_unlikely ());
931 emit_jump (done_label);
932 goto do_error_label;
933 }
934
935 delete_insns_since (last);
936 }
937
938 /* Compute the operation. On RTL level, the addition is always
939 unsigned. */
940 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
941 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
942 rtx tem = op0;
943 /* For PLUS_EXPR, the operation is commutative, so we can pick
944 operand to compare against. For prec <= BITS_PER_WORD, I think
 945      preferring a REG operand over a CONST_INT is better, because
946 the CONST_INT might enlarge the instruction or CSE would need
947 to figure out we'd already loaded it into a register before.
948 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
949 as then the multi-word comparison can be perhaps simplified. */
950 if (code == PLUS_EXPR
951 && (prec <= BITS_PER_WORD
952 ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
953 : CONST_SCALAR_INT_P (op1)))
954 tem = op1;
955 do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
956 true, mode, NULL_RTX, NULL, done_label,
957 profile_probability::very_likely ());
958 goto do_error_label;
959 }
960
961 /* s1 +- u2 -> sr */
962 if (!uns0_p && uns1_p && !unsr_p)
963 {
964 /* Compute the operation. On RTL level, the addition is always
965 unsigned. */
966 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
967 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
968 rtx tem = expand_binop (mode, add_optab,
969 code == PLUS_EXPR ? res : op0, sgn,
970 NULL_RTX, false, OPTAB_LIB_WIDEN);
971 do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
972 done_label, profile_probability::very_likely ());
973 goto do_error_label;
974 }
975
976 /* s1 + u2 -> ur */
977 if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
978 {
979 op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
980 OPTAB_LIB_WIDEN);
981 /* As we've changed op1, we have to avoid using the value range
982 for the original argument. */
983 arg1 = error_mark_node;
984 do_xor = true;
985 goto do_signed;
986 }
987
988 /* u1 - s2 -> ur */
989 if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
990 {
991 op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
992 OPTAB_LIB_WIDEN);
993 /* As we've changed op0, we have to avoid using the value range
994 for the original argument. */
995 arg0 = error_mark_node;
996 do_xor = true;
997 goto do_signed;
998 }
999
1000 /* s1 - u2 -> ur */
1001 if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
1002 {
 1003       /* Compute the operation.  On RTL level, the subtraction is always
1004 unsigned. */
1005 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1006 OPTAB_LIB_WIDEN);
1007 int pos_neg = get_range_pos_neg (arg0);
1008 if (pos_neg == 2)
1009 /* If ARG0 is known to be always negative, this is always overflow. */
1010 emit_jump (do_error);
1011 else if (pos_neg == 3)
1012 /* If ARG0 is not known to be always positive, check at runtime. */
1013 do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
1014 NULL, do_error, profile_probability::very_unlikely ());
1015 do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
1016 done_label, profile_probability::very_likely ());
1017 goto do_error_label;
1018 }
1019
1020 /* u1 - s2 -> sr */
1021 if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
1022 {
 1023       /* Compute the operation.  On RTL level, the subtraction is always
1024 unsigned. */
1025 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1026 OPTAB_LIB_WIDEN);
1027 rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
1028 OPTAB_LIB_WIDEN);
1029 do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
1030 done_label, profile_probability::very_likely ());
1031 goto do_error_label;
1032 }
1033
1034 /* u1 + u2 -> sr */
1035 if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
1036 {
1037 /* Compute the operation. On RTL level, the addition is always
1038 unsigned. */
1039 res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
1040 OPTAB_LIB_WIDEN);
1041 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
1042 NULL, do_error, profile_probability::very_unlikely ());
1043 rtx tem = op1;
1044 /* The operation is commutative, so we can pick operand to compare
 1045         against.  For prec <= BITS_PER_WORD, I think preferring a REG operand
 1046         over a CONST_INT is better, because the CONST_INT might enlarge the
1047 instruction or CSE would need to figure out we'd already loaded it
1048 into a register before. For prec > BITS_PER_WORD, I think CONST_INT
1049 might be more beneficial, as then the multi-word comparison can be
1050 perhaps simplified. */
1051 if (prec <= BITS_PER_WORD
1052 ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
1053 : CONST_SCALAR_INT_P (op0))
1054 tem = op0;
1055 do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
1056 done_label, profile_probability::very_likely ());
1057 goto do_error_label;
1058 }
1059
1060 /* s1 +- s2 -> ur */
1061 if (!uns0_p && !uns1_p && unsr_p)
1062 {
1063 /* Compute the operation. On RTL level, the addition is always
1064 unsigned. */
1065 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1066 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1067 int pos_neg = get_range_pos_neg (arg1);
1068 if (code == PLUS_EXPR)
1069 {
1070 int pos_neg0 = get_range_pos_neg (arg0);
1071 if (pos_neg0 != 3 && pos_neg == 3)
1072 {
1073 std::swap (op0, op1);
1074 pos_neg = pos_neg0;
1075 }
1076 }
1077 rtx tem;
1078 if (pos_neg != 3)
1079 {
1080 tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
1081 ? and_optab : ior_optab,
1082 op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
1083 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
1084 NULL, done_label, profile_probability::very_likely ());
1085 }
1086 else
1087 {
1088 rtx_code_label *do_ior_label = gen_label_rtx ();
1089 do_compare_rtx_and_jump (op1, const0_rtx,
1090 code == MINUS_EXPR ? GE : LT, false, mode,
1091 NULL_RTX, NULL, do_ior_label,
1092 profile_probability::even ());
1093 tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
1094 OPTAB_LIB_WIDEN);
1095 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1096 NULL, done_label, profile_probability::very_likely ());
1097 emit_jump (do_error);
1098 emit_label (do_ior_label);
1099 tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
1100 OPTAB_LIB_WIDEN);
1101 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1102 NULL, done_label, profile_probability::very_likely ());
1103 }
1104 goto do_error_label;
1105 }
1106
1107 /* u1 - u2 -> sr */
1108 if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
1109 {
 1110       /* Compute the operation.  On RTL level, the subtraction is always
1111 unsigned. */
1112 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
1113 OPTAB_LIB_WIDEN);
1114 rtx_code_label *op0_geu_op1 = gen_label_rtx ();
1115 do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
1116 op0_geu_op1, profile_probability::even ());
1117 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
1118 NULL, done_label, profile_probability::very_likely ());
1119 emit_jump (do_error);
1120 emit_label (op0_geu_op1);
1121 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1122 NULL, done_label, profile_probability::very_likely ());
1123 goto do_error_label;
1124 }
1125
1126 gcc_assert (!uns0_p && !uns1_p && !unsr_p);
1127
1128 /* s1 +- s2 -> sr */
1129 do_signed:
1130 {
1131 insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
1132 : subv4_optab, mode);
1133 if (icode != CODE_FOR_nothing)
1134 {
1135 class expand_operand ops[4];
1136 rtx_insn *last = get_last_insn ();
1137
1138 res = gen_reg_rtx (mode);
1139 create_output_operand (&ops[0], res, mode);
1140 create_input_operand (&ops[1], op0, mode);
1141 create_input_operand (&ops[2], op1, mode);
1142 create_fixed_operand (&ops[3], do_error);
1143 if (maybe_expand_insn (icode, 4, ops))
1144 {
1145 last = get_last_insn ();
1146 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1147 && JUMP_P (last)
1148 && any_condjump_p (last)
1149 && !find_reg_note (last, REG_BR_PROB, 0))
1150 add_reg_br_prob_note (last,
1151 profile_probability::very_unlikely ());
1152 emit_jump (done_label);
1153 goto do_error_label;
1154 }
1155
1156 delete_insns_since (last);
1157 }
1158
1159 /* Compute the operation. On RTL level, the addition is always
1160 unsigned. */
1161 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
1162 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
1163
1164 /* If we can prove that one of the arguments (for MINUS_EXPR only
1165 the second operand, as subtraction is not commutative) is always
1166 non-negative or always negative, we can do just one comparison
1167 and conditional jump. */
1168 int pos_neg = get_range_pos_neg (arg1);
1169 if (code == PLUS_EXPR)
1170 {
1171 int pos_neg0 = get_range_pos_neg (arg0);
1172 if (pos_neg0 != 3 && pos_neg == 3)
1173 {
1174 std::swap (op0, op1);
1175 pos_neg = pos_neg0;
1176 }
1177 }
1178
1179 /* Addition overflows if and only if the two operands have the same sign,
1180 and the result has the opposite sign. Subtraction overflows if and
1181 only if the two operands have opposite sign, and the subtrahend has
1182 the same sign as the result. Here 0 is counted as positive. */
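    /* E.g. for 8-bit op0 = 100, op1 = 50 the sum wraps to -106:
       op0 ^ op1 has the sign bit clear (equal signs) while res ^ op1
       has it set, so (res ^ op1) & ~(op0 ^ op1) is negative and the
       branch to done_label below is not taken, i.e. overflow is
       signalled.  */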
1183 if (pos_neg == 3)
1184 {
1185 /* Compute op0 ^ op1 (operands have opposite sign). */
1186 rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1187 OPTAB_LIB_WIDEN);
1188
1189 /* Compute res ^ op1 (result and 2nd operand have opposite sign). */
1190 rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
1191 OPTAB_LIB_WIDEN);
1192
1193 rtx tem;
1194 if (code == PLUS_EXPR)
1195 {
1196 /* Compute (res ^ op1) & ~(op0 ^ op1). */
1197 tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
1198 tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
1199 OPTAB_LIB_WIDEN);
1200 }
1201 else
1202 {
1203 /* Compute (op0 ^ op1) & ~(res ^ op1). */
1204 tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
1205 tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
1206 OPTAB_LIB_WIDEN);
1207 }
1208
1209 /* No overflow if the result has bit sign cleared. */
1210 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1211 NULL, done_label, profile_probability::very_likely ());
1212 }
1213
1214 /* Compare the result of the operation with the first operand.
1215 No overflow for addition if second operand is positive and result
1216 is larger or second operand is negative and result is smaller.
1217 Likewise for subtraction with sign of second operand flipped. */
1218 else
1219 do_compare_rtx_and_jump (res, op0,
1220 (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
1221 false, mode, NULL_RTX, NULL, done_label,
1222 profile_probability::very_likely ());
1223 }
1224
1225 do_error_label:
1226 emit_label (do_error);
1227 if (is_ubsan)
1228 {
1229 /* Expand the ubsan builtin call. */
1230 push_temp_slots ();
1231 fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
1232 arg0, arg1, datap);
1233 expand_normal (fn);
1234 pop_temp_slots ();
1235 do_pending_stack_adjust ();
1236 }
1237 else if (lhs)
1238 expand_arith_set_overflow (lhs, target);
1239
1240 /* We're done. */
1241 emit_label (done_label);
1242
1243 if (lhs)
1244 {
1245 if (is_ubsan)
1246 expand_ubsan_result_store (target, res);
1247 else
1248 {
1249 if (do_xor)
1250 res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
1251 OPTAB_LIB_WIDEN);
1252
1253 expand_arith_overflow_result_store (lhs, target, mode, res);
1254 }
1255 }
1256 }
1257
1258 /* Add negate overflow checking to the statement STMT. */
1259
1260 static void
1261 expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
1262 tree *datap)
1263 {
1264 rtx res, op1;
1265 tree fn;
1266 rtx_code_label *done_label, *do_error;
1267 rtx target = NULL_RTX;
1268
1269 done_label = gen_label_rtx ();
1270 do_error = gen_label_rtx ();
1271
1272 do_pending_stack_adjust ();
1273 op1 = expand_normal (arg1);
1274
1275 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
1276 if (lhs)
1277 {
1278 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1279 if (!is_ubsan)
1280 write_complex_part (target, const0_rtx, true);
1281 }
1282
1283 enum insn_code icode = optab_handler (negv3_optab, mode);
1284 if (icode != CODE_FOR_nothing)
1285 {
1286 class expand_operand ops[3];
1287 rtx_insn *last = get_last_insn ();
1288
1289 res = gen_reg_rtx (mode);
1290 create_output_operand (&ops[0], res, mode);
1291 create_input_operand (&ops[1], op1, mode);
1292 create_fixed_operand (&ops[2], do_error);
1293 if (maybe_expand_insn (icode, 3, ops))
1294 {
1295 last = get_last_insn ();
1296 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1297 && JUMP_P (last)
1298 && any_condjump_p (last)
1299 && !find_reg_note (last, REG_BR_PROB, 0))
1300 add_reg_br_prob_note (last,
1301 profile_probability::very_unlikely ());
1302 emit_jump (done_label);
1303 }
1304 else
1305 {
1306 delete_insns_since (last);
1307 icode = CODE_FOR_nothing;
1308 }
1309 }
1310
1311 if (icode == CODE_FOR_nothing)
1312 {
 1313       /* Compute the operation.  On RTL level, the negation is always
1314 unsigned. */
1315 res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1316
1317 /* Compare the operand with the most negative value. */
1318 rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
1319 do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
1320 done_label, profile_probability::very_likely ());
1321 }
1322
1323 emit_label (do_error);
1324 if (is_ubsan)
1325 {
1326 /* Expand the ubsan builtin call. */
1327 push_temp_slots ();
1328 fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
1329 arg1, NULL_TREE, datap);
1330 expand_normal (fn);
1331 pop_temp_slots ();
1332 do_pending_stack_adjust ();
1333 }
1334 else if (lhs)
1335 expand_arith_set_overflow (lhs, target);
1336
1337 /* We're done. */
1338 emit_label (done_label);
1339
1340 if (lhs)
1341 {
1342 if (is_ubsan)
1343 expand_ubsan_result_store (target, res);
1344 else
1345 expand_arith_overflow_result_store (lhs, target, mode, res);
1346 }
1347 }
1348
1349 /* Return true if UNS WIDEN_MULT_EXPR with result mode WMODE and operand
1350 mode MODE can be expanded without using a libcall. */
1351
1352 static bool
1353 can_widen_mult_without_libcall (scalar_int_mode wmode, scalar_int_mode mode,
1354 rtx op0, rtx op1, bool uns)
1355 {
1356 if (find_widening_optab_handler (umul_widen_optab, wmode, mode)
1357 != CODE_FOR_nothing)
1358 return true;
1359
1360 if (find_widening_optab_handler (smul_widen_optab, wmode, mode)
1361 != CODE_FOR_nothing)
1362 return true;
1363
1364 rtx_insn *last = get_last_insn ();
1365 if (CONSTANT_P (op0))
1366 op0 = convert_modes (wmode, mode, op0, uns);
1367 else
1368 op0 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 1);
1369 if (CONSTANT_P (op1))
1370 op1 = convert_modes (wmode, mode, op1, uns);
1371 else
1372 op1 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 2);
1373 rtx ret = expand_mult (wmode, op0, op1, NULL_RTX, uns, true);
1374 delete_insns_since (last);
1375 return ret != NULL_RTX;
1376 }
1377
1378 /* Add mul overflow checking to the statement STMT. */
1379
1380 static void
1381 expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
1382 bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
1383 tree *datap)
1384 {
1385 rtx res, op0, op1;
1386 tree fn, type;
1387 rtx_code_label *done_label, *do_error;
1388 rtx target = NULL_RTX;
1389 signop sign;
1390 enum insn_code icode;
1391
1392 done_label = gen_label_rtx ();
1393 do_error = gen_label_rtx ();
1394
1395 do_pending_stack_adjust ();
1396 op0 = expand_normal (arg0);
1397 op1 = expand_normal (arg1);
1398
1399 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
1400 bool uns = unsr_p;
1401 if (lhs)
1402 {
1403 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1404 if (!is_ubsan)
1405 write_complex_part (target, const0_rtx, true);
1406 }
1407
1408 if (is_ubsan)
1409 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
1410
1411 /* We assume both operands and result have the same precision
1412 here (GET_MODE_BITSIZE (mode)), S stands for signed type
1413 with that precision, U for unsigned type with that precision,
1414 sgn for unsigned most significant bit in that precision.
1415 s1 is signed first operand, u1 is unsigned first operand,
1416 s2 is signed second operand, u2 is unsigned second operand,
1417 sr is signed result, ur is unsigned result and the following
1418 rules say how to compute result (which is always result of
1419 the operands as if both were unsigned, cast to the right
1420 signedness) and how to compute whether operation overflowed.
1421 main_ovf (false) stands for jump on signed multiplication
1422 overflow or the main algorithm with uns == false.
1423 main_ovf (true) stands for jump on unsigned multiplication
1424 overflow or the main algorithm with uns == true.
1425
1426 s1 * s2 -> sr
1427 res = (S) ((U) s1 * (U) s2)
1428 ovf = main_ovf (false)
1429 u1 * u2 -> ur
1430 res = u1 * u2
1431 ovf = main_ovf (true)
1432 s1 * u2 -> ur
1433 res = (U) s1 * u2
1434 ovf = (s1 < 0 && u2) || main_ovf (true)
1435 u1 * u2 -> sr
1436 res = (S) (u1 * u2)
1437 ovf = res < 0 || main_ovf (true)
1438 s1 * u2 -> sr
1439 res = (S) ((U) s1 * u2)
1440 ovf = (S) u2 >= 0 ? main_ovf (false)
1441 : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1442 s1 * s2 -> ur
1443 t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1444 t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1445 res = t1 * t2
1446 ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
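  /* As a concrete 8-bit illustration of the u1 * u2 -> sr rule above:
     u1 = 16 and u2 = 10 give res = (S) 160 = -96; the unsigned
     multiplication itself does not overflow, but res < 0, so the
     product is not representable in the signed result and overflow
     is reported.  */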
1447
1448 if (uns0_p && !uns1_p)
1449 {
1450 /* Multiplication is commutative, if operand signedness differs,
1451 canonicalize to the first operand being signed and second
1452 unsigned to simplify following code. */
1453 std::swap (op0, op1);
1454 std::swap (arg0, arg1);
1455 uns0_p = false;
1456 uns1_p = true;
1457 }
1458
1459 int pos_neg0 = get_range_pos_neg (arg0);
1460 int pos_neg1 = get_range_pos_neg (arg1);
1461
1462 /* s1 * u2 -> ur */
1463 if (!uns0_p && uns1_p && unsr_p)
1464 {
1465 switch (pos_neg0)
1466 {
1467 case 1:
1468 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1469 goto do_main;
1470 case 2:
1471 /* If s1 is negative, avoid the main code, just multiply and
1472 signal overflow if op1 is not 0. */
1473 struct separate_ops ops;
1474 ops.code = MULT_EXPR;
1475 ops.type = TREE_TYPE (arg1);
1476 ops.op0 = make_tree (ops.type, op0);
1477 ops.op1 = make_tree (ops.type, op1);
1478 ops.op2 = NULL_TREE;
1479 ops.location = loc;
1480 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1481 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1482 NULL, done_label, profile_probability::very_likely ());
1483 goto do_error_label;
1484 case 3:
1485 if (get_min_precision (arg1, UNSIGNED)
1486 + get_min_precision (arg0, SIGNED) <= GET_MODE_PRECISION (mode))
1487 {
1488 /* If the first operand is sign extended from narrower type, the
1489 second operand is zero extended from narrower type and
1490 the sum of the two precisions is smaller or equal to the
1491 result precision: if the first argument is at runtime
1492 non-negative, maximum result will be 0x7e81 or 0x7f..fe80..01
1493 and there will be no overflow, if the first argument is
1494 negative and the second argument zero, the result will be
1495 0 and there will be no overflow, if the first argument is
1496 negative and the second argument positive, the result when
1497 treated as signed will be negative (minimum -0x7f80 or
 1498             -0x7f..f80..0) and there will always be overflow.  So, do
1499 res = (U) (s1 * u2)
1500 ovf = (S) res < 0 */
1501 struct separate_ops ops;
1502 ops.code = MULT_EXPR;
1503 ops.type
1504 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1505 1);
1506 ops.op0 = make_tree (ops.type, op0);
1507 ops.op1 = make_tree (ops.type, op1);
1508 ops.op2 = NULL_TREE;
1509 ops.location = loc;
1510 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1511 do_compare_rtx_and_jump (res, const0_rtx, GE, false,
1512 mode, NULL_RTX, NULL, done_label,
1513 profile_probability::very_likely ());
1514 goto do_error_label;
1515 }
1516 rtx_code_label *do_main_label;
1517 do_main_label = gen_label_rtx ();
1518 do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1519 NULL, do_main_label, profile_probability::very_likely ());
1520 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1521 NULL, do_main_label, profile_probability::very_likely ());
1522 expand_arith_set_overflow (lhs, target);
1523 emit_label (do_main_label);
1524 goto do_main;
1525 default:
1526 gcc_unreachable ();
1527 }
1528 }
1529
1530 /* u1 * u2 -> sr */
1531 if (uns0_p && uns1_p && !unsr_p)
1532 {
1533 if ((pos_neg0 | pos_neg1) == 1)
1534 {
1535 /* If both arguments are zero extended from narrower types,
1536 the MSB will be clear on both and so we can pretend it is
1537 a normal s1 * s2 -> sr multiplication. */
1538 uns0_p = false;
1539 uns1_p = false;
1540 }
1541 else
1542 uns = true;
1543 /* Rest of handling of this case after res is computed. */
1544 goto do_main;
1545 }
1546
1547 /* s1 * u2 -> sr */
1548 if (!uns0_p && uns1_p && !unsr_p)
1549 {
1550 switch (pos_neg1)
1551 {
1552 case 1:
1553 goto do_main;
1554 case 2:
 1555         /* If (S) u2 is negative (i.e. u2 is larger than maximum of S),
1556 avoid the main code, just multiply and signal overflow
1557 unless 0 * u2 or -1 * ((U) Smin). */
1558 struct separate_ops ops;
1559 ops.code = MULT_EXPR;
1560 ops.type = TREE_TYPE (arg1);
1561 ops.op0 = make_tree (ops.type, op0);
1562 ops.op1 = make_tree (ops.type, op1);
1563 ops.op2 = NULL_TREE;
1564 ops.location = loc;
1565 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1566 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1567 NULL, done_label, profile_probability::very_likely ());
1568 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1569 NULL, do_error, profile_probability::very_unlikely ());
1570 int prec;
1571 prec = GET_MODE_PRECISION (mode);
1572 rtx sgn;
1573 sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1574 do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1575 NULL, done_label, profile_probability::very_likely ());
1576 goto do_error_label;
1577 case 3:
1578 /* Rest of handling of this case after res is computed. */
1579 goto do_main;
1580 default:
1581 gcc_unreachable ();
1582 }
1583 }
1584
1585 /* s1 * s2 -> ur */
1586 if (!uns0_p && !uns1_p && unsr_p)
1587 {
1588 rtx tem;
1589 switch (pos_neg0 | pos_neg1)
1590 {
1591 case 1: /* Both operands known to be non-negative. */
1592 goto do_main;
1593 case 2: /* Both operands known to be negative. */
1594 op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1595 op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1596 /* Avoid looking at arg0/arg1 ranges, as we've changed
1597 the arguments. */
1598 arg0 = error_mark_node;
1599 arg1 = error_mark_node;
1600 goto do_main;
1601 case 3:
1602 if ((pos_neg0 ^ pos_neg1) == 3)
1603 {
1604 /* If one operand is known to be negative and the other
1605 non-negative, this overflows always, unless the non-negative
1606 one is 0. Just do normal multiply and set overflow
1607 unless one of the operands is 0. */
1608 struct separate_ops ops;
1609 ops.code = MULT_EXPR;
1610 ops.type
1611 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1612 1);
1613 ops.op0 = make_tree (ops.type, op0);
1614 ops.op1 = make_tree (ops.type, op1);
1615 ops.op2 = NULL_TREE;
1616 ops.location = loc;
1617 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1618 do_compare_rtx_and_jump (pos_neg0 == 1 ? op0 : op1, const0_rtx, EQ,
1619 true, mode, NULL_RTX, NULL, done_label,
1620 profile_probability::very_likely ());
1621 goto do_error_label;
1622 }
1623 if (get_min_precision (arg0, SIGNED)
1624 + get_min_precision (arg1, SIGNED) <= GET_MODE_PRECISION (mode))
1625 {
1626 /* If both operands are sign extended from narrower types and
1627 the sum of the two precisions is smaller or equal to the
1628 result precision: if both arguments are at runtime
1629 non-negative, maximum result will be 0x3f01 or 0x3f..f0..01
1630 and there will be no overflow, if both arguments are negative,
1631 maximum result will be 0x40..00 and there will be no overflow
1632 either, if one argument is positive and the other argument
1633 negative, the result when treated as signed will be negative
 1634             and there will always be overflow, and if one argument is
1635 zero and the other negative the result will be zero and no
1636 overflow. So, do
1637 res = (U) (s1 * s2)
1638 ovf = (S) res < 0 */
1639 struct separate_ops ops;
1640 ops.code = MULT_EXPR;
1641 ops.type
1642 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1643 1);
1644 ops.op0 = make_tree (ops.type, op0);
1645 ops.op1 = make_tree (ops.type, op1);
1646 ops.op2 = NULL_TREE;
1647 ops.location = loc;
1648 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1649 do_compare_rtx_and_jump (res, const0_rtx, GE, false,
1650 mode, NULL_RTX, NULL, done_label,
1651 profile_probability::very_likely ());
1652 goto do_error_label;
1653 }
1654 /* The general case, do all the needed comparisons at runtime. */
1655 rtx_code_label *do_main_label, *after_negate_label;
1656 rtx rop0, rop1;
1657 rop0 = gen_reg_rtx (mode);
1658 rop1 = gen_reg_rtx (mode);
1659 emit_move_insn (rop0, op0);
1660 emit_move_insn (rop1, op1);
1661 op0 = rop0;
1662 op1 = rop1;
1663 do_main_label = gen_label_rtx ();
1664 after_negate_label = gen_label_rtx ();
1665 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1666 OPTAB_LIB_WIDEN);
1667 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1668 NULL, after_negate_label, profile_probability::very_likely ());
1669 /* Both arguments negative here, negate them and continue with
1670 normal unsigned overflow checking multiplication. */
1671 emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1672 NULL_RTX, false));
1673 emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1674 NULL_RTX, false));
1675 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1676 the arguments. */
1677 arg0 = error_mark_node;
1678 arg1 = error_mark_node;
1679 emit_jump (do_main_label);
1680 emit_label (after_negate_label);
1681 tem = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1682 OPTAB_LIB_WIDEN);
1683 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1684 NULL, do_main_label,
1685 profile_probability::very_likely ());
1686 /* One argument is negative here, the other positive. This
1687 overflows always, unless one of the arguments is 0. But
1688 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1689 is, thus we can keep do_main code oring in overflow as is. */
1690 if (pos_neg0 != 2)
1691 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1692 NULL, do_main_label,
1693 profile_probability::very_unlikely ());
1694 if (pos_neg1 != 2)
1695 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1696 NULL, do_main_label,
1697 profile_probability::very_unlikely ());
1698 expand_arith_set_overflow (lhs, target);
1699 emit_label (do_main_label);
1700 goto do_main;
1701 default:
1702 gcc_unreachable ();
1703 }
1704 }
1705
1706 do_main:
1707 type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1708 sign = uns ? UNSIGNED : SIGNED;
1709 icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1710 if (uns
1711 && (integer_pow2p (arg0) || integer_pow2p (arg1))
1712 && (optimize_insn_for_speed_p () || icode == CODE_FOR_nothing))
1713 {
1714 /* Optimize unsigned multiplication by a power of 2 constant
1715 using 2 shifts, one for the result, one to extract the shifted
1716 out bits to see if they are all zero.
1717 Don't do this if optimizing for size and we have umulv4_optab;
1718 in that case assume the multiplication will be shorter.
1719 This is a heuristic based on the single target that provides
1720 umulv4 right now (i?86/x86_64); if further targets add it, this
1721 might need to be revisited.
1722 Cases where both operands are constant should already be folded
1723 during GIMPLE, and cases where one operand is constant but not a
1724 power of 2 are questionable: either the WIDEN_MULT_EXPR case
1725 below can be done without multiplication, just by shifts and adds,
1726 or we'd need to divide the result (and hope it actually doesn't
1727 really divide nor multiply) and compare the result of the division
1728 with the original operand. */
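/* For example (illustrative): checking x * 8 in a 32-bit mode becomes
   res = x << 3 and upper = x >> 29; the multiplication overflowed iff
   some shifted-out bit was non-zero, i.e. iff upper != 0.  */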
1729 rtx opn0 = op0;
1730 rtx opn1 = op1;
1731 tree argn0 = arg0;
1732 tree argn1 = arg1;
1733 if (integer_pow2p (arg0))
1734 {
1735 std::swap (opn0, opn1);
1736 std::swap (argn0, argn1);
1737 }
1738 int cnt = tree_log2 (argn1);
1739 if (cnt >= 0 && cnt < GET_MODE_PRECISION (mode))
1740 {
1741 rtx upper = const0_rtx;
1742 res = expand_shift (LSHIFT_EXPR, mode, opn0, cnt, NULL_RTX, uns);
1743 if (cnt != 0)
1744 upper = expand_shift (RSHIFT_EXPR, mode, opn0,
1745 GET_MODE_PRECISION (mode) - cnt,
1746 NULL_RTX, uns);
1747 do_compare_rtx_and_jump (upper, const0_rtx, EQ, true, mode,
1748 NULL_RTX, NULL, done_label,
1749 profile_probability::very_likely ());
1750 goto do_error_label;
1751 }
1752 }
1753 if (icode != CODE_FOR_nothing)
1754 {
1755 class expand_operand ops[4];
1756 rtx_insn *last = get_last_insn ();
1757
1758 res = gen_reg_rtx (mode);
1759 create_output_operand (&ops[0], res, mode);
1760 create_input_operand (&ops[1], op0, mode);
1761 create_input_operand (&ops[2], op1, mode);
1762 create_fixed_operand (&ops[3], do_error);
1763 if (maybe_expand_insn (icode, 4, ops))
1764 {
1765 last = get_last_insn ();
1766 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1767 && JUMP_P (last)
1768 && any_condjump_p (last)
1769 && !find_reg_note (last, REG_BR_PROB, 0))
1770 add_reg_br_prob_note (last,
1771 profile_probability::very_unlikely ());
1772 emit_jump (done_label);
1773 }
1774 else
1775 {
1776 delete_insns_since (last);
1777 icode = CODE_FOR_nothing;
1778 }
1779 }
1780
1781 if (icode == CODE_FOR_nothing)
1782 {
1783 struct separate_ops ops;
1784 int prec = GET_MODE_PRECISION (mode);
1785 scalar_int_mode hmode, wmode;
1786 ops.op0 = make_tree (type, op0);
1787 ops.op1 = make_tree (type, op1);
1788 ops.op2 = NULL_TREE;
1789 ops.location = loc;
1790
1791 /* Optimize unsigned overflow check where we don't use the
1792 multiplication result, just whether overflow happened.
1793 If we can do MULT_HIGHPART_EXPR, that followed by
1794 comparison of the result against zero is cheapest.
1795 We'll still compute res, but it should be DCEd later. */
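/* E.g. (illustrative) when only the overflow flag of
   __builtin_mul_overflow_p is consumed, the single use of the IFN
   result is its IMAGPART_EXPR, and comparing the high-part product
   against zero is all that is needed.  */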
1796 use_operand_p use;
1797 gimple *use_stmt;
1798 if (!is_ubsan
1799 && lhs
1800 && uns
1801 && !(uns0_p && uns1_p && !unsr_p)
1802 && can_mult_highpart_p (mode, uns) == 1
1803 && single_imm_use (lhs, &use, &use_stmt)
1804 && is_gimple_assign (use_stmt)
1805 && gimple_assign_rhs_code (use_stmt) == IMAGPART_EXPR)
1806 goto highpart;
1807
1808 if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
1809 && targetm.scalar_mode_supported_p (wmode)
1810 && can_widen_mult_without_libcall (wmode, mode, op0, op1, uns))
1811 {
1812 twoxwider:
1813 ops.code = WIDEN_MULT_EXPR;
1814 ops.type
1815 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
1816
1817 res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
1818 rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
1819 NULL_RTX, uns);
1820 hipart = convert_modes (mode, wmode, hipart, uns);
1821 res = convert_modes (mode, wmode, res, uns);
1822 if (uns)
1823 /* For the unsigned multiplication, there was overflow if
1824 HIPART is non-zero. */
1825 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1826 NULL_RTX, NULL, done_label,
1827 profile_probability::very_likely ());
1828 else
1829 {
1830 /* RES is used more than once, place it in a pseudo. */
1831 res = force_reg (mode, res);
1832
1833 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1834 NULL_RTX, 0);
1835 /* RES is the low half of the double-width result, HIPART
1836 the high half. There was overflow if
1837 HIPART is different from RES < 0 ? -1 : 0. */
1838 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1839 NULL_RTX, NULL, done_label,
1840 profile_probability::very_likely ());
1841 }
1842 }
1843 else if (can_mult_highpart_p (mode, uns) == 1)
1844 {
1845 highpart:
1846 ops.code = MULT_HIGHPART_EXPR;
1847 ops.type = type;
1848
1849 rtx hipart = expand_expr_real_2 (&ops, NULL_RTX, mode,
1850 EXPAND_NORMAL);
1851 ops.code = MULT_EXPR;
1852 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1853 if (uns)
1854 /* For the unsigned multiplication, there was overflow if
1855 HIPART is non-zero. */
1856 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1857 NULL_RTX, NULL, done_label,
1858 profile_probability::very_likely ());
1859 else
1860 {
1861 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1862 NULL_RTX, 0);
1863 /* RES is the low half of the double-width result, HIPART
1864 the high half. There was overflow if
1865 HIPART is different from RES < 0 ? -1 : 0. */
1866 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1867 NULL_RTX, NULL, done_label,
1868 profile_probability::very_likely ());
1869 }
1870
1871 }
1872 else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
1873 && 2 * GET_MODE_PRECISION (hmode) == prec)
1874 {
1875 rtx_code_label *large_op0 = gen_label_rtx ();
1876 rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
1877 rtx_code_label *one_small_one_large = gen_label_rtx ();
1878 rtx_code_label *both_ops_large = gen_label_rtx ();
1879 rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
1880 rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
1881 rtx_code_label *do_overflow = gen_label_rtx ();
1882 rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
1883
1884 unsigned int hprec = GET_MODE_PRECISION (hmode);
1885 rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
1886 NULL_RTX, uns);
1887 hipart0 = convert_modes (hmode, mode, hipart0, uns);
1888 rtx lopart0 = convert_modes (hmode, mode, op0, uns);
1889 rtx signbit0 = const0_rtx;
1890 if (!uns)
1891 signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
1892 NULL_RTX, 0);
1893 rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
1894 NULL_RTX, uns);
1895 hipart1 = convert_modes (hmode, mode, hipart1, uns);
1896 rtx lopart1 = convert_modes (hmode, mode, op1, uns);
1897 rtx signbit1 = const0_rtx;
1898 if (!uns)
1899 signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
1900 NULL_RTX, 0);
1901
1902 res = gen_reg_rtx (mode);
1903
1904 /* True if op0 resp. op1 are known to be in the range of
1905 halfstype. */
1906 bool op0_small_p = false;
1907 bool op1_small_p = false;
1908 /* True if op0 resp. op1 are known to have all zeros or all ones
1909 in the upper half of bits, but are not known to be
1910 op{0,1}_small_p. */
1911 bool op0_medium_p = false;
1912 bool op1_medium_p = false;
1913 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1914 nonnegative, 1 if unknown. */
1915 int op0_sign = 1;
1916 int op1_sign = 1;
1917
1918 if (pos_neg0 == 1)
1919 op0_sign = 0;
1920 else if (pos_neg0 == 2)
1921 op0_sign = -1;
1922 if (pos_neg1 == 1)
1923 op1_sign = 0;
1924 else if (pos_neg1 == 2)
1925 op1_sign = -1;
1926
1927 unsigned int mprec0 = prec;
1928 if (arg0 != error_mark_node)
1929 mprec0 = get_min_precision (arg0, sign);
1930 if (mprec0 <= hprec)
1931 op0_small_p = true;
1932 else if (!uns && mprec0 <= hprec + 1)
1933 op0_medium_p = true;
1934 unsigned int mprec1 = prec;
1935 if (arg1 != error_mark_node)
1936 mprec1 = get_min_precision (arg1, sign);
1937 if (mprec1 <= hprec)
1938 op1_small_p = true;
1939 else if (!uns && mprec1 <= hprec + 1)
1940 op1_medium_p = true;
1941
1942 int smaller_sign = 1;
1943 int larger_sign = 1;
1944 if (op0_small_p)
1945 {
1946 smaller_sign = op0_sign;
1947 larger_sign = op1_sign;
1948 }
1949 else if (op1_small_p)
1950 {
1951 smaller_sign = op1_sign;
1952 larger_sign = op0_sign;
1953 }
1954 else if (op0_sign == op1_sign)
1955 {
1956 smaller_sign = op0_sign;
1957 larger_sign = op0_sign;
1958 }
1959
1960 if (!op0_small_p)
1961 do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
1962 NULL_RTX, NULL, large_op0,
1963 profile_probability::unlikely ());
1964
1965 if (!op1_small_p)
1966 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1967 NULL_RTX, NULL, small_op0_large_op1,
1968 profile_probability::unlikely ());
1969
1970 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1971 hmode to mode, the multiplication will never overflow. We can
1972 do just one hmode x hmode => mode widening multiplication. */
1973 tree halfstype = build_nonstandard_integer_type (hprec, uns);
1974 ops.op0 = make_tree (halfstype, lopart0);
1975 ops.op1 = make_tree (halfstype, lopart1);
1976 ops.code = WIDEN_MULT_EXPR;
1977 ops.type = type;
1978 rtx thisres
1979 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1980 emit_move_insn (res, thisres);
1981 emit_jump (done_label);
1982
1983 emit_label (small_op0_large_op1);
1984
1985 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1986 but op1 is not, just swap the arguments and handle it as op1
1987 sign/zero extended, op0 not. */
1988 rtx larger = gen_reg_rtx (mode);
1989 rtx hipart = gen_reg_rtx (hmode);
1990 rtx lopart = gen_reg_rtx (hmode);
1991 emit_move_insn (larger, op1);
1992 emit_move_insn (hipart, hipart1);
1993 emit_move_insn (lopart, lopart0);
1994 emit_jump (one_small_one_large);
1995
1996 emit_label (large_op0);
1997
1998 if (!op1_small_p)
1999 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
2000 NULL_RTX, NULL, both_ops_large,
2001 profile_probability::unlikely ());
2002
2003 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
2004 but op0 is not, prepare larger, hipart and lopart pseudos and
2005 handle it together with small_op0_large_op1. */
2006 emit_move_insn (larger, op0);
2007 emit_move_insn (hipart, hipart0);
2008 emit_move_insn (lopart, lopart1);
2009
2010 emit_label (one_small_one_large);
2011
2012 /* lopart is the low part of the operand that is sign extended
2013 to mode, larger is the other operand, hipart is the
2014 high part of larger and lopart0 and lopart1 are the low parts
2015 of both operands.
2016 We perform lopart0 * lopart1 and lopart * hipart widening
2017 multiplications. */
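/* Roughly (illustrative restatement): writing the extended operand as
   S and the other one as larger = (hipart << hprec) + low half, the
   product is S * larger = lopart0 * lopart1 + ((lopart * hipart)
   << hprec); the two widening multiplications below compute those two
   terms, with sign corrections applied afterwards for the !uns case. */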
2018 tree halfutype = build_nonstandard_integer_type (hprec, 1);
2019 ops.op0 = make_tree (halfutype, lopart0);
2020 ops.op1 = make_tree (halfutype, lopart1);
2021 rtx lo0xlo1
2022 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2023
2024 ops.op0 = make_tree (halfutype, lopart);
2025 ops.op1 = make_tree (halfutype, hipart);
2026 rtx loxhi = gen_reg_rtx (mode);
2027 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2028 emit_move_insn (loxhi, tem);
2029
2030 if (!uns)
2031 {
2032 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
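/* (Why, illustratively: LOXHI was computed with HIPART treated as
   unsigned, i.e. as hipart + 2^hprec when it is really negative, so
   the excess lopart << hprec has to be subtracted back out.)  */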
2033 if (larger_sign == 0)
2034 emit_jump (after_hipart_neg);
2035 else if (larger_sign != -1)
2036 do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
2037 NULL_RTX, NULL, after_hipart_neg,
2038 profile_probability::even ());
2039
2040 tem = convert_modes (mode, hmode, lopart, 1);
2041 tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
2042 tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
2043 1, OPTAB_WIDEN);
2044 emit_move_insn (loxhi, tem);
2045
2046 emit_label (after_hipart_neg);
2047
2048 /* if (lopart < 0) loxhi -= larger; */
2049 if (smaller_sign == 0)
2050 emit_jump (after_lopart_neg);
2051 else if (smaller_sign != -1)
2052 do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
2053 NULL_RTX, NULL, after_lopart_neg,
2054 profile_probability::even ());
2055
2056 tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
2057 1, OPTAB_WIDEN);
2058 emit_move_insn (loxhi, tem);
2059
2060 emit_label (after_lopart_neg);
2061 }
2062
2063 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
2064 tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
2065 tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
2066 1, OPTAB_WIDEN);
2067 emit_move_insn (loxhi, tem);
2068
2069 /* if (loxhi >> (bitsize / 2)
2070 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
2071 if (loxhi >> (bitsize / 2) == 0) (if uns). */
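/* (Restated: the comparison below verifies that LOXHI is still just a
   sign extension -- a zero extension when uns -- of its low hmode
   half; if not, the true product needs more than PREC bits.)  */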
2072 rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
2073 NULL_RTX, 0);
2074 hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
2075 rtx signbitloxhi = const0_rtx;
2076 if (!uns)
2077 signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
2078 convert_modes (hmode, mode,
2079 loxhi, 0),
2080 hprec - 1, NULL_RTX, 0);
2081
2082 do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
2083 NULL_RTX, NULL, do_overflow,
2084 profile_probability::very_unlikely ());
2085
2086 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
2087 rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
2088 NULL_RTX, 1);
2089 tem = convert_modes (mode, hmode,
2090 convert_modes (hmode, mode, lo0xlo1, 1), 1);
2091
2092 tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
2093 1, OPTAB_WIDEN);
2094 if (tem != res)
2095 emit_move_insn (res, tem);
2096 emit_jump (done_label);
2097
2098 emit_label (both_ops_large);
2099
2100 /* If both operands are large (not sign (!uns) or zero (uns)
2101 extended from hmode), then perform the full multiplication
2102 which will be the result of the operation.
2103 The only cases which don't overflow are, for signed multiplication,
2104 some cases where both hipart0 and hipart1 are 0 or -1.
2105 For unsigned multiplication, when the high parts are both non-zero,
2106 this always overflows. */
2107 ops.code = MULT_EXPR;
2108 ops.op0 = make_tree (type, op0);
2109 ops.op1 = make_tree (type, op1);
2110 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2111 emit_move_insn (res, tem);
2112
2113 if (!uns)
2114 {
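/* (The unsigned hipart + 1 > 1 tests below are a compact check that
   the respective high part is neither 0 nor -1, which for the signed
   case already guarantees overflow.)  */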
2115 if (!op0_medium_p)
2116 {
2117 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
2118 NULL_RTX, 1, OPTAB_WIDEN);
2119 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
2120 NULL_RTX, NULL, do_error,
2121 profile_probability::very_unlikely ());
2122 }
2123
2124 if (!op1_medium_p)
2125 {
2126 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
2127 NULL_RTX, 1, OPTAB_WIDEN);
2128 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
2129 NULL_RTX, NULL, do_error,
2130 profile_probability::very_unlikely ());
2131 }
2132
2133 /* At this point hipart{0,1} are both in [-1, 0]. If they are
2134 the same, overflow happened if res is non-positive, if they
2135 are different, overflow happened if res is positive. */
2136 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
2137 emit_jump (hipart_different);
2138 else if (op0_sign == 1 || op1_sign == 1)
2139 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
2140 NULL_RTX, NULL, hipart_different,
2141 profile_probability::even ());
2142
2143 do_compare_rtx_and_jump (res, const0_rtx, LE, false, mode,
2144 NULL_RTX, NULL, do_error,
2145 profile_probability::very_unlikely ());
2146 emit_jump (done_label);
2147
2148 emit_label (hipart_different);
2149
2150 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
2151 NULL_RTX, NULL, do_error,
2152 profile_probability::very_unlikely ());
2153 emit_jump (done_label);
2154 }
2155
2156 emit_label (do_overflow);
2157
2158 /* Overflow, do full multiplication and fallthru into do_error. */
2159 ops.op0 = make_tree (type, op0);
2160 ops.op1 = make_tree (type, op1);
2161 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2162 emit_move_insn (res, tem);
2163 }
2164 else if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
2165 && targetm.scalar_mode_supported_p (wmode))
2166 /* Even emitting a libcall is better than not detecting overflow
2167 at all. */
2168 goto twoxwider;
2169 else
2170 {
2171 gcc_assert (!is_ubsan);
2172 ops.code = MULT_EXPR;
2173 ops.type = type;
2174 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2175 emit_jump (done_label);
2176 }
2177 }
2178
2179 do_error_label:
2180 emit_label (do_error);
2181 if (is_ubsan)
2182 {
2183 /* Expand the ubsan builtin call. */
2184 push_temp_slots ();
2185 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
2186 arg0, arg1, datap);
2187 expand_normal (fn);
2188 pop_temp_slots ();
2189 do_pending_stack_adjust ();
2190 }
2191 else if (lhs)
2192 expand_arith_set_overflow (lhs, target);
2193
2194 /* We're done. */
2195 emit_label (done_label);
2196
2197 /* u1 * u2 -> sr */
2198 if (uns0_p && uns1_p && !unsr_p)
2199 {
2200 rtx_code_label *all_done_label = gen_label_rtx ();
2201 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
2202 NULL, all_done_label, profile_probability::very_likely ());
2203 expand_arith_set_overflow (lhs, target);
2204 emit_label (all_done_label);
2205 }
2206
2207 /* s1 * u2 -> sr */
2208 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
2209 {
2210 rtx_code_label *all_done_label = gen_label_rtx ();
2211 rtx_code_label *set_noovf = gen_label_rtx ();
2212 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
2213 NULL, all_done_label, profile_probability::very_likely ());
2214 expand_arith_set_overflow (lhs, target);
2215 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
2216 NULL, set_noovf, profile_probability::very_likely ());
2217 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
2218 NULL, all_done_label, profile_probability::very_unlikely ());
2219 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
2220 all_done_label, profile_probability::very_unlikely ());
2221 emit_label (set_noovf);
2222 write_complex_part (target, const0_rtx, true);
2223 emit_label (all_done_label);
2224 }
2225
2226 if (lhs)
2227 {
2228 if (is_ubsan)
2229 expand_ubsan_result_store (target, res);
2230 else
2231 expand_arith_overflow_result_store (lhs, target, mode, res);
2232 }
2233 }
2234
2235 /* Expand UBSAN_CHECK_* internal function if it has vector operands. */
2236
2237 static void
2238 expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
2239 tree arg0, tree arg1)
2240 {
2241 poly_uint64 cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
2242 rtx_code_label *loop_lab = NULL;
2243 rtx cntvar = NULL_RTX;
2244 tree cntv = NULL_TREE;
2245 tree eltype = TREE_TYPE (TREE_TYPE (arg0));
2246 tree sz = TYPE_SIZE (eltype);
2247 tree data = NULL_TREE;
2248 tree resv = NULL_TREE;
2249 rtx lhsr = NULL_RTX;
2250 rtx resvr = NULL_RTX;
2251 unsigned HOST_WIDE_INT const_cnt = 0;
2252 bool use_loop_p = (!cnt.is_constant (&const_cnt) || const_cnt > 4);
2253
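/* Small constant-length vectors (at most 4 elements) are checked with
   the per-element code emitted inline; longer or variable-length
   vectors use a runtime loop over CNTVAR instead.  */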
2254 if (lhs)
2255 {
2256 optab op;
2257 lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2258 if (!VECTOR_MODE_P (GET_MODE (lhsr))
2259 || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
2260 optab_default)) == unknown_optab
2261 || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
2262 == CODE_FOR_nothing))
2263 {
2264 if (MEM_P (lhsr))
2265 resv = make_tree (TREE_TYPE (lhs), lhsr);
2266 else
2267 {
2268 resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
2269 resv = make_tree (TREE_TYPE (lhs), resvr);
2270 }
2271 }
2272 }
2273 if (use_loop_p)
2274 {
2275 do_pending_stack_adjust ();
2276 loop_lab = gen_label_rtx ();
2277 cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
2278 cntv = make_tree (sizetype, cntvar);
2279 emit_move_insn (cntvar, const0_rtx);
2280 emit_label (loop_lab);
2281 }
2282 if (TREE_CODE (arg0) != VECTOR_CST)
2283 {
2284 rtx arg0r = expand_normal (arg0);
2285 arg0 = make_tree (TREE_TYPE (arg0), arg0r);
2286 }
2287 if (TREE_CODE (arg1) != VECTOR_CST)
2288 {
2289 rtx arg1r = expand_normal (arg1);
2290 arg1 = make_tree (TREE_TYPE (arg1), arg1r);
2291 }
2292 for (unsigned int i = 0; i < (use_loop_p ? 1 : const_cnt); i++)
2293 {
2294 tree op0, op1, res = NULL_TREE;
2295 if (use_loop_p)
2296 {
2297 tree atype = build_array_type_nelts (eltype, cnt);
2298 op0 = uniform_vector_p (arg0);
2299 if (op0 == NULL_TREE)
2300 {
2301 op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
2302 op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
2303 NULL_TREE, NULL_TREE);
2304 }
2305 op1 = uniform_vector_p (arg1);
2306 if (op1 == NULL_TREE)
2307 {
2308 op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
2309 op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
2310 NULL_TREE, NULL_TREE);
2311 }
2312 if (resv)
2313 {
2314 res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
2315 res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
2316 NULL_TREE, NULL_TREE);
2317 }
2318 }
2319 else
2320 {
2321 tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
2322 op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
2323 op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
2324 if (resv)
2325 res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
2326 bitpos);
2327 }
2328 switch (code)
2329 {
2330 case PLUS_EXPR:
2331 expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
2332 false, false, false, true, &data);
2333 break;
2334 case MINUS_EXPR:
2335 if (use_loop_p ? integer_zerop (arg0) : integer_zerop (op0))
2336 expand_neg_overflow (loc, res, op1, true, &data);
2337 else
2338 expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
2339 false, false, false, true, &data);
2340 break;
2341 case MULT_EXPR:
2342 expand_mul_overflow (loc, res, op0, op1, false, false, false,
2343 true, &data);
2344 break;
2345 default:
2346 gcc_unreachable ();
2347 }
2348 }
2349 if (use_loop_p)
2350 {
2351 struct separate_ops ops;
2352 ops.code = PLUS_EXPR;
2353 ops.type = TREE_TYPE (cntv);
2354 ops.op0 = cntv;
2355 ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
2356 ops.op2 = NULL_TREE;
2357 ops.location = loc;
2358 rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
2359 EXPAND_NORMAL);
2360 if (ret != cntvar)
2361 emit_move_insn (cntvar, ret);
2362 rtx cntrtx = gen_int_mode (cnt, TYPE_MODE (sizetype));
2363 do_compare_rtx_and_jump (cntvar, cntrtx, NE, false,
2364 TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
2365 profile_probability::very_likely ());
2366 }
2367 if (lhs && resv == NULL_TREE)
2368 {
2369 struct separate_ops ops;
2370 ops.code = code;
2371 ops.type = TREE_TYPE (arg0);
2372 ops.op0 = arg0;
2373 ops.op1 = arg1;
2374 ops.op2 = NULL_TREE;
2375 ops.location = loc;
2376 rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
2377 EXPAND_NORMAL);
2378 if (ret != lhsr)
2379 emit_move_insn (lhsr, ret);
2380 }
2381 else if (resvr)
2382 emit_move_insn (lhsr, resvr);
2383 }
2384
2385 /* Expand UBSAN_CHECK_ADD call STMT. */
2386
2387 static void
2388 expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
2389 {
2390 location_t loc = gimple_location (stmt);
2391 tree lhs = gimple_call_lhs (stmt);
2392 tree arg0 = gimple_call_arg (stmt, 0);
2393 tree arg1 = gimple_call_arg (stmt, 1);
2394 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2395 expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
2396 else
2397 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
2398 false, false, false, true, NULL);
2399 }
2400
2401 /* Expand UBSAN_CHECK_SUB call STMT. */
2402
2403 static void
2404 expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
2405 {
2406 location_t loc = gimple_location (stmt);
2407 tree lhs = gimple_call_lhs (stmt);
2408 tree arg0 = gimple_call_arg (stmt, 0);
2409 tree arg1 = gimple_call_arg (stmt, 1);
2410 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2411 expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
2412 else if (integer_zerop (arg0))
2413 expand_neg_overflow (loc, lhs, arg1, true, NULL);
2414 else
2415 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
2416 false, false, false, true, NULL);
2417 }
2418
2419 /* Expand UBSAN_CHECK_MUL call STMT. */
2420
2421 static void
2422 expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
2423 {
2424 location_t loc = gimple_location (stmt);
2425 tree lhs = gimple_call_lhs (stmt);
2426 tree arg0 = gimple_call_arg (stmt, 0);
2427 tree arg1 = gimple_call_arg (stmt, 1);
2428 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2429 expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
2430 else
2431 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
2432 NULL);
2433 }
2434
2435 /* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion. */
2436
2437 static void
2438 expand_arith_overflow (enum tree_code code, gimple *stmt)
2439 {
2440 tree lhs = gimple_call_lhs (stmt);
2441 if (lhs == NULL_TREE)
2442 return;
2443 tree arg0 = gimple_call_arg (stmt, 0);
2444 tree arg1 = gimple_call_arg (stmt, 1);
2445 tree type = TREE_TYPE (TREE_TYPE (lhs));
2446 int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
2447 int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
2448 int unsr_p = TYPE_UNSIGNED (type);
2449 int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
2450 int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
2451 int precres = TYPE_PRECISION (type);
2452 location_t loc = gimple_location (stmt);
2453 if (!uns0_p && get_range_pos_neg (arg0) == 1)
2454 uns0_p = true;
2455 if (!uns1_p && get_range_pos_neg (arg1) == 1)
2456 uns1_p = true;
2457 int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
2458 prec0 = MIN (prec0, pr);
2459 pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
2460 prec1 = MIN (prec1, pr);
2461
2462 /* If uns0_p && uns1_p, precop is the minimum precision needed
2463 for an unsigned type to hold the exact result, otherwise
2464 precop is the minimum precision needed for a signed type to
2465 hold the exact result. */
2466 int precop;
2467 if (code == MULT_EXPR)
2468 precop = prec0 + prec1 + (uns0_p != uns1_p);
2469 else
2470 {
2471 if (uns0_p == uns1_p)
2472 precop = MAX (prec0, prec1) + 1;
2473 else if (uns0_p)
2474 precop = MAX (prec0 + 1, prec1) + 1;
2475 else
2476 precop = MAX (prec0, prec1 + 1) + 1;
2477 }
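/* Worked example (illustrative): for two signed 32-bit operands,
   addition needs precop = 33 while multiplication needs precop = 64;
   either fits a signed 64-bit result with no runtime check, whereas a
   32-bit result falls through to the overflow expansions below.  */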
2478 int orig_precres = precres;
2479
2480 do
2481 {
2482 if ((uns0_p && uns1_p)
2483 ? ((precop + !unsr_p) <= precres
2484 /* u1 - u2 -> ur can overflow, no matter what precision
2485 the result has. */
2486 && (code != MINUS_EXPR || !unsr_p))
2487 : (!unsr_p && precop <= precres))
2488 {
2489 /* The infinite precision result will always fit into the result. */
2490 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2491 write_complex_part (target, const0_rtx, true);
2492 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2493 struct separate_ops ops;
2494 ops.code = code;
2495 ops.type = type;
2496 ops.op0 = fold_convert_loc (loc, type, arg0);
2497 ops.op1 = fold_convert_loc (loc, type, arg1);
2498 ops.op2 = NULL_TREE;
2499 ops.location = loc;
2500 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
2501 expand_arith_overflow_result_store (lhs, target, mode, tem);
2502 return;
2503 }
2504
2505 /* For operations with low precision, if the target doesn't have them,
2506 start with precres widening right away, otherwise do it only if the
2507 simplest cases can't be used. */
2508 const int min_precision = targetm.min_arithmetic_precision ();
2509 if (orig_precres == precres && precres < min_precision)
2510 ;
2511 else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
2512 && prec1 <= precres)
2513 || ((!uns0_p || !uns1_p) && !unsr_p
2514 && prec0 + uns0_p <= precres
2515 && prec1 + uns1_p <= precres))
2516 {
2517 arg0 = fold_convert_loc (loc, type, arg0);
2518 arg1 = fold_convert_loc (loc, type, arg1);
2519 switch (code)
2520 {
2521 case MINUS_EXPR:
2522 if (integer_zerop (arg0) && !unsr_p)
2523 {
2524 expand_neg_overflow (loc, lhs, arg1, false, NULL);
2525 return;
2526 }
2527 /* FALLTHRU */
2528 case PLUS_EXPR:
2529 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2530 unsr_p, unsr_p, false, NULL);
2531 return;
2532 case MULT_EXPR:
2533 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2534 unsr_p, unsr_p, false, NULL);
2535 return;
2536 default:
2537 gcc_unreachable ();
2538 }
2539 }
2540
2541 /* For sub-word operations, retry with a wider type first. */
2542 if (orig_precres == precres && precop <= BITS_PER_WORD)
2543 {
2544 int p = MAX (min_precision, precop);
2545 scalar_int_mode m = smallest_int_mode_for_size (p);
2546 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2547 uns0_p && uns1_p
2548 && unsr_p);
2549 p = TYPE_PRECISION (optype);
2550 if (p > precres)
2551 {
2552 precres = p;
2553 unsr_p = TYPE_UNSIGNED (optype);
2554 type = optype;
2555 continue;
2556 }
2557 }
2558
2559 if (prec0 <= precres && prec1 <= precres)
2560 {
2561 tree types[2];
2562 if (unsr_p)
2563 {
2564 types[0] = build_nonstandard_integer_type (precres, 0);
2565 types[1] = type;
2566 }
2567 else
2568 {
2569 types[0] = type;
2570 types[1] = build_nonstandard_integer_type (precres, 1);
2571 }
2572 arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
2573 arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
2574 if (code != MULT_EXPR)
2575 expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
2576 uns0_p, uns1_p, false, NULL);
2577 else
2578 expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
2579 uns0_p, uns1_p, false, NULL);
2580 return;
2581 }
2582
2583 /* Retry with a wider type. */
2584 if (orig_precres == precres)
2585 {
2586 int p = MAX (prec0, prec1);
2587 scalar_int_mode m = smallest_int_mode_for_size (p);
2588 tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
2589 uns0_p && uns1_p
2590 && unsr_p);
2591 p = TYPE_PRECISION (optype);
2592 if (p > precres)
2593 {
2594 precres = p;
2595 unsr_p = TYPE_UNSIGNED (optype);
2596 type = optype;
2597 continue;
2598 }
2599 }
2600
2601 gcc_unreachable ();
2602 }
2603 while (1);
2604 }
2605
2606 /* Expand ADD_OVERFLOW STMT. */
2607
2608 static void
2609 expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
2610 {
2611 expand_arith_overflow (PLUS_EXPR, stmt);
2612 }
2613
2614 /* Expand SUB_OVERFLOW STMT. */
2615
2616 static void
2617 expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
2618 {
2619 expand_arith_overflow (MINUS_EXPR, stmt);
2620 }
2621
2622 /* Expand MUL_OVERFLOW STMT. */
2623
2624 static void
2625 expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
2626 {
2627 expand_arith_overflow (MULT_EXPR, stmt);
2628 }
2629
2630 /* This should get folded in tree-vectorizer.c. */
2631
2632 static void
2633 expand_LOOP_VECTORIZED (internal_fn, gcall *)
2634 {
2635 gcc_unreachable ();
2636 }
2637
2638 /* This should get folded in tree-vectorizer.c. */
2639
2640 static void
2641 expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
2642 {
2643 gcc_unreachable ();
2644 }
2645
2646 /* Return a memory reference of type TYPE for argument INDEX of STMT.
2647 Use argument INDEX + 1 to derive the second (TBAA) operand. */
2648
2649 static tree
2650 expand_call_mem_ref (tree type, gcall *stmt, int index)
2651 {
2652 tree addr = gimple_call_arg (stmt, index);
2653 tree alias_ptr_type = TREE_TYPE (gimple_call_arg (stmt, index + 1));
2654 unsigned int align = tree_to_shwi (gimple_call_arg (stmt, index + 1));
2655 if (TYPE_ALIGN (type) != align)
2656 type = build_aligned_type (type, align);
2657
2658 tree tmp = addr;
2659 if (TREE_CODE (tmp) == SSA_NAME)
2660 {
2661 gimple *def = SSA_NAME_DEF_STMT (tmp);
2662 if (gimple_assign_single_p (def))
2663 tmp = gimple_assign_rhs1 (def);
2664 }
2665
2666 if (TREE_CODE (tmp) == ADDR_EXPR)
2667 {
2668 tree mem = TREE_OPERAND (tmp, 0);
2669 if (TREE_CODE (mem) == TARGET_MEM_REF
2670 && types_compatible_p (TREE_TYPE (mem), type))
2671 {
2672 tree offset = TMR_OFFSET (mem);
2673 if (type != TREE_TYPE (mem)
2674 || alias_ptr_type != TREE_TYPE (offset)
2675 || !integer_zerop (offset))
2676 {
2677 mem = copy_node (mem);
2678 TMR_OFFSET (mem) = wide_int_to_tree (alias_ptr_type,
2679 wi::to_poly_wide (offset));
2680 TREE_TYPE (mem) = type;
2681 }
2682 return mem;
2683 }
2684 }
2685
2686 return fold_build2 (MEM_REF, type, addr, build_int_cst (alias_ptr_type, 0));
2687 }
2688
2689 /* Expand MASK_LOAD{,_LANES} or LEN_LOAD call STMT using optab OPTAB. */
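/* (Argument layout, as used below: argument 0 is the base pointer,
   argument 1 is a constant whose type carries the TBAA information and
   whose value is the alignment, and argument 2 is the mask -- or the
   length for LEN_LOAD.)  */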
2690
2691 static void
2692 expand_partial_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2693 {
2694 class expand_operand ops[3];
2695 tree type, lhs, rhs, maskt;
2696 rtx mem, target, mask;
2697 insn_code icode;
2698
2699 maskt = gimple_call_arg (stmt, 2);
2700 lhs = gimple_call_lhs (stmt);
2701 if (lhs == NULL_TREE)
2702 return;
2703 type = TREE_TYPE (lhs);
2704 rhs = expand_call_mem_ref (type, stmt, 0);
2705
2706 if (optab == vec_mask_load_lanes_optab)
2707 icode = get_multi_vector_move (type, optab);
2708 else if (optab == len_load_optab)
2709 icode = direct_optab_handler (optab, TYPE_MODE (type));
2710 else
2711 icode = convert_optab_handler (optab, TYPE_MODE (type),
2712 TYPE_MODE (TREE_TYPE (maskt)));
2713
2714 mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2715 gcc_assert (MEM_P (mem));
2716 mask = expand_normal (maskt);
2717 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2718 create_output_operand (&ops[0], target, TYPE_MODE (type));
2719 create_fixed_operand (&ops[1], mem);
2720 if (optab == len_load_optab)
2721 create_convert_operand_from (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)),
2722 TYPE_UNSIGNED (TREE_TYPE (maskt)));
2723 else
2724 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2725 expand_insn (icode, 3, ops);
2726 if (!rtx_equal_p (target, ops[0].value))
2727 emit_move_insn (target, ops[0].value);
2728 }
2729
2730 #define expand_mask_load_optab_fn expand_partial_load_optab_fn
2731 #define expand_mask_load_lanes_optab_fn expand_mask_load_optab_fn
2732 #define expand_len_load_optab_fn expand_partial_load_optab_fn
2733
2734 /* Expand MASK_STORE{,_LANES} or LEN_STORE call STMT using optab OPTAB. */
2735
2736 static void
2737 expand_partial_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2738 {
2739 class expand_operand ops[3];
2740 tree type, lhs, rhs, maskt;
2741 rtx mem, reg, mask;
2742 insn_code icode;
2743
2744 maskt = gimple_call_arg (stmt, 2);
2745 rhs = gimple_call_arg (stmt, 3);
2746 type = TREE_TYPE (rhs);
2747 lhs = expand_call_mem_ref (type, stmt, 0);
2748
2749 if (optab == vec_mask_store_lanes_optab)
2750 icode = get_multi_vector_move (type, optab);
2751 else if (optab == len_store_optab)
2752 icode = direct_optab_handler (optab, TYPE_MODE (type));
2753 else
2754 icode = convert_optab_handler (optab, TYPE_MODE (type),
2755 TYPE_MODE (TREE_TYPE (maskt)));
2756
2757 mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2758 gcc_assert (MEM_P (mem));
2759 mask = expand_normal (maskt);
2760 reg = expand_normal (rhs);
2761 create_fixed_operand (&ops[0], mem);
2762 create_input_operand (&ops[1], reg, TYPE_MODE (type));
2763 if (optab == len_store_optab)
2764 create_convert_operand_from (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)),
2765 TYPE_UNSIGNED (TREE_TYPE (maskt)));
2766 else
2767 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2768 expand_insn (icode, 3, ops);
2769 }
2770
2771 #define expand_mask_store_optab_fn expand_partial_store_optab_fn
2772 #define expand_mask_store_lanes_optab_fn expand_mask_store_optab_fn
2773 #define expand_len_store_optab_fn expand_partial_store_optab_fn
2774
2775 /* Expand VCOND, VCONDU and VCONDEQ optab internal functions.
2776 The expansion of STMT happens based on the associated OPTAB. */
2777
2778 static void
2779 expand_vec_cond_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2780 {
2781 class expand_operand ops[6];
2782 insn_code icode;
2783 tree lhs = gimple_call_lhs (stmt);
2784 tree op0a = gimple_call_arg (stmt, 0);
2785 tree op0b = gimple_call_arg (stmt, 1);
2786 tree op1 = gimple_call_arg (stmt, 2);
2787 tree op2 = gimple_call_arg (stmt, 3);
2788 enum tree_code tcode = (tree_code) int_cst_value (gimple_call_arg (stmt, 4));
2789
2790 tree vec_cond_type = TREE_TYPE (lhs);
2791 tree op_mode = TREE_TYPE (op0a);
2792 bool unsignedp = TYPE_UNSIGNED (op_mode);
2793
2794 machine_mode mode = TYPE_MODE (vec_cond_type);
2795 machine_mode cmp_op_mode = TYPE_MODE (op_mode);
2796
2797 icode = convert_optab_handler (optab, mode, cmp_op_mode);
2798 rtx comparison
2799 = vector_compare_rtx (VOIDmode, tcode, op0a, op0b, unsignedp, icode, 4);
2800 rtx rtx_op1 = expand_normal (op1);
2801 rtx rtx_op2 = expand_normal (op2);
2802
2803 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2804 create_output_operand (&ops[0], target, mode);
2805 create_input_operand (&ops[1], rtx_op1, mode);
2806 create_input_operand (&ops[2], rtx_op2, mode);
2807 create_fixed_operand (&ops[3], comparison);
2808 create_fixed_operand (&ops[4], XEXP (comparison, 0));
2809 create_fixed_operand (&ops[5], XEXP (comparison, 1));
2810 expand_insn (icode, 6, ops);
2811 if (!rtx_equal_p (ops[0].value, target))
2812 emit_move_insn (target, ops[0].value);
2813 }
2814
2815 /* Expand VCOND_MASK optab internal function.
2816 The expansion of STMT happens based on the associated OPTAB. */
2817
2818 static void
2819 expand_vec_cond_mask_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2820 {
2821 class expand_operand ops[4];
2822
2823 tree lhs = gimple_call_lhs (stmt);
2824 tree op0 = gimple_call_arg (stmt, 0);
2825 tree op1 = gimple_call_arg (stmt, 1);
2826 tree op2 = gimple_call_arg (stmt, 2);
2827 tree vec_cond_type = TREE_TYPE (lhs);
2828
2829 machine_mode mode = TYPE_MODE (vec_cond_type);
2830 machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
2831 enum insn_code icode = convert_optab_handler (optab, mode, mask_mode);
2832 rtx mask, rtx_op1, rtx_op2;
2833
2834 gcc_assert (icode != CODE_FOR_nothing);
2835
2836 mask = expand_normal (op0);
2837 rtx_op1 = expand_normal (op1);
2838 rtx_op2 = expand_normal (op2);
2839
2840 mask = force_reg (mask_mode, mask);
2841 rtx_op1 = force_reg (mode, rtx_op1);
2842
2843 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2844 create_output_operand (&ops[0], target, mode);
2845 create_input_operand (&ops[1], rtx_op1, mode);
2846 create_input_operand (&ops[2], rtx_op2, mode);
2847 create_input_operand (&ops[3], mask, mask_mode);
2848 expand_insn (icode, 4, ops);
2849 if (!rtx_equal_p (ops[0].value, target))
2850 emit_move_insn (target, ops[0].value);
2851 }
2852
2853 /* Expand VEC_SET internal functions. */
2854
2855 static void
2856 expand_vec_set_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2857 {
2858 tree lhs = gimple_call_lhs (stmt);
2859 tree op0 = gimple_call_arg (stmt, 0);
2860 tree op1 = gimple_call_arg (stmt, 1);
2861 tree op2 = gimple_call_arg (stmt, 2);
2862 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2863 rtx src = expand_normal (op0);
2864
2865 machine_mode outermode = TYPE_MODE (TREE_TYPE (op0));
2866 scalar_mode innermode = GET_MODE_INNER (outermode);
2867
2868 rtx value = expand_normal (op1);
2869 rtx pos = expand_normal (op2);
2870
2871 class expand_operand ops[3];
2872 enum insn_code icode = optab_handler (optab, outermode);
2873
2874 if (icode != CODE_FOR_nothing)
2875 {
2876 rtx temp = gen_reg_rtx (outermode);
2877 emit_move_insn (temp, src);
2878
2879 create_fixed_operand (&ops[0], temp);
2880 create_input_operand (&ops[1], value, innermode);
2881 create_convert_operand_from (&ops[2], pos, TYPE_MODE (TREE_TYPE (op2)),
2882 true);
2883 if (maybe_expand_insn (icode, 3, ops))
2884 {
2885 emit_move_insn (target, temp);
2886 return;
2887 }
2888 }
2889 gcc_unreachable ();
2890 }
2891
2892 static void
2893 expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
2894 {
2895 }
2896
2897 static void
2898 expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
2899 {
2900 /* When guessing was done, the hints should be already stripped away. */
2901 gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
2902
2903 rtx target;
2904 tree lhs = gimple_call_lhs (stmt);
2905 if (lhs)
2906 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2907 else
2908 target = const0_rtx;
2909 rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
2910 if (lhs && val != target)
2911 emit_move_insn (target, val);
2912 }
2913
2914 /* IFN_VA_ARG is supposed to be expanded at pass_stdarg. So this dummy function
2915 should never be called. */
2916
2917 static void
2918 expand_VA_ARG (internal_fn, gcall *)
2919 {
2920 gcc_unreachable ();
2921 }
2922
2923 /* IFN_VEC_CONVERT is supposed to be expanded at pass_lower_vector. So this
2924 dummy function should never be called. */
2925
2926 static void
2927 expand_VEC_CONVERT (internal_fn, gcall *)
2928 {
2929 gcc_unreachable ();
2930 }
2931
2932 /* Expand the IFN_UNIQUE function according to its first argument. */
2933
2934 static void
2935 expand_UNIQUE (internal_fn, gcall *stmt)
2936 {
2937 rtx pattern = NULL_RTX;
2938 enum ifn_unique_kind kind
2939 = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));
2940
2941 switch (kind)
2942 {
2943 default:
2944 gcc_unreachable ();
2945
2946 case IFN_UNIQUE_UNSPEC:
2947 if (targetm.have_unique ())
2948 pattern = targetm.gen_unique ();
2949 break;
2950
2951 case IFN_UNIQUE_OACC_FORK:
2952 case IFN_UNIQUE_OACC_JOIN:
2953 if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
2954 {
2955 tree lhs = gimple_call_lhs (stmt);
2956 rtx target = const0_rtx;
2957
2958 if (lhs)
2959 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2960
2961 rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
2962 rtx axis = expand_normal (gimple_call_arg (stmt, 2));
2963
2964 if (kind == IFN_UNIQUE_OACC_FORK)
2965 pattern = targetm.gen_oacc_fork (target, data_dep, axis);
2966 else
2967 pattern = targetm.gen_oacc_join (target, data_dep, axis);
2968 }
2969 else
2970 gcc_unreachable ();
2971 break;
2972 }
2973
2974 if (pattern)
2975 emit_insn (pattern);
2976 }
2977
2978 /* The size of an OpenACC compute dimension. */
2979
2980 static void
2981 expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
2982 {
2983 tree lhs = gimple_call_lhs (stmt);
2984
2985 if (!lhs)
2986 return;
2987
2988 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2989 if (targetm.have_oacc_dim_size ())
2990 {
2991 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2992 VOIDmode, EXPAND_NORMAL);
2993 emit_insn (targetm.gen_oacc_dim_size (target, dim));
2994 }
2995 else
2996 emit_move_insn (target, GEN_INT (1));
2997 }
2998
2999 /* The position of an OpenACC execution engine along one compute axis. */
3000
3001 static void
3002 expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
3003 {
3004 tree lhs = gimple_call_lhs (stmt);
3005
3006 if (!lhs)
3007 return;
3008
3009 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3010 if (targetm.have_oacc_dim_pos ())
3011 {
3012 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
3013 VOIDmode, EXPAND_NORMAL);
3014 emit_insn (targetm.gen_oacc_dim_pos (target, dim));
3015 }
3016 else
3017 emit_move_insn (target, const0_rtx);
3018 }
3019
3020 /* This is expanded by oacc_device_lower pass. */
3021
3022 static void
3023 expand_GOACC_LOOP (internal_fn, gcall *)
3024 {
3025 gcc_unreachable ();
3026 }
3027
3028 /* This is expanded by oacc_device_lower pass. */
3029
3030 static void
3031 expand_GOACC_REDUCTION (internal_fn, gcall *)
3032 {
3033 gcc_unreachable ();
3034 }
3035
3036 /* This is expanded by oacc_device_lower pass. */
3037
3038 static void
3039 expand_GOACC_TILE (internal_fn, gcall *)
3040 {
3041 gcc_unreachable ();
3042 }
3043
3044 /* Set errno to EDOM. */
3045
3046 static void
3047 expand_SET_EDOM (internal_fn, gcall *)
3048 {
3049 #ifdef TARGET_EDOM
3050 #ifdef GEN_ERRNO_RTX
3051 rtx errno_rtx = GEN_ERRNO_RTX;
3052 #else
3053 rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
3054 #endif
3055 emit_move_insn (errno_rtx,
3056 gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
3057 #else
3058 gcc_unreachable ();
3059 #endif
3060 }
3061
3062 /* Expand atomic bit test and set. */
3063
3064 static void
3065 expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
3066 {
3067 expand_ifn_atomic_bit_test_and (call);
3068 }
3069
3070 /* Expand atomic bit test and complement. */
3071
3072 static void
3073 expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
3074 {
3075 expand_ifn_atomic_bit_test_and (call);
3076 }
3077
3078 /* Expand atomic bit test and reset. */
3079
3080 static void
3081 expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
3082 {
3083 expand_ifn_atomic_bit_test_and (call);
3084 }
3085
3086 /* Expand atomic compare and exchange. */
3087
3088 static void
3089 expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
3090 {
3091 expand_ifn_atomic_compare_exchange (call);
3092 }
3093
3094 /* Expand LAUNDER to assignment, lhs = arg0. */
3095
3096 static void
3097 expand_LAUNDER (internal_fn, gcall *call)
3098 {
3099 tree lhs = gimple_call_lhs (call);
3100
3101 if (!lhs)
3102 return;
3103
3104 expand_assignment (lhs, gimple_call_arg (call, 0), false);
3105 }
3106
3107 /* Expand {MASK_,}SCATTER_STORE{S,U} call CALL using optab OPTAB. */
3108
3109 static void
3110 expand_scatter_store_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
3111 {
3112 internal_fn ifn = gimple_call_internal_fn (stmt);
3113 int rhs_index = internal_fn_stored_value_index (ifn);
3114 int mask_index = internal_fn_mask_index (ifn);
3115 tree base = gimple_call_arg (stmt, 0);
3116 tree offset = gimple_call_arg (stmt, 1);
3117 tree scale = gimple_call_arg (stmt, 2);
3118 tree rhs = gimple_call_arg (stmt, rhs_index);
3119
3120 rtx base_rtx = expand_normal (base);
3121 rtx offset_rtx = expand_normal (offset);
3122 HOST_WIDE_INT scale_int = tree_to_shwi (scale);
3123 rtx rhs_rtx = expand_normal (rhs);
3124
3125 class expand_operand ops[6];
3126 int i = 0;
3127 create_address_operand (&ops[i++], base_rtx);
3128 create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
3129 create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
3130 create_integer_operand (&ops[i++], scale_int);
3131 create_input_operand (&ops[i++], rhs_rtx, TYPE_MODE (TREE_TYPE (rhs)));
3132 if (mask_index >= 0)
3133 {
3134 tree mask = gimple_call_arg (stmt, mask_index);
3135 rtx mask_rtx = expand_normal (mask);
3136 create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
3137 }
3138
3139 insn_code icode = convert_optab_handler (optab, TYPE_MODE (TREE_TYPE (rhs)),
3140 TYPE_MODE (TREE_TYPE (offset)));
3141 expand_insn (icode, i, ops);
3142 }
3143
3144 /* Expand {MASK_,}GATHER_LOAD call CALL using optab OPTAB. */
3145
3146 static void
3147 expand_gather_load_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
3148 {
3149 tree lhs = gimple_call_lhs (stmt);
3150 tree base = gimple_call_arg (stmt, 0);
3151 tree offset = gimple_call_arg (stmt, 1);
3152 tree scale = gimple_call_arg (stmt, 2);
3153
3154 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3155 rtx base_rtx = expand_normal (base);
3156 rtx offset_rtx = expand_normal (offset);
3157 HOST_WIDE_INT scale_int = tree_to_shwi (scale);
3158
3159 int i = 0;
3160 class expand_operand ops[6];
3161 create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
3162 create_address_operand (&ops[i++], base_rtx);
3163 create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
3164 create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
3165 create_integer_operand (&ops[i++], scale_int);
3166 if (optab == mask_gather_load_optab)
3167 {
3168 tree mask = gimple_call_arg (stmt, 4);
3169 rtx mask_rtx = expand_normal (mask);
3170 create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
3171 }
3172 insn_code icode = convert_optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)),
3173 TYPE_MODE (TREE_TYPE (offset)));
3174 expand_insn (icode, i, ops);
3175 if (!rtx_equal_p (lhs_rtx, ops[0].value))
3176 emit_move_insn (lhs_rtx, ops[0].value);
3177 }
3178
3179 /* Helper for expand_DIVMOD. Return true if the sequence starting with
3180 INSN contains any call insns or insns with {,U}{DIV,MOD} rtxes. */
3181
3182 static bool
3183 contains_call_div_mod (rtx_insn *insn)
3184 {
3185 subrtx_iterator::array_type array;
3186 for (; insn; insn = NEXT_INSN (insn))
3187 if (CALL_P (insn))
3188 return true;
3189 else if (INSN_P (insn))
3190 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
3191 switch (GET_CODE (*iter))
3192 {
3193 case CALL:
3194 case DIV:
3195 case UDIV:
3196 case MOD:
3197 case UMOD:
3198 return true;
3199 default:
3200 break;
3201 }
3202 return false;
3203 }
3204
3205 /* Expand DIVMOD() using:
3206 a) the optab handler for udivmod/sdivmod if it is available.
3207 b) If the optab handler doesn't exist, generate a call to the
3208 target-specific divmod libfunc. */
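/* (E.g., illustratively, a division and a modulo by the same operands
   can be combined into one DIVMOD call whose COMPLEX_EXPR result holds
   the quotient in the real part and the remainder in the imaginary
   part, as wrapped up at the end of this function.)  */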
3209
3210 static void
3211 expand_DIVMOD (internal_fn, gcall *call_stmt)
3212 {
3213 tree lhs = gimple_call_lhs (call_stmt);
3214 tree arg0 = gimple_call_arg (call_stmt, 0);
3215 tree arg1 = gimple_call_arg (call_stmt, 1);
3216
3217 gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
3218 tree type = TREE_TYPE (TREE_TYPE (lhs));
3219 machine_mode mode = TYPE_MODE (type);
3220 bool unsignedp = TYPE_UNSIGNED (type);
3221 optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;
3222
3223 rtx op0 = expand_normal (arg0);
3224 rtx op1 = expand_normal (arg1);
3225 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3226
3227 rtx quotient = NULL_RTX, remainder = NULL_RTX;
3228 rtx_insn *insns = NULL;
3229
3230 if (TREE_CODE (arg1) == INTEGER_CST)
3231 {
3232 /* For DIVMOD by integral constants, there could be efficient code
3233 expanded inline e.g. using shifts and plus/minus. Try to expand
3234 the division and modulo and if it emits any library calls or any
3235 {,U}{DIV,MOD} rtxes throw it away and use a divmod optab or
3236 divmod libcall. */
3237 scalar_int_mode int_mode;
3238 if (remainder == NULL_RTX
3239 && optimize
3240 && CONST_INT_P (op1)
3241 && !pow2p_hwi (INTVAL (op1))
3242 && is_int_mode (TYPE_MODE (type), &int_mode)
3243 && GET_MODE_SIZE (int_mode) == 2 * UNITS_PER_WORD
3244 && optab_handler (and_optab, word_mode) != CODE_FOR_nothing
3245 && optab_handler (add_optab, word_mode) != CODE_FOR_nothing
3246 && optimize_insn_for_speed_p ())
3247 {
3248 rtx_insn *last = get_last_insn ();
3249 remainder = NULL_RTX;
3250 quotient = expand_doubleword_divmod (int_mode, op0, op1, &remainder,
3251 TYPE_UNSIGNED (type));
3252 if (quotient != NULL_RTX)
3253 {
3254 if (optab_handler (mov_optab, int_mode) != CODE_FOR_nothing)
3255 {
3256 rtx_insn *move = emit_move_insn (quotient, quotient);
3257 set_dst_reg_note (move, REG_EQUAL,
3258 gen_rtx_fmt_ee (TYPE_UNSIGNED (type)
3259 ? UDIV : DIV, int_mode,
3260 copy_rtx (op0), op1),
3261 quotient);
3262 move = emit_move_insn (remainder, remainder);
3263 set_dst_reg_note (move, REG_EQUAL,
3264 gen_rtx_fmt_ee (TYPE_UNSIGNED (type)
3265 ? UMOD : MOD, int_mode,
3266 copy_rtx (op0), op1),
3267 quotient);
3268 }
3269 }
3270 else
3271 delete_insns_since (last);
3272 }
3273
3274 if (remainder == NULL_RTX)
3275 {
3276 struct separate_ops ops;
3277 ops.code = TRUNC_DIV_EXPR;
3278 ops.type = type;
3279 ops.op0 = make_tree (ops.type, op0);
3280 ops.op1 = arg1;
3281 ops.op2 = NULL_TREE;
3282 ops.location = gimple_location (call_stmt);
3283 start_sequence ();
3284 quotient = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
3285 if (contains_call_div_mod (get_insns ()))
3286 quotient = NULL_RTX;
3287 else
3288 {
3289 ops.code = TRUNC_MOD_EXPR;
3290 remainder = expand_expr_real_2 (&ops, NULL_RTX, mode,
3291 EXPAND_NORMAL);
3292 if (contains_call_div_mod (get_insns ()))
3293 remainder = NULL_RTX;
3294 }
3295 if (remainder)
3296 insns = get_insns ();
3297 end_sequence ();
3298 }
3299 }
3300
3301 if (remainder)
3302 emit_insn (insns);
3303
3304 /* Check if optab_handler exists for divmod_optab for given mode. */
3305 else if (optab_handler (tab, mode) != CODE_FOR_nothing)
3306 {
3307 quotient = gen_reg_rtx (mode);
3308 remainder = gen_reg_rtx (mode);
3309 expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
3310 }
3311
3312 /* Generate call to divmod libfunc if it exists. */
3313 else if (rtx libfunc = optab_libfunc (tab, mode))
3314 targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
3315 &quotient, &remainder);
3316
3317 else
3318 gcc_unreachable ();
3319
3320 /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR. */
3321 expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
3322 make_tree (TREE_TYPE (arg0), quotient),
3323 make_tree (TREE_TYPE (arg1), remainder)),
3324 target, VOIDmode, EXPAND_NORMAL);
3325 }
3326
3327 /* Expand a NOP. */
3328
3329 static void
3330 expand_NOP (internal_fn, gcall *)
3331 {
3332 /* Nothing. But it shouldn't really prevail. */
3333 }
3334
3335 /* Coroutines, all should have been processed at this stage. */
3336
3337 static void
3338 expand_CO_FRAME (internal_fn, gcall *)
3339 {
3340 gcc_unreachable ();
3341 }
3342
3343 static void
3344 expand_CO_YIELD (internal_fn, gcall *)
3345 {
3346 gcc_unreachable ();
3347 }
3348
3349 static void
3350 expand_CO_SUSPN (internal_fn, gcall *)
3351 {
3352 gcc_unreachable ();
3353 }
3354
3355 static void
3356 expand_CO_ACTOR (internal_fn, gcall *)
3357 {
3358 gcc_unreachable ();
3359 }
3360
3361 /* Expand a call to FN using the operands in STMT. FN has a single
3362 output operand and NARGS input operands. */
3363
3364 static void
3365 expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
3366 unsigned int nargs)
3367 {
3368 expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);
3369
3370 tree_pair types = direct_internal_fn_types (fn, stmt);
3371 insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));
3372 gcc_assert (icode != CODE_FOR_nothing);
3373
3374 tree lhs = gimple_call_lhs (stmt);
3375 rtx lhs_rtx = NULL_RTX;
3376 if (lhs)
3377 lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3378
3379 /* Do not assign directly to a promoted subreg, since there is no
3380 guarantee that the instruction will leave the upper bits of the
3381 register in the state required by SUBREG_PROMOTED_SIGN. */
3382 rtx dest = lhs_rtx;
3383 if (dest && GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
3384 dest = NULL_RTX;
3385
3386 create_output_operand (&ops[0], dest, insn_data[icode].operand[0].mode);
3387
3388 for (unsigned int i = 0; i < nargs; ++i)
3389 {
3390 tree rhs = gimple_call_arg (stmt, i);
3391 tree rhs_type = TREE_TYPE (rhs);
3392 rtx rhs_rtx = expand_normal (rhs);
3393 if (INTEGRAL_TYPE_P (rhs_type))
3394 create_convert_operand_from (&ops[i + 1], rhs_rtx,
3395 TYPE_MODE (rhs_type),
3396 TYPE_UNSIGNED (rhs_type));
3397 else
3398 create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
3399 }
3400
3401 expand_insn (icode, nargs + 1, ops);
3402 if (lhs_rtx && !rtx_equal_p (lhs_rtx, ops[0].value))
3403 {
3404 /* If the return value has an integral type, convert the instruction
3405 result to that type. This is useful for things that return an
3406 int regardless of the size of the input. If the instruction result
3407 is smaller than required, assume that it is signed.
3408
3409 If the return value has a nonintegral type, its mode must match
3410 the instruction result. */
3411 if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
3412 {
3413 /* If this is a scalar in a register that is stored in a wider
3414 mode than the declared mode, compute the result into its
3415 declared mode and then convert to the wider mode. */
3416 gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
3417 rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
3418 convert_move (SUBREG_REG (lhs_rtx), tmp,
3419 SUBREG_PROMOTED_SIGN (lhs_rtx));
3420 }
3421 else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
3422 emit_move_insn (lhs_rtx, ops[0].value);
3423 else
3424 {
3425 gcc_checking_assert (INTEGRAL_TYPE_P (TREE_TYPE (lhs)));
3426 convert_move (lhs_rtx, ops[0].value, 0);
3427 }
3428 }
3429 }
3430
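/* Illustrative sketch (hypothetical internal function .FOO, not part of
   GCC): for a direct unary internal function call such as

       int _1 = .FOO (x_2);

   the routine above creates one output operand in the instruction's
   result mode, converts each integral argument from its TYPE_MODE,
   emits the instruction, and finally converts the result back to the
   mode of _1 if the two modes differ.  This describes only the common
   path; promoted-subreg destinations are handled separately above.  */
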
3431 /* Expand WHILE_ULT call STMT using optab OPTAB. */
3432
3433 static void
3434 expand_while_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
3435 {
3436 expand_operand ops[3];
3437 tree rhs_type[2];
3438
3439 tree lhs = gimple_call_lhs (stmt);
3440 tree lhs_type = TREE_TYPE (lhs);
3441 rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
3442 create_output_operand (&ops[0], lhs_rtx, TYPE_MODE (lhs_type));
3443
3444 for (unsigned int i = 0; i < 2; ++i)
3445 {
3446 tree rhs = gimple_call_arg (stmt, i);
3447 rhs_type[i] = TREE_TYPE (rhs);
3448 rtx rhs_rtx = expand_normal (rhs);
3449 create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type[i]));
3450 }
3451
3452 insn_code icode = convert_optab_handler (optab, TYPE_MODE (rhs_type[0]),
3453 TYPE_MODE (lhs_type));
3454
3455 expand_insn (icode, 3, ops);
3456 if (!rtx_equal_p (lhs_rtx, ops[0].value))
3457 emit_move_insn (lhs_rtx, ops[0].value);
3458 }
3459
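/* Illustrative note (a reminder of the while_ult semantics, not a
   definition): for a call such as

       mask_1 = .WHILE_ULT (i_2, n_3);

   element J of mask_1 is true iff i_2 + J < n_3.  The optab is keyed
   on both the scalar argument mode and the mask mode of the lhs,
   which is why convert_optab_handler is used above rather than
   direct_optab_handler.  */
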
3460 /* Expanders for optabs that can use expand_direct_optab_fn. */
3461
3462 #define expand_unary_optab_fn(FN, STMT, OPTAB) \
3463 expand_direct_optab_fn (FN, STMT, OPTAB, 1)
3464
3465 #define expand_binary_optab_fn(FN, STMT, OPTAB) \
3466 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
3467
3468 #define expand_ternary_optab_fn(FN, STMT, OPTAB) \
3469 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3470
3471 #define expand_cond_unary_optab_fn(FN, STMT, OPTAB) \
3472 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3473
3474 #define expand_cond_binary_optab_fn(FN, STMT, OPTAB) \
3475 expand_direct_optab_fn (FN, STMT, OPTAB, 4)
3476
3477 #define expand_cond_ternary_optab_fn(FN, STMT, OPTAB) \
3478 expand_direct_optab_fn (FN, STMT, OPTAB, 5)
3479
3480 #define expand_fold_extract_optab_fn(FN, STMT, OPTAB) \
3481 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3482
3483 #define expand_fold_left_optab_fn(FN, STMT, OPTAB) \
3484 expand_direct_optab_fn (FN, STMT, OPTAB, 2)
3485
3486 #define expand_mask_fold_left_optab_fn(FN, STMT, OPTAB) \
3487 expand_direct_optab_fn (FN, STMT, OPTAB, 3)
3488
3489 #define expand_check_ptrs_optab_fn(FN, STMT, OPTAB) \
3490 expand_direct_optab_fn (FN, STMT, OPTAB, 4)
3491
3492 /* RETURN_TYPE and ARGS are a return type and argument list that are
3493 in principle compatible with FN (which satisfies direct_internal_fn_p).
3494 Return the types that should be used to determine whether the
3495 target supports FN. */
3496
3497 tree_pair
3498 direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
3499 {
3500 const direct_internal_fn_info &info = direct_internal_fn (fn);
3501 tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
3502 tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
3503 return tree_pair (type0, type1);
3504 }
3505
3506 /* CALL is a call whose return type and arguments are in principle
3507 compatible with FN (which satisfies direct_internal_fn_p). Return the
3508 types that should be used to determine whether the target supports FN. */
3509
3510 tree_pair
3511 direct_internal_fn_types (internal_fn fn, gcall *call)
3512 {
3513 const direct_internal_fn_info &info = direct_internal_fn (fn);
3514 tree op0 = (info.type0 < 0
3515 ? gimple_call_lhs (call)
3516 : gimple_call_arg (call, info.type0));
3517 tree op1 = (info.type1 < 0
3518 ? gimple_call_lhs (call)
3519 : gimple_call_arg (call, info.type1));
3520 return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
3521 }
3522
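/* Worked example with hypothetical index values: if FN's
   direct_internal_fn_info had type0 == -1 and type1 == 2, then for

       lhs_1 = .FN (a_2, b_3, c_4);

   both overloads above would return the pair
   (TREE_TYPE (lhs_1), TREE_TYPE (c_4)): a negative index selects the
   return type, a non-negative index selects the type of the
   corresponding argument.  */
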
3523 /* Return true if OPTAB is supported for TYPES (whose modes should be
3524 the same) when the optimization type is OPT_TYPE. Used for simple
3525 direct optabs. */
3526
3527 static bool
3528 direct_optab_supported_p (direct_optab optab, tree_pair types,
3529 optimization_type opt_type)
3530 {
3531 machine_mode mode = TYPE_MODE (types.first);
3532 gcc_checking_assert (mode == TYPE_MODE (types.second));
3533 return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
3534 }
3535
3536 /* Return true if OPTAB is supported for TYPES, where the first type
3537 is the destination and the second type is the source. Used for
3538 convert optabs. */
3539
3540 static bool
3541 convert_optab_supported_p (convert_optab optab, tree_pair types,
3542 optimization_type opt_type)
3543 {
3544 return (convert_optab_handler (optab, TYPE_MODE (types.first),
3545 TYPE_MODE (types.second), opt_type)
3546 != CODE_FOR_nothing);
3547 }
3548
3549 /* Return true if load/store lanes optab OPTAB is supported for
3550 array type TYPES.first when the optimization type is OPT_TYPE. */
3551
3552 static bool
3553 multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
3554 optimization_type opt_type)
3555 {
3556 gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
3557 machine_mode imode = TYPE_MODE (types.first);
3558 machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
3559 return (convert_optab_handler (optab, imode, vmode, opt_type)
3560 != CODE_FOR_nothing);
3561 }
3562
3563 #define direct_unary_optab_supported_p direct_optab_supported_p
3564 #define direct_binary_optab_supported_p direct_optab_supported_p
3565 #define direct_ternary_optab_supported_p direct_optab_supported_p
3566 #define direct_cond_unary_optab_supported_p direct_optab_supported_p
3567 #define direct_cond_binary_optab_supported_p direct_optab_supported_p
3568 #define direct_cond_ternary_optab_supported_p direct_optab_supported_p
3569 #define direct_mask_load_optab_supported_p convert_optab_supported_p
3570 #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
3571 #define direct_mask_load_lanes_optab_supported_p multi_vector_optab_supported_p
3572 #define direct_gather_load_optab_supported_p convert_optab_supported_p
3573 #define direct_len_load_optab_supported_p direct_optab_supported_p
3574 #define direct_mask_store_optab_supported_p convert_optab_supported_p
3575 #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
3576 #define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
3577 #define direct_vec_cond_mask_optab_supported_p convert_optab_supported_p
3578 #define direct_vec_cond_optab_supported_p convert_optab_supported_p
3579 #define direct_scatter_store_optab_supported_p convert_optab_supported_p
3580 #define direct_len_store_optab_supported_p direct_optab_supported_p
3581 #define direct_while_optab_supported_p convert_optab_supported_p
3582 #define direct_fold_extract_optab_supported_p direct_optab_supported_p
3583 #define direct_fold_left_optab_supported_p direct_optab_supported_p
3584 #define direct_mask_fold_left_optab_supported_p direct_optab_supported_p
3585 #define direct_check_ptrs_optab_supported_p direct_optab_supported_p
3586 #define direct_vec_set_optab_supported_p direct_optab_supported_p
3587
3588 /* Return the optab used by internal function FN. */
3589
3590 static optab
3591 direct_internal_fn_optab (internal_fn fn, tree_pair types)
3592 {
3593 switch (fn)
3594 {
3595 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3596 case IFN_##CODE: break;
3597 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3598 case IFN_##CODE: return OPTAB##_optab;
3599 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3600 UNSIGNED_OPTAB, TYPE) \
3601 case IFN_##CODE: return (TYPE_UNSIGNED (types.SELECTOR) \
3602 ? UNSIGNED_OPTAB ## _optab \
3603 : SIGNED_OPTAB ## _optab);
3604 #include "internal-fn.def"
3605
3606 case IFN_LAST:
3607 break;
3608 }
3609 gcc_unreachable ();
3610 }
3611
3612 /* Return the optab used by internal function FN. */
3613
3614 static optab
3615 direct_internal_fn_optab (internal_fn fn)
3616 {
3617 switch (fn)
3618 {
3619 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3620 case IFN_##CODE: break;
3621 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3622 case IFN_##CODE: return OPTAB##_optab;
3623 #include "internal-fn.def"
3624
3625 case IFN_LAST:
3626 break;
3627 }
3628 gcc_unreachable ();
3629 }
3630
3631 /* Return true if FN is supported for the types in TYPES when the
3632 optimization type is OPT_TYPE. The types are those associated with
3633 the "type0" and "type1" fields of FN's direct_internal_fn_info
3634 structure. */
3635
3636 bool
3637 direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
3638 optimization_type opt_type)
3639 {
3640 switch (fn)
3641 {
3642 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
3643 case IFN_##CODE: break;
3644 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3645 case IFN_##CODE: \
3646 return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
3647 opt_type);
3648 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3649 UNSIGNED_OPTAB, TYPE) \
3650 case IFN_##CODE: \
3651 { \
3652 optab which_optab = (TYPE_UNSIGNED (types.SELECTOR) \
3653 ? UNSIGNED_OPTAB ## _optab \
3654 : SIGNED_OPTAB ## _optab); \
3655 return direct_##TYPE##_optab_supported_p (which_optab, types, \
3656 opt_type); \
3657 }
3658 #include "internal-fn.def"
3659
3660 case IFN_LAST:
3661 break;
3662 }
3663 gcc_unreachable ();
3664 }
3665
3666 /* Return true if FN is supported for type TYPE when the optimization
3667 type is OPT_TYPE. The caller knows that the "type0" and "type1"
3668 fields of FN's direct_internal_fn_info structure are the same. */
3669
3670 bool
3671 direct_internal_fn_supported_p (internal_fn fn, tree type,
3672 optimization_type opt_type)
3673 {
3674 const direct_internal_fn_info &info = direct_internal_fn (fn);
3675 gcc_checking_assert (info.type0 == info.type1);
3676 return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
3677 }
3678
3679 /* Return true if the STMT is supported when the optimization type is OPT_TYPE,
3680 given that STMT is a call to a direct internal function. */
3681
3682 bool
3683 direct_internal_fn_supported_p (gcall *stmt, optimization_type opt_type)
3684 {
3685 internal_fn fn = gimple_call_internal_fn (stmt);
3686 tree_pair types = direct_internal_fn_types (fn, stmt);
3687 return direct_internal_fn_supported_p (fn, types, opt_type);
3688 }
3689
3690 /* If FN is commutative in two consecutive arguments, return the
3691 index of the first, otherwise return -1. */
3692
3693 int
3694 first_commutative_argument (internal_fn fn)
3695 {
3696 switch (fn)
3697 {
3698 case IFN_FMA:
3699 case IFN_FMS:
3700 case IFN_FNMA:
3701 case IFN_FNMS:
3702 case IFN_AVG_FLOOR:
3703 case IFN_AVG_CEIL:
3704 case IFN_MULHS:
3705 case IFN_MULHRS:
3706 case IFN_FMIN:
3707 case IFN_FMAX:
3708 return 0;
3709
3710 case IFN_COND_ADD:
3711 case IFN_COND_MUL:
3712 case IFN_COND_MIN:
3713 case IFN_COND_MAX:
3714 case IFN_COND_AND:
3715 case IFN_COND_IOR:
3716 case IFN_COND_XOR:
3717 case IFN_COND_FMA:
3718 case IFN_COND_FMS:
3719 case IFN_COND_FNMA:
3720 case IFN_COND_FNMS:
3721 return 1;
3722
3723 default:
3724 return -1;
3725 }
3726 }
3727
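/* Example of the values returned above (illustrative only):
   .FMA (a, b, c) computes a * b + c, so arguments 0 and 1 commute and
   the result is 0; .COND_ADD (cond, a, b, else) conditionally computes
   a + b, so arguments 1 and 2 commute and the result is 1.  A caller
   might use the index roughly as follows (sketch, hypothetical caller):

       int idx = first_commutative_argument (ifn);
       if (idx >= 0)
	 canonicalize_order (gimple_call_arg (call, idx),
			     gimple_call_arg (call, idx + 1));

   where canonicalize_order stands in for whatever swap logic the
   caller wants.  */
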
3728 /* Return true if IFN_SET_EDOM is supported. */
3729
3730 bool
3731 set_edom_supported_p (void)
3732 {
3733 #ifdef TARGET_EDOM
3734 return true;
3735 #else
3736 return false;
3737 #endif
3738 }
3739
3740 #define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
3741 static void \
3742 expand_##CODE (internal_fn fn, gcall *stmt) \
3743 { \
3744 expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
3745 }
3746 #define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
3747 UNSIGNED_OPTAB, TYPE) \
3748 static void \
3749 expand_##CODE (internal_fn fn, gcall *stmt) \
3750 { \
3751 tree_pair types = direct_internal_fn_types (fn, stmt); \
3752 optab which_optab = direct_internal_fn_optab (fn, types); \
3753 expand_##TYPE##_optab_fn (fn, stmt, which_optab); \
3754 }
3755 #include "internal-fn.def"
3756
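/* For illustration (hypothetical internal-fn.def entry): a line such as

       DEF_INTERNAL_OPTAB_FN (FOO, ECF_CONST, foo, binary)

   would be expanded by the first macro above into

       static void
       expand_FOO (internal_fn fn, gcall *stmt)
       {
	 expand_binary_optab_fn (fn, stmt, foo_optab);
       }

   which in turn forwards to expand_direct_optab_fn with two
   arguments.  */
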
3757 /* Routines to expand each internal function, indexed by function number.
3758 Each routine has the prototype:
3759
3760 expand_<NAME> (internal_fn fn, gcall *stmt)
3761
3762 where STMT is the statement that performs the call to function FN. */
3763 static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
3764 #define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
3765 #include "internal-fn.def"
3766 0
3767 };
3768
3769 /* Invoke T(CODE, IFN) for each conditional function IFN that maps to a
3770 tree code CODE. */
3771 #define FOR_EACH_CODE_MAPPING(T) \
3772 T (PLUS_EXPR, IFN_COND_ADD) \
3773 T (MINUS_EXPR, IFN_COND_SUB) \
3774 T (MULT_EXPR, IFN_COND_MUL) \
3775 T (TRUNC_DIV_EXPR, IFN_COND_DIV) \
3776 T (TRUNC_MOD_EXPR, IFN_COND_MOD) \
3777 T (RDIV_EXPR, IFN_COND_RDIV) \
3778 T (MIN_EXPR, IFN_COND_MIN) \
3779 T (MAX_EXPR, IFN_COND_MAX) \
3780 T (BIT_AND_EXPR, IFN_COND_AND) \
3781 T (BIT_IOR_EXPR, IFN_COND_IOR) \
3782 T (BIT_XOR_EXPR, IFN_COND_XOR) \
3783 T (LSHIFT_EXPR, IFN_COND_SHL) \
3784 T (RSHIFT_EXPR, IFN_COND_SHR)
3785
3786 /* Return a function that only performs CODE when a certain condition is met
3787 and that uses a given fallback value otherwise. For example, if CODE is
3788 a binary operation associated with conditional function FN:
3789
3790 LHS = FN (COND, A, B, ELSE)
3791
3792 is equivalent to the C expression:
3793
3794 LHS = COND ? A CODE B : ELSE;
3795
3796 operating elementwise if the operands are vectors.
3797
3798 Return IFN_LAST if no such function exists. */
3799
3800 internal_fn
3801 get_conditional_internal_fn (tree_code code)
3802 {
3803 switch (code)
3804 {
3805 #define CASE(CODE, IFN) case CODE: return IFN;
3806 FOR_EACH_CODE_MAPPING(CASE)
3807 #undef CASE
3808 default:
3809 return IFN_LAST;
3810 }
3811 }
3812
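/* Example (per the mapping above): get_conditional_internal_fn (PLUS_EXPR)
   returns IFN_COND_ADD, so

       lhs_1 = .COND_ADD (mask_2, a_3, b_4, else_5);

   computes, elementwise for vectors,

       lhs_1 = mask_2 ? a_3 + b_4 : else_5;  */
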
3813 /* If IFN implements the conditional form of a tree code, return that
3814 tree code, otherwise return ERROR_MARK. */
3815
3816 tree_code
3817 conditional_internal_fn_code (internal_fn ifn)
3818 {
3819 switch (ifn)
3820 {
3821 #define CASE(CODE, IFN) case IFN: return CODE;
3822 FOR_EACH_CODE_MAPPING(CASE)
3823 #undef CASE
3824 default:
3825 return ERROR_MARK;
3826 }
3827 }
3828
3829 /* Invoke T(IFN) for each internal function IFN that also has an
3830 IFN_COND_* form. */
3831 #define FOR_EACH_COND_FN_PAIR(T) \
3832 T (FMA) \
3833 T (FMS) \
3834 T (FNMA) \
3835 T (FNMS)
3836
3837 /* Return a function that only performs internal function FN when a
3838 certain condition is met and that uses a given fallback value otherwise.
3839 In other words, the returned function FN' is such that:
3840
3841 LHS = FN' (COND, A1, ... An, ELSE)
3842
3843 is equivalent to the C expression:
3844
3845 LHS = COND ? FN (A1, ..., An) : ELSE;
3846
3847 operating elementwise if the operands are vectors.
3848
3849 Return IFN_LAST if no such function exists. */
3850
3851 internal_fn
3852 get_conditional_internal_fn (internal_fn fn)
3853 {
3854 switch (fn)
3855 {
3856 #define CASE(NAME) case IFN_##NAME: return IFN_COND_##NAME;
3857 FOR_EACH_COND_FN_PAIR(CASE)
3858 #undef CASE
3859 default:
3860 return IFN_LAST;
3861 }
3862 }
3863
3864 /* If IFN implements the conditional form of an unconditional internal
3865 function, return that unconditional function, otherwise return IFN_LAST. */
3866
3867 internal_fn
3868 get_unconditional_internal_fn (internal_fn ifn)
3869 {
3870 switch (ifn)
3871 {
3872 #define CASE(NAME) case IFN_COND_##NAME: return IFN_##NAME;
3873 FOR_EACH_COND_FN_PAIR(CASE)
3874 #undef CASE
3875 default:
3876 return IFN_LAST;
3877 }
3878 }
3879
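/* Example (per FOR_EACH_COND_FN_PAIR above): the two functions are
   inverses for the listed pairs, e.g.

       get_conditional_internal_fn (IFN_FMA) == IFN_COND_FMA
       get_unconditional_internal_fn (IFN_COND_FMA) == IFN_FMA

   and both return IFN_LAST for functions outside those pairs.  */
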
3880 /* Return true if STMT can be interpreted as a conditional tree code
3881 operation of the form:
3882
3883 LHS = COND ? OP (RHS1, ...) : ELSE;
3884
3885 operating elementwise if the operands are vectors. This includes
3886 the case of an all-true COND, so that the operation always happens.
3887
3888 When returning true, set:
3889
3890 - *COND_OUT to the condition COND, or to NULL_TREE if the condition
3891 is known to be all-true
3892 - *CODE_OUT to the tree code
3893 - OPS[I] to operand I of *CODE_OUT
3894 - *ELSE_OUT to the fallback value ELSE, or to NULL_TREE if the
3895 condition is known to be all true. */
3896
3897 bool
3898 can_interpret_as_conditional_op_p (gimple *stmt, tree *cond_out,
3899 tree_code *code_out,
3900 tree (&ops)[3], tree *else_out)
3901 {
3902 if (gassign *assign = dyn_cast <gassign *> (stmt))
3903 {
3904 *cond_out = NULL_TREE;
3905 *code_out = gimple_assign_rhs_code (assign);
3906 ops[0] = gimple_assign_rhs1 (assign);
3907 ops[1] = gimple_assign_rhs2 (assign);
3908 ops[2] = gimple_assign_rhs3 (assign);
3909 *else_out = NULL_TREE;
3910 return true;
3911 }
3912 if (gcall *call = dyn_cast <gcall *> (stmt))
3913 if (gimple_call_internal_p (call))
3914 {
3915 internal_fn ifn = gimple_call_internal_fn (call);
3916 tree_code code = conditional_internal_fn_code (ifn);
3917 if (code != ERROR_MARK)
3918 {
3919 *cond_out = gimple_call_arg (call, 0);
3920 *code_out = code;
3921 unsigned int nops = gimple_call_num_args (call) - 2;
3922 for (unsigned int i = 0; i < 3; ++i)
3923 ops[i] = i < nops ? gimple_call_arg (call, i + 1) : NULL_TREE;
3924 *else_out = gimple_call_arg (call, nops + 1);
3925 if (integer_truep (*cond_out))
3926 {
3927 *cond_out = NULL_TREE;
3928 *else_out = NULL_TREE;
3929 }
3930 return true;
3931 }
3932 }
3933 return false;
3934 }
3935
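/* Worked example (illustrative only): for the internal call

       lhs_1 = .COND_ADD (mask_2, a_3, b_4, else_5);

   the function above returns true and sets *COND_OUT = mask_2,
   *CODE_OUT = PLUS_EXPR, OPS = { a_3, b_4, NULL_TREE } and
   *ELSE_OUT = else_5.  If mask_2 were a constant all-true mask,
   *COND_OUT and *ELSE_OUT would instead be NULL_TREE.  */
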
3936 /* Return true if IFN is some form of load from memory. */
3937
3938 bool
3939 internal_load_fn_p (internal_fn fn)
3940 {
3941 switch (fn)
3942 {
3943 case IFN_MASK_LOAD:
3944 case IFN_LOAD_LANES:
3945 case IFN_MASK_LOAD_LANES:
3946 case IFN_GATHER_LOAD:
3947 case IFN_MASK_GATHER_LOAD:
3948 case IFN_LEN_LOAD:
3949 return true;
3950
3951 default:
3952 return false;
3953 }
3954 }
3955
3956 /* Return true if IFN is some form of store to memory. */
3957
3958 bool
3959 internal_store_fn_p (internal_fn fn)
3960 {
3961 switch (fn)
3962 {
3963 case IFN_MASK_STORE:
3964 case IFN_STORE_LANES:
3965 case IFN_MASK_STORE_LANES:
3966 case IFN_SCATTER_STORE:
3967 case IFN_MASK_SCATTER_STORE:
3968 case IFN_LEN_STORE:
3969 return true;
3970
3971 default:
3972 return false;
3973 }
3974 }
3975
3976 /* Return true if IFN is some form of gather load or scatter store. */
3977
3978 bool
3979 internal_gather_scatter_fn_p (internal_fn fn)
3980 {
3981 switch (fn)
3982 {
3983 case IFN_GATHER_LOAD:
3984 case IFN_MASK_GATHER_LOAD:
3985 case IFN_SCATTER_STORE:
3986 case IFN_MASK_SCATTER_STORE:
3987 return true;
3988
3989 default:
3990 return false;
3991 }
3992 }
3993
3994 /* If FN takes a vector mask argument, return the index of that argument,
3995 otherwise return -1. */
3996
3997 int
3998 internal_fn_mask_index (internal_fn fn)
3999 {
4000 switch (fn)
4001 {
4002 case IFN_MASK_LOAD:
4003 case IFN_MASK_LOAD_LANES:
4004 case IFN_MASK_STORE:
4005 case IFN_MASK_STORE_LANES:
4006 return 2;
4007
4008 case IFN_MASK_GATHER_LOAD:
4009 case IFN_MASK_SCATTER_STORE:
4010 return 4;
4011
4012 default:
4013 return (conditional_internal_fn_code (fn) != ERROR_MARK
4014 || get_unconditional_internal_fn (fn) != IFN_LAST ? 0 : -1);
4015 }
4016 }
4017
4018 /* If FN takes a value that should be stored to memory, return the index
4019 of that argument, otherwise return -1. */
4020
4021 int
4022 internal_fn_stored_value_index (internal_fn fn)
4023 {
4024 switch (fn)
4025 {
4026 case IFN_MASK_STORE:
4027 case IFN_MASK_STORE_LANES:
4028 case IFN_SCATTER_STORE:
4029 case IFN_MASK_SCATTER_STORE:
4030 case IFN_LEN_STORE:
4031 return 3;
4032
4033 default:
4034 return -1;
4035 }
4036 }
4037
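/* Illustration of the two index functions above (the argument layout is
   shown as a reminder of the usual masked-store call, not as a
   definition): a store such as

       .MASK_STORE (ptr_1, align_2, mask_3, value_4);

   has its mask at argument index 2 and its stored value at index 3,
   matching the values returned for IFN_MASK_STORE above.  */
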
4038 /* Return true if the target supports gather load or scatter store function
4039 IFN. For loads, VECTOR_TYPE is the vector type of the load result,
4040 while for stores it is the vector type of the stored data argument.
4041 MEMORY_ELEMENT_TYPE is the type of the memory elements being loaded
4042 or stored. OFFSET_VECTOR_TYPE is the vector type that holds the
4043 offset from the shared base address of each loaded or stored element.
4044 SCALE is the amount by which these offsets should be multiplied
4045 *after* they have been extended to address width. */
4046
4047 bool
4048 internal_gather_scatter_fn_supported_p (internal_fn ifn, tree vector_type,
4049 tree memory_element_type,
4050 tree offset_vector_type, int scale)
4051 {
4052 if (!tree_int_cst_equal (TYPE_SIZE (TREE_TYPE (vector_type)),
4053 TYPE_SIZE (memory_element_type)))
4054 return false;
4055 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vector_type),
4056 TYPE_VECTOR_SUBPARTS (offset_vector_type)))
4057 return false;
4058 optab optab = direct_internal_fn_optab (ifn);
4059 insn_code icode = convert_optab_handler (optab, TYPE_MODE (vector_type),
4060 TYPE_MODE (offset_vector_type));
4061 int output_ops = internal_load_fn_p (ifn) ? 1 : 0;
4062 bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (offset_vector_type));
4063 return (icode != CODE_FOR_nothing
4064 && insn_operand_matches (icode, 2 + output_ops, GEN_INT (unsigned_p))
4065 && insn_operand_matches (icode, 3 + output_ops, GEN_INT (scale)));
4066 }
4067
4068 /* Return true if the target supports IFN_CHECK_{RAW,WAR}_PTRS function IFN
4069 for pointers of type TYPE when the accesses have LENGTH bytes and their
4070 common byte alignment is ALIGN. */
4071
4072 bool
4073 internal_check_ptrs_fn_supported_p (internal_fn ifn, tree type,
4074 poly_uint64 length, unsigned int align)
4075 {
4076 machine_mode mode = TYPE_MODE (type);
4077 optab optab = direct_internal_fn_optab (ifn);
4078 insn_code icode = direct_optab_handler (optab, mode);
4079 if (icode == CODE_FOR_nothing)
4080 return false;
4081 rtx length_rtx = immed_wide_int_const (length, mode);
4082 return (insn_operand_matches (icode, 3, length_rtx)
4083 && insn_operand_matches (icode, 4, GEN_INT (align)));
4084 }
4085
4086 /* Expand STMT as though it were a call to internal function FN. */
4087
4088 void
4089 expand_internal_call (internal_fn fn, gcall *stmt)
4090 {
4091 internal_fn_expanders[fn] (fn, stmt);
4092 }
4093
4094 /* Expand STMT, which is a call to internal function FN. */
4095
4096 void
4097 expand_internal_call (gcall *stmt)
4098 {
4099 expand_internal_call (gimple_call_internal_fn (stmt), stmt);
4100 }
4101
4102 /* If TYPE is a vector type, return true if IFN is a direct internal
4103 function that is supported for that type. If TYPE is a scalar type,
4104 return true if IFN is a direct internal function that is supported for
4105 the target's preferred vector version of TYPE. */
4106
4107 bool
4108 vectorized_internal_fn_supported_p (internal_fn ifn, tree type)
4109 {
4110 scalar_mode smode;
4111 if (!VECTOR_TYPE_P (type) && is_a <scalar_mode> (TYPE_MODE (type), &smode))
4112 {
4113 machine_mode vmode = targetm.vectorize.preferred_simd_mode (smode);
4114 if (VECTOR_MODE_P (vmode))
4115 type = build_vector_type_for_mode (type, vmode);
4116 }
4117
4118 return (VECTOR_MODE_P (TYPE_MODE (type))
4119 && direct_internal_fn_supported_p (ifn, type, OPTIMIZE_FOR_SPEED));
4120 }
4121
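/* Usage sketch (hypothetical caller): to ask whether the target could
   vectorize a square root for its preferred SIMD mode, one might write

       if (vectorized_internal_fn_supported_p (IFN_SQRT, double_type_node))
	 ...;

   passing a vector type instead queries support for exactly that
   vector type.  */
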
4122 void
4123 expand_PHI (internal_fn, gcall *)
4124 {
4125 gcc_unreachable ();
4126 }