1 /* Target-specific built-in function support for the Power architecture.
2    See also rs6000-c.cc, rs6000-gen-builtins.cc, rs6000-builtins.def, and
3 rs6000-overloads.def.
4 Note that "normal" builtins (generic math functions, etc.) are handled
5    in rs6000.cc.
6
7 Copyright (C) 2002-2023 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it
12 under the terms of the GNU General Public License as published
13 by the Free Software Foundation; either version 3, or (at your
14 option) any later version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
18 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
19 License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #define IN_TARGET_CODE 1
26
27 #include "config.h"
28 #include "system.h"
29 #include "coretypes.h"
30 #include "target.h"
31 #include "backend.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "memmodel.h"
35 #include "gimple.h"
36 #include "tm_p.h"
37 #include "optabs.h"
38 #include "recog.h"
39 #include "diagnostic-core.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "calls.h"
43 #include "varasm.h"
44 #include "explow.h"
45 #include "expr.h"
46 #include "langhooks.h"
47 #include "gimplify.h"
48 #include "gimple-iterator.h"
49 #include "gimple-fold.h"
50 #include "ssa.h"
51 #include "tree-ssa-propagate.h"
52 #include "builtins.h"
53 #include "tree-vector-builder.h"
54 #include "ppc-auxv.h"
55 #include "rs6000-internal.h"
56
57 /* Built in types. */
58 tree rs6000_builtin_types[RS6000_BTI_MAX];
59
60 /* Support targetm.vectorize.builtin_mask_for_load. */
61 tree altivec_builtin_mask_for_load;
62
63 /* **** General support functions **** */
64
65 /* Raise an error message for a builtin function that is called without the
66 appropriate target options being set. */
67
68 void
69 rs6000_invalid_builtin (enum rs6000_gen_builtins fncode)
70 {
71 size_t j = (size_t) fncode;
72 const char *name = rs6000_builtin_info[j].bifname;
73
74 switch (rs6000_builtin_info[j].enable)
75 {
76 case ENB_P5:
77 error ("%qs requires the %qs option", name, "-mcpu=power5");
78 break;
79 case ENB_P6:
80 error ("%qs requires the %qs option", name, "-mcpu=power6");
81 break;
82 case ENB_P6_64:
83 error ("%qs requires the %qs option and either the %qs or %qs option",
84 name, "-mcpu=power6", "-m64", "-mpowerpc64");
85 break;
86 case ENB_ALTIVEC:
87 error ("%qs requires the %qs option", name, "-maltivec");
88 break;
89 case ENB_CELL:
90 error ("%qs requires the %qs option", name, "-mcpu=cell");
91 break;
92 case ENB_VSX:
93 error ("%qs requires the %qs option", name, "-mvsx");
94 break;
95 case ENB_P7:
96 error ("%qs requires the %qs option", name, "-mcpu=power7");
97 break;
98 case ENB_P7_64:
99 error ("%qs requires the %qs option and either the %qs or %qs option",
100 name, "-mcpu=power7", "-m64", "-mpowerpc64");
101 break;
102 case ENB_P8:
103 error ("%qs requires the %qs option", name, "-mcpu=power8");
104 break;
105 case ENB_P8V:
106 error ("%qs requires the %qs and %qs options", name, "-mcpu=power8",
107 "-mvsx");
108 break;
109 case ENB_P9:
110 error ("%qs requires the %qs option", name, "-mcpu=power9");
111 break;
112 case ENB_P9_64:
113 error ("%qs requires the %qs option and either the %qs or %qs option",
114 name, "-mcpu=power9", "-m64", "-mpowerpc64");
115 break;
116 case ENB_P9V:
117 error ("%qs requires the %qs and %qs options", name, "-mcpu=power9",
118 "-mvsx");
119 break;
120 case ENB_IEEE128_HW:
121 error ("%qs requires quad-precision floating-point arithmetic", name);
122 break;
123 case ENB_DFP:
124 error ("%qs requires the %qs option", name, "-mhard-dfp");
125 break;
126 case ENB_CRYPTO:
127 error ("%qs requires the %qs option", name, "-mcrypto");
128 break;
129 case ENB_HTM:
130 error ("%qs requires the %qs option", name, "-mhtm");
131 break;
132 case ENB_P10:
133 error ("%qs requires the %qs option", name, "-mcpu=power10");
134 break;
135 case ENB_P10_64:
136 error ("%qs requires the %qs option and either the %qs or %qs option",
137 name, "-mcpu=power10", "-m64", "-mpowerpc64");
138 break;
139 case ENB_MMA:
140 error ("%qs requires the %qs option", name, "-mmma");
141 break;
142 default:
143 case ENB_ALWAYS:
144 gcc_unreachable ();
145 }
146 }
147
148 /* Check whether a builtin function is supported in this target
149 configuration. */
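/* For example, an ENB_P9V built-in is reported as supported only when
   TARGET_P9_VECTOR is set, while an ENB_P10_64 built-in needs both
   TARGET_POWER10 and TARGET_POWERPC64; this mirrors the diagnostics
   issued by rs6000_invalid_builtin above.  */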
150 bool
151 rs6000_builtin_is_supported (enum rs6000_gen_builtins fncode)
152 {
153 switch (rs6000_builtin_info[(size_t) fncode].enable)
154 {
155 case ENB_ALWAYS:
156 return true;
157 case ENB_P5:
158 return TARGET_POPCNTB;
159 case ENB_P6:
160 return TARGET_CMPB;
161 case ENB_P6_64:
162 return TARGET_CMPB && TARGET_POWERPC64;
163 case ENB_P7:
164 return TARGET_POPCNTD;
165 case ENB_P7_64:
166 return TARGET_POPCNTD && TARGET_POWERPC64;
167 case ENB_P8:
168 return TARGET_DIRECT_MOVE;
169 case ENB_P8V:
170 return TARGET_P8_VECTOR;
171 case ENB_P9:
172 return TARGET_MODULO;
173 case ENB_P9_64:
174 return TARGET_MODULO && TARGET_POWERPC64;
175 case ENB_P9V:
176 return TARGET_P9_VECTOR;
177 case ENB_P10:
178 return TARGET_POWER10;
179 case ENB_P10_64:
180 return TARGET_POWER10 && TARGET_POWERPC64;
181 case ENB_ALTIVEC:
182 return TARGET_ALTIVEC;
183 case ENB_VSX:
184 return TARGET_VSX;
185 case ENB_CELL:
186 return TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL;
187 case ENB_IEEE128_HW:
188 return TARGET_FLOAT128_HW;
189 case ENB_DFP:
190 return TARGET_DFP;
191 case ENB_CRYPTO:
192 return TARGET_CRYPTO;
193 case ENB_HTM:
194 return TARGET_HTM;
195 case ENB_MMA:
196 return TARGET_MMA;
197 default:
198 gcc_unreachable ();
199 }
200 gcc_unreachable ();
201 }
202
203 /* Target hook for early folding of built-ins, shamelessly stolen
204 from ia64.cc. */
205
206 tree
207 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
208 int n_args ATTRIBUTE_UNUSED,
209 tree *args ATTRIBUTE_UNUSED,
210 bool ignore ATTRIBUTE_UNUSED)
211 {
212 #ifdef SUBTARGET_FOLD_BUILTIN
213 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
214 #else
215 return NULL_TREE;
216 #endif
217 }
218
219 tree
220 rs6000_builtin_decl (unsigned code, bool /* initialize_p */)
221 {
222 rs6000_gen_builtins fcode = (rs6000_gen_builtins) code;
223
224 if (fcode >= RS6000_OVLD_MAX)
225 return error_mark_node;
226
227 return rs6000_builtin_decls[code];
228 }
229
230 /* Implement targetm.vectorize.builtin_mask_for_load. */
231 tree
232 rs6000_builtin_mask_for_load (void)
233 {
234 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
235 if ((TARGET_ALTIVEC && !TARGET_VSX)
236 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
237 return altivec_builtin_mask_for_load;
238 else
239 return 0;
240 }
241
242 /* Implement targetm.vectorize.builtin_md_vectorized_function. */
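/* For example, when the vectorizer asks about a scalar RS6000_BIF_RSQRTF
   call with V4SF input and output, this hook hands back the
   RS6000_BIF_VRSQRTFP built-in; the Power10 cases below work the same
   way, e.g. RS6000_BIF_CFUGED maps to RS6000_BIF_VCFUGED for V2DI.  */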
243
244 tree
245 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
246 tree type_in)
247 {
248 machine_mode in_mode, out_mode;
249 int in_n, out_n;
250
251 if (TARGET_DEBUG_BUILTIN)
252 fprintf (stderr,
253 "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
254 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
255 GET_MODE_NAME (TYPE_MODE (type_out)),
256 GET_MODE_NAME (TYPE_MODE (type_in)));
257
258 /* TODO: Should this be gcc_assert? */
259 if (TREE_CODE (type_out) != VECTOR_TYPE
260 || TREE_CODE (type_in) != VECTOR_TYPE)
261 return NULL_TREE;
262
263 out_mode = TYPE_MODE (TREE_TYPE (type_out));
264 out_n = TYPE_VECTOR_SUBPARTS (type_out);
265 in_mode = TYPE_MODE (TREE_TYPE (type_in));
266 in_n = TYPE_VECTOR_SUBPARTS (type_in);
267
268 enum rs6000_gen_builtins fn
269 = (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
270 switch (fn)
271 {
272 case RS6000_BIF_RSQRTF:
273 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
274 && out_mode == SFmode && out_n == 4
275 && in_mode == SFmode && in_n == 4)
276 return rs6000_builtin_decls[RS6000_BIF_VRSQRTFP];
277 break;
278 case RS6000_BIF_RSQRT:
279 if (VECTOR_UNIT_VSX_P (V2DFmode)
280 && out_mode == DFmode && out_n == 2
281 && in_mode == DFmode && in_n == 2)
282 return rs6000_builtin_decls[RS6000_BIF_RSQRT_2DF];
283 break;
284 case RS6000_BIF_RECIPF:
285 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
286 && out_mode == SFmode && out_n == 4
287 && in_mode == SFmode && in_n == 4)
288 return rs6000_builtin_decls[RS6000_BIF_VRECIPFP];
289 break;
290 case RS6000_BIF_RECIP:
291 if (VECTOR_UNIT_VSX_P (V2DFmode)
292 && out_mode == DFmode && out_n == 2
293 && in_mode == DFmode && in_n == 2)
294 return rs6000_builtin_decls[RS6000_BIF_RECIP_V2DF];
295 break;
296 default:
297 break;
298 }
299
300 machine_mode in_vmode = TYPE_MODE (type_in);
301 machine_mode out_vmode = TYPE_MODE (type_out);
302
303 /* Power10 supported vectorized built-in functions. */
304 if (TARGET_POWER10
305 && in_vmode == out_vmode
306 && VECTOR_UNIT_ALTIVEC_OR_VSX_P (in_vmode))
307 {
308 machine_mode exp_mode = DImode;
309 machine_mode exp_vmode = V2DImode;
310 enum rs6000_gen_builtins bif;
311 switch (fn)
312 {
313 case RS6000_BIF_DIVWE:
314 case RS6000_BIF_DIVWEU:
315 exp_mode = SImode;
316 exp_vmode = V4SImode;
317 if (fn == RS6000_BIF_DIVWE)
318 bif = RS6000_BIF_VDIVESW;
319 else
320 bif = RS6000_BIF_VDIVEUW;
321 break;
322 case RS6000_BIF_DIVDE:
323 case RS6000_BIF_DIVDEU:
324 if (fn == RS6000_BIF_DIVDE)
325 bif = RS6000_BIF_VDIVESD;
326 else
327 bif = RS6000_BIF_VDIVEUD;
328 break;
329 case RS6000_BIF_CFUGED:
330 bif = RS6000_BIF_VCFUGED;
331 break;
332 case RS6000_BIF_CNTLZDM:
333 bif = RS6000_BIF_VCLZDM;
334 break;
335 case RS6000_BIF_CNTTZDM:
336 bif = RS6000_BIF_VCTZDM;
337 break;
338 case RS6000_BIF_PDEPD:
339 bif = RS6000_BIF_VPDEPD;
340 break;
341 case RS6000_BIF_PEXTD:
342 bif = RS6000_BIF_VPEXTD;
343 break;
344 default:
345 return NULL_TREE;
346 }
347
348 if (in_mode == exp_mode && in_vmode == exp_vmode)
349 return rs6000_builtin_decls[bif];
350 }
351
352 return NULL_TREE;
353 }
354
355 /* Return the decl for a target-specific builtin that implements the
356    reciprocal of the function, or NULL_TREE if not available.  */
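/* For instance, when RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode) holds, the
   vector square-root built-in RS6000_BIF_XVSQRTDP is paired with the
   reciprocal-square-root estimate RS6000_BIF_RSQRT_2DF, so a division by
   a square root can be rewritten as a multiplication by the estimate.  */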
357
358 tree
359 rs6000_builtin_reciprocal (tree fndecl)
360 {
361 switch (DECL_MD_FUNCTION_CODE (fndecl))
362 {
363 case RS6000_BIF_XVSQRTDP:
364 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
365 return NULL_TREE;
366
367 return rs6000_builtin_decls[RS6000_BIF_RSQRT_2DF];
368
369 case RS6000_BIF_XVSQRTSP:
370 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
371 return NULL_TREE;
372
373 return rs6000_builtin_decls[RS6000_BIF_RSQRT_4SF];
374
375 default:
376 return NULL_TREE;
377 }
378 }
379
380 /* **** Initialization support **** */
381
382 /* Create a builtin vector type with a name, taking care not to give
383    the canonical type a name.  */
384
385 static tree
386 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
387 {
388 tree result = build_vector_type (elt_type, num_elts);
389
390 /* Copy so we don't give the canonical type a name. */
391 result = build_variant_type_copy (result);
392
393 add_builtin_type (name, result);
394
395 return result;
396 }
397
398 /* Debug utility to translate a type node to a single textual token. */
399 static
400 const char *rs6000_type_string (tree type_node)
401 {
402 if (type_node == NULL_TREE)
403 return "**NULL**";
404 else if (type_node == void_type_node)
405 return "void";
406 else if (type_node == long_integer_type_node)
407 return "long";
408 else if (type_node == long_unsigned_type_node)
409 return "ulong";
410 else if (type_node == long_long_integer_type_node)
411 return "longlong";
412 else if (type_node == long_long_unsigned_type_node)
413 return "ulonglong";
414 else if (type_node == bool_V2DI_type_node)
415 return "vbll";
416 else if (type_node == bool_V4SI_type_node)
417 return "vbi";
418 else if (type_node == bool_V8HI_type_node)
419 return "vbs";
420 else if (type_node == bool_V16QI_type_node)
421 return "vbc";
422 else if (type_node == bool_int_type_node)
423 return "bool";
424 else if (type_node == dfloat64_type_node)
425 return "_Decimal64";
426 else if (type_node == double_type_node)
427 return "double";
428 else if (type_node == intDI_type_node)
429 return "sll";
430 else if (type_node == intHI_type_node)
431 return "ss";
432 else if (type_node == ibm128_float_type_node)
433 return "__ibm128";
434 else if (type_node == ieee128_float_type_node)
435 return "__ieee128";
436 else if (type_node == opaque_V4SI_type_node)
437 return "opaque";
438 else if (POINTER_TYPE_P (type_node))
439 return "void*";
440 else if (type_node == intQI_type_node || type_node == char_type_node)
441 return "sc";
442 else if (type_node == dfloat32_type_node)
443 return "_Decimal32";
444 else if (type_node == float_type_node)
445 return "float";
446 else if (type_node == intSI_type_node || type_node == integer_type_node)
447 return "si";
448 else if (type_node == dfloat128_type_node)
449 return "_Decimal128";
450 else if (type_node == long_double_type_node)
451 return "longdouble";
452 else if (type_node == intTI_type_node)
453 return "sq";
454 else if (type_node == unsigned_intDI_type_node)
455 return "ull";
456 else if (type_node == unsigned_intHI_type_node)
457 return "us";
458 else if (type_node == unsigned_intQI_type_node)
459 return "uc";
460 else if (type_node == unsigned_intSI_type_node)
461 return "ui";
462 else if (type_node == unsigned_intTI_type_node)
463 return "uq";
464 else if (type_node == unsigned_V1TI_type_node)
465 return "vuq";
466 else if (type_node == unsigned_V2DI_type_node)
467 return "vull";
468 else if (type_node == unsigned_V4SI_type_node)
469 return "vui";
470 else if (type_node == unsigned_V8HI_type_node)
471 return "vus";
472 else if (type_node == unsigned_V16QI_type_node)
473 return "vuc";
474 else if (type_node == V16QI_type_node)
475 return "vsc";
476 else if (type_node == V1TI_type_node)
477 return "vsq";
478 else if (type_node == V2DF_type_node)
479 return "vd";
480 else if (type_node == V2DI_type_node)
481 return "vsll";
482 else if (type_node == V4SF_type_node)
483 return "vf";
484 else if (type_node == V4SI_type_node)
485 return "vsi";
486 else if (type_node == V8HI_type_node)
487 return "vss";
488 else if (type_node == pixel_V8HI_type_node)
489 return "vp";
490 else if (type_node == pcvoid_type_node)
491 return "voidc*";
492 else if (type_node == float128_type_node)
493 return "_Float128";
494 else if (type_node == vector_pair_type_node)
495 return "__vector_pair";
496 else if (type_node == vector_quad_type_node)
497 return "__vector_quad";
498
499 return "unknown";
500 }
501
502 void
503 rs6000_init_builtins (void)
504 {
505 tree tdecl;
506 tree t;
507
508 if (TARGET_DEBUG_BUILTIN)
509 fprintf (stderr, "rs6000_init_builtins%s%s\n",
510 (TARGET_ALTIVEC) ? ", altivec" : "",
511 (TARGET_VSX) ? ", vsx" : "");
512
513 V2DI_type_node = rs6000_vector_type ("__vector long long",
514 long_long_integer_type_node, 2);
515 ptr_V2DI_type_node
516 = build_pointer_type (build_qualified_type (V2DI_type_node,
517 TYPE_QUAL_CONST));
518
519 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
520 ptr_V2DF_type_node
521 = build_pointer_type (build_qualified_type (V2DF_type_node,
522 TYPE_QUAL_CONST));
523
524 V4SI_type_node = rs6000_vector_type ("__vector signed int",
525 intSI_type_node, 4);
526 ptr_V4SI_type_node
527 = build_pointer_type (build_qualified_type (V4SI_type_node,
528 TYPE_QUAL_CONST));
529
530 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
531 ptr_V4SF_type_node
532 = build_pointer_type (build_qualified_type (V4SF_type_node,
533 TYPE_QUAL_CONST));
534
535 V8HI_type_node = rs6000_vector_type ("__vector signed short",
536 intHI_type_node, 8);
537 ptr_V8HI_type_node
538 = build_pointer_type (build_qualified_type (V8HI_type_node,
539 TYPE_QUAL_CONST));
540
541 V16QI_type_node = rs6000_vector_type ("__vector signed char",
542 intQI_type_node, 16);
543 ptr_V16QI_type_node
544 = build_pointer_type (build_qualified_type (V16QI_type_node,
545 TYPE_QUAL_CONST));
546
547 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
548 unsigned_intQI_type_node, 16);
549 ptr_unsigned_V16QI_type_node
550 = build_pointer_type (build_qualified_type (unsigned_V16QI_type_node,
551 TYPE_QUAL_CONST));
552
553 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
554 unsigned_intHI_type_node, 8);
555 ptr_unsigned_V8HI_type_node
556 = build_pointer_type (build_qualified_type (unsigned_V8HI_type_node,
557 TYPE_QUAL_CONST));
558
559 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
560 unsigned_intSI_type_node, 4);
561 ptr_unsigned_V4SI_type_node
562 = build_pointer_type (build_qualified_type (unsigned_V4SI_type_node,
563 TYPE_QUAL_CONST));
564
565 unsigned_V2DI_type_node
566 = rs6000_vector_type ("__vector unsigned long long",
567 long_long_unsigned_type_node, 2);
568
569 ptr_unsigned_V2DI_type_node
570 = build_pointer_type (build_qualified_type (unsigned_V2DI_type_node,
571 TYPE_QUAL_CONST));
572
573 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
574
575 const_str_type_node
576 = build_pointer_type (build_qualified_type (char_type_node,
577 TYPE_QUAL_CONST));
578
579 /* We use V1TI mode as a special container to hold __int128_t items that
580 must live in VSX registers. */
581 if (intTI_type_node)
582 {
583 V1TI_type_node = rs6000_vector_type ("__vector __int128",
584 intTI_type_node, 1);
585 ptr_V1TI_type_node
586 = build_pointer_type (build_qualified_type (V1TI_type_node,
587 TYPE_QUAL_CONST));
588 unsigned_V1TI_type_node
589 = rs6000_vector_type ("__vector unsigned __int128",
590 unsigned_intTI_type_node, 1);
591 ptr_unsigned_V1TI_type_node
592 = build_pointer_type (build_qualified_type (unsigned_V1TI_type_node,
593 TYPE_QUAL_CONST));
594 }
595
596 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
597 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
598 'vector unsigned short'. */
599
600 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
601 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
602 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
603 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
604 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
605
606 long_integer_type_internal_node = long_integer_type_node;
607 long_unsigned_type_internal_node = long_unsigned_type_node;
608 long_long_integer_type_internal_node = long_long_integer_type_node;
609 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
610 intQI_type_internal_node = intQI_type_node;
611 uintQI_type_internal_node = unsigned_intQI_type_node;
612 intHI_type_internal_node = intHI_type_node;
613 uintHI_type_internal_node = unsigned_intHI_type_node;
614 intSI_type_internal_node = intSI_type_node;
615 uintSI_type_internal_node = unsigned_intSI_type_node;
616 intDI_type_internal_node = intDI_type_node;
617 uintDI_type_internal_node = unsigned_intDI_type_node;
618 intTI_type_internal_node = intTI_type_node;
619 uintTI_type_internal_node = unsigned_intTI_type_node;
620 float_type_internal_node = float_type_node;
621 double_type_internal_node = double_type_node;
622 long_double_type_internal_node = long_double_type_node;
623 dfloat64_type_internal_node = dfloat64_type_node;
624 dfloat128_type_internal_node = dfloat128_type_node;
625 void_type_internal_node = void_type_node;
626
627 ptr_intQI_type_node
628 = build_pointer_type (build_qualified_type (intQI_type_internal_node,
629 TYPE_QUAL_CONST));
630 ptr_uintQI_type_node
631 = build_pointer_type (build_qualified_type (uintQI_type_internal_node,
632 TYPE_QUAL_CONST));
633 ptr_intHI_type_node
634 = build_pointer_type (build_qualified_type (intHI_type_internal_node,
635 TYPE_QUAL_CONST));
636 ptr_uintHI_type_node
637 = build_pointer_type (build_qualified_type (uintHI_type_internal_node,
638 TYPE_QUAL_CONST));
639 ptr_intSI_type_node
640 = build_pointer_type (build_qualified_type (intSI_type_internal_node,
641 TYPE_QUAL_CONST));
642 ptr_uintSI_type_node
643 = build_pointer_type (build_qualified_type (uintSI_type_internal_node,
644 TYPE_QUAL_CONST));
645 ptr_intDI_type_node
646 = build_pointer_type (build_qualified_type (intDI_type_internal_node,
647 TYPE_QUAL_CONST));
648 ptr_uintDI_type_node
649 = build_pointer_type (build_qualified_type (uintDI_type_internal_node,
650 TYPE_QUAL_CONST));
651 ptr_intTI_type_node
652 = build_pointer_type (build_qualified_type (intTI_type_internal_node,
653 TYPE_QUAL_CONST));
654 ptr_uintTI_type_node
655 = build_pointer_type (build_qualified_type (uintTI_type_internal_node,
656 TYPE_QUAL_CONST));
657
658 t = build_qualified_type (long_integer_type_internal_node, TYPE_QUAL_CONST);
659 ptr_long_integer_type_node = build_pointer_type (t);
660
661 t = build_qualified_type (long_unsigned_type_internal_node, TYPE_QUAL_CONST);
662 ptr_long_unsigned_type_node = build_pointer_type (t);
663
664 ptr_float_type_node
665 = build_pointer_type (build_qualified_type (float_type_internal_node,
666 TYPE_QUAL_CONST));
667 ptr_double_type_node
668 = build_pointer_type (build_qualified_type (double_type_internal_node,
669 TYPE_QUAL_CONST));
670 ptr_long_double_type_node
671 = build_pointer_type (build_qualified_type (long_double_type_internal_node,
672 TYPE_QUAL_CONST));
673 if (dfloat64_type_node)
674 {
675 t = build_qualified_type (dfloat64_type_internal_node, TYPE_QUAL_CONST);
676 ptr_dfloat64_type_node = build_pointer_type (t);
677 }
678 else
679 ptr_dfloat64_type_node = NULL;
680
681 if (dfloat128_type_node)
682 {
683 t = build_qualified_type (dfloat128_type_internal_node, TYPE_QUAL_CONST);
684 ptr_dfloat128_type_node = build_pointer_type (t);
685 }
686 else
687 ptr_dfloat128_type_node = NULL;
688
689 t = build_qualified_type (long_long_integer_type_internal_node,
690 TYPE_QUAL_CONST);
691 ptr_long_long_integer_type_node = build_pointer_type (t);
692
693 t = build_qualified_type (long_long_unsigned_type_internal_node,
694 TYPE_QUAL_CONST);
695 ptr_long_long_unsigned_type_node = build_pointer_type (t);
696
697 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
698 IFmode is the IBM extended 128-bit format that is a pair of doubles.
699 TFmode will be either IEEE 128-bit floating point or the IBM double-double
700 format that uses a pair of doubles, depending on the switches and
701 defaults.
702
703    If we don't support either 128-bit IBM double-double or IEEE 128-bit
704    floating point, we need to make sure the type is non-zero or else the
705    self-test fails during bootstrap.
706
707 Always create __ibm128 as a separate type, even if the current long double
708 format is IBM extended double.
709
710 For IEEE 128-bit floating point, always create the type __ieee128. If the
711 user used -mfloat128, rs6000-c.cc will create a define from __float128 to
712 __ieee128. */
713 if (TARGET_LONG_DOUBLE_128 && (!TARGET_IEEEQUAD || TARGET_FLOAT128_TYPE))
714 {
715 if (!TARGET_IEEEQUAD)
716 ibm128_float_type_node = long_double_type_node;
717 else
718 {
719 ibm128_float_type_node = make_node (REAL_TYPE);
720 TYPE_PRECISION (ibm128_float_type_node) = 128;
721 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
722 layout_type (ibm128_float_type_node);
723 }
724 t = build_qualified_type (ibm128_float_type_node, TYPE_QUAL_CONST);
725 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
726 "__ibm128");
727 }
728 else
729 ibm128_float_type_node = NULL_TREE;
730
731 if (TARGET_FLOAT128_TYPE)
732 {
733 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
734 ieee128_float_type_node = long_double_type_node;
735 else
736 {
737 /* For C we only need to register the __ieee128 name for
738 it. For C++, we create a distinct type which will mangle
739 differently (u9__ieee128) vs. _Float128 (DF128_) and behave
740 backwards compatibly. */
741 if (float128t_type_node == NULL_TREE)
742 {
743 float128t_type_node = make_node (REAL_TYPE);
744 TYPE_PRECISION (float128t_type_node)
745 = TYPE_PRECISION (float128_type_node);
746 layout_type (float128t_type_node);
747 SET_TYPE_MODE (float128t_type_node,
748 TYPE_MODE (float128_type_node));
749 }
750 ieee128_float_type_node = float128t_type_node;
751 }
752 t = build_qualified_type (ieee128_float_type_node, TYPE_QUAL_CONST);
753 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
754 "__ieee128");
755 }
756 else
757 ieee128_float_type_node = NULL_TREE;
758
759 /* Vector pair and vector quad support. */
760 vector_pair_type_node = make_node (OPAQUE_TYPE);
761 SET_TYPE_MODE (vector_pair_type_node, OOmode);
762 TYPE_SIZE (vector_pair_type_node) = bitsize_int (GET_MODE_BITSIZE (OOmode));
763 TYPE_PRECISION (vector_pair_type_node) = GET_MODE_BITSIZE (OOmode);
764 TYPE_SIZE_UNIT (vector_pair_type_node) = size_int (GET_MODE_SIZE (OOmode));
765 SET_TYPE_ALIGN (vector_pair_type_node, 256);
766 TYPE_USER_ALIGN (vector_pair_type_node) = 0;
767 lang_hooks.types.register_builtin_type (vector_pair_type_node,
768 "__vector_pair");
769 t = build_qualified_type (vector_pair_type_node, TYPE_QUAL_CONST);
770 ptr_vector_pair_type_node = build_pointer_type (t);
771
772 vector_quad_type_node = make_node (OPAQUE_TYPE);
773 SET_TYPE_MODE (vector_quad_type_node, XOmode);
774 TYPE_SIZE (vector_quad_type_node) = bitsize_int (GET_MODE_BITSIZE (XOmode));
775 TYPE_PRECISION (vector_quad_type_node) = GET_MODE_BITSIZE (XOmode);
776 TYPE_SIZE_UNIT (vector_quad_type_node) = size_int (GET_MODE_SIZE (XOmode));
777 SET_TYPE_ALIGN (vector_quad_type_node, 512);
778 TYPE_USER_ALIGN (vector_quad_type_node) = 0;
779 lang_hooks.types.register_builtin_type (vector_quad_type_node,
780 "__vector_quad");
781 t = build_qualified_type (vector_quad_type_node, TYPE_QUAL_CONST);
782 ptr_vector_quad_type_node = build_pointer_type (t);
783
784 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
785 TYPE_NAME (bool_char_type_node) = tdecl;
786
787 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
788 TYPE_NAME (bool_short_type_node) = tdecl;
789
790 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
791 TYPE_NAME (bool_int_type_node) = tdecl;
792
793 tdecl = add_builtin_type ("__pixel", pixel_type_node);
794 TYPE_NAME (pixel_type_node) = tdecl;
795
796 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
797 bool_char_type_node, 16);
798 ptr_bool_V16QI_type_node
799 = build_pointer_type (build_qualified_type (bool_V16QI_type_node,
800 TYPE_QUAL_CONST));
801
802 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
803 bool_short_type_node, 8);
804 ptr_bool_V8HI_type_node
805 = build_pointer_type (build_qualified_type (bool_V8HI_type_node,
806 TYPE_QUAL_CONST));
807
808 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
809 bool_int_type_node, 4);
810 ptr_bool_V4SI_type_node
811 = build_pointer_type (build_qualified_type (bool_V4SI_type_node,
812 TYPE_QUAL_CONST));
813
814 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
815 ? "__vector __bool long"
816 : "__vector __bool long long",
817 bool_long_long_type_node, 2);
818 ptr_bool_V2DI_type_node
819 = build_pointer_type (build_qualified_type (bool_V2DI_type_node,
820 TYPE_QUAL_CONST));
821
822 bool_V1TI_type_node = rs6000_vector_type ("__vector __bool __int128",
823 intTI_type_node, 1);
824 ptr_bool_V1TI_type_node
825 = build_pointer_type (build_qualified_type (bool_V1TI_type_node,
826 TYPE_QUAL_CONST));
827
828 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
829 pixel_type_node, 8);
830 ptr_pixel_V8HI_type_node
831 = build_pointer_type (build_qualified_type (pixel_V8HI_type_node,
832 TYPE_QUAL_CONST));
833 pcvoid_type_node
834 = build_pointer_type (build_qualified_type (void_type_node,
835 TYPE_QUAL_CONST));
836
837 /* Execute the autogenerated initialization code for builtins. */
838 rs6000_init_generated_builtins ();
839
840 if (TARGET_DEBUG_BUILTIN)
841 {
842 fprintf (stderr, "\nAutogenerated built-in functions:\n\n");
843 for (int i = 1; i < (int) RS6000_BIF_MAX; i++)
844 {
845 enum rs6000_gen_builtins fn_code = (enum rs6000_gen_builtins) i;
846 if (!rs6000_builtin_is_supported (fn_code))
847 continue;
848 tree fntype = rs6000_builtin_info[i].fntype;
849 tree t = TREE_TYPE (fntype);
850 fprintf (stderr, "%s %s (", rs6000_type_string (t),
851 rs6000_builtin_info[i].bifname);
852 t = TYPE_ARG_TYPES (fntype);
853 while (t && TREE_VALUE (t) != void_type_node)
854 {
855 fprintf (stderr, "%s",
856 rs6000_type_string (TREE_VALUE (t)));
857 t = TREE_CHAIN (t);
858 if (t && TREE_VALUE (t) != void_type_node)
859 fprintf (stderr, ", ");
860 }
861 fprintf (stderr, "); %s [%4d]\n",
862 rs6000_builtin_info[i].attr_string, (int) i);
863 }
864 fprintf (stderr, "\nEnd autogenerated built-in functions.\n\n\n");
865 }
866
867 if (TARGET_XCOFF)
868 {
869 /* AIX libm provides clog as __clog. */
870 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
871 set_user_assembler_name (tdecl, "__clog");
872
873 /* When long double is 64 bit, some long double builtins of libc
874 functions (like __builtin_frexpl) must call the double version
875 (frexp) not the long double version (frexpl) that expects a 128 bit
876 argument. */
877 if (! TARGET_LONG_DOUBLE_128)
878 {
879 if ((tdecl = builtin_decl_explicit (BUILT_IN_FMODL)) != NULL_TREE)
880 set_user_assembler_name (tdecl, "fmod");
881 if ((tdecl = builtin_decl_explicit (BUILT_IN_FREXPL)) != NULL_TREE)
882 set_user_assembler_name (tdecl, "frexp");
883 if ((tdecl = builtin_decl_explicit (BUILT_IN_LDEXPL)) != NULL_TREE)
884 set_user_assembler_name (tdecl, "ldexp");
885 if ((tdecl = builtin_decl_explicit (BUILT_IN_MODFL)) != NULL_TREE)
886 set_user_assembler_name (tdecl, "modf");
887 }
888 }
889
890 altivec_builtin_mask_for_load
891 = rs6000_builtin_decls[RS6000_BIF_MASK_FOR_LOAD];
892
893 #ifdef SUBTARGET_INIT_BUILTINS
894 SUBTARGET_INIT_BUILTINS;
895 #endif
896
897 return;
898 }
899
900 /* **** GIMPLE folding support **** */
901
902 /* Helper function to handle the gimple folding of a vector compare
903 operation. This sets up true/false vectors, and uses the
904 VEC_COND_EXPR operation.
905 CODE indicates which comparison is to be made. (EQ, GT, ...).
906 TYPE indicates the type of the result.
907 Code is inserted before GSI. */
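/* Roughly, for a V4SI equality compare this builds
     temp = arg0 == arg1;
     VEC_COND_EXPR <temp, {-1,-1,-1,-1}, {0,0,0,0}>
   so lanes that compare true become all-ones and the rest become zero.  */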
908 static tree
909 fold_build_vec_cmp (tree_code code, tree type, tree arg0, tree arg1,
910 gimple_stmt_iterator *gsi)
911 {
912 tree cmp_type = truth_type_for (type);
913 tree zero_vec = build_zero_cst (type);
914 tree minus_one_vec = build_minus_one_cst (type);
915 tree temp = create_tmp_reg_or_ssa_name (cmp_type);
916 gimple *g = gimple_build_assign (temp, code, arg0, arg1);
917 gsi_insert_before (gsi, g, GSI_SAME_STMT);
918 return fold_build3 (VEC_COND_EXPR, type, temp, minus_one_vec, zero_vec);
919 }
920
921 /* Helper function to handle the in-between steps for the
922 vector compare built-ins. */
923 static void
924 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
925 {
926 tree arg0 = gimple_call_arg (stmt, 0);
927 tree arg1 = gimple_call_arg (stmt, 1);
928 tree lhs = gimple_call_lhs (stmt);
929 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1, gsi);
930 gimple *g = gimple_build_assign (lhs, cmp);
931 gimple_set_location (g, gimple_location (stmt));
932 gsi_replace (gsi, g, true);
933 }
934
935 /* Helper function to map V2DF and V4SF types to their
936 integral equivalents (V2DI and V4SI). */
937 tree map_to_integral_tree_type (tree input_tree_type)
938 {
939 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
940 return input_tree_type;
941 else
942 {
943 if (types_compatible_p (TREE_TYPE (input_tree_type),
944 TREE_TYPE (V2DF_type_node)))
945 return V2DI_type_node;
946 else if (types_compatible_p (TREE_TYPE (input_tree_type),
947 TREE_TYPE (V4SF_type_node)))
948 return V4SI_type_node;
949 else
950 gcc_unreachable ();
951 }
952 }
953
954 /* Helper function to handle the vector merge[hl] built-ins.  The
955    implementation difference between the h and l versions of this code is in
956    the values used when building the permute vector for high-word versus
957    low-word merge.  The variance is keyed off the use_high parameter.  */
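/* As an illustration, for a four-element vector the permute selector built
   below is {0, 4, 1, 5} when use_high is 0 and {2, 6, 3, 7} when use_high
   is 1, interleaving the low or high halves of the two inputs.  */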
958 static void
959 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
960 {
961 tree arg0 = gimple_call_arg (stmt, 0);
962 tree arg1 = gimple_call_arg (stmt, 1);
963 tree lhs = gimple_call_lhs (stmt);
964 tree lhs_type = TREE_TYPE (lhs);
965 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
966 int midpoint = n_elts / 2;
967 int offset = 0;
968
969 if (use_high == 1)
970 offset = midpoint;
971
972 /* The permute_type will match the lhs for integral types. For double and
973 float types, the permute type needs to map to the V2 or V4 type that
974 matches size. */
975 tree permute_type;
976 permute_type = map_to_integral_tree_type (lhs_type);
977 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
978
979 for (int i = 0; i < midpoint; i++)
980 {
981 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
982 offset + i));
983 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
984 offset + n_elts + i));
985 }
986
987 tree permute = elts.build ();
988
989 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
990 gimple_set_location (g, gimple_location (stmt));
991 gsi_replace (gsi, g, true);
992 }
993
994 /* Helper function to handle the vector merge[eo] built-ins. */
995 static void
996 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
997 {
998 tree arg0 = gimple_call_arg (stmt, 0);
999 tree arg1 = gimple_call_arg (stmt, 1);
1000 tree lhs = gimple_call_lhs (stmt);
1001 tree lhs_type = TREE_TYPE (lhs);
1002 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
1003
1004 /* The permute_type will match the lhs for integral types. For double and
1005 float types, the permute type needs to map to the V2 or V4 type that
1006 matches size. */
1007 tree permute_type;
1008 permute_type = map_to_integral_tree_type (lhs_type);
1009
1010 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
1011
1012 /* Build the permute vector. */
1013 for (int i = 0; i < n_elts / 2; i++)
1014 {
1015 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
1016 2*i + use_odd));
1017 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
1018 2*i + use_odd + n_elts));
1019 }
1020
1021 tree permute = elts.build ();
1022
1023 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
1024 gimple_set_location (g, gimple_location (stmt));
1025 gsi_replace (gsi, g, true);
1026 }
1027
1028 /* Helper function to sort out which built-ins may be valid without having
1029 a LHS. */
1030 static bool
1031 rs6000_builtin_valid_without_lhs (enum rs6000_gen_builtins fn_code,
1032 tree fndecl)
1033 {
1034 if (TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node)
1035 return true;
1036
1037 switch (fn_code)
1038 {
1039 case RS6000_BIF_STVX_V16QI:
1040 case RS6000_BIF_STVX_V8HI:
1041 case RS6000_BIF_STVX_V4SI:
1042 case RS6000_BIF_STVX_V4SF:
1043 case RS6000_BIF_STVX_V2DI:
1044 case RS6000_BIF_STVX_V2DF:
1045 case RS6000_BIF_STXVW4X_V16QI:
1046 case RS6000_BIF_STXVW4X_V8HI:
1047 case RS6000_BIF_STXVW4X_V4SF:
1048 case RS6000_BIF_STXVW4X_V4SI:
1049 case RS6000_BIF_STXVD2X_V2DF:
1050 case RS6000_BIF_STXVD2X_V2DI:
1051 return true;
1052 default:
1053 return false;
1054 }
1055 }
1056
1057 /* Expand the MMA built-ins early, so that we can convert the pass-by-reference
1058 __vector_quad arguments into pass-by-value arguments, leading to more
1059 efficient code generation. */
1060 static bool
1061 rs6000_gimple_fold_mma_builtin (gimple_stmt_iterator *gsi,
1062 rs6000_gen_builtins fn_code)
1063 {
1064 gimple *stmt = gsi_stmt (*gsi);
1065 size_t fncode = (size_t) fn_code;
1066
1067 if (!bif_is_mma (rs6000_builtin_info[fncode]))
1068 return false;
1069
1070 /* Each call that can be gimple-expanded has an associated built-in
1071 function that it will expand into. If this one doesn't, we have
1072 already expanded it! Exceptions: lxvp and stxvp. */
1073 if (rs6000_builtin_info[fncode].assoc_bif == RS6000_BIF_NONE
1074 && fncode != RS6000_BIF_LXVP
1075 && fncode != RS6000_BIF_STXVP)
1076 return false;
1077
1078 bifdata *bd = &rs6000_builtin_info[fncode];
1079 unsigned nopnds = bd->nargs;
1080 gimple_seq new_seq = NULL;
1081 gimple *new_call;
1082 tree new_decl;
1083
1084 /* Compatibility built-ins; we used to call these
1085 __builtin_mma_{dis,}assemble_pair, but now we call them
1086 __builtin_vsx_{dis,}assemble_pair. Handle the old versions. */
1087 if (fncode == RS6000_BIF_ASSEMBLE_PAIR)
1088 fncode = RS6000_BIF_ASSEMBLE_PAIR_V;
1089 else if (fncode == RS6000_BIF_DISASSEMBLE_PAIR)
1090 fncode = RS6000_BIF_DISASSEMBLE_PAIR_V;
1091
1092 if (fncode == RS6000_BIF_DISASSEMBLE_ACC
1093 || fncode == RS6000_BIF_DISASSEMBLE_PAIR_V)
1094 {
1095 /* This is an MMA disassemble built-in function. */
1096 push_gimplify_context (true);
1097 unsigned nvec = (fncode == RS6000_BIF_DISASSEMBLE_ACC) ? 4 : 2;
1098 tree dst_ptr = gimple_call_arg (stmt, 0);
1099 tree src_ptr = gimple_call_arg (stmt, 1);
1100 tree src_type = (fncode == RS6000_BIF_DISASSEMBLE_ACC)
1101 ? build_pointer_type (vector_quad_type_node)
1102 : build_pointer_type (vector_pair_type_node);
1103 if (TREE_TYPE (src_ptr) != src_type)
1104 src_ptr = build1 (NOP_EXPR, src_type, src_ptr);
1105
1106 tree src = create_tmp_reg_or_ssa_name (TREE_TYPE (src_type));
1107 gimplify_assign (src, build_simple_mem_ref (src_ptr), &new_seq);
1108
1109 /* If we are not disassembling an accumulator/pair or our destination is
1110 another accumulator/pair, then just copy the entire thing as is. */
1111 if ((fncode == RS6000_BIF_DISASSEMBLE_ACC
1112 && TREE_TYPE (TREE_TYPE (dst_ptr)) == vector_quad_type_node)
1113 || (fncode == RS6000_BIF_DISASSEMBLE_PAIR_V
1114 && TREE_TYPE (TREE_TYPE (dst_ptr)) == vector_pair_type_node))
1115 {
1116 tree dst = build_simple_mem_ref (build1 (NOP_EXPR,
1117 src_type, dst_ptr));
1118 gimplify_assign (dst, src, &new_seq);
1119 pop_gimplify_context (NULL);
1120 gsi_replace_with_seq (gsi, new_seq, true);
1121 return true;
1122 }
1123
1124 /* If we're disassembling an accumulator into a different type, we need
1125 to emit an xxmfacc instruction now, since we cannot do it later.  */
1126 if (fncode == RS6000_BIF_DISASSEMBLE_ACC)
1127 {
1128 new_decl = rs6000_builtin_decls[RS6000_BIF_XXMFACC_INTERNAL];
1129 new_call = gimple_build_call (new_decl, 1, src);
1130 src = create_tmp_reg_or_ssa_name (vector_quad_type_node);
1131 gimple_call_set_lhs (new_call, src);
1132 gimple_seq_add_stmt (&new_seq, new_call);
1133 }
1134
1135 /* Copy the accumulator/pair vector by vector. */
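      /* For an accumulator this amounts to four 16-byte stores at byte
	 offsets 0, 16, 32 and 48 from DST_PTR (two for a pair), with the
	 vector order reversed on little-endian targets.  */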
1136 new_decl
1137 = rs6000_builtin_decls[rs6000_builtin_info[fncode].assoc_bif];
1138 tree dst_type = build_pointer_type_for_mode (unsigned_V16QI_type_node,
1139 ptr_mode, true);
1140 tree dst_base = build1 (NOP_EXPR, dst_type, dst_ptr);
1141 for (unsigned i = 0; i < nvec; i++)
1142 {
1143 unsigned index = WORDS_BIG_ENDIAN ? i : nvec - 1 - i;
1144 tree dst = build2 (MEM_REF, unsigned_V16QI_type_node, dst_base,
1145 build_int_cst (dst_type, index * 16));
1146 tree dstssa = create_tmp_reg_or_ssa_name (unsigned_V16QI_type_node);
1147 new_call = gimple_build_call (new_decl, 2, src,
1148 build_int_cstu (uint16_type_node, i));
1149 gimple_call_set_lhs (new_call, dstssa);
1150 gimple_seq_add_stmt (&new_seq, new_call);
1151 gimplify_assign (dst, dstssa, &new_seq);
1152 }
1153 pop_gimplify_context (NULL);
1154 gsi_replace_with_seq (gsi, new_seq, true);
1155 return true;
1156 }
1157
1158 /* TODO: Do some factoring on these two chunks. */
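  /* The lxvp case folds a built-in of the form (offset, pointer) into an
     ordinary __vector_pair load from pointer + offset; the stxvp case
     below is the analogous store of its source operand.  */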
1159 if (fncode == RS6000_BIF_LXVP)
1160 {
1161 push_gimplify_context (true);
1162 tree offset = gimple_call_arg (stmt, 0);
1163 tree ptr = gimple_call_arg (stmt, 1);
1164 tree lhs = gimple_call_lhs (stmt);
1165 if (TREE_TYPE (TREE_TYPE (ptr)) != vector_pair_type_node)
1166 ptr = build1 (NOP_EXPR,
1167 build_pointer_type (vector_pair_type_node), ptr);
1168 tree mem = build_simple_mem_ref (build2 (POINTER_PLUS_EXPR,
1169 TREE_TYPE (ptr), ptr, offset));
1170 gimplify_assign (lhs, mem, &new_seq);
1171 pop_gimplify_context (NULL);
1172 gsi_replace_with_seq (gsi, new_seq, true);
1173 return true;
1174 }
1175
1176 if (fncode == RS6000_BIF_STXVP)
1177 {
1178 push_gimplify_context (true);
1179 tree src = gimple_call_arg (stmt, 0);
1180 tree offset = gimple_call_arg (stmt, 1);
1181 tree ptr = gimple_call_arg (stmt, 2);
1182 if (TREE_TYPE (TREE_TYPE (ptr)) != vector_pair_type_node)
1183 ptr = build1 (NOP_EXPR,
1184 build_pointer_type (vector_pair_type_node), ptr);
1185 tree mem = build_simple_mem_ref (build2 (POINTER_PLUS_EXPR,
1186 TREE_TYPE (ptr), ptr, offset));
1187 gimplify_assign (mem, src, &new_seq);
1188 pop_gimplify_context (NULL);
1189 gsi_replace_with_seq (gsi, new_seq, true);
1190 return true;
1191 }
1192
1193 /* Convert this built-in into an internal version that uses pass-by-value
1194 arguments. The internal built-in is found in the assoc_bif field. */
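  /* Sketch of the quad (accumulating) case: a call BIF (&acc, x, y) is
     rewritten roughly as
       tmp = acc;
       tmp = ASSOC_BIF (tmp, x, y);
       acc = tmp;
     so the expander only ever sees the accumulator passed by value.  */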
1195 new_decl = rs6000_builtin_decls[rs6000_builtin_info[fncode].assoc_bif];
1196 tree lhs, op[MAX_MMA_OPERANDS];
1197 tree acc = gimple_call_arg (stmt, 0);
1198 push_gimplify_context (true);
1199
1200 if (bif_is_quad (*bd))
1201 {
1202 /* This built-in has a pass-by-reference accumulator input, so load it
1203 into a temporary accumulator for use as a pass-by-value input. */
1204 op[0] = create_tmp_reg_or_ssa_name (vector_quad_type_node);
1205 for (unsigned i = 1; i < nopnds; i++)
1206 op[i] = gimple_call_arg (stmt, i);
1207 gimplify_assign (op[0], build_simple_mem_ref (acc), &new_seq);
1208 }
1209 else
1210 {
1211 /* This built-in does not use its pass-by-reference accumulator argument
1212 as an input argument, so remove it from the input list. */
1213 nopnds--;
1214 for (unsigned i = 0; i < nopnds; i++)
1215 op[i] = gimple_call_arg (stmt, i + 1);
1216 }
1217
1218 switch (nopnds)
1219 {
1220 case 0:
1221 new_call = gimple_build_call (new_decl, 0);
1222 break;
1223 case 1:
1224 new_call = gimple_build_call (new_decl, 1, op[0]);
1225 break;
1226 case 2:
1227 new_call = gimple_build_call (new_decl, 2, op[0], op[1]);
1228 break;
1229 case 3:
1230 new_call = gimple_build_call (new_decl, 3, op[0], op[1], op[2]);
1231 break;
1232 case 4:
1233 new_call = gimple_build_call (new_decl, 4, op[0], op[1], op[2], op[3]);
1234 break;
1235 case 5:
1236 new_call = gimple_build_call (new_decl, 5, op[0], op[1], op[2], op[3],
1237 op[4]);
1238 break;
1239 case 6:
1240 new_call = gimple_build_call (new_decl, 6, op[0], op[1], op[2], op[3],
1241 op[4], op[5]);
1242 break;
1243 case 7:
1244 new_call = gimple_build_call (new_decl, 7, op[0], op[1], op[2], op[3],
1245 op[4], op[5], op[6]);
1246 break;
1247 default:
1248 gcc_unreachable ();
1249 }
1250
1251 if (fncode == RS6000_BIF_BUILD_PAIR || fncode == RS6000_BIF_ASSEMBLE_PAIR_V)
1252 lhs = create_tmp_reg_or_ssa_name (vector_pair_type_node);
1253 else
1254 lhs = create_tmp_reg_or_ssa_name (vector_quad_type_node);
1255 gimple_call_set_lhs (new_call, lhs);
1256 gimple_seq_add_stmt (&new_seq, new_call);
1257 gimplify_assign (build_simple_mem_ref (acc), lhs, &new_seq);
1258 pop_gimplify_context (NULL);
1259 gsi_replace_with_seq (gsi, new_seq, true);
1260
1261 return true;
1262 }
1263
1264 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
1265 a constant, use rs6000_fold_builtin.) */
1266 bool
1267 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
1268 {
1269 gimple *stmt = gsi_stmt (*gsi);
1270 tree fndecl = gimple_call_fndecl (stmt);
1271 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
1272 enum rs6000_gen_builtins fn_code
1273 = (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
1274 tree arg0, arg1, lhs, temp;
1275 enum tree_code bcode;
1276 gimple *g;
1277
1278 /* For an unresolved overloaded builtin, return early here since there
1279 is no builtin info for it and we are unable to fold it. */
1280 if (fn_code > RS6000_OVLD_NONE)
1281 return false;
1282
1283 size_t uns_fncode = (size_t) fn_code;
1284 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
1285 const char *fn_name1 = rs6000_builtin_info[uns_fncode].bifname;
1286 const char *fn_name2 = (icode != CODE_FOR_nothing)
1287 ? get_insn_name ((int) icode)
1288 : "nothing";
1289
1290 if (TARGET_DEBUG_BUILTIN)
1291 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
1292 fn_code, fn_name1, fn_name2);
1293
1294 /* Prevent gimple folding for code that does not have a LHS, unless it is
1295 allowed per the rs6000_builtin_valid_without_lhs helper function. */
1296 if (!gimple_call_lhs (stmt)
1297 && !rs6000_builtin_valid_without_lhs (fn_code, fndecl))
1298 return false;
1299
1300 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
1301 if (!rs6000_builtin_is_supported (fn_code))
1302 return false;
1303
1304 if (rs6000_gimple_fold_mma_builtin (gsi, fn_code))
1305 return true;
1306
1307 switch (fn_code)
1308 {
1309 /* Flavors of vec_add. We deliberately don't expand
1310 RS6000_BIF_VADDUQM as it gets lowered from V1TImode to
1311 TImode, resulting in much poorer code generation. */
1312 case RS6000_BIF_VADDUBM:
1313 case RS6000_BIF_VADDUHM:
1314 case RS6000_BIF_VADDUWM:
1315 case RS6000_BIF_VADDUDM:
1316 case RS6000_BIF_VADDFP:
1317 case RS6000_BIF_XVADDDP:
1318 case RS6000_BIF_XVADDSP:
1319 bcode = PLUS_EXPR;
1320 do_binary:
1321 arg0 = gimple_call_arg (stmt, 0);
1322 arg1 = gimple_call_arg (stmt, 1);
1323 lhs = gimple_call_lhs (stmt);
1324 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
1325 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
1326 {
1327 /* Ensure the binary operation is performed in a type
1328 that wraps if it is an integral type.  */
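	  /* E.g. a vec_add on vector signed int is carried out as
	     (vector unsigned int) arg0 + (vector unsigned int) arg1 and the
	     result view-converted back, so the folding introduces no
	     signed-overflow undefined behavior.  */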
1329 gimple_seq stmts = NULL;
1330 tree type = unsigned_type_for (TREE_TYPE (lhs));
1331 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
1332 type, arg0);
1333 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
1334 type, arg1);
1335 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
1336 type, uarg0, uarg1);
1337 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1338 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
1339 build1 (VIEW_CONVERT_EXPR,
1340 TREE_TYPE (lhs), res));
1341 gsi_replace (gsi, g, true);
1342 return true;
1343 }
1344 g = gimple_build_assign (lhs, bcode, arg0, arg1);
1345 gimple_set_location (g, gimple_location (stmt));
1346 gsi_replace (gsi, g, true);
1347 return true;
1348 /* Flavors of vec_sub. We deliberately don't expand
1349 RS6000_BIF_VSUBUQM. */
1350 case RS6000_BIF_VSUBUBM:
1351 case RS6000_BIF_VSUBUHM:
1352 case RS6000_BIF_VSUBUWM:
1353 case RS6000_BIF_VSUBUDM:
1354 case RS6000_BIF_VSUBFP:
1355 case RS6000_BIF_XVSUBDP:
1356 case RS6000_BIF_XVSUBSP:
1357 bcode = MINUS_EXPR;
1358 goto do_binary;
1359 case RS6000_BIF_XVMULSP:
1360 case RS6000_BIF_XVMULDP:
1361 arg0 = gimple_call_arg (stmt, 0);
1362 arg1 = gimple_call_arg (stmt, 1);
1363 lhs = gimple_call_lhs (stmt);
1364 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
1365 gimple_set_location (g, gimple_location (stmt));
1366 gsi_replace (gsi, g, true);
1367 return true;
1368 /* Even element flavors of vec_mul (signed). */
1369 case RS6000_BIF_VMULESB:
1370 case RS6000_BIF_VMULESH:
1371 case RS6000_BIF_VMULESW:
1372 /* Even element flavors of vec_mul (unsigned). */
1373 case RS6000_BIF_VMULEUB:
1374 case RS6000_BIF_VMULEUH:
1375 case RS6000_BIF_VMULEUW:
1376 arg0 = gimple_call_arg (stmt, 0);
1377 arg1 = gimple_call_arg (stmt, 1);
1378 lhs = gimple_call_lhs (stmt);
1379 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
1380 gimple_set_location (g, gimple_location (stmt));
1381 gsi_replace (gsi, g, true);
1382 return true;
1383 /* Odd element flavors of vec_mul (signed). */
1384 case RS6000_BIF_VMULOSB:
1385 case RS6000_BIF_VMULOSH:
1386 case RS6000_BIF_VMULOSW:
1387 /* Odd element flavors of vec_mul (unsigned). */
1388 case RS6000_BIF_VMULOUB:
1389 case RS6000_BIF_VMULOUH:
1390 case RS6000_BIF_VMULOUW:
1391 arg0 = gimple_call_arg (stmt, 0);
1392 arg1 = gimple_call_arg (stmt, 1);
1393 lhs = gimple_call_lhs (stmt);
1394 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
1395 gimple_set_location (g, gimple_location (stmt));
1396 gsi_replace (gsi, g, true);
1397 return true;
1398 /* Flavors of vec_div (Integer). */
1399 case RS6000_BIF_DIV_V2DI:
1400 case RS6000_BIF_UDIV_V2DI:
1401 arg0 = gimple_call_arg (stmt, 0);
1402 arg1 = gimple_call_arg (stmt, 1);
1403 lhs = gimple_call_lhs (stmt);
1404 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
1405 gimple_set_location (g, gimple_location (stmt));
1406 gsi_replace (gsi, g, true);
1407 return true;
1408 /* Flavors of vec_div (Float). */
1409 case RS6000_BIF_XVDIVSP:
1410 case RS6000_BIF_XVDIVDP:
1411 arg0 = gimple_call_arg (stmt, 0);
1412 arg1 = gimple_call_arg (stmt, 1);
1413 lhs = gimple_call_lhs (stmt);
1414 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
1415 gimple_set_location (g, gimple_location (stmt));
1416 gsi_replace (gsi, g, true);
1417 return true;
1418 /* Flavors of vec_and. */
1419 case RS6000_BIF_VAND_V16QI_UNS:
1420 case RS6000_BIF_VAND_V16QI:
1421 case RS6000_BIF_VAND_V8HI_UNS:
1422 case RS6000_BIF_VAND_V8HI:
1423 case RS6000_BIF_VAND_V4SI_UNS:
1424 case RS6000_BIF_VAND_V4SI:
1425 case RS6000_BIF_VAND_V2DI_UNS:
1426 case RS6000_BIF_VAND_V2DI:
1427 case RS6000_BIF_VAND_V4SF:
1428 case RS6000_BIF_VAND_V2DF:
1429 arg0 = gimple_call_arg (stmt, 0);
1430 arg1 = gimple_call_arg (stmt, 1);
1431 lhs = gimple_call_lhs (stmt);
1432 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
1433 gimple_set_location (g, gimple_location (stmt));
1434 gsi_replace (gsi, g, true);
1435 return true;
1436 /* Flavors of vec_andc. */
1437 case RS6000_BIF_VANDC_V16QI_UNS:
1438 case RS6000_BIF_VANDC_V16QI:
1439 case RS6000_BIF_VANDC_V8HI_UNS:
1440 case RS6000_BIF_VANDC_V8HI:
1441 case RS6000_BIF_VANDC_V4SI_UNS:
1442 case RS6000_BIF_VANDC_V4SI:
1443 case RS6000_BIF_VANDC_V2DI_UNS:
1444 case RS6000_BIF_VANDC_V2DI:
1445 case RS6000_BIF_VANDC_V4SF:
1446 case RS6000_BIF_VANDC_V2DF:
1447 arg0 = gimple_call_arg (stmt, 0);
1448 arg1 = gimple_call_arg (stmt, 1);
1449 lhs = gimple_call_lhs (stmt);
1450 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
1451 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
1452 gimple_set_location (g, gimple_location (stmt));
1453 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1454 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
1455 gimple_set_location (g, gimple_location (stmt));
1456 gsi_replace (gsi, g, true);
1457 return true;
1458 /* Flavors of vec_nand. */
1459 case RS6000_BIF_NAND_V16QI_UNS:
1460 case RS6000_BIF_NAND_V16QI:
1461 case RS6000_BIF_NAND_V8HI_UNS:
1462 case RS6000_BIF_NAND_V8HI:
1463 case RS6000_BIF_NAND_V4SI_UNS:
1464 case RS6000_BIF_NAND_V4SI:
1465 case RS6000_BIF_NAND_V2DI_UNS:
1466 case RS6000_BIF_NAND_V2DI:
1467 case RS6000_BIF_NAND_V4SF:
1468 case RS6000_BIF_NAND_V2DF:
1469 arg0 = gimple_call_arg (stmt, 0);
1470 arg1 = gimple_call_arg (stmt, 1);
1471 lhs = gimple_call_lhs (stmt);
1472 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
1473 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
1474 gimple_set_location (g, gimple_location (stmt));
1475 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1476 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
1477 gimple_set_location (g, gimple_location (stmt));
1478 gsi_replace (gsi, g, true);
1479 return true;
1480 /* Flavors of vec_or. */
1481 case RS6000_BIF_VOR_V16QI_UNS:
1482 case RS6000_BIF_VOR_V16QI:
1483 case RS6000_BIF_VOR_V8HI_UNS:
1484 case RS6000_BIF_VOR_V8HI:
1485 case RS6000_BIF_VOR_V4SI_UNS:
1486 case RS6000_BIF_VOR_V4SI:
1487 case RS6000_BIF_VOR_V2DI_UNS:
1488 case RS6000_BIF_VOR_V2DI:
1489 case RS6000_BIF_VOR_V4SF:
1490 case RS6000_BIF_VOR_V2DF:
1491 arg0 = gimple_call_arg (stmt, 0);
1492 arg1 = gimple_call_arg (stmt, 1);
1493 lhs = gimple_call_lhs (stmt);
1494 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
1495 gimple_set_location (g, gimple_location (stmt));
1496 gsi_replace (gsi, g, true);
1497 return true;
1498 /* flavors of vec_orc. */
1499 case RS6000_BIF_ORC_V16QI_UNS:
1500 case RS6000_BIF_ORC_V16QI:
1501 case RS6000_BIF_ORC_V8HI_UNS:
1502 case RS6000_BIF_ORC_V8HI:
1503 case RS6000_BIF_ORC_V4SI_UNS:
1504 case RS6000_BIF_ORC_V4SI:
1505 case RS6000_BIF_ORC_V2DI_UNS:
1506 case RS6000_BIF_ORC_V2DI:
1507 case RS6000_BIF_ORC_V4SF:
1508 case RS6000_BIF_ORC_V2DF:
1509 arg0 = gimple_call_arg (stmt, 0);
1510 arg1 = gimple_call_arg (stmt, 1);
1511 lhs = gimple_call_lhs (stmt);
1512 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
1513 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
1514 gimple_set_location (g, gimple_location (stmt));
1515 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1516 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
1517 gimple_set_location (g, gimple_location (stmt));
1518 gsi_replace (gsi, g, true);
1519 return true;
1520 /* Flavors of vec_xor. */
1521 case RS6000_BIF_VXOR_V16QI_UNS:
1522 case RS6000_BIF_VXOR_V16QI:
1523 case RS6000_BIF_VXOR_V8HI_UNS:
1524 case RS6000_BIF_VXOR_V8HI:
1525 case RS6000_BIF_VXOR_V4SI_UNS:
1526 case RS6000_BIF_VXOR_V4SI:
1527 case RS6000_BIF_VXOR_V2DI_UNS:
1528 case RS6000_BIF_VXOR_V2DI:
1529 case RS6000_BIF_VXOR_V4SF:
1530 case RS6000_BIF_VXOR_V2DF:
1531 arg0 = gimple_call_arg (stmt, 0);
1532 arg1 = gimple_call_arg (stmt, 1);
1533 lhs = gimple_call_lhs (stmt);
1534 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
1535 gimple_set_location (g, gimple_location (stmt));
1536 gsi_replace (gsi, g, true);
1537 return true;
1538 /* Flavors of vec_nor. */
1539 case RS6000_BIF_VNOR_V16QI_UNS:
1540 case RS6000_BIF_VNOR_V16QI:
1541 case RS6000_BIF_VNOR_V8HI_UNS:
1542 case RS6000_BIF_VNOR_V8HI:
1543 case RS6000_BIF_VNOR_V4SI_UNS:
1544 case RS6000_BIF_VNOR_V4SI:
1545 case RS6000_BIF_VNOR_V2DI_UNS:
1546 case RS6000_BIF_VNOR_V2DI:
1547 case RS6000_BIF_VNOR_V4SF:
1548 case RS6000_BIF_VNOR_V2DF:
1549 arg0 = gimple_call_arg (stmt, 0);
1550 arg1 = gimple_call_arg (stmt, 1);
1551 lhs = gimple_call_lhs (stmt);
1552 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
1553 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
1554 gimple_set_location (g, gimple_location (stmt));
1555 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1556 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
1557 gimple_set_location (g, gimple_location (stmt));
1558 gsi_replace (gsi, g, true);
1559 return true;
1560 /* flavors of vec_abs. */
1561 case RS6000_BIF_ABS_V16QI:
1562 case RS6000_BIF_ABS_V8HI:
1563 case RS6000_BIF_ABS_V4SI:
1564 case RS6000_BIF_ABS_V4SF:
1565 case RS6000_BIF_ABS_V2DI:
1566 case RS6000_BIF_XVABSDP:
1567 case RS6000_BIF_XVABSSP:
1568 arg0 = gimple_call_arg (stmt, 0);
1569 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
1570 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
1571 return false;
1572 lhs = gimple_call_lhs (stmt);
1573 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
1574 gimple_set_location (g, gimple_location (stmt));
1575 gsi_replace (gsi, g, true);
1576 return true;
1577 /* flavors of vec_min. */
1578 case RS6000_BIF_XVMINDP:
1579 case RS6000_BIF_XVMINSP:
1580 case RS6000_BIF_VMINFP:
1581 {
1582 lhs = gimple_call_lhs (stmt);
1583 tree type = TREE_TYPE (lhs);
1584 if (HONOR_NANS (type))
1585 return false;
1586 gcc_fallthrough ();
1587 }
1588 case RS6000_BIF_VMINSD:
1589 case RS6000_BIF_VMINUD:
1590 case RS6000_BIF_VMINSB:
1591 case RS6000_BIF_VMINSH:
1592 case RS6000_BIF_VMINSW:
1593 case RS6000_BIF_VMINUB:
1594 case RS6000_BIF_VMINUH:
1595 case RS6000_BIF_VMINUW:
1596 arg0 = gimple_call_arg (stmt, 0);
1597 arg1 = gimple_call_arg (stmt, 1);
1598 lhs = gimple_call_lhs (stmt);
1599 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
1600 gimple_set_location (g, gimple_location (stmt));
1601 gsi_replace (gsi, g, true);
1602 return true;
1603 /* flavors of vec_max. */
1604 case RS6000_BIF_XVMAXDP:
1605 case RS6000_BIF_XVMAXSP:
1606 case RS6000_BIF_VMAXFP:
1607 {
1608 lhs = gimple_call_lhs (stmt);
1609 tree type = TREE_TYPE (lhs);
1610 if (HONOR_NANS (type))
1611 return false;
1612 gcc_fallthrough ();
1613 }
1614 case RS6000_BIF_VMAXSD:
1615 case RS6000_BIF_VMAXUD:
1616 case RS6000_BIF_VMAXSB:
1617 case RS6000_BIF_VMAXSH:
1618 case RS6000_BIF_VMAXSW:
1619 case RS6000_BIF_VMAXUB:
1620 case RS6000_BIF_VMAXUH:
1621 case RS6000_BIF_VMAXUW:
1622 arg0 = gimple_call_arg (stmt, 0);
1623 arg1 = gimple_call_arg (stmt, 1);
1624 lhs = gimple_call_lhs (stmt);
1625 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
1626 gimple_set_location (g, gimple_location (stmt));
1627 gsi_replace (gsi, g, true);
1628 return true;
1629 /* Flavors of vec_eqv. */
1630 case RS6000_BIF_EQV_V16QI:
1631 case RS6000_BIF_EQV_V8HI:
1632 case RS6000_BIF_EQV_V4SI:
1633 case RS6000_BIF_EQV_V4SF:
1634 case RS6000_BIF_EQV_V2DF:
1635 case RS6000_BIF_EQV_V2DI:
1636 arg0 = gimple_call_arg (stmt, 0);
1637 arg1 = gimple_call_arg (stmt, 1);
1638 lhs = gimple_call_lhs (stmt);
1639 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
1640 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
1641 gimple_set_location (g, gimple_location (stmt));
1642 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1643 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
1644 gimple_set_location (g, gimple_location (stmt));
1645 gsi_replace (gsi, g, true);
1646 return true;
1647 /* Flavors of vec_rotate_left. */
1648 case RS6000_BIF_VRLB:
1649 case RS6000_BIF_VRLH:
1650 case RS6000_BIF_VRLW:
1651 case RS6000_BIF_VRLD:
1652 arg0 = gimple_call_arg (stmt, 0);
1653 arg1 = gimple_call_arg (stmt, 1);
1654 lhs = gimple_call_lhs (stmt);
1655 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
1656 gimple_set_location (g, gimple_location (stmt));
1657 gsi_replace (gsi, g, true);
1658 return true;
1659 /* Flavors of vector shift right algebraic.
1660 vec_sra{b,h,w} -> vsra{b,h,w}. */
1661 case RS6000_BIF_VSRAB:
1662 case RS6000_BIF_VSRAH:
1663 case RS6000_BIF_VSRAW:
1664 case RS6000_BIF_VSRAD:
1665 {
1666 arg0 = gimple_call_arg (stmt, 0);
1667 arg1 = gimple_call_arg (stmt, 1);
1668 lhs = gimple_call_lhs (stmt);
1669 tree arg1_type = TREE_TYPE (arg1);
1670 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
1671 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
1672 location_t loc = gimple_location (stmt);
1673 /* Force arg1 into the range of valid shift counts for the arg0 type. */
1674 /* Build a vector whose elements hold the element size in bits. */
1675 int n_elts = VECTOR_CST_NELTS (arg1);
1676 tree element_size = build_int_cst (unsigned_element_type,
1677 128 / n_elts);
1678 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
1679 for (int i = 0; i < n_elts; i++)
1680 elts.safe_push (element_size);
1681 tree modulo_tree = elts.build ();
1682 /* Modulo the provided shift value against that vector. */
1683 gimple_seq stmts = NULL;
1684 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
1685 unsigned_arg1_type, arg1);
1686 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
1687 unsigned_arg1_type, unsigned_arg1,
1688 modulo_tree);
1689 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1690 /* And finally, do the shift. */
1691 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
1692 gimple_set_location (g, loc);
1693 gsi_replace (gsi, g, true);
1694 return true;
1695 }
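      /* Worked example for the fold above (descriptive only): for a V4SI
         shift such as
           vr = vec_sra (va, vb);
         there are 4 elements, so each shift count in vb is first reduced
         modulo 128 / 4 = 32 bits; a count of 33 therefore behaves like 1.
         The result is then an ordinary arithmetic RSHIFT_EXPR on va.  */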
1696 /* Flavors of vector shift left.
1697 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
1698 case RS6000_BIF_VSLB:
1699 case RS6000_BIF_VSLH:
1700 case RS6000_BIF_VSLW:
1701 case RS6000_BIF_VSLD:
1702 {
1703 location_t loc;
1704 gimple_seq stmts = NULL;
1705 arg0 = gimple_call_arg (stmt, 0);
1706 tree arg0_type = TREE_TYPE (arg0);
1707 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
1708 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
1709 return false;
1710 arg1 = gimple_call_arg (stmt, 1);
1711 tree arg1_type = TREE_TYPE (arg1);
1712 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
1713 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
1714 loc = gimple_location (stmt);
1715 lhs = gimple_call_lhs (stmt);
1716 /* Force arg1 into the range of valid shift counts for the arg0 type. */
1717 /* Build a vector whose elements hold the element size in bits. */
1718 int n_elts = VECTOR_CST_NELTS (arg1);
1719 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
1720 * BITS_PER_UNIT;
1721 tree element_size = build_int_cst (unsigned_element_type,
1722 tree_size_in_bits / n_elts);
1723 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
1724 for (int i = 0; i < n_elts; i++)
1725 elts.safe_push (element_size);
1726 tree modulo_tree = elts.build ();
1727 /* Modulo the provided shift value against that vector. */
1728 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
1729 unsigned_arg1_type, arg1);
1730 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
1731 unsigned_arg1_type, unsigned_arg1,
1732 modulo_tree);
1733 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1734 /* And finally, do the shift. */
1735 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
1736 gimple_set_location (g, gimple_location (stmt));
1737 gsi_replace (gsi, g, true);
1738 return true;
1739 }
1740 /* Flavors of vector shift right. */
1741 case RS6000_BIF_VSRB:
1742 case RS6000_BIF_VSRH:
1743 case RS6000_BIF_VSRW:
1744 case RS6000_BIF_VSRD:
1745 {
1746 arg0 = gimple_call_arg (stmt, 0);
1747 arg1 = gimple_call_arg (stmt, 1);
1748 lhs = gimple_call_lhs (stmt);
1749 tree arg1_type = TREE_TYPE (arg1);
1750 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
1751 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
1752 location_t loc = gimple_location (stmt);
1753 gimple_seq stmts = NULL;
1754 /* Convert arg0 to unsigned. */
1755 tree arg0_unsigned
1756 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
1757 unsigned_type_for (TREE_TYPE (arg0)), arg0);
1758 /* Force arg1 into the range of valid shift counts for the arg0 type. */
1759 /* Build a vector whose elements hold the element size in bits. */
1760 int n_elts = VECTOR_CST_NELTS (arg1);
1761 tree element_size = build_int_cst (unsigned_element_type,
1762 128 / n_elts);
1763 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
1764 for (int i = 0; i < n_elts; i++)
1765 elts.safe_push (element_size);
1766 tree modulo_tree = elts.build ();
1767 /* Modulo the provided shift value against that vector. */
1768 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
1769 unsigned_arg1_type, arg1);
1770 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
1771 unsigned_arg1_type, unsigned_arg1,
1772 modulo_tree);
1773 /* Do the shift. */
1774 tree res
1775 = gimple_build (&stmts, RSHIFT_EXPR,
1776 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
1777 /* Convert result back to the lhs type. */
1778 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
1779 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1780 replace_call_with_value (gsi, res);
1781 return true;
1782 }
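      /* Note on the fold above (descriptive only): unlike the vsra case,
         vec_sr must be a logical shift, so arg0 is first VIEW_CONVERTed to
         the corresponding unsigned vector type, shifted with RSHIFT_EXPR
         (which is then a zero-filling shift), and the result is converted
         back to the type of the lhs.  The shift counts are reduced modulo
         the element width exactly as in the vsra case.  */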
1783 /* Vector loads. */
1784 case RS6000_BIF_LVX_V16QI:
1785 case RS6000_BIF_LVX_V8HI:
1786 case RS6000_BIF_LVX_V4SI:
1787 case RS6000_BIF_LVX_V4SF:
1788 case RS6000_BIF_LVX_V2DI:
1789 case RS6000_BIF_LVX_V2DF:
1790 case RS6000_BIF_LVX_V1TI:
1791 {
1792 arg0 = gimple_call_arg (stmt, 0); // offset
1793 arg1 = gimple_call_arg (stmt, 1); // address
1794 lhs = gimple_call_lhs (stmt);
1795 location_t loc = gimple_location (stmt);
1796 /* Since arg1 may be cast to a different type, just use ptr_type_node
1797 here instead of trying to enforce TBAA on pointer types. */
1798 tree arg1_type = ptr_type_node;
1799 tree lhs_type = TREE_TYPE (lhs);
1800 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
1801 the tree using the value from arg0. The resulting type will match
1802 the type of arg1. */
1803 gimple_seq stmts = NULL;
1804 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
1805 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
1806 arg1_type, arg1, temp_offset);
1807 /* Mask off any lower bits from the address. */
1808 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
1809 arg1_type, temp_addr,
1810 build_int_cst (arg1_type, -16));
1811 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1812 if (!is_gimple_mem_ref_addr (aligned_addr))
1813 {
1814 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
1815 gimple *g = gimple_build_assign (t, aligned_addr);
1816 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1817 aligned_addr = t;
1818 }
1819 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
1820 take an offset, but since we've already incorporated the offset
1821 above, here we just pass in a zero. */
1822 gimple *g
1823 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
1824 build_int_cst (arg1_type, 0)));
1825 gimple_set_location (g, loc);
1826 gsi_replace (gsi, g, true);
1827 return true;
1828 }
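      /* Worked example for the fold above (descriptive only; the address
         values are hypothetical): vec_ld (8, p) with p == 0x1000 computes
         0x1000 + 8 = 0x1008, masks that with -16 to get 0x1000, and loads
         the 16-byte block at that address, matching the lvx behaviour of
         ignoring the low four bits of the effective address.  */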
1829 /* Vector stores. */
1830 case RS6000_BIF_STVX_V16QI:
1831 case RS6000_BIF_STVX_V8HI:
1832 case RS6000_BIF_STVX_V4SI:
1833 case RS6000_BIF_STVX_V4SF:
1834 case RS6000_BIF_STVX_V2DI:
1835 case RS6000_BIF_STVX_V2DF:
1836 {
1837 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
1838 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
1839 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
1840 location_t loc = gimple_location (stmt);
1841 tree arg0_type = TREE_TYPE (arg0);
1842 /* Use ptr_type_node (no TBAA) for the arg2_type.
1843 FIXME: (Richard) "A proper fix would be to transition this type as
1844 seen from the frontend to GIMPLE, for example in a similar way we
1845 do for MEM_REFs by piggy-backing that on an extra argument, a
1846 constant zero pointer of the alias pointer type to use (which would
1847 also serve as a type indicator of the store itself). I'd use a
1848 target specific internal function for this (not sure if we can have
1849 those target specific, but I guess if it's folded away then that's
1850 fine) and get away with the overload set." */
1851 tree arg2_type = ptr_type_node;
1852 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
1853 the tree using the value from arg1. The resulting type will match
1854 the type of arg2. */
1855 gimple_seq stmts = NULL;
1856 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
1857 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
1858 arg2_type, arg2, temp_offset);
1859 /* Mask off any lower bits from the address. */
1860 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
1861 arg2_type, temp_addr,
1862 build_int_cst (arg2_type, -16));
1863 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1864 if (!is_gimple_mem_ref_addr (aligned_addr))
1865 {
1866 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
1867 gimple *g = gimple_build_assign (t, aligned_addr);
1868 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1869 aligned_addr = t;
1870 }
1871 /* The desired gimple result should be similar to:
1872 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
1873 gimple *g
1874 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
1875 build_int_cst (arg2_type, 0)), arg0);
1876 gimple_set_location (g, loc);
1877 gsi_replace (gsi, g, true);
1878 return true;
1879 }
1880
1881 /* Unaligned vector loads. */
1882 case RS6000_BIF_LXVW4X_V16QI:
1883 case RS6000_BIF_LXVW4X_V8HI:
1884 case RS6000_BIF_LXVW4X_V4SF:
1885 case RS6000_BIF_LXVW4X_V4SI:
1886 case RS6000_BIF_LXVD2X_V2DF:
1887 case RS6000_BIF_LXVD2X_V2DI:
1888 {
1889 arg0 = gimple_call_arg (stmt, 0); // offset
1890 arg1 = gimple_call_arg (stmt, 1); // address
1891 lhs = gimple_call_lhs (stmt);
1892 location_t loc = gimple_location (stmt);
1893 /* Since arg1 may be cast to a different type, just use ptr_type_node
1894 here instead of trying to enforce TBAA on pointer types. */
1895 tree arg1_type = ptr_type_node;
1896 tree lhs_type = TREE_TYPE (lhs);
1897 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
1898 required alignment (power) is 4 bytes regardless of data type. */
1899 tree align_ltype = build_aligned_type (lhs_type, 4);
1900 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
1901 the tree using the value from arg0. The resulting type will match
1902 the type of arg1. */
1903 gimple_seq stmts = NULL;
1904 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
1905 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
1906 arg1_type, arg1, temp_offset);
1907 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1908 if (!is_gimple_mem_ref_addr (temp_addr))
1909 {
1910 tree t = make_ssa_name (TREE_TYPE (temp_addr));
1911 gimple *g = gimple_build_assign (t, temp_addr);
1912 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1913 temp_addr = t;
1914 }
1915 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
1916 take an offset, but since we've already incorporated the offset
1917 above, here we just pass in a zero. */
1918 gimple *g;
1919 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
1920 build_int_cst (arg1_type, 0)));
1921 gimple_set_location (g, loc);
1922 gsi_replace (gsi, g, true);
1923 return true;
1924 }
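      /* Note on the fold above (descriptive only): no address masking is
         done here; instead the MEM_REF uses a 4-byte-aligned variant of the
         lhs type (per the comment above), so that later passes do not assume
         the natural 16-byte alignment of the vector type when expanding the
         access.  */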
1925
1926 /* Unaligned vector stores. */
1927 case RS6000_BIF_STXVW4X_V16QI:
1928 case RS6000_BIF_STXVW4X_V8HI:
1929 case RS6000_BIF_STXVW4X_V4SF:
1930 case RS6000_BIF_STXVW4X_V4SI:
1931 case RS6000_BIF_STXVD2X_V2DF:
1932 case RS6000_BIF_STXVD2X_V2DI:
1933 {
1934 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
1935 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
1936 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
1937 location_t loc = gimple_location (stmt);
1938 tree arg0_type = TREE_TYPE (arg0);
1939 /* Use ptr_type_node (no TBAA) for the arg2_type. */
1940 tree arg2_type = ptr_type_node;
1941 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
1942 required alignment (power) is 4 bytes regardless of data type. */
1943 tree align_stype = build_aligned_type (arg0_type, 4);
1944 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
1945 the tree using the value from arg1. */
1946 gimple_seq stmts = NULL;
1947 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
1948 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
1949 arg2_type, arg2, temp_offset);
1950 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
1951 if (!is_gimple_mem_ref_addr (temp_addr))
1952 {
1953 tree t = make_ssa_name (TREE_TYPE (temp_addr));
1954 gimple *g = gimple_build_assign (t, temp_addr);
1955 gsi_insert_before (gsi, g, GSI_SAME_STMT);
1956 temp_addr = t;
1957 }
1958 gimple *g;
1959 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
1960 build_int_cst (arg2_type, 0)), arg0);
1961 gimple_set_location (g, loc);
1962 gsi_replace (gsi, g, true);
1963 return true;
1964 }
1965
1966 /* Vector Fused multiply-add (fma). */
1967 case RS6000_BIF_VMADDFP:
1968 case RS6000_BIF_XVMADDDP:
1969 case RS6000_BIF_XVMADDSP:
1970 case RS6000_BIF_VMLADDUHM:
1971 {
1972 arg0 = gimple_call_arg (stmt, 0);
1973 arg1 = gimple_call_arg (stmt, 1);
1974 tree arg2 = gimple_call_arg (stmt, 2);
1975 lhs = gimple_call_lhs (stmt);
1976 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
1977 gimple_call_set_lhs (g, lhs);
1978 gimple_call_set_nothrow (g, true);
1979 gimple_set_location (g, gimple_location (stmt));
1980 gsi_replace (gsi, g, true);
1981 return true;
1982 }
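      /* Illustrative sketch of the fold above: a call such as
           vr = vec_madd (va, vb, vc);
         becomes an internal-function call roughly like
           vr = .FMA (va, vb, vc);
         which the middle end can later expand to the target's fused
         multiply-add patterns.  */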
1983
1984 /* Vector compares; EQ, NE, GE, GT, LE. */
1985 case RS6000_BIF_VCMPEQUB:
1986 case RS6000_BIF_VCMPEQUH:
1987 case RS6000_BIF_VCMPEQUW:
1988 case RS6000_BIF_VCMPEQUD:
1989 case RS6000_BIF_VCMPEQUT:
1990 fold_compare_helper (gsi, EQ_EXPR, stmt);
1991 return true;
1992
1993 case RS6000_BIF_VCMPNEB:
1994 case RS6000_BIF_VCMPNEH:
1995 case RS6000_BIF_VCMPNEW:
1996 case RS6000_BIF_VCMPNET:
1997 fold_compare_helper (gsi, NE_EXPR, stmt);
1998 return true;
1999
2000 case RS6000_BIF_CMPGE_16QI:
2001 case RS6000_BIF_CMPGE_U16QI:
2002 case RS6000_BIF_CMPGE_8HI:
2003 case RS6000_BIF_CMPGE_U8HI:
2004 case RS6000_BIF_CMPGE_4SI:
2005 case RS6000_BIF_CMPGE_U4SI:
2006 case RS6000_BIF_CMPGE_2DI:
2007 case RS6000_BIF_CMPGE_U2DI:
2008 case RS6000_BIF_CMPGE_1TI:
2009 case RS6000_BIF_CMPGE_U1TI:
2010 fold_compare_helper (gsi, GE_EXPR, stmt);
2011 return true;
2012
2013 case RS6000_BIF_VCMPGTSB:
2014 case RS6000_BIF_VCMPGTUB:
2015 case RS6000_BIF_VCMPGTSH:
2016 case RS6000_BIF_VCMPGTUH:
2017 case RS6000_BIF_VCMPGTSW:
2018 case RS6000_BIF_VCMPGTUW:
2019 case RS6000_BIF_VCMPGTUD:
2020 case RS6000_BIF_VCMPGTSD:
2021 case RS6000_BIF_VCMPGTUT:
2022 case RS6000_BIF_VCMPGTST:
2023 fold_compare_helper (gsi, GT_EXPR, stmt);
2024 return true;
2025
2026 case RS6000_BIF_CMPLE_16QI:
2027 case RS6000_BIF_CMPLE_U16QI:
2028 case RS6000_BIF_CMPLE_8HI:
2029 case RS6000_BIF_CMPLE_U8HI:
2030 case RS6000_BIF_CMPLE_4SI:
2031 case RS6000_BIF_CMPLE_U4SI:
2032 case RS6000_BIF_CMPLE_2DI:
2033 case RS6000_BIF_CMPLE_U2DI:
2034 case RS6000_BIF_CMPLE_1TI:
2035 case RS6000_BIF_CMPLE_U1TI:
2036 fold_compare_helper (gsi, LE_EXPR, stmt);
2037 return true;
2038
2039 /* Flavors of vec_splat_[us]{8,16,32}. */
2040 case RS6000_BIF_VSPLTISB:
2041 case RS6000_BIF_VSPLTISH:
2042 case RS6000_BIF_VSPLTISW:
2043 {
2044 arg0 = gimple_call_arg (stmt, 0);
2045 lhs = gimple_call_lhs (stmt);
2046
2047 /* Only fold the vec_splat_*() if the lower bits of arg0 form a
2048 5-bit signed constant in the range -16 to +15. */
2049 if (TREE_CODE (arg0) != INTEGER_CST
2050 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
2051 return false;
2052 gimple_seq stmts = NULL;
2053 location_t loc = gimple_location (stmt);
2054 tree splat_value = gimple_convert (&stmts, loc,
2055 TREE_TYPE (TREE_TYPE (lhs)), arg0);
2056 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
2057 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
2058 g = gimple_build_assign (lhs, splat_tree);
2059 gimple_set_location (g, gimple_location (stmt));
2060 gsi_replace (gsi, g, true);
2061 return true;
2062 }
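      /* Worked example for the fold above (descriptive only): with a V4SI
         result type,
           vr = vec_splat_s32 (5);
         folds to the constant vector { 5, 5, 5, 5 }.  Arguments outside the
         5-bit signed range are left for the expander to handle.  */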
2063
2064 /* Flavors of vec_splat. */
2065 /* a = vec_splat (b, 0x3) becomes a = { b[3], b[3], b[3], ... }; */
2066 case RS6000_BIF_VSPLTB:
2067 case RS6000_BIF_VSPLTH:
2068 case RS6000_BIF_VSPLTW:
2069 case RS6000_BIF_XXSPLTD_V2DI:
2070 case RS6000_BIF_XXSPLTD_V2DF:
2071 {
2072 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
2073 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
2074 /* Only fold the vec_splat() if arg1 is both a constant value and
2075 a valid index into the arg0 vector. */
2076 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
2077 if (TREE_CODE (arg1) != INTEGER_CST
2078 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
2079 return false;
2080 lhs = gimple_call_lhs (stmt);
2081 tree lhs_type = TREE_TYPE (lhs);
2082 tree arg0_type = TREE_TYPE (arg0);
2083 tree splat;
2084 if (TREE_CODE (arg0) == VECTOR_CST)
2085 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
2086 else
2087 {
2088 /* Determine (in bits) the length and start location of the
2089 splat value for a call to the tree_vec_extract helper. */
2090 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
2091 * BITS_PER_UNIT / n_elts;
2092 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
2093 tree len = build_int_cst (bitsizetype, splat_elem_size);
2094 tree start = build_int_cst (bitsizetype, splat_start_bit);
2095 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
2096 len, start);
2097 }
2098 /* And finally, build the new vector. */
2099 tree splat_tree = build_vector_from_val (lhs_type, splat);
2100 g = gimple_build_assign (lhs, splat_tree);
2101 gimple_set_location (g, gimple_location (stmt));
2102 gsi_replace (gsi, g, true);
2103 return true;
2104 }
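      /* Worked example for the fold above (descriptive only): splatting
         element 3 of a non-constant V8HI vector computes
           splat_elem_size = 16 * 8 / 8 = 16 bits
           splat_start_bit = 3 * 16 = 48
         so tree_vec_extract pulls out the 16-bit element starting at bit 48,
         and build_vector_from_val replicates it across the result.  */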
2105
2106 /* vec_mergel (integrals). */
2107 case RS6000_BIF_VMRGLH:
2108 case RS6000_BIF_VMRGLW:
2109 case RS6000_BIF_XXMRGLW_4SI:
2110 case RS6000_BIF_VMRGLB:
2111 case RS6000_BIF_VEC_MERGEL_V2DI:
2112 case RS6000_BIF_XXMRGLW_4SF:
2113 case RS6000_BIF_VEC_MERGEL_V2DF:
2114 fold_mergehl_helper (gsi, stmt, 1);
2115 return true;
2116 /* vec_mergeh (integrals). */
2117 case RS6000_BIF_VMRGHH:
2118 case RS6000_BIF_VMRGHW:
2119 case RS6000_BIF_XXMRGHW_4SI:
2120 case RS6000_BIF_VMRGHB:
2121 case RS6000_BIF_VEC_MERGEH_V2DI:
2122 case RS6000_BIF_XXMRGHW_4SF:
2123 case RS6000_BIF_VEC_MERGEH_V2DF:
2124 fold_mergehl_helper (gsi, stmt, 0);
2125 return true;
2126
2127 /* Flavors of vec_mergee. */
2128 case RS6000_BIF_VMRGEW_V4SI:
2129 case RS6000_BIF_VMRGEW_V2DI:
2130 case RS6000_BIF_VMRGEW_V4SF:
2131 case RS6000_BIF_VMRGEW_V2DF:
2132 fold_mergeeo_helper (gsi, stmt, 0);
2133 return true;
2134 /* Flavors of vec_mergeo. */
2135 case RS6000_BIF_VMRGOW_V4SI:
2136 case RS6000_BIF_VMRGOW_V2DI:
2137 case RS6000_BIF_VMRGOW_V4SF:
2138 case RS6000_BIF_VMRGOW_V2DF:
2139 fold_mergeeo_helper (gsi, stmt, 1);
2140 return true;
2141
2142 /* d = vec_pack (a, b) */
2143 case RS6000_BIF_VPKUDUM:
2144 case RS6000_BIF_VPKUHUM:
2145 case RS6000_BIF_VPKUWUM:
2146 {
2147 arg0 = gimple_call_arg (stmt, 0);
2148 arg1 = gimple_call_arg (stmt, 1);
2149 lhs = gimple_call_lhs (stmt);
2150 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
2151 gimple_set_location (g, gimple_location (stmt));
2152 gsi_replace (gsi, g, true);
2153 return true;
2154 }
2155
2156 /* d = vec_unpackh (a) */
2157 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
2158 in this code is sensitive to endianness, and needs to be inverted to
2159 handle both LE and BE targets. */
2160 case RS6000_BIF_VUPKHSB:
2161 case RS6000_BIF_VUPKHSH:
2162 case RS6000_BIF_VUPKHSW:
2163 {
2164 arg0 = gimple_call_arg (stmt, 0);
2165 lhs = gimple_call_lhs (stmt);
2166 if (BYTES_BIG_ENDIAN)
2167 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
2168 else
2169 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
2170 gimple_set_location (g, gimple_location (stmt));
2171 gsi_replace (gsi, g, true);
2172 return true;
2173 }
2174 /* d = vec_unpackl (a) */
2175 case RS6000_BIF_VUPKLSB:
2176 case RS6000_BIF_VUPKLSH:
2177 case RS6000_BIF_VUPKLSW:
2178 {
2179 arg0 = gimple_call_arg (stmt, 0);
2180 lhs = gimple_call_lhs (stmt);
2181 if (BYTES_BIG_ENDIAN)
2182 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
2183 else
2184 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
2185 gimple_set_location (g, gimple_location (stmt));
2186 gsi_replace (gsi, g, true);
2187 return true;
2188 }
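      /* Note on the two folds above (descriptive only): the VEC_UNPACK_HI/LO
         tree codes number elements in GCC's own element order, while the
         vupkhs{b,h,w} and vupkls{b,h,w} builtins are specified in big-endian
         element order, so on little-endian targets the HI and LO tree codes
         are deliberately swapped relative to the builtin names.  */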
2189 /* There is no gimple type corresponding to pixel, so just return. */
2190 case RS6000_BIF_VUPKHPX:
2191 case RS6000_BIF_VUPKLPX:
2192 return false;
2193
2194 /* vec_perm. */
2195 case RS6000_BIF_VPERM_16QI:
2196 case RS6000_BIF_VPERM_8HI:
2197 case RS6000_BIF_VPERM_4SI:
2198 case RS6000_BIF_VPERM_2DI:
2199 case RS6000_BIF_VPERM_4SF:
2200 case RS6000_BIF_VPERM_2DF:
2201 case RS6000_BIF_VPERM_16QI_UNS:
2202 case RS6000_BIF_VPERM_8HI_UNS:
2203 case RS6000_BIF_VPERM_4SI_UNS:
2204 case RS6000_BIF_VPERM_2DI_UNS:
2205 {
2206 arg0 = gimple_call_arg (stmt, 0);
2207 arg1 = gimple_call_arg (stmt, 1);
2208 tree permute = gimple_call_arg (stmt, 2);
2209 lhs = gimple_call_lhs (stmt);
2210 location_t loc = gimple_location (stmt);
2211 gimple_seq stmts = NULL;
2212 // Convert arg0 and arg1 to match the type of the permute
2213 // for the VEC_PERM_EXPR operation.
2214 tree permute_type = (TREE_TYPE (permute));
2215 tree arg0_ptype = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR,
2216 permute_type, arg0);
2217 tree arg1_ptype = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR,
2218 permute_type, arg1);
2219 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
2220 permute_type, arg0_ptype, arg1_ptype,
2221 permute);
2222 // Convert the result back to the desired lhs type upon completion.
2223 tree temp = gimple_build (&stmts, loc, VIEW_CONVERT_EXPR,
2224 TREE_TYPE (lhs), lhs_ptype);
2225 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
2226 g = gimple_build_assign (lhs, temp);
2227 gimple_set_location (g, loc);
2228 gsi_replace (gsi, g, true);
2229 return true;
2230 }
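      /* Illustrative sketch of the fold above: for
           vr = vec_perm (va, vb, vsel);
         va and vb are VIEW_CONVERTed to the type of the selector vector so
         that VEC_PERM_EXPR sees three operands of one type, the permute is
         done in that type, and the result is converted back to the type of
         the lhs.  */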
2231
2232 default:
2233 if (TARGET_DEBUG_BUILTIN)
2234 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
2235 fn_code, fn_name1, fn_name2);
2236 break;
2237 }
2238
2239 return false;
2240 }
2241
2242 /* **** Expansion support **** */
2243
2244 static rtx
2245 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
2246 {
2247 rtx pat, scratch;
2248 tree cr6_form = CALL_EXPR_ARG (exp, 0);
2249 tree arg0 = CALL_EXPR_ARG (exp, 1);
2250 tree arg1 = CALL_EXPR_ARG (exp, 2);
2251 rtx op0 = expand_normal (arg0);
2252 rtx op1 = expand_normal (arg1);
2253 machine_mode tmode = SImode;
2254 machine_mode mode0 = insn_data[icode].operand[1].mode;
2255 machine_mode mode1 = insn_data[icode].operand[2].mode;
2256 int cr6_form_int;
2257
2258 if (TREE_CODE (cr6_form) != INTEGER_CST)
2259 {
2260 error ("argument 1 of %qs must be a constant",
2261 "__builtin_altivec_predicate");
2262 return const0_rtx;
2263 }
2264 else
2265 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
2266
2267 gcc_assert (mode0 == mode1);
2268
2269 /* If we have invalid arguments, bail out before generating bad rtl. */
2270 if (arg0 == error_mark_node || arg1 == error_mark_node)
2271 return const0_rtx;
2272
2273 if (target == 0
2274 || GET_MODE (target) != tmode
2275 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
2276 target = gen_reg_rtx (tmode);
2277
2278 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
2279 op0 = copy_to_mode_reg (mode0, op0);
2280 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
2281 op1 = copy_to_mode_reg (mode1, op1);
2282
2283 /* Note that for many of the relevant operations (e.g. cmpne or
2284 cmpeq) with float or double operands, it makes more sense for the
2285 mode of the allocated scratch register to select a vector of
2286 integer. But the choice to copy the mode of operand 0 was made
2287 long ago and there are no plans to change it. */
2288 scratch = gen_reg_rtx (mode0);
2289
2290 pat = GEN_FCN (icode) (scratch, op0, op1);
2291 if (! pat)
2292 return 0;
2293 emit_insn (pat);
2294
2295 /* The vec_any* and vec_all* predicates use the same opcodes for two
2296 different operations, but the bits in CR6 will be different
2297 depending on what information we want. So we have to play tricks
2298 with CR6 to get the right bits out.
2299
2300 If you think this is disgusting, look at the specs for the
2301 AltiVec predicates. */
2302
2303 switch (cr6_form_int)
2304 {
2305 case 0:
2306 emit_insn (gen_cr6_test_for_zero (target));
2307 break;
2308 case 1:
2309 emit_insn (gen_cr6_test_for_zero_reverse (target));
2310 break;
2311 case 2:
2312 emit_insn (gen_cr6_test_for_lt (target));
2313 break;
2314 case 3:
2315 emit_insn (gen_cr6_test_for_lt_reverse (target));
2316 break;
2317 default:
2318 error ("argument 1 of %qs is out of range",
2319 "__builtin_altivec_predicate");
2320 break;
2321 }
2322
2323 return target;
2324 }
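/* Illustrative note on the expander above (descriptive only): argument 0 of
   __builtin_altivec_predicate selects how CR6 is tested.  Assuming the usual
   altivec.h definitions (__CR6_EQ = 0, __CR6_EQ_REV = 1, __CR6_LT = 2,
   __CR6_LT_REV = 3), each selector maps to the matching gen_cr6_test_*
   sequence in the switch, with the _REV forms testing the complemented
   bit.  */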
2325
2326 /* Expand vec_init builtin. */
2327 static rtx
2328 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
2329 {
2330 machine_mode tmode = TYPE_MODE (type);
2331 machine_mode inner_mode = GET_MODE_INNER (tmode);
2332 int i, n_elt = GET_MODE_NUNITS (tmode);
2333
2334 gcc_assert (VECTOR_MODE_P (tmode));
2335 gcc_assert (n_elt == call_expr_nargs (exp));
2336
2337 if (!target || !register_operand (target, tmode))
2338 target = gen_reg_rtx (tmode);
2339
2340 /* If we have a vector consisting of a single element, such as V1TImode, do
2341 the initialization directly. */
2342 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
2343 {
2344 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
2345 emit_move_insn (target, gen_lowpart (tmode, x));
2346 }
2347 else
2348 {
2349 rtvec v = rtvec_alloc (n_elt);
2350
2351 for (i = 0; i < n_elt; ++i)
2352 {
2353 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
2354 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
2355 }
2356
2357 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
2358 }
2359
2360 return target;
2361 }
2362
2363 /* Return the integer constant in ARG. Constrain it to be in the range
2364 of the subparts of VEC_TYPE; issue an error if not. */
2365
2366 static int
2367 get_element_number (tree vec_type, tree arg)
2368 {
2369 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
2370
2371 if (!tree_fits_uhwi_p (arg)
2372 || (elt = tree_to_uhwi (arg), elt > max))
2373 {
2374 error ("selector must be an integer constant in the range [0, %wi]", max);
2375 return 0;
2376 }
2377
2378 return elt;
2379 }
2380
2381 /* Expand vec_set builtin. */
2382 static rtx
2383 altivec_expand_vec_set_builtin (tree exp)
2384 {
2385 machine_mode tmode, mode1;
2386 tree arg0, arg1, arg2;
2387 int elt;
2388 rtx op0, op1;
2389
2390 arg0 = CALL_EXPR_ARG (exp, 0);
2391 arg1 = CALL_EXPR_ARG (exp, 1);
2392 arg2 = CALL_EXPR_ARG (exp, 2);
2393
2394 tmode = TYPE_MODE (TREE_TYPE (arg0));
2395 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
2396 gcc_assert (VECTOR_MODE_P (tmode));
2397
2398 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
2399 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
2400 elt = get_element_number (TREE_TYPE (arg0), arg2);
2401
2402 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
2403 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
2404
2405 op0 = force_reg (tmode, op0);
2406 op1 = force_reg (mode1, op1);
2407
2408 rs6000_expand_vector_set (op0, op1, GEN_INT (elt));
2409
2410 return op0;
2411 }
2412
2413 /* Expand vec_ext builtin. */
2414 static rtx
2415 altivec_expand_vec_ext_builtin (tree exp, rtx target)
2416 {
2417 machine_mode tmode, mode0;
2418 tree arg0, arg1;
2419 rtx op0;
2420 rtx op1;
2421
2422 arg0 = CALL_EXPR_ARG (exp, 0);
2423 arg1 = CALL_EXPR_ARG (exp, 1);
2424
2425 op0 = expand_normal (arg0);
2426 op1 = expand_normal (arg1);
2427
2428 if (TREE_CODE (arg1) == INTEGER_CST)
2429 {
2430 unsigned HOST_WIDE_INT elt;
2431 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
2432 unsigned int truncated_selector;
2433 /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1) returns
2434 the low-order bits of the INTEGER_CST, used here for modulo indexing. */
2435 elt = TREE_INT_CST_LOW (arg1);
2436 truncated_selector = elt % size;
2437 op1 = GEN_INT (truncated_selector);
2438 }
2439
2440 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
2441 mode0 = TYPE_MODE (TREE_TYPE (arg0));
2442 gcc_assert (VECTOR_MODE_P (mode0));
2443
2444 op0 = force_reg (mode0, op0);
2445
2446 if (optimize || !target || !register_operand (target, tmode))
2447 target = gen_reg_rtx (tmode);
2448
2449 rs6000_expand_vector_extract (target, op0, op1);
2450
2451 return target;
2452 }
2453
2454 /* Expand ALTIVEC_BUILTIN_MASK_FOR_LOAD. */
2455 rtx
2456 rs6000_expand_ldst_mask (rtx target, tree arg0)
2457 {
2458 int icode2 = BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
2459 : (int) CODE_FOR_altivec_lvsl_direct;
2460 machine_mode tmode = insn_data[icode2].operand[0].mode;
2461 machine_mode mode = insn_data[icode2].operand[1].mode;
2462
2463 gcc_assert (TARGET_ALTIVEC);
2464
2465 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg0)));
2466 rtx op = expand_expr (arg0, NULL_RTX, Pmode, EXPAND_NORMAL);
2467 rtx addr = memory_address (mode, op);
2468 /* We need to negate the address. */
2469 op = gen_reg_rtx (GET_MODE (addr));
2470 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
2471 op = gen_rtx_MEM (mode, op);
2472
2473 if (target == 0
2474 || GET_MODE (target) != tmode
2475 || !insn_data[icode2].operand[0].predicate (target, tmode))
2476 target = gen_reg_rtx (tmode);
2477
2478 rtx pat = GEN_FCN (icode2) (target, op);
2479 if (!pat)
2480 return 0;
2481 emit_insn (pat);
2482
2483 return target;
2484 }
2485
2486 /* Used by __builtin_cpu_is(), mapping from PLATFORM names to values. */
2487 static const struct
2488 {
2489 const char *cpu;
2490 unsigned int cpuid;
2491 } cpu_is_info[] = {
2492 { "power10", PPC_PLATFORM_POWER10 },
2493 { "power9", PPC_PLATFORM_POWER9 },
2494 { "power8", PPC_PLATFORM_POWER8 },
2495 { "power7", PPC_PLATFORM_POWER7 },
2496 { "power6x", PPC_PLATFORM_POWER6X },
2497 { "power6", PPC_PLATFORM_POWER6 },
2498 { "power5+", PPC_PLATFORM_POWER5_PLUS },
2499 { "power5", PPC_PLATFORM_POWER5 },
2500 { "ppc970", PPC_PLATFORM_PPC970 },
2501 { "power4", PPC_PLATFORM_POWER4 },
2502 { "ppca2", PPC_PLATFORM_PPCA2 },
2503 { "ppc476", PPC_PLATFORM_PPC476 },
2504 { "ppc464", PPC_PLATFORM_PPC464 },
2505 { "ppc440", PPC_PLATFORM_PPC440 },
2506 { "ppc405", PPC_PLATFORM_PPC405 },
2507 { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
2508 };
2509
2510 /* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks. */
2511 static const struct
2512 {
2513 const char *hwcap;
2514 int mask;
2515 unsigned int id;
2516 } cpu_supports_info[] = {
2517 /* AT_HWCAP masks. */
2518 { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
2519 { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
2520 { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
2521 { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
2522 { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
2523 { "booke", PPC_FEATURE_BOOKE, 0 },
2524 { "cellbe", PPC_FEATURE_CELL_BE, 0 },
2525 { "dfp", PPC_FEATURE_HAS_DFP, 0 },
2526 { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
2527 { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
2528 { "fpu", PPC_FEATURE_HAS_FPU, 0 },
2529 { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
2530 { "mmu", PPC_FEATURE_HAS_MMU, 0 },
2531 { "notb", PPC_FEATURE_NO_TB, 0 },
2532 { "pa6t", PPC_FEATURE_PA6T, 0 },
2533 { "power4", PPC_FEATURE_POWER4, 0 },
2534 { "power5", PPC_FEATURE_POWER5, 0 },
2535 { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
2536 { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
2537 { "ppc32", PPC_FEATURE_32, 0 },
2538 { "ppc601", PPC_FEATURE_601_INSTR, 0 },
2539 { "ppc64", PPC_FEATURE_64, 0 },
2540 { "ppcle", PPC_FEATURE_PPC_LE, 0 },
2541 { "smt", PPC_FEATURE_SMT, 0 },
2542 { "spe", PPC_FEATURE_HAS_SPE, 0 },
2543 { "true_le", PPC_FEATURE_TRUE_LE, 0 },
2544 { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
2545 { "vsx", PPC_FEATURE_HAS_VSX, 0 },
2546
2547 /* AT_HWCAP2 masks. */
2548 { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
2549 { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
2550 { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
2551 { "htm", PPC_FEATURE2_HAS_HTM, 1 },
2552 { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
2553 { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
2554 { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
2555 { "tar", PPC_FEATURE2_HAS_TAR, 1 },
2556 { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
2557 { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
2558 { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
2559 { "darn", PPC_FEATURE2_DARN, 1 },
2560 { "scv", PPC_FEATURE2_SCV, 1 },
2561 { "arch_3_1", PPC_FEATURE2_ARCH_3_1, 1 },
2562 { "mma", PPC_FEATURE2_MMA, 1 },
2563 };
2564
2565 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
2566 static rtx
2567 cpu_expand_builtin (enum rs6000_gen_builtins fcode,
2568 tree exp ATTRIBUTE_UNUSED, rtx target)
2569 {
2570 /* __builtin_cpu_init () is a nop, so expand to nothing. */
2571 if (fcode == RS6000_BIF_CPU_INIT)
2572 return const0_rtx;
2573
2574 if (target == 0 || GET_MODE (target) != SImode)
2575 target = gen_reg_rtx (SImode);
2576
2577 /* TODO: Factor the #ifdef'd code into a separate function. */
2578 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
2579 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
2580 /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
2581 to a STRING_CST. */
2582 if (TREE_CODE (arg) == ARRAY_REF
2583 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
2584 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
2585 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
2586 arg = TREE_OPERAND (arg, 0);
2587
2588 if (TREE_CODE (arg) != STRING_CST)
2589 {
2590 error ("builtin %qs only accepts a string argument",
2591 rs6000_builtin_info[(size_t) fcode].bifname);
2592 return const0_rtx;
2593 }
2594
2595 if (fcode == RS6000_BIF_CPU_IS)
2596 {
2597 const char *cpu = TREE_STRING_POINTER (arg);
2598 rtx cpuid = NULL_RTX;
2599 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
2600 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
2601 {
2602 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
2603 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
2604 break;
2605 }
2606 if (cpuid == NULL_RTX)
2607 {
2608 /* Invalid CPU argument. */
2609 error ("cpu %qs is an invalid argument to builtin %qs",
2610 cpu, rs6000_builtin_info[(size_t) fcode].bifname);
2611 return const0_rtx;
2612 }
2613
2614 rtx platform = gen_reg_rtx (SImode);
2615 rtx address = gen_rtx_PLUS (Pmode,
2616 gen_rtx_REG (Pmode, TLS_REGNUM),
2617 GEN_INT (TCB_PLATFORM_OFFSET));
2618 rtx tcbmem = gen_const_mem (SImode, address);
2619 emit_move_insn (platform, tcbmem);
2620 emit_insn (gen_eqsi3 (target, platform, cpuid));
2621 }
2622 else if (fcode == RS6000_BIF_CPU_SUPPORTS)
2623 {
2624 const char *hwcap = TREE_STRING_POINTER (arg);
2625 rtx mask = NULL_RTX;
2626 int hwcap_offset;
2627 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
2628 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
2629 {
2630 mask = GEN_INT (cpu_supports_info[i].mask);
2631 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
2632 break;
2633 }
2634 if (mask == NULL_RTX)
2635 {
2636 /* Invalid HWCAP argument. */
2637 error ("%s %qs is an invalid argument to builtin %qs",
2638 "hwcap", hwcap,
2639 rs6000_builtin_info[(size_t) fcode].bifname);
2640 return const0_rtx;
2641 }
2642
2643 rtx tcb_hwcap = gen_reg_rtx (SImode);
2644 rtx address = gen_rtx_PLUS (Pmode,
2645 gen_rtx_REG (Pmode, TLS_REGNUM),
2646 GEN_INT (hwcap_offset));
2647 rtx tcbmem = gen_const_mem (SImode, address);
2648 emit_move_insn (tcb_hwcap, tcbmem);
2649 rtx scratch1 = gen_reg_rtx (SImode);
2650 emit_insn (gen_rtx_SET (scratch1,
2651 gen_rtx_AND (SImode, tcb_hwcap, mask)));
2652 rtx scratch2 = gen_reg_rtx (SImode);
2653 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
2654 emit_insn (gen_rtx_SET (target,
2655 gen_rtx_XOR (SImode, scratch2, const1_rtx)));
2656 }
2657 else
2658 gcc_unreachable ();
2659
2660 /* Record that we have expanded a CPU builtin, so that we can later
2661 emit a reference to the special symbol exported by LIBC to ensure we
2662 do not link against an old LIBC that doesn't support this feature. */
2663 cpu_builtin_p = true;
2664
2665 #else
2666 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
2667 "capability bits", rs6000_builtin_info[(size_t) fcode].bifname);
2668
2669 /* For old LIBCs, always return FALSE. */
2670 emit_move_insn (target, GEN_INT (0));
2671 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
2672
2673 return target;
2674 }
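/* Illustrative sketch of the code emitted above when the TCB provides HWCAP
   (descriptive only; the names come from the tables above):
     __builtin_cpu_supports ("vsx") loads the SImode word at
     TLS_REGNUM + TCB_HWCAP_OFFSET (0), ANDs it with PPC_FEATURE_HAS_VSX,
     and sets the result to ((masked == 0) ^ 1);
     __builtin_cpu_is ("power9") compares the TCB platform word against
     PPC_PLATFORM_POWER9 + _DL_FIRST_PLATFORM.  */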
2675
2676 /* For the element-reversing load/store built-ins, produce the correct
2677 insn_code depending on the target endianness. */
2678 static insn_code
2679 elemrev_icode (rs6000_gen_builtins fcode)
2680 {
2681 switch (fcode)
2682 {
2683 case RS6000_BIF_ST_ELEMREV_V1TI:
2684 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
2685 : CODE_FOR_vsx_st_elemrev_v1ti;
2686
2687 case RS6000_BIF_ST_ELEMREV_V2DF:
2688 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
2689 : CODE_FOR_vsx_st_elemrev_v2df;
2690
2691 case RS6000_BIF_ST_ELEMREV_V2DI:
2692 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
2693 : CODE_FOR_vsx_st_elemrev_v2di;
2694
2695 case RS6000_BIF_ST_ELEMREV_V4SF:
2696 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
2697 : CODE_FOR_vsx_st_elemrev_v4sf;
2698
2699 case RS6000_BIF_ST_ELEMREV_V4SI:
2700 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
2701 : CODE_FOR_vsx_st_elemrev_v4si;
2702
2703 case RS6000_BIF_ST_ELEMREV_V8HI:
2704 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
2705 : CODE_FOR_vsx_st_elemrev_v8hi;
2706
2707 case RS6000_BIF_ST_ELEMREV_V16QI:
2708 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
2709 : CODE_FOR_vsx_st_elemrev_v16qi;
2710
2711 case RS6000_BIF_LD_ELEMREV_V2DF:
2712 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
2713 : CODE_FOR_vsx_ld_elemrev_v2df;
2714
2715 case RS6000_BIF_LD_ELEMREV_V1TI:
2716 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
2717 : CODE_FOR_vsx_ld_elemrev_v1ti;
2718
2719 case RS6000_BIF_LD_ELEMREV_V2DI:
2720 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
2721 : CODE_FOR_vsx_ld_elemrev_v2di;
2722
2723 case RS6000_BIF_LD_ELEMREV_V4SF:
2724 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
2725 : CODE_FOR_vsx_ld_elemrev_v4sf;
2726
2727 case RS6000_BIF_LD_ELEMREV_V4SI:
2728 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
2729 : CODE_FOR_vsx_ld_elemrev_v4si;
2730
2731 case RS6000_BIF_LD_ELEMREV_V8HI:
2732 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
2733 : CODE_FOR_vsx_ld_elemrev_v8hi;
2734
2735 case RS6000_BIF_LD_ELEMREV_V16QI:
2736 return BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
2737 : CODE_FOR_vsx_ld_elemrev_v16qi;
2738 default:
2739 ;
2740 }
2741
2742 gcc_unreachable ();
2743 }
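/* Note on the mapping above (descriptive only): on big-endian targets the
   in-register element order already matches memory order, so the
   "element-reversing" built-ins degenerate to the plain vsx_load/vsx_store
   patterns; only little-endian targets use the ld/st_elemrev patterns.  */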
2744
2745 /* Expand an AltiVec vector load builtin, and return the expanded rtx. */
2746 static rtx
2747 ldv_expand_builtin (rtx target, insn_code icode, rtx *op, machine_mode tmode)
2748 {
2749 if (target == 0
2750 || GET_MODE (target) != tmode
2751 || !insn_data[icode].operand[0].predicate (target, tmode))
2752 target = gen_reg_rtx (tmode);
2753
2754 op[1] = copy_to_mode_reg (Pmode, op[1]);
2755
2756 /* These CELL built-ins use BLKmode instead of tmode for historical
2757 (i.e., unknown) reasons. TODO: Is this necessary? */
2758 bool blk = (icode == CODE_FOR_altivec_lvlx
2759 || icode == CODE_FOR_altivec_lvlxl
2760 || icode == CODE_FOR_altivec_lvrx
2761 || icode == CODE_FOR_altivec_lvrxl);
2762
2763 /* For LVX, express the RTL accurately by ANDing the address with -16.
2764 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
2765 so the raw address is fine. */
2766 /* TODO: That statement seems wrong, as the UNSPECs don't surround the
2767 memory expression, so a latent bug may lie here. The &-16 is likely
2768 needed for all VMX-style loads. */
2769 if (icode == CODE_FOR_altivec_lvx_v1ti
2770 || icode == CODE_FOR_altivec_lvx_v2df
2771 || icode == CODE_FOR_altivec_lvx_v2di
2772 || icode == CODE_FOR_altivec_lvx_v4sf
2773 || icode == CODE_FOR_altivec_lvx_v4si
2774 || icode == CODE_FOR_altivec_lvx_v8hi
2775 || icode == CODE_FOR_altivec_lvx_v16qi)
2776 {
2777 rtx rawaddr;
2778 if (op[0] == const0_rtx)
2779 rawaddr = op[1];
2780 else
2781 {
2782 op[0] = copy_to_mode_reg (Pmode, op[0]);
2783 rawaddr = gen_rtx_PLUS (Pmode, op[1], op[0]);
2784 }
2785 rtx addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
2786 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
2787
2788 emit_insn (gen_rtx_SET (target, addr));
2789 }
2790 else
2791 {
2792 rtx addr;
2793 if (op[0] == const0_rtx)
2794 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op[1]);
2795 else
2796 {
2797 op[0] = copy_to_mode_reg (Pmode, op[0]);
2798 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
2799 gen_rtx_PLUS (Pmode, op[1], op[0]));
2800 }
2801
2802 rtx pat = GEN_FCN (icode) (target, addr);
2803 if (!pat)
2804 return 0;
2805 emit_insn (pat);
2806 }
2807
2808 return target;
2809 }
2810
2811 /* Expand a builtin function that loads a scalar into a vector register
2812 with sign extension, and return the expanded rtx. */
2813 static rtx
2814 lxvrse_expand_builtin (rtx target, insn_code icode, rtx *op,
2815 machine_mode tmode, machine_mode smode)
2816 {
2817 rtx pat, addr;
2818 op[1] = copy_to_mode_reg (Pmode, op[1]);
2819
2820 if (op[0] == const0_rtx)
2821 addr = gen_rtx_MEM (tmode, op[1]);
2822 else
2823 {
2824 op[0] = copy_to_mode_reg (Pmode, op[0]);
2825 addr = gen_rtx_MEM (smode,
2826 gen_rtx_PLUS (Pmode, op[1], op[0]));
2827 }
2828
2829 rtx discratch = gen_reg_rtx (V2DImode);
2830 rtx tiscratch = gen_reg_rtx (TImode);
2831
2832 /* Emit the lxvr*x insn. */
2833 pat = GEN_FCN (icode) (tiscratch, addr);
2834 if (!pat)
2835 return 0;
2836 emit_insn (pat);
2837
2838 /* Emit a sign extension from V16QI,V8HI,V4SI to V2DI. */
2839 rtx temp1;
2840 if (icode == CODE_FOR_vsx_lxvrbx)
2841 {
2842 temp1 = simplify_gen_subreg (V16QImode, tiscratch, TImode, 0);
2843 emit_insn (gen_vsx_sign_extend_qi_v2di (discratch, temp1));
2844 }
2845 else if (icode == CODE_FOR_vsx_lxvrhx)
2846 {
2847 temp1 = simplify_gen_subreg (V8HImode, tiscratch, TImode, 0);
2848 emit_insn (gen_vsx_sign_extend_hi_v2di (discratch, temp1));
2849 }
2850 else if (icode == CODE_FOR_vsx_lxvrwx)
2851 {
2852 temp1 = simplify_gen_subreg (V4SImode, tiscratch, TImode, 0);
2853 emit_insn (gen_vsx_sign_extend_si_v2di (discratch, temp1));
2854 }
2855 else if (icode == CODE_FOR_vsx_lxvrdx)
2856 discratch = simplify_gen_subreg (V2DImode, tiscratch, TImode, 0);
2857 else
2858 gcc_unreachable ();
2859
2860 /* Emit the sign extension from V2DI (double) to TI (quad). */
2861 rtx temp2 = simplify_gen_subreg (TImode, discratch, V2DImode, 0);
2862 emit_insn (gen_extendditi2_vector (target, temp2));
2863
2864 return target;
2865 }
2866
2867 /* Expand a builtin function that loads a scalar into a vector register
2868 with zero extension, and return the expanded rtx. */
2869 static rtx
2870 lxvrze_expand_builtin (rtx target, insn_code icode, rtx *op,
2871 machine_mode tmode, machine_mode smode)
2872 {
2873 rtx pat, addr;
2874 op[1] = copy_to_mode_reg (Pmode, op[1]);
2875
2876 if (op[0] == const0_rtx)
2877 addr = gen_rtx_MEM (tmode, op[1]);
2878 else
2879 {
2880 op[0] = copy_to_mode_reg (Pmode, op[0]);
2881 addr = gen_rtx_MEM (smode,
2882 gen_rtx_PLUS (Pmode, op[1], op[0]));
2883 }
2884
2885 pat = GEN_FCN (icode) (target, addr);
2886 if (!pat)
2887 return 0;
2888 emit_insn (pat);
2889 return target;
2890 }
2891
2892 /* Expand an AltiVec vector store builtin, and return the expanded rtx. */
2893 static rtx
2894 stv_expand_builtin (insn_code icode, rtx *op,
2895 machine_mode tmode, machine_mode smode)
2896 {
2897 op[2] = copy_to_mode_reg (Pmode, op[2]);
2898
2899 /* For STVX, express the RTL accurately by ANDing the address with -16.
2900 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
2901 so the raw address is fine. */
2902 /* TODO: That statement seems wrong, as the UNSPECs don't surround the
2903 memory expression, so a latent bug may lie here. The &-16 is likely
2904 needed for all VMX-style stores. */
2905 if (icode == CODE_FOR_altivec_stvx_v2df
2906 || icode == CODE_FOR_altivec_stvx_v2di
2907 || icode == CODE_FOR_altivec_stvx_v4sf
2908 || icode == CODE_FOR_altivec_stvx_v4si
2909 || icode == CODE_FOR_altivec_stvx_v8hi
2910 || icode == CODE_FOR_altivec_stvx_v16qi)
2911 {
2912 rtx rawaddr;
2913 if (op[1] == const0_rtx)
2914 rawaddr = op[2];
2915 else
2916 {
2917 op[1] = copy_to_mode_reg (Pmode, op[1]);
2918 rawaddr = gen_rtx_PLUS (Pmode, op[2], op[1]);
2919 }
2920
2921 rtx addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
2922 addr = gen_rtx_MEM (tmode, addr);
2923 op[0] = copy_to_mode_reg (tmode, op[0]);
2924 emit_insn (gen_rtx_SET (addr, op[0]));
2925 }
2926 else if (icode == CODE_FOR_vsx_stxvrbx
2927 || icode == CODE_FOR_vsx_stxvrhx
2928 || icode == CODE_FOR_vsx_stxvrwx
2929 || icode == CODE_FOR_vsx_stxvrdx)
2930 {
2931 rtx truncrtx = gen_rtx_TRUNCATE (tmode, op[0]);
2932 op[0] = copy_to_mode_reg (E_TImode, truncrtx);
2933
2934 rtx addr;
2935 if (op[1] == const0_rtx)
2936 addr = gen_rtx_MEM (Pmode, op[2]);
2937 else
2938 {
2939 op[1] = copy_to_mode_reg (Pmode, op[1]);
2940 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op[2], op[1]));
2941 }
2942 rtx pat = GEN_FCN (icode) (addr, op[0]);
2943 if (pat)
2944 emit_insn (pat);
2945 }
2946 else
2947 {
2948 if (!insn_data[icode].operand[1].predicate (op[0], smode))
2949 op[0] = copy_to_mode_reg (smode, op[0]);
2950
2951 rtx addr;
2952 if (op[1] == const0_rtx)
2953 addr = gen_rtx_MEM (tmode, op[2]);
2954 else
2955 {
2956 op[1] = copy_to_mode_reg (Pmode, op[1]);
2957 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op[2], op[1]));
2958 }
2959
2960 rtx pat = GEN_FCN (icode) (addr, op[0]);
2961 if (pat)
2962 emit_insn (pat);
2963 }
2964
2965 return NULL_RTX;
2966 }
2967
2968 /* Expand the MMA built-in in EXP, and return it. */
2969 static rtx
2970 mma_expand_builtin (tree exp, rtx target, insn_code icode,
2971 rs6000_gen_builtins fcode)
2972 {
2973 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
2974 bool void_func = TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node;
2975 machine_mode tmode = VOIDmode;
2976 rtx op[MAX_MMA_OPERANDS];
2977 unsigned nopnds = 0;
2978
2979 if (!void_func)
2980 {
2981 tmode = insn_data[icode].operand[0].mode;
2982 if (!(target
2983 && GET_MODE (target) == tmode
2984 && insn_data[icode].operand[0].predicate (target, tmode)))
2985 target = gen_reg_rtx (tmode);
2986 op[nopnds++] = target;
2987 }
2988 else
2989 target = const0_rtx;
2990
2991 call_expr_arg_iterator iter;
2992 tree arg;
2993 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
2994 {
2995 if (arg == error_mark_node)
2996 return const0_rtx;
2997
2998 rtx opnd;
2999 const struct insn_operand_data *insn_op;
3000 insn_op = &insn_data[icode].operand[nopnds];
3001 if (TREE_CODE (arg) == ADDR_EXPR
3002 && MEM_P (DECL_RTL (TREE_OPERAND (arg, 0))))
3003 opnd = DECL_RTL (TREE_OPERAND (arg, 0));
3004 else
3005 opnd = expand_normal (arg);
3006
3007 if (!insn_op->predicate (opnd, insn_op->mode))
3008 {
3009 /* TODO: This use of constraints needs explanation. */
3010 if (!strcmp (insn_op->constraint, "n"))
3011 {
3012 if (!CONST_INT_P (opnd))
3013 error ("argument %d must be an unsigned literal", nopnds);
3014 else
3015 error ("argument %d is an unsigned literal that is "
3016 "out of range", nopnds);
3017 return const0_rtx;
3018 }
3019 opnd = copy_to_mode_reg (insn_op->mode, opnd);
3020 }
3021
3022 /* Some MMA instructions have INOUT accumulator operands, so force
3023 their target register to be the same as their input register. */
3024 if (!void_func
3025 && nopnds == 1
3026 && !strcmp (insn_op->constraint, "0")
3027 && insn_op->mode == tmode
3028 && REG_P (opnd)
3029 && insn_data[icode].operand[0].predicate (opnd, tmode))
3030 target = op[0] = opnd;
3031
3032 op[nopnds++] = opnd;
3033 }
3034
3035 rtx pat;
3036 switch (nopnds)
3037 {
3038 case 1:
3039 pat = GEN_FCN (icode) (op[0]);
3040 break;
3041 case 2:
3042 pat = GEN_FCN (icode) (op[0], op[1]);
3043 break;
3044 case 3:
3045 /* The ASSEMBLE builtin source operands are reversed in little-endian
3046 mode, so reorder them. */
3047 if (fcode == RS6000_BIF_ASSEMBLE_PAIR_V_INTERNAL && !WORDS_BIG_ENDIAN)
3048 std::swap (op[1], op[2]);
3049 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
3050 break;
3051 case 4:
3052 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
3053 break;
3054 case 5:
3055 /* The ASSEMBLE builtin source operands are reversed in little-endian
3056 mode, so reorder them. */
3057 if (fcode == RS6000_BIF_ASSEMBLE_ACC_INTERNAL && !WORDS_BIG_ENDIAN)
3058 {
3059 std::swap (op[1], op[4]);
3060 std::swap (op[2], op[3]);
3061 }
3062 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
3063 break;
3064 case 6:
3065 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5]);
3066 break;
3067 case 7:
3068 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5], op[6]);
3069 break;
3070 default:
3071 gcc_unreachable ();
3072 }
3073
3074 if (!pat)
3075 return NULL_RTX;
3076
3077 emit_insn (pat);
3078 return target;
3079 }
3080
3081 /* Return the correct ICODE value depending on whether we are
3082 setting or reading the HTM SPRs. */
3083 static inline enum insn_code
3084 rs6000_htm_spr_icode (bool nonvoid)
3085 {
3086 if (nonvoid)
3087 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
3088 else
3089 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
3090 }
3091
3092 /* Return the appropriate SPR number associated with the given builtin. */
3093 static inline HOST_WIDE_INT
3094 htm_spr_num (enum rs6000_gen_builtins code)
3095 {
3096 if (code == RS6000_BIF_GET_TFHAR
3097 || code == RS6000_BIF_SET_TFHAR)
3098 return TFHAR_SPR;
3099 else if (code == RS6000_BIF_GET_TFIAR
3100 || code == RS6000_BIF_SET_TFIAR)
3101 return TFIAR_SPR;
3102 else if (code == RS6000_BIF_GET_TEXASR
3103 || code == RS6000_BIF_SET_TEXASR)
3104 return TEXASR_SPR;
3105 gcc_assert (code == RS6000_BIF_GET_TEXASRU
3106 || code == RS6000_BIF_SET_TEXASRU);
3107 return TEXASRU_SPR;
3108 }
3109
3110 /* Expand the HTM builtin in EXP and store the result in TARGET.
3111 Return the expanded rtx. */
3112 static rtx
3113 htm_expand_builtin (bifdata *bifaddr, rs6000_gen_builtins fcode,
3114 tree exp, rtx target)
3115 {
3116 if (!TARGET_POWERPC64
3117 && (fcode == RS6000_BIF_TABORTDC
3118 || fcode == RS6000_BIF_TABORTDCI))
3119 {
3120 error ("builtin %qs is only valid in 64-bit mode", bifaddr->bifname);
3121 return const0_rtx;
3122 }
3123
3124 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
3125 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
3126 bool uses_spr = bif_is_htmspr (*bifaddr);
3127 insn_code icode = bifaddr->icode;
3128
3129 if (uses_spr)
3130 icode = rs6000_htm_spr_icode (nonvoid);
3131
3132 rtx op[MAX_HTM_OPERANDS];
3133 int nopnds = 0;
3134 const insn_operand_data *insn_op = &insn_data[icode].operand[0];
3135
3136 if (nonvoid)
3137 {
3138 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
3139 if (!target
3140 || GET_MODE (target) != tmode
3141 || (uses_spr && !insn_op->predicate (target, tmode)))
3142 target = gen_reg_rtx (tmode);
3143 if (uses_spr)
3144 op[nopnds++] = target;
3145 }
3146
3147 tree arg;
3148 call_expr_arg_iterator iter;
3149
3150 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
3151 {
3152 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
3153 return const0_rtx;
3154
3155 insn_op = &insn_data[icode].operand[nopnds];
3156 op[nopnds] = expand_normal (arg);
3157
3158 if (!insn_op->predicate (op[nopnds], insn_op->mode))
3159 {
3160 /* TODO: This use of constraints could use explanation.
3161 It happens in a couple of places; perhaps factor it into a
3162 function that documents what's happening. */
3163 if (!strcmp (insn_op->constraint, "n"))
3164 {
3165 int arg_num = nonvoid ? nopnds : nopnds + 1;
3166 if (!CONST_INT_P (op[nopnds]))
3167 error ("argument %d must be an unsigned literal", arg_num);
3168 else
3169 error ("argument %d is an unsigned literal that is "
3170 "out of range", arg_num);
3171 return const0_rtx;
3172 }
3173 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
3174 }
3175
3176 nopnds++;
3177 }
3178
3179 /* Handle the builtins for extended mnemonics. These accept
3180 no arguments, but map to builtins that take arguments. */
3181 switch (fcode)
3182 {
3183 case RS6000_BIF_TENDALL: /* Alias for: tend. 1 */
3184 case RS6000_BIF_TRESUME: /* Alias for: tsr. 1 */
3185 op[nopnds++] = GEN_INT (1);
3186 break;
3187 case RS6000_BIF_TSUSPEND: /* Alias for: tsr. 0 */
3188 op[nopnds++] = GEN_INT (0);
3189 break;
3190 default:
3191 break;
3192 }
3193
3194 /* If this builtin accesses SPRs, then pass in the appropriate
3195 SPR number and SPR regno as the last two operands. */
3196 rtx cr = NULL_RTX;
3197 if (uses_spr)
3198 {
3199 machine_mode mode = TARGET_POWERPC64 ? DImode : SImode;
3200 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
3201 }
3202 /* If this builtin accesses a CR field, then pass in a scratch
3203 CR field as the last operand. */
3204 else if (bif_is_htmcr (*bifaddr))
3205 {
3206 cr = gen_reg_rtx (CCmode);
3207 op[nopnds++] = cr;
3208 }
3209
3210 rtx pat;
3211 switch (nopnds)
3212 {
3213 case 1:
3214 pat = GEN_FCN (icode) (op[0]);
3215 break;
3216 case 2:
3217 pat = GEN_FCN (icode) (op[0], op[1]);
3218 break;
3219 case 3:
3220 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
3221 break;
3222 case 4:
3223 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
3224 break;
3225 default:
3226 gcc_unreachable ();
3227 }
3228 if (!pat)
3229 return NULL_RTX;
3230 emit_insn (pat);
3231
3232 if (bif_is_htmcr (*bifaddr))
3233 {
3234 if (fcode == RS6000_BIF_TBEGIN)
3235 {
3236 /* Emit code to set TARGET to true or false depending on
3237 whether the tbegin. instruction succeeded or failed
3238 to start a transaction. We do this by placing the 1's
3239 complement of CR's EQ bit into TARGET. */
3240 rtx scratch = gen_reg_rtx (SImode);
3241 emit_insn (gen_rtx_SET (scratch,
3242 gen_rtx_EQ (SImode, cr,
3243 const0_rtx)));
3244 emit_insn (gen_rtx_SET (target,
3245 gen_rtx_XOR (SImode, scratch,
3246 GEN_INT (1))));
3247 }
3248 else
3249 {
3250 /* Emit code to copy the 4-bit condition register field
3251 CR into the least significant end of register TARGET. */
3252 rtx scratch1 = gen_reg_rtx (SImode);
3253 rtx scratch2 = gen_reg_rtx (SImode);
3254 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
3255 emit_insn (gen_movcc (subreg, cr));
3256 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
3257 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
3258 }
3259 }
3260
3261 if (nonvoid)
3262 return target;
3263 return const0_rtx;
3264 }
3265
3266 /* Expand an expression EXP that calls a built-in function,
3267 with result going to TARGET if that's convenient
3268 (and in mode MODE if that's convenient).
3269 SUBTARGET may be used as the target for computing one of EXP's operands.
3270 IGNORE is nonzero if the value is to be ignored.
3271 Use the new builtin infrastructure. */
3272 rtx
3273 rs6000_expand_builtin (tree exp, rtx target, rtx /* subtarget */,
3274 machine_mode /* mode */, int ignore)
3275 {
3276 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
3277 enum rs6000_gen_builtins fcode
3278 = (enum rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
3279
3280 /* Emit error message if it's an unresolved overloaded builtin. */
3281 if (fcode > RS6000_OVLD_NONE)
3282 {
3283 error ("unresolved overload for builtin %qF", fndecl);
3284 return const0_rtx;
3285 }
3286
3290 3287 size_t uns_fcode = (size_t) fcode;
3288 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
3289
3293 3290 /* TODO: The following commentary and code are inherited from the original
3294 3291 builtin processing code. The commentary is a bit confusing, with the
3295 3292 intent being that KFmode is always IEEE-128, IFmode is always IBM
3296 3293 double-double, and TFmode is the current long double. The code is
3297 3294 confusing in that it converts from KFmode to TFmode pattern names,
3298 3295 when the other direction is more intuitive. Try to address this. */
3296
3300 3297 /* Two different modes (KFmode, TFmode) can represent the IEEE
3301 3298 128-bit floating point type: KFmode when long double is the IBM
3302 3299 extended double, and TFmode when long double is itself IEEE 128-bit.
3303 3300 It is simpler to define only one variant of each built-in function
3304 3301 and switch the insn code here at expand time, rather than defining
3305 3302 two built-ins and using the overload table in rs6000-c.cc to switch
3306 3303 between the two. If we don't have the proper assembler, don't do
3307 3304 this switch because CODE_FOR_*kf* and CODE_FOR_*tf* will both be CODE_FOR_nothing. */
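     /* Editor's note: illustrative, not part of the original source.
	For example, when long double is IEEE 128-bit, a built-in that
	was registered with a KFmode pattern such as CODE_FOR_sqrtkf2_odd
	is redirected below to the equivalent TFmode pattern
	CODE_FOR_sqrttf2_odd; the same remapping covers the other
	round-to-odd arithmetic and the quad-precision insert/extract/
	test patterns listed.  */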
3305 if (FLOAT128_IEEE_P (TFmode))
3306 switch (icode)
3307 {
3308 case CODE_FOR_sqrtkf2_odd:
3309 icode = CODE_FOR_sqrttf2_odd;
3310 break;
3311 case CODE_FOR_trunckfdf2_odd:
3312 icode = CODE_FOR_trunctfdf2_odd;
3313 break;
3314 case CODE_FOR_addkf3_odd:
3315 icode = CODE_FOR_addtf3_odd;
3316 break;
3317 case CODE_FOR_subkf3_odd:
3318 icode = CODE_FOR_subtf3_odd;
3319 break;
3320 case CODE_FOR_mulkf3_odd:
3321 icode = CODE_FOR_multf3_odd;
3322 break;
3323 case CODE_FOR_divkf3_odd:
3324 icode = CODE_FOR_divtf3_odd;
3325 break;
3326 case CODE_FOR_fmakf4_odd:
3327 icode = CODE_FOR_fmatf4_odd;
3328 break;
3329 case CODE_FOR_xsxexpqp_kf:
3330 icode = CODE_FOR_xsxexpqp_tf;
3331 break;
3332 case CODE_FOR_xsxsigqp_kf:
3333 icode = CODE_FOR_xsxsigqp_tf;
3334 break;
3335 case CODE_FOR_xststdcnegqp_kf:
3336 icode = CODE_FOR_xststdcnegqp_tf;
3337 break;
3338 case CODE_FOR_xsiexpqp_kf:
3339 icode = CODE_FOR_xsiexpqp_tf;
3340 break;
3341 case CODE_FOR_xsiexpqpf_kf:
3342 icode = CODE_FOR_xsiexpqpf_tf;
3343 break;
3344 case CODE_FOR_xststdcqp_kf:
3345 icode = CODE_FOR_xststdcqp_tf;
3346 break;
3347 case CODE_FOR_xscmpexpqp_eq_kf:
3348 icode = CODE_FOR_xscmpexpqp_eq_tf;
3349 break;
3350 case CODE_FOR_xscmpexpqp_lt_kf:
3351 icode = CODE_FOR_xscmpexpqp_lt_tf;
3352 break;
3353 case CODE_FOR_xscmpexpqp_gt_kf:
3354 icode = CODE_FOR_xscmpexpqp_gt_tf;
3355 break;
3356 case CODE_FOR_xscmpexpqp_unordered_kf:
3357 icode = CODE_FOR_xscmpexpqp_unordered_tf;
3358 break;
3359 default:
3360 break;
3361 }
3362
3366 3363 /* In case of "#pragma GCC target" changes, we initialize all builtins
3367 3364 but check for actual availability now, at expand time. For
3368 3365 unavailable builtins, generate a normal call. */
3366 bifdata *bifaddr = &rs6000_builtin_info[uns_fcode];
3367
3368 if (!rs6000_builtin_is_supported (fcode))
3369 {
3370 rs6000_invalid_builtin (fcode);
3371 return expand_call (exp, target, ignore);
3372 }
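  /* Editor's note: a hypothetical sketch of the situation described
     above, not part of the original source.  A translation unit may
     enable a builtin for only some functions:

	#pragma GCC target ("cpu=power9")
	long f (void) { return __builtin_darn (); }   // expands normally

	#pragma GCC target ("cpu=power7")
	long g (void) { return __builtin_darn (); }   // rejected here

     Both functions compile, because every builtin is declared up front;
     availability is only checked at expand time, so in g the error
     above is emitted and a normal call is generated instead.  */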
3373
3374 if (bif_is_nosoft (*bifaddr)
3375 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
3376 {
3377 error ("%qs not supported with %<-msoft-float%>",
3378 bifaddr->bifname);
3379 return const0_rtx;
3380 }
3381
3382 if (bif_is_no32bit (*bifaddr) && TARGET_32BIT)
3383 {
3384 error ("%qs is not supported in 32-bit mode", bifaddr->bifname);
3385 return const0_rtx;
3386 }
3387
3388 if (bif_is_ibmld (*bifaddr) && !FLOAT128_2REG_P (TFmode))
3389 {
3390 error ("%qs requires %<long double%> to be IBM 128-bit format",
3391 bifaddr->bifname);
3392 return const0_rtx;
3393 }
3394
3395 if (bif_is_ibm128 (*bifaddr) && !ibm128_float_type_node)
3396 {
3397 error ("%qs requires %<__ibm128%> type support",
3398 bifaddr->bifname);
3399 return const0_rtx;
3400 }
3401
3402 if (bif_is_cpu (*bifaddr))
3403 return cpu_expand_builtin (fcode, exp, target);
3404
3405 if (bif_is_init (*bifaddr))
3406 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
3407
3408 if (bif_is_set (*bifaddr))
3409 return altivec_expand_vec_set_builtin (exp);
3410
3411 if (bif_is_extract (*bifaddr))
3412 return altivec_expand_vec_ext_builtin (exp, target);
3413
3414 if (bif_is_predicate (*bifaddr))
3415 return altivec_expand_predicate_builtin (icode, exp, target);
3416
3417 if (bif_is_htm (*bifaddr))
3418 return htm_expand_builtin (bifaddr, fcode, exp, target);
3419
3420 if (bif_is_32bit (*bifaddr) && TARGET_32BIT)
3421 {
3422 if (fcode == RS6000_BIF_MFTB)
3423 icode = CODE_FOR_rs6000_mftb_si;
3424 else if (fcode == RS6000_BIF_BPERMD)
3425 icode = CODE_FOR_bpermd_si;
3426 else if (fcode == RS6000_BIF_DARN)
3427 icode = CODE_FOR_darn_64_si;
3428 else if (fcode == RS6000_BIF_DARN_32)
3429 icode = CODE_FOR_darn_32_si;
3430 else if (fcode == RS6000_BIF_DARN_RAW)
3431 icode = CODE_FOR_darn_raw_si;
3432 else
3433 gcc_unreachable ();
3434 }
3435
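  /* Editor's note: clarifying comment, not part of the original source.
     The *_ELEMREV built-ins access vector elements in big-endian
     element order; on a big-endian target that is simply the natural
     order, so the plain VSX load/store patterns are selected below.
     The vclzlsbb/vctzlsbb remappings likewise appear to pick the
     variants that need no element reversal; consult the pattern
     definitions for specifics.  */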
3436 if (bif_is_endian (*bifaddr) && BYTES_BIG_ENDIAN)
3437 {
3438 if (fcode == RS6000_BIF_LD_ELEMREV_V1TI)
3439 icode = CODE_FOR_vsx_load_v1ti;
3440 else if (fcode == RS6000_BIF_LD_ELEMREV_V2DF)
3441 icode = CODE_FOR_vsx_load_v2df;
3442 else if (fcode == RS6000_BIF_LD_ELEMREV_V2DI)
3443 icode = CODE_FOR_vsx_load_v2di;
3444 else if (fcode == RS6000_BIF_LD_ELEMREV_V4SF)
3445 icode = CODE_FOR_vsx_load_v4sf;
3446 else if (fcode == RS6000_BIF_LD_ELEMREV_V4SI)
3447 icode = CODE_FOR_vsx_load_v4si;
3448 else if (fcode == RS6000_BIF_LD_ELEMREV_V8HI)
3449 icode = CODE_FOR_vsx_load_v8hi;
3450 else if (fcode == RS6000_BIF_LD_ELEMREV_V16QI)
3451 icode = CODE_FOR_vsx_load_v16qi;
3452 else if (fcode == RS6000_BIF_ST_ELEMREV_V1TI)
3453 icode = CODE_FOR_vsx_store_v1ti;
3454 else if (fcode == RS6000_BIF_ST_ELEMREV_V2DF)
3455 icode = CODE_FOR_vsx_store_v2df;
3456 else if (fcode == RS6000_BIF_ST_ELEMREV_V2DI)
3457 icode = CODE_FOR_vsx_store_v2di;
3458 else if (fcode == RS6000_BIF_ST_ELEMREV_V4SF)
3459 icode = CODE_FOR_vsx_store_v4sf;
3460 else if (fcode == RS6000_BIF_ST_ELEMREV_V4SI)
3461 icode = CODE_FOR_vsx_store_v4si;
3462 else if (fcode == RS6000_BIF_ST_ELEMREV_V8HI)
3463 icode = CODE_FOR_vsx_store_v8hi;
3464 else if (fcode == RS6000_BIF_ST_ELEMREV_V16QI)
3465 icode = CODE_FOR_vsx_store_v16qi;
3466 else if (fcode == RS6000_BIF_VCLZLSBB_V16QI)
3467 icode = CODE_FOR_vclzlsbb_v16qi;
3468 else if (fcode == RS6000_BIF_VCLZLSBB_V4SI)
3469 icode = CODE_FOR_vclzlsbb_v4si;
3470 else if (fcode == RS6000_BIF_VCLZLSBB_V8HI)
3471 icode = CODE_FOR_vclzlsbb_v8hi;
3472 else if (fcode == RS6000_BIF_VCTZLSBB_V16QI)
3473 icode = CODE_FOR_vctzlsbb_v16qi;
3474 else if (fcode == RS6000_BIF_VCTZLSBB_V4SI)
3475 icode = CODE_FOR_vctzlsbb_v4si;
3476 else if (fcode == RS6000_BIF_VCTZLSBB_V8HI)
3477 icode = CODE_FOR_vctzlsbb_v8hi;
3478 else
3479 gcc_unreachable ();
3480 }
3481
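  /* Editor's note: clarifying comment, not part of the original source.
     When long double is the IBM 128-bit format (TARGET_LONG_DOUBLE_128
     && !TARGET_IEEEQUAD), IFmode and TFmode describe the same
     representation, so the __ibm128 pack/unpack built-ins can be
     expanded with the TFmode patterns chosen below.  */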
3482 if (bif_is_ibm128 (*bifaddr) && TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3483 {
3484 if (fcode == RS6000_BIF_PACK_IF)
3485 {
3486 icode = CODE_FOR_packtf;
3487 fcode = RS6000_BIF_PACK_TF;
3488 uns_fcode = (size_t) fcode;
3489 }
3490 else if (fcode == RS6000_BIF_UNPACK_IF)
3491 {
3492 icode = CODE_FOR_unpacktf;
3493 fcode = RS6000_BIF_UNPACK_TF;
3494 uns_fcode = (size_t) fcode;
3495 }
3496 }
3497
3498 /* TRUE iff the built-in function returns void. */
3499 bool void_func = TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node;
3503 3500 /* Insn operand position of the first argument (0 if void-returning, else 1). */
3501 int k;
3502 /* Modes for the return value, if any, and arguments. */
3503 const int MAX_BUILTIN_ARGS = 6;
3504 machine_mode mode[MAX_BUILTIN_ARGS + 1];
3505
3506 if (void_func)
3507 k = 0;
3508 else
3509 {
3510 k = 1;
3511 mode[0] = insn_data[icode].operand[0].mode;
3512 }
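  /* Editor's note: clarifying comment, not part of the original source.
     With K set as above, call argument I maps to insn operand I + K:
     a value-returning builtin uses operand 0 for the result (TARGET)
     and operands 1..N for its arguments, while a void builtin uses
     operands 0..N-1 for its arguments.  */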
3513
3514 /* Tree expressions for each argument. */
3515 tree arg[MAX_BUILTIN_ARGS];
3516 /* RTL expressions for each argument. */
3517 rtx op[MAX_BUILTIN_ARGS];
3518
3519 int nargs = bifaddr->nargs;
3520 gcc_assert (nargs <= MAX_BUILTIN_ARGS);
3524 3521
3523 for (int i = 0; i < nargs; i++)
3524 {
3525 arg[i] = CALL_EXPR_ARG (exp, i);
3526 if (arg[i] == error_mark_node)
3527 return const0_rtx;
3528 STRIP_NOPS (arg[i]);
3529 op[i] = expand_normal (arg[i]);
3530 /* We have a couple of pesky patterns that don't specify the mode... */
3531 mode[i+k] = insn_data[icode].operand[i+k].mode;
3532 if (!mode[i+k])
3533 mode[i+k] = Pmode;
3534 }
3535
3536 /* Check for restricted constant arguments. */
3537 for (int i = 0; i < 2; i++)
3538 {
3539 switch (bifaddr->restr[i])
3540 {
3541 case RES_BITS:
3542 {
3543 size_t mask = 1;
3544 mask <<= bifaddr->restr_val1[i];
3545 mask--;
3546 tree restr_arg = arg[bifaddr->restr_opnd[i] - 1];
3547 STRIP_NOPS (restr_arg);
3548 if (!(TREE_CODE (restr_arg) == INTEGER_CST
3549 && (TREE_INT_CST_LOW (restr_arg) & ~mask) == 0))
3550 {
3551 unsigned p = (1U << bifaddr->restr_val1[i]) - 1;
3552 error ("argument %d must be a literal between 0 and %d,"
3553 " inclusive",
3554 bifaddr->restr_opnd[i], p);
3555 return CONST0_RTX (mode[0]);
3556 }
3557 break;
3558 }
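	  /* Editor's note: worked example, not part of the original
	     source.  For restr_val1[i] == 4 the mask is
	     (1 << 4) - 1 = 0xf, so the argument must be an integer
	     literal in the range 0..15; a non-constant or out-of-range
	     value is rejected with the error above.  */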
3559 case RES_RANGE:
3560 {
3561 tree restr_arg = arg[bifaddr->restr_opnd[i] - 1];
3562 STRIP_NOPS (restr_arg);
3563 if (!(TREE_CODE (restr_arg) == INTEGER_CST
3564 && IN_RANGE (tree_to_shwi (restr_arg),
3565 bifaddr->restr_val1[i],
3566 bifaddr->restr_val2[i])))
3567 {
3568 error ("argument %d must be a literal between %d and %d,"
3569 " inclusive",
3570 bifaddr->restr_opnd[i], bifaddr->restr_val1[i],
3571 bifaddr->restr_val2[i]);
3572 return CONST0_RTX (mode[0]);
3573 }
3574 break;
3575 }
3576 case RES_VAR_RANGE:
3577 {
3578 tree restr_arg = arg[bifaddr->restr_opnd[i] - 1];
3579 STRIP_NOPS (restr_arg);
3580 if (TREE_CODE (restr_arg) == INTEGER_CST
3581 && !IN_RANGE (tree_to_shwi (restr_arg),
3582 bifaddr->restr_val1[i],
3583 bifaddr->restr_val2[i]))
3584 {
3585 error ("argument %d must be a variable or a literal "
3586 "between %d and %d, inclusive",
3587 bifaddr->restr_opnd[i], bifaddr->restr_val1[i],
3588 bifaddr->restr_val2[i]);
3589 return CONST0_RTX (mode[0]);
3590 }
3591 break;
3592 }
3593 case RES_VALUES:
3594 {
3595 tree restr_arg = arg[bifaddr->restr_opnd[i] - 1];
3596 STRIP_NOPS (restr_arg);
3597 if (!(TREE_CODE (restr_arg) == INTEGER_CST
3598 && (tree_to_shwi (restr_arg) == bifaddr->restr_val1[i]
3599 || tree_to_shwi (restr_arg) == bifaddr->restr_val2[i])))
3600 {
3601 error ("argument %d must be either a literal %d or a "
3602 "literal %d",
3603 bifaddr->restr_opnd[i], bifaddr->restr_val1[i],
3604 bifaddr->restr_val2[i]);
3605 return CONST0_RTX (mode[0]);
3606 }
3607 break;
3608 }
3609 default:
3610 case RES_NONE:
3611 break;
3612 }
3613 }
3614
3615 if (bif_is_ldstmask (*bifaddr))
3616 return rs6000_expand_ldst_mask (target, arg[0]);
3617
3618 if (bif_is_stvec (*bifaddr))
3619 {
3620 if (bif_is_reve (*bifaddr))
3621 icode = elemrev_icode (fcode);
3622 return stv_expand_builtin (icode, op, mode[0], mode[1]);
3623 }
3624
3625 if (bif_is_ldvec (*bifaddr))
3626 {
3627 if (bif_is_reve (*bifaddr))
3628 icode = elemrev_icode (fcode);
3629 return ldv_expand_builtin (target, icode, op, mode[0]);
3630 }
3631
3632 if (bif_is_lxvrse (*bifaddr))
3633 return lxvrse_expand_builtin (target, icode, op, mode[0], mode[1]);
3634
3635 if (bif_is_lxvrze (*bifaddr))
3636 return lxvrze_expand_builtin (target, icode, op, mode[0], mode[1]);
3637
3638 if (bif_is_mma (*bifaddr))
3639 return mma_expand_builtin (exp, target, icode, fcode);
3640
3641 if (TREE_TYPE (TREE_TYPE (fndecl)) == void_type_node)
3642 target = NULL_RTX;
3643 else if (target == 0
3644 || GET_MODE (target) != mode[0]
3645 || !insn_data[icode].operand[0].predicate (target, mode[0]))
3646 target = gen_reg_rtx (mode[0]);
3647
3648 for (int i = 0; i < nargs; i++)
3649 if (!insn_data[icode].operand[i+k].predicate (op[i], mode[i+k]))
3650 op[i] = copy_to_mode_reg (mode[i+k], op[i]);
3651
3652 rtx pat;
3653
3654 switch (nargs)
3655 {
3656 case 0:
3657 pat = (void_func
3658 ? GEN_FCN (icode) ()
3659 : GEN_FCN (icode) (target));
3660 break;
3661 case 1:
3662 pat = (void_func
3663 ? GEN_FCN (icode) (op[0])
3664 : GEN_FCN (icode) (target, op[0]));
3665 break;
3666 case 2:
3667 pat = (void_func
3668 ? GEN_FCN (icode) (op[0], op[1])
3669 : GEN_FCN (icode) (target, op[0], op[1]));
3670 break;
3671 case 3:
3672 pat = (void_func
3673 ? GEN_FCN (icode) (op[0], op[1], op[2])
3674 : GEN_FCN (icode) (target, op[0], op[1], op[2]));
3675 break;
3676 case 4:
3677 pat = (void_func
3678 ? GEN_FCN (icode) (op[0], op[1], op[2], op[3])
3679 : GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]));
3680 break;
3681 case 5:
3682 pat = (void_func
3683 ? GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4])
3684 : GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]));
3685 break;
3686 case 6:
3687 pat = (void_func
3688 ? GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4], op[5])
3689 : GEN_FCN (icode) (target, op[0], op[1],
3690 op[2], op[3], op[4], op[5]));
3691 break;
3692 default:
3693 gcc_assert (MAX_BUILTIN_ARGS == 6);
3694 gcc_unreachable ();
3695 }
3696
3697 if (!pat)
3701 3698 return NULL_RTX;
3699
3700 emit_insn (pat);
3701 return target;
3702 }