1 /* Subroutines for the C front end on the PowerPC architecture.
2 Copyright (C) 2002-2022 Free Software Foundation, Inc.
3
4 Contributed by Zack Weinberg <zack@codesourcery.com>
5 and Paolo Bonzini <bonzini@gnu.org>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published
11 by the Free Software Foundation; either version 3, or (at your
12 option) any later version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17 License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #define IN_TARGET_CODE 1
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "target.h"
29 #include "c-family/c-common.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "stringpool.h"
33 #include "stor-layout.h"
34 #include "c-family/c-pragma.h"
35 #include "langhooks.h"
36 #include "c/c-tree.h"
37
38 #include "rs6000-internal.h"
39
40 /* Handle the machine specific pragma longcall. Its syntax is
41
42 # pragma longcall ( TOGGLE )
43
44 where TOGGLE is either 0 or 1.
45
46 rs6000_default_long_calls is set to the value of TOGGLE, changing
47 whether or not new function declarations receive a longcall
48 attribute by default. */
49
50 void
51 rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
52 {
53 #define SYNTAX_ERROR(gmsgid) do { \
54 warning (OPT_Wpragmas, gmsgid); \
55 warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>"); \
56 return; \
57 } while (0)
58
59
60
61 tree x, n;
62
63 /* If we get here, generic code has already scanned the directive
64 leader and the word "longcall". */
65
66 if (pragma_lex (&x) != CPP_OPEN_PAREN)
67 SYNTAX_ERROR ("missing open paren");
68 if (pragma_lex (&n) != CPP_NUMBER)
69 SYNTAX_ERROR ("missing number");
70 if (pragma_lex (&x) != CPP_CLOSE_PAREN)
71 SYNTAX_ERROR ("missing close paren");
72
73 if (n != integer_zero_node && n != integer_one_node)
74 SYNTAX_ERROR ("number must be 0 or 1");
75
76 if (pragma_lex (&x) != CPP_EOF)
77 warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>");
78
79 rs6000_default_long_calls = (n == integer_one_node);
80 }
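/* Usage sketch (illustrative only; the declarations below are made-up
   examples, not part of this file):

     #pragma longcall (1)
     extern void remote_handler (void);    receives the longcall attribute
     #pragma longcall (0)
     extern void local_handler (void);     compiled as a normal near call

   i.e. the pragma toggles rs6000_default_long_calls for the declarations
   that follow it.  */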
81
82 /* Handle defining many CPP flags based on TARGET_xxx. As a general
83 policy, rather than trying to guess what flags a user might want a
84 #define for, it's better to define a flag for everything. */
85
86 #define builtin_define(TXT) cpp_define (pfile, TXT)
87 #define builtin_assert(TXT) cpp_assert (pfile, TXT)
88
89 /* Keep the AltiVec keywords handy for fast comparisons. */
90 static GTY(()) tree __vector_keyword;
91 static GTY(()) tree vector_keyword;
92 static GTY(()) tree __pixel_keyword;
93 static GTY(()) tree pixel_keyword;
94 static GTY(()) tree __bool_keyword;
95 static GTY(()) tree bool_keyword;
96 static GTY(()) tree _Bool_keyword;
97 static GTY(()) tree __int128_type;
98 static GTY(()) tree __uint128_type;
99
100 /* Preserved across calls. */
101 static tree expand_bool_pixel;
102
103 static cpp_hashnode *
104 altivec_categorize_keyword (const cpp_token *tok)
105 {
106 if (tok->type == CPP_NAME)
107 {
108 cpp_hashnode *ident = tok->val.node.node;
109
110 if (ident == C_CPP_HASHNODE (vector_keyword))
111 return C_CPP_HASHNODE (__vector_keyword);
112
113 if (ident == C_CPP_HASHNODE (pixel_keyword))
114 return C_CPP_HASHNODE (__pixel_keyword);
115
116 if (ident == C_CPP_HASHNODE (bool_keyword))
117 return C_CPP_HASHNODE (__bool_keyword);
118
119 if (ident == C_CPP_HASHNODE (_Bool_keyword))
120 return C_CPP_HASHNODE (__bool_keyword);
121
122 return ident;
123 }
124
125 return 0;
126 }
127
128 static void
129 init_vector_keywords (void)
130 {
131 /* Keywords without two leading underscores are context-sensitive, and hence
132 implemented as conditional macros, controlled by the
133 rs6000_macro_to_expand() function below. If we have ISA 2.07 64-bit
134 support, record the __int128_t and __uint128_t types. */
135
136 __vector_keyword = get_identifier ("__vector");
137 C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;
138
139 __pixel_keyword = get_identifier ("__pixel");
140 C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;
141
142 __bool_keyword = get_identifier ("__bool");
143 C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;
144
145 vector_keyword = get_identifier ("vector");
146 C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;
147
148 pixel_keyword = get_identifier ("pixel");
149 C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;
150
151 bool_keyword = get_identifier ("bool");
152 C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;
153
154 _Bool_keyword = get_identifier ("_Bool");
155 C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;
156
157 if (TARGET_VADDUQM)
158 {
159 __int128_type = get_identifier ("__int128_t");
160 __uint128_type = get_identifier ("__uint128_t");
161 }
162 }
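/* Illustration of the conditional-keyword setup above (made-up example,
   not part of this file): "vector", "pixel", "bool" and "_Bool" only
   expand to the AltiVec attribute forms when rs6000_macro_to_expand()
   below decides the surrounding tokens form a vector type.

     vector int v;       "vector" expands; declares an AltiVec vector
     int vector = 0;     "vector" is left alone; an ordinary identifier

   The double-underscore forms (__vector, __pixel, __bool) are ordinary
   macros and expand unconditionally.  */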
163
164 /* Helper function to find out which RID_INT_N_* code is the one for
165 __int128, if any. Returns RID_MAX+1 if none apply, which is safe to
166 compare against for our purposes, since we always expect __int128
167 to be available. */
168 static int
169 rid_int128(void)
170 {
171 int i;
172
173 for (i = 0; i < NUM_INT_N_ENTS; i ++)
174 if (int_n_enabled_p[i]
175 && int_n_data[i].bitsize == 128)
176 return RID_INT_N_0 + i;
177
178 return RID_MAX + 1;
179 }
180
181 /* Called to decide whether a conditional macro should be expanded.
182 Since we have exactly one such macro (i.e., 'vector'), we do not
183 need to examine the 'tok' parameter. */
184
185 static cpp_hashnode *
186 rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
187 {
188 cpp_hashnode *expand_this = tok->val.node.node;
189 cpp_hashnode *ident;
190
191 /* If the current machine does not have altivec, don't look for the
192 keywords. */
193 if (!TARGET_ALTIVEC)
194 return NULL;
195
196 ident = altivec_categorize_keyword (tok);
197
198 if (ident != expand_this)
199 expand_this = NULL;
200
201 if (ident == C_CPP_HASHNODE (__vector_keyword))
202 {
203 int idx = 0;
204 do
205 tok = cpp_peek_token (pfile, idx++);
206 while (tok->type == CPP_PADDING);
207 ident = altivec_categorize_keyword (tok);
208
209 if (ident == C_CPP_HASHNODE (__pixel_keyword))
210 {
211 expand_this = C_CPP_HASHNODE (__vector_keyword);
212 expand_bool_pixel = __pixel_keyword;
213 }
214 else if (ident == C_CPP_HASHNODE (__bool_keyword))
215 {
216 expand_this = C_CPP_HASHNODE (__vector_keyword);
217 expand_bool_pixel = __bool_keyword;
218 }
219 /* The Boost libraries have code with Iterator::vector vector in it. If
220 we allow the normal handling, this module will be called recursively,
221 and the vector will be skipped. */
222 else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
223 {
224 enum rid rid_code = (enum rid)(ident->rid_code);
225 bool is_macro = cpp_macro_p (ident);
226
227 /* If there is a function-like macro, check whether it is going to be
228 invoked with or without arguments. If it is not followed by '(',
229 treat it like a non-macro; otherwise the cpp_get_token below eats
230 tokens that should be preserved. */
231 if (is_macro && cpp_fun_like_macro_p (ident))
232 {
233 int idx2 = idx;
234 do
235 tok = cpp_peek_token (pfile, idx2++);
236 while (tok->type == CPP_PADDING);
237 if (tok->type != CPP_OPEN_PAREN)
238 is_macro = false;
239 }
240
241 if (is_macro)
242 {
243 do
244 (void) cpp_get_token (pfile);
245 while (--idx > 0);
246 do
247 tok = cpp_peek_token (pfile, idx++);
248 while (tok->type == CPP_PADDING);
249 ident = altivec_categorize_keyword (tok);
250 if (ident == C_CPP_HASHNODE (__pixel_keyword))
251 {
252 expand_this = C_CPP_HASHNODE (__vector_keyword);
253 expand_bool_pixel = __pixel_keyword;
254 rid_code = RID_MAX;
255 }
256 else if (ident == C_CPP_HASHNODE (__bool_keyword))
257 {
258 expand_this = C_CPP_HASHNODE (__vector_keyword);
259 expand_bool_pixel = __bool_keyword;
260 rid_code = RID_MAX;
261 }
262 else if (ident)
263 rid_code = (enum rid)(ident->rid_code);
264 }
265
266 if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
267 || rid_code == RID_SHORT || rid_code == RID_SIGNED
268 || rid_code == RID_INT || rid_code == RID_CHAR
269 || rid_code == RID_FLOAT
270 || (rid_code == RID_DOUBLE && TARGET_VSX)
271 || (rid_code == rid_int128 () && TARGET_VADDUQM))
272 {
273 expand_this = C_CPP_HASHNODE (__vector_keyword);
274 /* If the next keyword is bool or pixel, it
275 will need to be expanded as well. */
276 do
277 tok = cpp_peek_token (pfile, idx++);
278 while (tok->type == CPP_PADDING);
279 ident = altivec_categorize_keyword (tok);
280
281 if (ident == C_CPP_HASHNODE (__pixel_keyword))
282 expand_bool_pixel = __pixel_keyword;
283 else if (ident == C_CPP_HASHNODE (__bool_keyword))
284 expand_bool_pixel = __bool_keyword;
285 else
286 {
287 /* Try two tokens down, too. */
288 do
289 tok = cpp_peek_token (pfile, idx++);
290 while (tok->type == CPP_PADDING);
291 ident = altivec_categorize_keyword (tok);
292 if (ident == C_CPP_HASHNODE (__pixel_keyword))
293 expand_bool_pixel = __pixel_keyword;
294 else if (ident == C_CPP_HASHNODE (__bool_keyword))
295 expand_bool_pixel = __bool_keyword;
296 }
297 }
298
299 /* Support vector __int128_t, but we don't need to worry about bool
300 or pixel on this type. */
301 else if (TARGET_VADDUQM
302 && (ident == C_CPP_HASHNODE (__int128_type)
303 || ident == C_CPP_HASHNODE (__uint128_type)))
304 expand_this = C_CPP_HASHNODE (__vector_keyword);
305 }
306 }
307 else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
308 {
309 expand_this = C_CPP_HASHNODE (__pixel_keyword);
310 expand_bool_pixel = 0;
311 }
312 else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
313 {
314 expand_this = C_CPP_HASHNODE (__bool_keyword);
315 expand_bool_pixel = 0;
316 }
317
318 return expand_this;
319 }
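/* A few token sequences and the decision the code above reaches for them
   (illustrative sketch; assumes -maltivec, and -mvsx for the double case):

     vector unsigned int v;   expand "vector": the next keyword is a
                              vector element type (RID_UNSIGNED)
     vector bool char b;      expand "vector" and queue "bool" for
                              expansion via expand_bool_pixel
     vector double d;         expands only when TARGET_VSX is set
     int vector;              "vector" is not expanded: the following
                              token is not a type keyword  */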
320
321
322 /* Define or undefine a single macro. */
323
324 static void
325 rs6000_define_or_undefine_macro (bool define_p, const char *name)
326 {
327 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
328 fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);
329
330 if (define_p)
331 cpp_define (parse_in, name);
332 else
333 cpp_undef (parse_in, name);
334 }
335
336 /* Define or undefine macros based on the current target. If the user does
337 #pragma GCC target, we need to adjust the macros dynamically. Note, some of
338 the options needed for builtins have been moved to separate variables, so
339 both the target flags and the builtin flags are passed as arguments. */
340
341 void
342 rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
343 HOST_WIDE_INT bu_mask)
344 {
345 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
346 fprintf (stderr,
347 "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
348 ", " HOST_WIDE_INT_PRINT_HEX ")\n",
349 (define_p) ? "define" : "undef",
350 flags, bu_mask);
351
352 /* Each of the flags mentioned below controls whether certain
353 preprocessor macros will be automatically defined when
354 preprocessing source files for compilation by this compiler.
355 While most of these flags can be enabled or disabled
356 explicitly by specifying certain command-line options when
357 invoking the compiler, there are also many ways in which these
358 flags are enabled or disabled implicitly, based on compiler
359 defaults, configuration choices, and on the presence of certain
360 related command-line options. Many, but not all, of these
361 implicit behaviors can be found in file "rs6000.c", the
362 rs6000_option_override_internal() function.
363
364 In general, each of the flags may be automatically enabled in
365 any of the following conditions:
366
367 1. If no -mcpu target is specified on the command line and no
368 --with-cpu target is specified to the configure command line
369 and the TARGET_DEFAULT macro for this default cpu host
370 includes the flag, and the flag has not been explicitly disabled
371 by command-line options.
372
373 2. If the target specified with -mcpu=target on the command line, or
374 in the absence of a -mcpu=target command-line option, if the
375 target specified using --with-cpu=target on the configure
376 command line, is disqualified because the associated binary
377 tools (e.g. the assembler) lack support for the requested cpu,
378 and the TARGET_DEFAULT macro for this default cpu host
379 includes the flag, and the flag has not been explicitly disabled
380 by command-line options.
381
382 3. If either of the above two conditions apply except that the
383 TARGET_DEFAULT macro is defined to equal zero, and
384 TARGET_POWERPC64 and
385 a) BYTES_BIG_ENDIAN and the flag to be enabled is either
386 MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
387 target), or
388 b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
389 MASK_POWERPC64 or it is one of the flags included in
390 ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).
391
392 4. If a cpu has been requested with a -mcpu=target command-line option
393 and this cpu has not been disqualified due to shortcomings of the
394 binary tools, and the set of flags associated with the requested cpu
395 include the flag to be enabled. See rs6000-cpus.def for macro
396 definitions that represent various ABI standards
397 (e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
398 the specific flags that are associated with each of the cpu
399 choices that can be specified as the target of a -mcpu=target
400 compile option, or as the target of a --with-cpu=target
401 configure option. Target flags that are specified in either
402 of these two ways are considered "implicit" since the flags
403 are not mentioned specifically by name.
404
405 Additional documentation describing behavior specific to
406 particular flags is provided below, immediately preceding the
407 use of each relevant flag.
408
409 5. If there is no -mcpu=target command-line option, and the cpu
410 requested by a --with-cpu=target command-line option has not
411 been disqualified due to shortcomings of the binary tools, and
412 the set of flags associated with the specified target include
413 the flag to be enabled. See the notes immediately above for a
414 summary of the flags associated with particular cpu
415 definitions. */
416
417 /* rs6000_isa_flags based options. */
418 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
419 if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
420 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
421 if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
422 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
423 if ((flags & OPTION_MASK_POWERPC64) != 0)
424 rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
425 if ((flags & OPTION_MASK_MFCRF) != 0)
426 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
427 if ((flags & OPTION_MASK_POPCNTB) != 0)
428 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
429 if ((flags & OPTION_MASK_FPRND) != 0)
430 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
431 if ((flags & OPTION_MASK_CMPB) != 0)
432 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
433 if ((flags & OPTION_MASK_POPCNTD) != 0)
434 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
435 /* Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
436 turned on in the following condition:
437 1. TARGET_P8_VECTOR is enabled and OPTION_MASK_DIRECT_MOVE is not
438 explicitly disabled.
439 Hereafter, the OPTION_MASK_DIRECT_MOVE flag is considered to
440 have been turned on explicitly.
441 Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
442 turned off in any of the following conditions:
443 1. TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX is explicitly
444 disabled and OPTION_MASK_DIRECT_MOVE was not explicitly
445 enabled.
446 2. TARGET_VSX is off. */
447 if ((flags & OPTION_MASK_DIRECT_MOVE) != 0)
448 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
449 if ((flags & OPTION_MASK_MODULO) != 0)
450 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
451 if ((flags & OPTION_MASK_POWER10) != 0)
452 rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
453 if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
454 rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
455 if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
456 rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
457 /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
458 in any of the following conditions:
459 1. The operating system is Darwin and it is configured for 64
460 bit. (See darwin_rs6000_override_options.)
461 2. The operating system is Darwin and the operating system
462 version is 10.5 or higher and the user has not explicitly
463 disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
464 the compiler is not producing code for integration within the
465 kernel. (See darwin_rs6000_override_options.)
466 Note that the OPTION_MASK_ALTIVEC flag is automatically turned
467 off in any of the following conditions:
468 1. The operating system does not support saving of AltiVec
469 registers (OS_MISSING_ALTIVEC).
470 2. If an inner context (as introduced by
471 __attribute__((__target__())) or #pragma GCC target()
472 requests a target that normally enables the
473 OPTION_MASK_ALTIVEC flag but the outer-most "main target"
474 does not support the rs6000_altivec_abi, this flag is
475 turned off for the inner context unless OPTION_MASK_ALTIVEC
476 was explicitly enabled for the inner context. */
477 if ((flags & OPTION_MASK_ALTIVEC) != 0)
478 {
479 const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
480 rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
481 rs6000_define_or_undefine_macro (define_p, vec_str);
482
483 /* Define this when supporting context-sensitive keywords. */
484 if (!flag_iso)
485 rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
486 if (rs6000_aix_extabi)
487 rs6000_define_or_undefine_macro (define_p, "__EXTABI__");
488 }
489 /* Note that the OPTION_MASK_VSX flag is automatically turned on in
490 the following conditions:
491 1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
492 was not explicitly turned off. Hereafter, the OPTION_MASK_VSX
493 flag is considered to have been explicitly turned on.
494 Note that the OPTION_MASK_VSX flag is automatically turned off in
495 the following conditions:
496 1. The operating system does not support saving of AltiVec
497 registers (OS_MISSING_ALTIVEC).
498 2. If the option TARGET_HARD_FLOAT is turned off. Hereafter, the
499 OPTION_MASK_VSX flag is considered to have been turned off
500 explicitly.
501 3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
502 compilation context, or if it is turned on by any means in an
503 inner compilation context. Hereafter, the OPTION_MASK_VSX
504 flag is considered to have been turned off explicitly.
505 4. If TARGET_ALTIVEC was explicitly disabled. Hereafter, the
506 OPTION_MASK_VSX flag is considered to have been turned off
507 explicitly.
508 5. If an inner context (as introduced by
509 __attribute__((__target__())) or #pragma GCC target()
510 requests a target that normally enables the
511 OPTION_MASK_VSX flag but the outer-most "main target"
512 does not support the rs6000_altivec_abi, this flag is
513 turned off for the inner context unless OPTION_MASK_VSX
514 was explicitly enabled for the inner context. */
515 if ((flags & OPTION_MASK_VSX) != 0)
516 rs6000_define_or_undefine_macro (define_p, "__VSX__");
517 if ((flags & OPTION_MASK_HTM) != 0)
518 {
519 rs6000_define_or_undefine_macro (define_p, "__HTM__");
520 /* Tell the user that our HTM insn patterns act as memory barriers. */
521 rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
522 }
523 /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
524 on in the following conditions:
525 1. TARGET_P9_VECTOR is explicitly turned on and
526 OPTION_MASK_P8_VECTOR is not explicitly turned off.
527 Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
528 have been turned off explicitly.
529 Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
530 off in the following conditions:
531 1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
532 were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
533 not turned on explicitly.
534 2. If TARGET_ALTIVEC is turned off. Hereafter, the
535 OPTION_MASK_P8_VECTOR flag is considered to have been turned off
536 explicitly.
537 3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
538 explicitly enabled. If TARGET_VSX is explicitly enabled, the
539 OPTION_MASK_P8_VECTOR flag is hereafter also considered to
540 have been turned off explicitly. */
541 if ((flags & OPTION_MASK_P8_VECTOR) != 0)
542 rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
543 /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
544 off in the following conditions:
545 1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
546 not turned on explicitly. Hereafter, if OPTION_MASK_P8_VECTOR
547 was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
548 also considered to have been turned off explicitly.
549 Note that the OPTION_MASK_P9_VECTOR is automatically turned on
550 in the following conditions:
551 1. If TARGET_P9_MINMAX was turned on explicitly.
552 Hereafter, the OPTION_MASK_P9_VECTOR flag is considered to
553 have been turned on explicitly. */
554 if ((flags & OPTION_MASK_P9_VECTOR) != 0)
555 rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
556 /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
557 turned off in the following conditions:
558 1. If TARGET_POWERPC64 is turned off.
559 2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
560 load/store are disabled on little endian). */
561 if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
562 rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
563 /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
564 turned off in the following conditions:
565 1. If TARGET_POWERPC64 is turned off.
566 Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
567 automatically turned on in the following conditions:
568 1. If TARGET_QUAD_MEMORY and this flag was not explicitly
569 disabled. */
570 if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
571 rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
572 /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
573 in the following conditions:
574 1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
575 are turned off explicitly and OPTION_MASK_CRYPTO is not turned
576 on explicitly.
577 2. If TARGET_ALTIVEC is turned off. */
578 if ((flags & OPTION_MASK_CRYPTO) != 0)
579 rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
580 if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
581 {
582 rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
583 if (define_p)
584 rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
585 else
586 rs6000_define_or_undefine_macro (false, "__float128");
587 }
588 /* OPTION_MASK_FLOAT128_HW can be turned on if -mcpu=power9 is used or
589 via the target attribute/pragma. */
590 if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
591 rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");
592
593 /* Options from the builtin masks. */
594 /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
595 PROCESSOR_CELL) (e.g. -mcpu=cell). */
596 if ((bu_mask & RS6000_BTM_CELL) != 0)
597 rs6000_define_or_undefine_macro (define_p, "__PPU__");
598
599 /* Tell the user if we support the MMA instructions. */
600 if ((flags & OPTION_MASK_MMA) != 0)
601 rs6000_define_or_undefine_macro (define_p, "__MMA__");
602 /* Whether pc-relative code is being generated. */
603 if ((flags & OPTION_MASK_PCREL) != 0)
604 rs6000_define_or_undefine_macro (define_p, "__PCREL__");
605 /* Tell the user -mrop-protect is in play. */
606 if (rs6000_rop_protect)
607 rs6000_define_or_undefine_macro (define_p, "__ROP_PROTECT__");
608 }
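/* Sketch of the effect from the user's point of view (illustrative only;
   the function and file are made-up examples):

     #pragma GCC target ("cpu=power9")
     int uses_p9 (void)
     {
     #ifdef _ARCH_PWR9
       return 1;
     #else
       return 0;
     #endif
     }
     #pragma GCC reset_options

   Roughly speaking, the pragma machinery calls this function with
   define_p == false for the previous flag set and define_p == true for
   the new one, so the macro set tracks the active target.  */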
609
610 void
611 rs6000_cpu_cpp_builtins (cpp_reader *pfile)
612 {
613 /* Define all of the common macros. */
614 rs6000_target_modify_macros (true, rs6000_isa_flags,
615 rs6000_builtin_mask_calculate ());
616
617 if (TARGET_FRE)
618 builtin_define ("__RECIP__");
619 if (TARGET_FRES)
620 builtin_define ("__RECIPF__");
621 if (TARGET_FRSQRTE)
622 builtin_define ("__RSQRTE__");
623 if (TARGET_FRSQRTES)
624 builtin_define ("__RSQRTEF__");
625 if (TARGET_FLOAT128_TYPE)
626 builtin_define ("__FLOAT128_TYPE__");
627 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
628 builtin_define ("__BUILTIN_CPU_SUPPORTS__");
629 #endif
630
631 if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
632 {
633 /* Define the AltiVec syntactic elements. */
634 builtin_define ("__vector=__attribute__((altivec(vector__)))");
635 builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
636 builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
637
638 if (!flag_iso)
639 {
640 builtin_define ("vector=vector");
641 builtin_define ("pixel=pixel");
642 builtin_define ("bool=bool");
643 builtin_define ("_Bool=_Bool");
644 init_vector_keywords ();
645
646 /* Enable context-sensitive macros. */
647 cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
648 }
649 }
650 if (!TARGET_HARD_FLOAT)
651 builtin_define ("_SOFT_DOUBLE");
652 /* Used by lwarx/stwcx. errata work-around. */
653 if (rs6000_cpu == PROCESSOR_PPC405)
654 builtin_define ("__PPC405__");
655 /* Used by libstdc++. */
656 if (TARGET_NO_LWSYNC)
657 builtin_define ("__NO_LWSYNC__");
658
659 if (TARGET_EXTRA_BUILTINS)
660 {
661 /* For the VSX builtin functions identical to Altivec functions, just map
662 the altivec builtin into the vsx version (the altivec functions
663 generate VSX code if -mvsx). */
664 builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
665 builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
666 builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
667 builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
668 builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
669 builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
670 builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");
671
672 /* Also map the "a" and "m" forms of the multiply/add instructions to the
673 corresponding builtin for people blindly going off the instruction manual. */
674 builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
675 builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
676 builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
677 builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
678 builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
679 builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
680 builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
681 builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
682 builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
683 builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
684 builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
685 builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
686 builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
687 builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
688 builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
689 builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
690 }
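/* These builtin_define calls create plain object-like macros, so the
   remapping is purely textual, e.g. (illustrative):

     __builtin_vsx_xxland (a, b)       ->  __builtin_vec_and (a, b)
     __builtin_vsx_xvmaddmdp (x, y, z) ->  __builtin_vsx_xvmadddp (x, y, z)

   before the front end ever sees the call.  */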
691
692 /* Map the old _Float128 'q' builtins into the new 'f128' builtins. */
693 if (TARGET_FLOAT128_TYPE)
694 {
695 builtin_define ("__builtin_fabsq=__builtin_fabsf128");
696 builtin_define ("__builtin_copysignq=__builtin_copysignf128");
697 builtin_define ("__builtin_nanq=__builtin_nanf128");
698 builtin_define ("__builtin_nansq=__builtin_nansf128");
699 builtin_define ("__builtin_infq=__builtin_inff128");
700 builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
701 }
702
703 /* Tell users they can use __builtin_bswap{16,64}. */
704 builtin_define ("__HAVE_BSWAP__");
705
706 /* May be overridden by target configuration. */
707 RS6000_CPU_CPP_ENDIAN_BUILTINS();
708
709 if (TARGET_LONG_DOUBLE_128)
710 {
711 builtin_define ("__LONG_DOUBLE_128__");
712 builtin_define ("__LONGDOUBLE128");
713
714 if (TARGET_IEEEQUAD)
715 {
716 /* Older versions of GLIBC used __attribute__((__KC__)) to create the
717 IEEE 128-bit floating point complex type for C++ (which does not
718 support _Float128 _Complex). If the default for long double is
719 IEEE 128-bit mode, the library would need to use
720 __attribute__((__TC__)) instead. Defining __KF__ and __KC__
721 is a stop-gap to build with the older libraries, until we
722 get an updated library. */
723 builtin_define ("__LONG_DOUBLE_IEEE128__");
724 builtin_define ("__KF__=__TF__");
725 builtin_define ("__KC__=__TC__");
726 }
727 else
728 builtin_define ("__LONG_DOUBLE_IBM128__");
729 }
730
731 switch (TARGET_CMODEL)
732 {
733 /* Deliberately omit __CMODEL_SMALL__ since that was the default
734 before -mcmodel support was added. */
735 case CMODEL_MEDIUM:
736 builtin_define ("__CMODEL_MEDIUM__");
737 break;
738 case CMODEL_LARGE:
739 builtin_define ("__CMODEL_LARGE__");
740 break;
741 default:
742 break;
743 }
744
745 switch (rs6000_current_abi)
746 {
747 case ABI_V4:
748 builtin_define ("_CALL_SYSV");
749 break;
750 case ABI_AIX:
751 builtin_define ("_CALL_AIXDESC");
752 builtin_define ("_CALL_AIX");
753 builtin_define ("_CALL_ELF=1");
754 break;
755 case ABI_ELFv2:
756 builtin_define ("_CALL_ELF=2");
757 break;
758 case ABI_DARWIN:
759 builtin_define ("_CALL_DARWIN");
760 break;
761 default:
762 break;
763 }
764
765 /* Vector element order. */
766 if (BYTES_BIG_ENDIAN)
767 builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
768 else
769 builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");
770
771 /* Let the compiled code know if 'f' class registers will not be available. */
772 if (TARGET_SOFT_FLOAT)
773 builtin_define ("__NO_FPRS__");
774
775 /* Whether aggregates passed by value are aligned to a 16 byte boundary
776 if their alignment is 16 bytes or larger. */
777 if ((TARGET_MACHO && rs6000_darwin64_abi)
778 || DEFAULT_ABI == ABI_ELFv2
779 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
780 builtin_define ("__STRUCT_PARM_ALIGN__=16");
781 }
782
783 \f
784
785 /* Convert a type stored into a struct altivec_builtin_types as ID,
786 into a tree. The types are in rs6000_builtin_types: negative values
787 create a pointer type for the type associated to ~ID. Note it is
788 a logical NOT, rather than a negation, otherwise you cannot represent
789 a pointer type for ID 0. */
790
791 static inline tree
792 rs6000_builtin_type (int id)
793 {
794 tree t;
795 t = rs6000_builtin_types[id < 0 ? ~id : id];
796 return id < 0 ? build_pointer_type (t) : t;
797 }
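/* Worked example of the ~ID encoding (the index value is illustrative,
   not an actual table position): if some type sat at index 7 of
   rs6000_builtin_types, an ID of 7 would yield that type directly, while
   an ID of ~7 (i.e. -8) would yield a pointer to it.  Using bitwise NOT
   rather than negation keeps index 0 usable, since ~0 == -1 is still
   distinguishable from 0.  */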
798
799 /* Check whether the type of an argument, T, is compatible with a type ID
800 stored into a struct altivec_builtin_types. Integer types are considered
801 compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
802 the decision. Also allow long double and _Float128 to be compatible if
803 -mabi=ieeelongdouble. */
804
805 static inline bool
806 is_float128_p (tree t)
807 {
808 return (t == float128_type_node
809 || (TARGET_IEEEQUAD
810 && TARGET_LONG_DOUBLE_128
811 && t == long_double_type_node));
812 }
813
814
815 /* Return true iff ARGTYPE can be compatibly passed as PARMTYPE. */
816 static bool
817 rs6000_builtin_type_compatible (tree parmtype, tree argtype)
818 {
819 if (parmtype == error_mark_node)
820 return false;
821
822 if (INTEGRAL_TYPE_P (parmtype) && INTEGRAL_TYPE_P (argtype))
823 return true;
824
825 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
826 && is_float128_p (parmtype) && is_float128_p (argtype))
827 return true;
828
829 if (POINTER_TYPE_P (parmtype) && POINTER_TYPE_P (argtype))
830 {
831 parmtype = TREE_TYPE (parmtype);
832 argtype = TREE_TYPE (argtype);
833 if (TYPE_READONLY (argtype))
834 parmtype = build_qualified_type (parmtype, TYPE_QUAL_CONST);
835 }
836
837 return lang_hooks.types_compatible_p (parmtype, argtype);
838 }
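/* Examples of the compatibility rules above (illustrative):

     int parm, unsigned char arg           compatible (both integral)
     long double parm, _Float128 arg       compatible only when
                                           -mabi=ieeelongdouble is in effect
     int * parm, const int * arg           compatible (const is added to
                                           the parameter target for the check)
     vector float parm, vector double arg  not compatible (decided by
                                           lang_hooks.types_compatible_p)  */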
839
840 /* In addition to calling fold_convert for EXPR of type TYPE, also
841 call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be
842 hiding there (PR47197). */
843
844 static tree
845 fully_fold_convert (tree type, tree expr)
846 {
847 tree result = fold_convert (type, expr);
848 bool maybe_const = true;
849
850 if (!c_dialect_cxx ())
851 result = c_fully_fold (result, false, &maybe_const);
852
853 return result;
854 }
855
856 /* Build a tree for a function call to an Altivec non-overloaded builtin.
857 The specific builtin that matched the types and args is identified by
858 BIF_ID, and OVLD_ID is the overloaded builtin it was resolved from.
859
860 Essentially all this does is call fold_convert on the N arguments in
861 ARGS, with a small exception for the vec_{all,any}_{ge,le} predicates. */
862
863 static tree
864 altivec_build_resolved_builtin (tree *args, int n, tree fntype, tree ret_type,
865 rs6000_gen_builtins bif_id,
866 rs6000_gen_builtins ovld_id)
867 {
868 tree argtypes = TYPE_ARG_TYPES (fntype);
869 tree arg_type[MAX_OVLD_ARGS];
870 tree fndecl = rs6000_builtin_decls[bif_id];
871
872 for (int i = 0; i < n; i++)
873 {
874 arg_type[i] = TREE_VALUE (argtypes);
875 argtypes = TREE_CHAIN (argtypes);
876 }
877
878 /* The AltiVec overloading implementation is overall gross, but this
879 is particularly disgusting. The vec_{all,any}_{ge,le} builtins
880 are completely different for floating-point vs. integer vector
881 types, because the former has vcmpgefp, but the latter should use
882 vcmpgtXX.
883
884 In practice, the second and third arguments are swapped, and the
885 condition (LT vs. EQ, which is recognizable by bit 1 of the first
886 argument) is reversed. Patch the arguments here before building
887 the resolved CALL_EXPR. */
888 if (n == 3
889 && ovld_id == RS6000_OVLD_VEC_CMPGE_P
890 && bif_id != RS6000_BIF_VCMPGEFP_P
891 && bif_id != RS6000_BIF_XVCMPGEDP_P)
892 {
893 std::swap (args[1], args[2]);
894 std::swap (arg_type[1], arg_type[2]);
895
896 args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
897 build_int_cst (NULL_TREE, 2));
898 }
899
900 for (int j = 0; j < n; j++)
901 args[j] = fully_fold_convert (arg_type[j], args[j]);
902
903 /* If the number of arguments to an overloaded function increases,
904 we must expand this switch. */
905 gcc_assert (MAX_OVLD_ARGS <= 4);
906
907 tree call;
908 switch (n)
909 {
910 case 0:
911 call = build_call_expr (fndecl, 0);
912 break;
913 case 1:
914 call = build_call_expr (fndecl, 1, args[0]);
915 break;
916 case 2:
917 call = build_call_expr (fndecl, 2, args[0], args[1]);
918 break;
919 case 3:
920 call = build_call_expr (fndecl, 3, args[0], args[1], args[2]);
921 break;
922 case 4:
923 call = build_call_expr (fndecl, 4, args[0], args[1], args[2], args[3]);
924 break;
925 default:
926 gcc_unreachable ();
927 }
928 return fold_convert (ret_type, call);
929 }
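/* Sketch of the predicate patch-up above (illustrative; the exact builtin
   chosen depends on the overload tables): for signed int vectors

     vec_all_ge (a, b)

   has no vcmpge instruction to map to, so it is resolved to the vcmpgtsw
   predicate builtin with the two vector operands swapped and the CR6
   selector in args[0] XORed with 2, using the identity

     a >= b   is equivalent to   not (b > a).  */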
930
931 /* Enumeration of possible results from attempted overload resolution.
932 This is used by special-case helper functions to tell their caller
933 whether they succeeded and what still needs to be done.
934
935 unresolved = Still needs processing
936 resolved = Resolved (but may be an error_mark_node)
937 resolved_bad = An error that needs handling by the caller. */
938
939 enum resolution { unresolved, resolved, resolved_bad };
940
941 /* Resolve an overloaded vec_mul call and return a tree expression for the
942 resolved call if successful. NARGS is the number of arguments to the call.
943 ARGLIST contains the arguments. RES must be set to indicate the status of
944 the resolution attempt. LOC contains statement location information. */
945
946 static tree
947 resolve_vec_mul (resolution *res, vec<tree, va_gc> *arglist, unsigned nargs,
948 location_t loc)
949 {
950 /* vec_mul needs to be special cased because there are no instructions for it
951 for the {un}signed char, {un}signed short, and {un}signed int types. */
952 if (nargs != 2)
953 {
954 error ("builtin %qs only accepts 2 arguments", "vec_mul");
955 *res = resolved;
956 return error_mark_node;
957 }
958
959 tree arg0 = (*arglist)[0];
960 tree arg0_type = TREE_TYPE (arg0);
961 tree arg1 = (*arglist)[1];
962 tree arg1_type = TREE_TYPE (arg1);
963
964 /* Both arguments must be vectors and the types must be compatible. */
965 if (TREE_CODE (arg0_type) != VECTOR_TYPE
966 || !lang_hooks.types_compatible_p (arg0_type, arg1_type))
967 {
968 *res = resolved_bad;
969 return error_mark_node;
970 }
971
972 switch (TYPE_MODE (TREE_TYPE (arg0_type)))
973 {
974 case E_QImode:
975 case E_HImode:
976 case E_SImode:
977 case E_DImode:
978 case E_TImode:
979 /* For scalar types just use a multiply expression. */
980 *res = resolved;
981 return fold_build2_loc (loc, MULT_EXPR, TREE_TYPE (arg0), arg0,
982 fold_convert (TREE_TYPE (arg0), arg1));
983 case E_SFmode:
984 {
985 /* For floats use the xvmulsp instruction directly. */
986 *res = resolved;
987 tree call = rs6000_builtin_decls[RS6000_BIF_XVMULSP];
988 return build_call_expr (call, 2, arg0, arg1);
989 }
990 case E_DFmode:
991 {
992 /* For doubles use the xvmuldp instruction directly. */
993 *res = resolved;
994 tree call = rs6000_builtin_decls[RS6000_BIF_XVMULDP];
995 return build_call_expr (call, 2, arg0, arg1);
996 }
997 /* Other types are errors. */
998 default:
999 *res = resolved_bad;
1000 return error_mark_node;
1001 }
1002 }
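/* Resolution examples for the cases above (illustrative):

     vector int a, b;
     vector float x, y;
     vector double p, q;

     vec_mul (a, b)   ->  a * b   (a MULT_EXPR on the vector operands)
     vec_mul (x, y)   ->  call to the RS6000_BIF_XVMULSP instance
     vec_mul (p, q)   ->  call to the RS6000_BIF_XVMULDP instance

   User code simply writes vec_mul; the mapping above happens here during
   overload resolution.  */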
1003
1004 /* Resolve an overloaded vec_cmpne call and return a tree expression for the
1005 resolved call if successful. NARGS is the number of arguments to the call.
1006 ARGLIST contains the arguments. RES must be set to indicate the status of
1007 the resolution attempt. LOC contains statement location information. */
1008
1009 static tree
1010 resolve_vec_cmpne (resolution *res, vec<tree, va_gc> *arglist, unsigned nargs,
1011 location_t loc)
1012 {
1013 /* vec_cmpne needs to be special cased because there are no instructions
1014 for it (prior to Power9). */
1015 if (nargs != 2)
1016 {
1017 error ("builtin %qs only accepts 2 arguments", "vec_cmpne");
1018 *res = resolved;
1019 return error_mark_node;
1020 }
1021
1022 tree arg0 = (*arglist)[0];
1023 tree arg0_type = TREE_TYPE (arg0);
1024 tree arg1 = (*arglist)[1];
1025 tree arg1_type = TREE_TYPE (arg1);
1026
1027 /* Both arguments must be vectors and the types must be compatible. */
1028 if (TREE_CODE (arg0_type) != VECTOR_TYPE
1029 || !lang_hooks.types_compatible_p (arg0_type, arg1_type))
1030 {
1031 *res = resolved_bad;
1032 return error_mark_node;
1033 }
1034
1035 machine_mode arg0_elt_mode = TYPE_MODE (TREE_TYPE (arg0_type));
1036
1037 /* Power9 instructions provide the most efficient implementation of
1038 ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
1039 or SFmode or DFmode. */
1040 if (!TARGET_P9_VECTOR
1041 || arg0_elt_mode == DImode
1042 || arg0_elt_mode == TImode
1043 || arg0_elt_mode == SFmode
1044 || arg0_elt_mode == DFmode)
1045 {
1046 switch (arg0_elt_mode)
1047 {
1048 /* vec_cmpne (va, vb) == vec_nor (vec_cmpeq (va, vb),
1049 vec_cmpeq (va, vb)). */
1050 /* Note: vec_nand also works, but the optimizers change
1051 vec_nand into vec_nor anyway. */
1052 case E_QImode:
1053 case E_HImode:
1054 case E_SImode:
1055 case E_DImode:
1056 case E_TImode:
1057 case E_SFmode:
1058 case E_DFmode:
1059 {
1060 /* call = vec_cmpeq (va, vb)
1061 result = vec_nor (call, call). */
1062 vec<tree, va_gc> *params = make_tree_vector ();
1063 vec_safe_push (params, arg0);
1064 vec_safe_push (params, arg1);
1065 tree decl = rs6000_builtin_decls[RS6000_OVLD_VEC_CMPEQ];
1066 tree call = altivec_resolve_overloaded_builtin (loc, decl, params);
1067 /* Use save_expr to ensure that operands used more than once
1068 that may have side effects (like calls) are only evaluated
1069 once. */
1070 call = save_expr (call);
1071 params = make_tree_vector ();
1072 vec_safe_push (params, call);
1073 vec_safe_push (params, call);
1074 decl = rs6000_builtin_decls[RS6000_OVLD_VEC_NOR];
1075 *res = resolved;
1076 return altivec_resolve_overloaded_builtin (loc, decl, params);
1077 }
1078 /* Other types are errors. */
1079 default:
1080 *res = resolved_bad;
1081 return error_mark_node;
1082 }
1083 }
1084
1085 /* Otherwise this call is unresolved, and altivec_resolve_overloaded_builtin
1086 will later process the Power9 alternative. */
1087 *res = unresolved;
1088 return error_mark_node;
1089 }
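/* Sketch of the pre-Power9 lowering performed above (illustrative):

     vector int a, b;
     vector bool int r = vec_cmpne (a, b);

   becomes, in effect,

     tmp = vec_cmpeq (a, b);
     r   = vec_nor (tmp, tmp);

   where the vec_cmpeq result is wrapped in save_expr so it is evaluated
   only once.  On Power9, for the element modes it handles directly, the
   call is left unresolved here and the normal machinery picks it up.  */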
1090
1091 /* Resolve an overloaded vec_adde or vec_sube call and return a tree
1092 expression for the resolved call if successful. NARGS is the number of
1093 arguments to the call. ARGLIST contains the arguments. RES must be set
1094 to indicate the status of the resolution attempt. LOC contains statement
1095 location information. */
1096
1097 static tree
1098 resolve_vec_adde_sube (resolution *res, rs6000_gen_builtins fcode,
1099 vec<tree, va_gc> *arglist, unsigned nargs,
1100 location_t loc)
1101 {
1102 /* vec_adde and vec_sube need to be special cased because there is no
1103 instruction for the {un}signed int version. */
1104 if (nargs != 3)
1105 {
1106 const char *name;
1107 name = fcode == RS6000_OVLD_VEC_ADDE ? "vec_adde" : "vec_sube";
1108 error ("builtin %qs only accepts 3 arguments", name);
1109 *res = resolved;
1110 return error_mark_node;
1111 }
1112
1113 tree arg0 = (*arglist)[0];
1114 tree arg0_type = TREE_TYPE (arg0);
1115 tree arg1 = (*arglist)[1];
1116 tree arg1_type = TREE_TYPE (arg1);
1117 tree arg2 = (*arglist)[2];
1118 tree arg2_type = TREE_TYPE (arg2);
1119
1120 /* All 3 arguments must be vectors of (signed or unsigned) (int or
1121 __int128) and the types must be compatible. */
1122 if (TREE_CODE (arg0_type) != VECTOR_TYPE
1123 || !lang_hooks.types_compatible_p (arg0_type, arg1_type)
1124 || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
1125 {
1126 *res = resolved_bad;
1127 return error_mark_node;
1128 }
1129
1130 switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1131 {
1132 /* For {un}signed ints,
1133 vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
1134 vec_and (carryv, 1)).
1135 vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
1136 vec_and (carryv, 1)). */
1137 case E_SImode:
1138 {
1139 vec<tree, va_gc> *params = make_tree_vector ();
1140 vec_safe_push (params, arg0);
1141 vec_safe_push (params, arg1);
1142
1143 tree add_sub_builtin;
1144 if (fcode == RS6000_OVLD_VEC_ADDE)
1145 add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD];
1146 else
1147 add_sub_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB];
1148
1149 tree call = altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
1150 params);
1151 tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
1152 tree ones_vector = build_vector_from_val (arg0_type, const1);
1153 tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
1154 arg2, ones_vector);
1155 params = make_tree_vector ();
1156 vec_safe_push (params, call);
1157 vec_safe_push (params, and_expr);
1158 *res = resolved;
1159 return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
1160 params);
1161 }
1162 /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instruction
1163 directly using the standard machinery. */
1164 case E_TImode:
1165 *res = unresolved;
1166 break;
1167
1168 /* Types other than {un}signed int and {un}signed __int128
1169 are errors. */
1170 default:
1171 *res = resolved_bad;
1172 }
1173
1174 return error_mark_node;
1175 }
1176
1177 /* Resolve an overloaded vec_addec or vec_subec call and return a tree
1178 expression for the resolved call if successful. NARGS is the number of
1179 arguments to the call. ARGLIST contains the arguments. RES must be set
1180 to indicate the status of the resolution attempt. LOC contains statement
1181 location information. */
1182
1183 static tree
1184 resolve_vec_addec_subec (resolution *res, rs6000_gen_builtins fcode,
1185 vec<tree, va_gc> *arglist, unsigned nargs,
1186 location_t loc)
1187 {
1188 /* vec_addec and vec_subec need to be special cased because there is
1189 no instruction for the (un)signed int version. */
1190 if (nargs != 3)
1191 {
1192 const char *name;
1193 name = fcode == RS6000_OVLD_VEC_ADDEC ? "vec_addec" : "vec_subec";
1194 error ("builtin %qs only accepts 3 arguments", name);
1195 *res = resolved;
1196 return error_mark_node;
1197 }
1198
1199 tree arg0 = (*arglist)[0];
1200 tree arg0_type = TREE_TYPE (arg0);
1201 tree arg1 = (*arglist)[1];
1202 tree arg1_type = TREE_TYPE (arg1);
1203 tree arg2 = (*arglist)[2];
1204 tree arg2_type = TREE_TYPE (arg2);
1205
1206 /* All 3 arguments must be vectors of (signed or unsigned) (int or
1207 __int128) and the types must be compatible. */
1208 if (TREE_CODE (arg0_type) != VECTOR_TYPE
1209 || !lang_hooks.types_compatible_p (arg0_type, arg1_type)
1210 || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
1211 {
1212 *res = resolved_bad;
1213 return error_mark_node;
1214 }
1215
1216 switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1217 {
1218 /* For {un}signed ints,
1219 vec_addec (va, vb, carryv) ==
1220 vec_or (vec_addc (va, vb),
1221 vec_addc (vec_add (va, vb),
1222 vec_and (carryv, 0x1))). */
1223 case E_SImode:
1224 {
1225 /* Use save_expr to ensure that operands used more than once that may
1226 have side effects (like calls) are only evaluated once. */
1227 arg0 = save_expr (arg0);
1228 arg1 = save_expr (arg1);
1229 vec<tree, va_gc> *params = make_tree_vector ();
1230 vec_safe_push (params, arg0);
1231 vec_safe_push (params, arg1);
1232
1233 tree as_c_builtin;
1234 if (fcode == RS6000_OVLD_VEC_ADDEC)
1235 as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADDC];
1236 else
1237 as_c_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUBC];
1238
1239 tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
1240 params);
1241 params = make_tree_vector ();
1242 vec_safe_push (params, arg0);
1243 vec_safe_push (params, arg1);
1244
1245 tree as_builtin;
1246 if (fcode == RS6000_OVLD_VEC_ADDEC)
1247 as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_ADD];
1248 else
1249 as_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_SUB];
1250
1251 tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin,
1252 params);
1253 tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
1254 tree ones_vector = build_vector_from_val (arg0_type, const1);
1255 tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
1256 arg2, ones_vector);
1257 params = make_tree_vector ();
1258 vec_safe_push (params, call2);
1259 vec_safe_push (params, and_expr);
1260 call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin, params);
1261 params = make_tree_vector ();
1262 vec_safe_push (params, call1);
1263 vec_safe_push (params, call2);
1264 tree or_builtin = rs6000_builtin_decls[RS6000_OVLD_VEC_OR];
1265 *res = resolved;
1266 return altivec_resolve_overloaded_builtin (loc, or_builtin, params);
1267 }
1268 /* For {un}signed __int128s use the vaddecuq/vsubecuq
1269 instructions. This occurs through normal processing. */
1270 case E_TImode:
1271 *res = unresolved;
1272 break;
1273
1274 /* Types other than {un}signed int and {un}signed __int128
1275 are errors. */
1276 default:
1277 *res = resolved_bad;
1278 }
1279
1280 return error_mark_node;
1281 }
1282
1283 /* Resolve an overloaded vec_splats or vec_promote call and return a tree
1284 expression for the resolved call if successful. NARGS is the number of
1285 arguments to the call. ARGLIST contains the arguments. RES must be set
1286 to indicate the status of the resolution attempt. */
1287
1288 static tree
1289 resolve_vec_splats (resolution *res, rs6000_gen_builtins fcode,
1290 vec<tree, va_gc> *arglist, unsigned nargs)
1291 {
1292 const char *name;
1293 name = fcode == RS6000_OVLD_VEC_SPLATS ? "vec_splats" : "vec_promote";
1294
1295 if (fcode == RS6000_OVLD_VEC_SPLATS && nargs != 1)
1296 {
1297 error ("builtin %qs only accepts 1 argument", name);
1298 *res = resolved;
1299 return error_mark_node;
1300 }
1301
1302 if (fcode == RS6000_OVLD_VEC_PROMOTE && nargs != 2)
1303 {
1304 error ("builtin %qs only accepts 2 arguments", name);
1305 *res = resolved;
1306 return error_mark_node;
1307 }
1308
1309 /* Ignore promote's element argument. */
1310 if (fcode == RS6000_OVLD_VEC_PROMOTE
1311 && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
1312 {
1313 *res = resolved_bad;
1314 return error_mark_node;
1315 }
1316
1317 tree arg = (*arglist)[0];
1318 tree type = TREE_TYPE (arg);
1319
1320 if (!SCALAR_FLOAT_TYPE_P (type) && !INTEGRAL_TYPE_P (type))
1321 {
1322 *res = resolved_bad;
1323 return error_mark_node;
1324 }
1325
1326 bool unsigned_p = TYPE_UNSIGNED (type);
1327 int size;
1328
1329 switch (TYPE_MODE (type))
1330 {
1331 case E_TImode:
1332 type = unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node;
1333 size = 1;
1334 break;
1335 case E_DImode:
1336 type = unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node;
1337 size = 2;
1338 break;
1339 case E_SImode:
1340 type = unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node;
1341 size = 4;
1342 break;
1343 case E_HImode:
1344 type = unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node;
1345 size = 8;
1346 break;
1347 case E_QImode:
1348 type = unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node;
1349 size = 16;
1350 break;
1351 case E_SFmode:
1352 type = V4SF_type_node;
1353 size = 4;
1354 break;
1355 case E_DFmode:
1356 type = V2DF_type_node;
1357 size = 2;
1358 break;
1359 default:
1360 *res = resolved_bad;
1361 return error_mark_node;
1362 }
1363
1364 arg = save_expr (fold_convert (TREE_TYPE (type), arg));
1365 vec<constructor_elt, va_gc> *vec;
1366 vec_alloc (vec, size);
1367
1368 for (int i = 0; i < size; i++)
1369 {
1370 constructor_elt elt = {NULL_TREE, arg};
1371 vec->quick_push (elt);
1372 }
1373
1374 *res = resolved;
1375 return build_constructor (type, vec);
1376 }
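/* Sketch of the resolution above (illustrative):

     vec_splats (1)        ->  (vector signed int){1, 1, 1, 1}
     vec_splats (1.0f)     ->  (vector float){1.0f, 1.0f, 1.0f, 1.0f}
     vec_promote (2.0, 0)  ->  (vector double){2.0, 2.0}

   i.e. a CONSTRUCTOR of the matching vector type with the converted
   scalar repeated in every element.  For vec_promote the element-index
   argument only has its type checked and is otherwise ignored here.  */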
1377
1378 /* Resolve an overloaded vec_extract call and return a tree expression for
1379 the resolved call if successful. NARGS is the number of arguments to
1380 the call. ARGLIST contains the arguments. RES must be set to indicate
1381 the status of the resolution attempt. LOC contains statement location
1382 information. */
1383
1384 static tree
1385 resolve_vec_extract (resolution *res, vec<tree, va_gc> *arglist,
1386 unsigned nargs, location_t loc)
1387 {
1388 if (nargs != 2)
1389 {
1390 error ("builtin %qs only accepts 2 arguments", "vec_extract");
1391 *res = resolved;
1392 return error_mark_node;
1393 }
1394
1395 tree arg1 = (*arglist)[0];
1396 tree arg1_type = TREE_TYPE (arg1);
1397 tree arg2 = (*arglist)[1];
1398
1399 if (TREE_CODE (arg1_type) != VECTOR_TYPE
1400 || !INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1401 {
1402 *res = resolved_bad;
1403 return error_mark_node;
1404 }
1405
1406 /* See if we can optimize vec_extract with the current VSX instruction
1407 set. */
1408 machine_mode mode = TYPE_MODE (arg1_type);
1409 tree arg1_inner_type;
1410
1411 if (VECTOR_MEM_VSX_P (mode))
1412 {
1413 tree call = NULL_TREE;
1414 int nunits = GET_MODE_NUNITS (mode);
1415 arg2 = fold_for_warn (arg2);
1416
1417 /* If the second argument is an integer constant, generate
1418 the built-in code if we can. We need 64-bit and direct
1419 move to extract the small integer vectors. */
1420 if (TREE_CODE (arg2) == INTEGER_CST)
1421 {
1422 wide_int selector = wi::to_wide (arg2);
1423 selector = wi::umod_trunc (selector, nunits);
1424 arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1425 switch (mode)
1426 {
1427 case E_V1TImode:
1428 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V1TI];
1429 break;
1430
1431 case E_V2DFmode:
1432 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF];
1433 break;
1434
1435 case E_V2DImode:
1436 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI];
1437 break;
1438
1439 case E_V4SFmode:
1440 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF];
1441 break;
1442
1443 case E_V4SImode:
1444 if (TARGET_DIRECT_MOVE_64BIT)
1445 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI];
1446 break;
1447
1448 case E_V8HImode:
1449 if (TARGET_DIRECT_MOVE_64BIT)
1450 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI];
1451 break;
1452
1453 case E_V16QImode:
1454 if (TARGET_DIRECT_MOVE_64BIT)
1455 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI];
1456 break;
1457
1458 default:
1459 break;
1460 }
1461 }
1462
1463 /* If the second argument is variable, we can optimize it if we are
1464 generating 64-bit code on a machine with direct move. */
1465 else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
1466 {
1467 switch (mode)
1468 {
1469 case E_V2DFmode:
1470 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DF];
1471 break;
1472
1473 case E_V2DImode:
1474 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V2DI];
1475 break;
1476
1477 case E_V4SFmode:
1478 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SF];
1479 break;
1480
1481 case E_V4SImode:
1482 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V4SI];
1483 break;
1484
1485 case E_V8HImode:
1486 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V8HI];
1487 break;
1488
1489 case E_V16QImode:
1490 call = rs6000_builtin_decls[RS6000_BIF_VEC_EXT_V16QI];
1491 break;
1492
1493 default:
1494 break;
1495 }
1496 }
1497
1498 if (call)
1499 {
1500 tree result = build_call_expr (call, 2, arg1, arg2);
1501 /* Coerce the result to vector element type. May be no-op. */
1502 arg1_inner_type = TREE_TYPE (arg1_type);
1503 result = fold_convert (arg1_inner_type, result);
1504 *res = resolved;
1505 return result;
1506 }
1507 }
1508
1509 /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2). */
1510 arg1_inner_type = TREE_TYPE (arg1_type);
1511 tree subp = build_int_cst (TREE_TYPE (arg2),
1512 TYPE_VECTOR_SUBPARTS (arg1_type) - 1);
1513 arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, subp, 0);
1514
1515 tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1516 DECL_EXTERNAL (decl) = 0;
1517 TREE_PUBLIC (decl) = 0;
1518 DECL_CONTEXT (decl) = current_function_decl;
1519 TREE_USED (decl) = 1;
1520 TREE_TYPE (decl) = arg1_type;
1521 TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1522
1523 tree stmt;
1524 if (c_dialect_cxx ())
1525 {
1526 stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE);
1527 SET_EXPR_LOCATION (stmt, loc);
1528 }
1529 else
1530 {
1531 DECL_INITIAL (decl) = arg1;
1532 stmt = build1 (DECL_EXPR, arg1_type, decl);
1533 TREE_ADDRESSABLE (decl) = 1;
1534 SET_EXPR_LOCATION (stmt, loc);
1535 stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1536 }
1537
1538 tree innerptrtype = build_pointer_type (arg1_inner_type);
1539 stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1540 stmt = convert (innerptrtype, stmt);
1541 stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1542 stmt = build_indirect_ref (loc, stmt, RO_NULL);
1543
1544 /* PR83660: We mark this as having side effects so that downstream in
1545 fold_build_cleanup_point_expr () it will get a CLEANUP_POINT_EXPR. If it
1546 does not we can run into an ICE later in gimplify_cleanup_point_expr ().
1547 Potentially this causes missed optimization because there actually is no
1548 side effect. */
1549 if (c_dialect_cxx ())
1550 TREE_SIDE_EFFECTS (stmt) = 1;
1551
1552 *res = resolved;
1553 return stmt;
1554 }
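/* Sketch of the fallback path above, used when no VSX extract builtin
   applies (illustrative; variable names are made up):

     vector short vs;  int n;
     ... vec_extract (vs, n) ...

   is lowered to roughly

     ({ vector short tmp = vs;
        *((short *) &tmp + (n & 7)); })

   i.e. the vector is copied to an addressable temporary and the element
   is read back through a pointer, with the index masked by nunits - 1.  */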
1555
1556 /* Resolve an overloaded vec_insert call and return a tree expression for
1557 the resolved call if successful. NARGS is the number of arguments to
1558 the call. ARGLIST contains the arguments. RES must be set to indicate
1559 the status of the resolution attempt. LOC contains statement location
1560 information. */
1561
1562 static tree
1563 resolve_vec_insert (resolution *res, vec<tree, va_gc> *arglist,
1564 unsigned nargs, location_t loc)
1565 {
1566 if (nargs != 3)
1567 {
1568 error ("builtin %qs only accepts 3 arguments", "vec_insert");
1569 *res = resolved;
1570 return error_mark_node;
1571 }
1572
1573 tree arg0 = (*arglist)[0];
1574 tree arg1 = (*arglist)[1];
1575 tree arg1_type = TREE_TYPE (arg1);
1576 tree arg2 = fold_for_warn ((*arglist)[2]);
1577
1578 if (TREE_CODE (arg1_type) != VECTOR_TYPE
1579 || !INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1580 {
1581 *res = resolved_bad;
1582 return error_mark_node;
1583 }
1584
1585 /* If we can use the VSX xxpermdi instruction, use that for insert. */
1586 machine_mode mode = TYPE_MODE (arg1_type);
1587
1588 if ((mode == V2DFmode || mode == V2DImode)
1589 && VECTOR_UNIT_VSX_P (mode)
1590 && TREE_CODE (arg2) == INTEGER_CST)
1591 {
1592 wide_int selector = wi::to_wide (arg2);
1593 selector = wi::umod_trunc (selector, 2);
1594 arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1595
1596 tree call = NULL_TREE;
1597 if (mode == V2DFmode)
1598 call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DF];
1599 else if (mode == V2DImode)
1600 call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V2DI];
1601
1602 /* Note that __builtin_vec_insert_<xxx> takes its vector and scalar
1603 arguments in the opposite order from vec_insert. */
1604 if (call)
1605 {
1606 *res = resolved;
1607 return build_call_expr (call, 3, arg1, arg0, arg2);
1608 }
1609 }
1610
1611 else if (mode == V1TImode
1612 && VECTOR_UNIT_VSX_P (mode)
1613 && TREE_CODE (arg2) == INTEGER_CST)
1614 {
1615 tree call = rs6000_builtin_decls[RS6000_BIF_VEC_SET_V1TI];
1616 wide_int selector = wi::zero (32);
1617 arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1618
1619 /* Note that __builtin_vec_insert_<xxx> takes its vector and scalar
1620 arguments in the opposite order from vec_insert. */
1621 *res = resolved;
1622 return build_call_expr (call, 3, arg1, arg0, arg2);
1623 }
1624
1625 /* Build *(((arg1_inner_type*) & (vector type){arg1}) + arg2) = arg0 with
1626 VIEW_CONVERT_EXPR. i.e.:
1627 D.3192 = v1;
1628 _1 = n & 3;
1629 VIEW_CONVERT_EXPR<int[4]>(D.3192)[_1] = i;
1630 v1 = D.3192;
1631 D.3194 = v1; */
1632 if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
1633 arg2 = build_int_cst (TREE_TYPE (arg2), 0);
1634 else
1635 {
1636 tree c = build_int_cst (TREE_TYPE (arg2),
1637 TYPE_VECTOR_SUBPARTS (arg1_type) - 1);
1638 arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2, c, 0);
1639 }
1640
1641 tree decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1642 DECL_EXTERNAL (decl) = 0;
1643 TREE_PUBLIC (decl) = 0;
1644 DECL_CONTEXT (decl) = current_function_decl;
1645 TREE_USED (decl) = 1;
1646 TREE_TYPE (decl) = arg1_type;
1647 TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1648 TREE_ADDRESSABLE (decl) = 1;
1649
1650 tree stmt;
1651 if (c_dialect_cxx ())
1652 {
1653 stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1, NULL_TREE, NULL_TREE);
1654 SET_EXPR_LOCATION (stmt, loc);
1655 }
1656 else
1657 {
1658 DECL_INITIAL (decl) = arg1;
1659 stmt = build1 (DECL_EXPR, arg1_type, decl);
1660 SET_EXPR_LOCATION (stmt, loc);
1661 stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1662 }
1663
1664 if (TARGET_VSX)
1665 {
1666 stmt = build_array_ref (loc, stmt, arg2);
1667 stmt = fold_build2 (MODIFY_EXPR, TREE_TYPE (arg0), stmt,
1668 convert (TREE_TYPE (stmt), arg0));
1669 stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
1670 }
1671 else
1672 {
1673 tree arg1_inner_type = TREE_TYPE (arg1_type);
1674 tree innerptrtype = build_pointer_type (arg1_inner_type);
1675 stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1676 stmt = convert (innerptrtype, stmt);
1677 stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1678 stmt = build_indirect_ref (loc, stmt, RO_NULL);
1679 stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
1680 convert (TREE_TYPE (stmt), arg0));
1681 stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
1682 }
1683
1684 *res = resolved;
1685 return stmt;
1686 }
1687
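/* Illustrative sketch (editorial, not from the original sources): for a
   call such as vec_insert (i, v1, n) with V1 of type vector signed int,
   the non-VSX fallback above builds roughly

     *((signed int *) &(vector signed int){v1} + (n & 3)) = i, <temporary>

   while the TARGET_VSX path stores through an array reference into the
   temporary, which gimplifies as shown in the VIEW_CONVERT_EXPR comment
   above.  Either way the COMPOUND_EXPR yields the updated temporary, so a
   caller can write v1 = vec_insert (i, v1, n).  */
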
1688 /* Resolve an overloaded vec_step call and return a tree expression for
1689 the resolved call if successful. NARGS is the number of arguments to
1690 the call. ARGLIST contains the arguments. RES must be set to indicate
1691 the status of the resolution attempt. */
1692
1693 static tree
1694 resolve_vec_step (resolution *res, vec<tree, va_gc> *arglist, unsigned nargs)
1695 {
1696 if (nargs != 1)
1697 {
1698 error ("builtin %qs only accepts 1 argument", "vec_step");
1699 *res = resolved;
1700 return error_mark_node;
1701 }
1702
1703 tree arg0 = (*arglist)[0];
1704 tree arg0_type = TREE_TYPE (arg0);
1705
1706 if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1707 {
1708 *res = resolved_bad;
1709 return error_mark_node;
1710 }
1711
1712 *res = resolved;
1713 return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (arg0_type));
1714 }
1715
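/* Illustrative examples (editorial, not from the original sources):
   vec_step evaluates to TYPE_VECTOR_SUBPARTS of its argument's type, e.g.
   4 for vector signed int (V4SImode), 8 for vector signed short and 16
   for vector signed char, all 128-bit AltiVec/VSX types.  */
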
1716 /* Look for a matching instance in a chain of instances. INSTANCE points to
1717 the chain of instances; INSTANCE_CODE is the code identifying the specific
1718 built-in being searched for; FCODE is the overloaded function code; TYPES
1719 contains an array of two types that must match the types of the instance's
1720 parameters; and ARGS contains an array of two arguments to be passed to
1721 the instance. If found, resolve the built-in and return it, unless the
1722 built-in is not supported in context. In that case, set
1723 UNSUPPORTED_BUILTIN to true. If we don't match, return error_mark_node
1724 and leave UNSUPPORTED_BUILTIN alone. */
1725
1726 tree
1727 find_instance (bool *unsupported_builtin, ovlddata **instance,
1728 rs6000_gen_builtins instance_code,
1729 rs6000_gen_builtins fcode,
1730 tree *types, tree *args)
1731 {
1732 while (*instance && (*instance)->bifid != instance_code)
1733 *instance = (*instance)->next;
1734
1735 ovlddata *inst = *instance;
1736 gcc_assert (inst != NULL);
1737 tree fntype = rs6000_builtin_info[inst->bifid].fntype;
1738 tree parmtype0 = TREE_VALUE (TYPE_ARG_TYPES (fntype));
1739 tree parmtype1 = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (fntype)));
1740
1741 if (rs6000_builtin_type_compatible (types[0], parmtype0)
1742 && rs6000_builtin_type_compatible (types[1], parmtype1))
1743 {
1744 if (rs6000_builtin_decl (inst->bifid, false) != error_mark_node
1745 && rs6000_builtin_is_supported (inst->bifid))
1746 {
1747 tree ret_type = TREE_TYPE (inst->fntype);
1748 return altivec_build_resolved_builtin (args, 2, fntype, ret_type,
1749 inst->bifid, fcode);
1750 }
1751 else
1752 *unsupported_builtin = true;
1753 }
1754
1755 return error_mark_node;
1756 }
1757
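/* Editorial note (not from the original sources): find_instance is used by
   the special cases below (RS6000_OVLD_SCAL_CMPB and RS6000_OVLD_VEC_VSIE)
   once they have picked a specific INSTANCE_CODE; it advances *INSTANCE
   along the chain to that instance and then applies the same
   type-compatibility and supported-in-context checks as the generic loop
   in altivec_resolve_overloaded_builtin.  */
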
1758 /* Implementation of the resolve_overloaded_builtin target hook, to
1759 support AltiVec's overloaded builtins. */
1760
1761 tree
1762 altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
1763 void *passed_arglist)
1764 {
1765 rs6000_gen_builtins fcode
1766 = (rs6000_gen_builtins) DECL_MD_FUNCTION_CODE (fndecl);
1767
1768 /* Return immediately if this isn't an overload. */
1769 if (fcode <= RS6000_OVLD_NONE)
1770 return NULL_TREE;
1771
1772 if (TARGET_DEBUG_BUILTIN)
1773 fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
1774 (int) fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));
1775
1776 /* vec_lvsl and vec_lvsr are deprecated for use with LE element order. */
1777 if (fcode == RS6000_OVLD_VEC_LVSL && !BYTES_BIG_ENDIAN)
1778 warning (OPT_Wdeprecated,
1779 "%<vec_lvsl%> is deprecated for little endian; use "
1780 "assignment for unaligned loads and stores");
1781 else if (fcode == RS6000_OVLD_VEC_LVSR && !BYTES_BIG_ENDIAN)
1782 warning (OPT_Wdeprecated,
1783 "%<vec_lvsr%> is deprecated for little endian; use "
1784 "assignment for unaligned loads and stores");
1785
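/* Editorial sketch (not from the original sources): the replacement the
   warnings above suggest is a plain vector assignment for the unaligned
   access, e.g.

     vector unsigned char v = *(vector unsigned char *) p;

   in place of the lvsl/lvsr-based realignment idiom.  */
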
1786 /* Some overloads require special handling. */
1787 /* FIXME: Could we simplify the helper functions if we gathered arguments
1788 and types into arrays first? */
1789 tree returned_expr = NULL;
1790 resolution res = unresolved;
1791 vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
1792 unsigned int nargs = vec_safe_length (arglist);
1793
1794 switch (fcode)
1795 {
1796 case RS6000_OVLD_VEC_MUL:
1797 returned_expr = resolve_vec_mul (&res, arglist, nargs, loc);
1798 break;
1799
1800 case RS6000_OVLD_VEC_CMPNE:
1801 returned_expr = resolve_vec_cmpne (&res, arglist, nargs, loc);
1802 break;
1803
1804 case RS6000_OVLD_VEC_ADDE:
1805 case RS6000_OVLD_VEC_SUBE:
1806 returned_expr = resolve_vec_adde_sube (&res, fcode, arglist, nargs, loc);
1807 break;
1808
1809 case RS6000_OVLD_VEC_ADDEC:
1810 case RS6000_OVLD_VEC_SUBEC:
1811 returned_expr = resolve_vec_addec_subec (&res, fcode, arglist, nargs,
1812 loc);
1813 break;
1814
1815 case RS6000_OVLD_VEC_SPLATS:
1816 case RS6000_OVLD_VEC_PROMOTE:
1817 returned_expr = resolve_vec_splats (&res, fcode, arglist, nargs);
1818 break;
1819
1820 case RS6000_OVLD_VEC_EXTRACT:
1821 returned_expr = resolve_vec_extract (&res, arglist, nargs, loc);
1822 break;
1823
1824 case RS6000_OVLD_VEC_INSERT:
1825 returned_expr = resolve_vec_insert (&res, arglist, nargs, loc);
1826 break;
1827
1828 case RS6000_OVLD_VEC_STEP:
1829 returned_expr = resolve_vec_step (&res, arglist, nargs);
1830 break;
1831
1832 default:
1833 ;
1834 }
1835
1836 if (res == resolved)
1837 return returned_expr;
1838
1839 /* "Regular" built-in functions and overloaded functions share a namespace
1840 for some arrays, like rs6000_builtin_decls. But rs6000_overload_info
1841 only has information for the overloaded functions, so we need an
1842 adjusted index for that. */
1843 unsigned int adj_fcode = fcode - RS6000_OVLD_NONE;
1844
1845 if (res == resolved_bad)
1846 {
1847 const char *name = rs6000_overload_info[adj_fcode].ovld_name;
1848 error ("invalid parameter combination for AltiVec intrinsic %qs", name);
1849 return error_mark_node;
1850 }
1851
1852 /* Gather the arguments and their types into arrays for easier handling. */
1853 tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
1854 tree types[MAX_OVLD_ARGS];
1855 tree args[MAX_OVLD_ARGS];
1856 unsigned int n;
1857
1858 for (n = 0;
1859 !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
1860 fnargs = TREE_CHAIN (fnargs), n++)
1861 {
1862 tree decl_type = TREE_VALUE (fnargs);
1863 tree arg = (*arglist)[n];
1864
1865 if (arg == error_mark_node)
1866 return error_mark_node;
1867
1868 if (n >= MAX_OVLD_ARGS)
1869 abort ();
1870
1871 arg = default_conversion (arg);
1872 tree type = TREE_TYPE (arg);
1873
1874 /* The C++ front-end converts float * to const void * using
1875 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)). */
1876 if (POINTER_TYPE_P (type)
1877 && TREE_CODE (arg) == NOP_EXPR
1878 && lang_hooks.types_compatible_p (TREE_TYPE (arg),
1879 const_ptr_type_node)
1880 && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
1881 ptr_type_node))
1882 {
1883 arg = TREE_OPERAND (arg, 0);
1884 type = TREE_TYPE (arg);
1885 }
1886
1887 /* Remove the const from the pointers to simplify the overload
1888 matching further down. */
1889 if (POINTER_TYPE_P (decl_type)
1890 && POINTER_TYPE_P (type)
1891 && TYPE_QUALS (TREE_TYPE (type)) != 0)
1892 {
1893 if (TYPE_READONLY (TREE_TYPE (type))
1894 && !TYPE_READONLY (TREE_TYPE (decl_type)))
1895 warning (0, "passing argument %d of %qE discards const qualifier "
1896 "from pointer target type", n + 1, fndecl);
1897 type = build_qualified_type (TREE_TYPE (type), 0);
1898 type = build_pointer_type (type);
1899 arg = fold_convert (type, arg);
1900 }
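/* Editorial example (not from the original sources): an argument of type
   "const float *" passed to an instance declared with a "float *"
   parameter is rewritten here to plain "float *" (after the
   discarded-qualifier warning above, when applicable), so the
   type-compatibility checks below never need to consider qualifiers.  */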
1901
1902 /* For RS6000_OVLD_VEC_LXVL, convert any const * to its non-constant
1903 equivalent to simplify the overload matching below. */
1904 if (fcode == RS6000_OVLD_VEC_LXVL
1905 && POINTER_TYPE_P (type)
1906 && TYPE_READONLY (TREE_TYPE (type)))
1907 {
1908 type = build_qualified_type (TREE_TYPE (type), 0);
1909 type = build_pointer_type (type);
1910 arg = fold_convert (type, arg);
1911 }
1912
1913 args[n] = arg;
1914 types[n] = type;
1915 }
1916
1917 /* If the number of arguments does not match the prototype, return NULL
1918 and let the generic code issue the appropriate error message. */
1919 if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || n < nargs)
1920 return NULL;
1921
1922 bool unsupported_builtin = false;
1923 rs6000_gen_builtins instance_code;
1924 bool supported = false;
1925 ovlddata *instance = rs6000_overload_info[adj_fcode].first_instance;
1926 gcc_assert (instance != NULL);
1927
1928 /* Functions with no arguments can have only one overloaded instance. */
1929 gcc_assert (nargs > 0 || !instance->next);
1930
1931 /* Standard overload processing involves determining whether an instance
1932 exists that is type-compatible with the overloaded function call. In
1933 a couple of cases, we need to do some extra processing to disambiguate
1934 between multiple compatible instances. */
1935 switch (fcode)
1936 {
1937 /* Need to special case __builtin_cmpb because the overloaded forms
1938 of this function take (unsigned int, unsigned int) or (unsigned
1939 long long int, unsigned long long int). Since C conventions
1940 allow the respective argument types to be implicitly coerced into
1941 each other, the default handling does not provide adequate
1942 discrimination between the desired forms of the function. */
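/* Editorial examples (not from the original sources): with two
   "unsigned int" arguments the code below selects RS6000_BIF_CMPB_32,
   while an "unsigned long long" argument in either position has a mode
   precision greater than 32 and selects RS6000_BIF_CMPB.  */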
1943 case RS6000_OVLD_SCAL_CMPB:
1944 {
1945 machine_mode arg1_mode = TYPE_MODE (types[0]);
1946 machine_mode arg2_mode = TYPE_MODE (types[1]);
1947
1948 /* If any supplied arguments are wider than 32 bits, resolve to
1949 64-bit variant of built-in function. */
1950 if (GET_MODE_PRECISION (arg1_mode) > 32
1951 || GET_MODE_PRECISION (arg2_mode) > 32)
1952 /* Ensure all argument and result types are compatible with
1953 the built-in function represented by RS6000_BIF_CMPB. */
1954 instance_code = RS6000_BIF_CMPB;
1955 else
1956 /* Ensure all argument and result types are compatible with
1957 the built-in function represented by RS6000_BIF_CMPB_32. */
1958 instance_code = RS6000_BIF_CMPB_32;
1959
1960 tree call = find_instance (&unsupported_builtin, &instance,
1961 instance_code, fcode, types, args);
1962 if (call != error_mark_node)
1963 return call;
1964 break;
1965 }
1966 case RS6000_OVLD_VEC_VSIE:
1967 {
1968 machine_mode arg1_mode = TYPE_MODE (types[0]);
1969
1970 /* If supplied first argument is wider than 64 bits, resolve to
1971 128-bit variant of built-in function. */
1972 if (GET_MODE_PRECISION (arg1_mode) > 64)
1973 {
1974 /* If first argument is of float variety, choose variant
1975 that expects __ieee128 argument. Otherwise, expect
1976 __int128 argument. */
1977 if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1978 instance_code = RS6000_BIF_VSIEQPF;
1979 else
1980 instance_code = RS6000_BIF_VSIEQP;
1981 }
1982 else
1983 {
1984 /* If first argument is of float variety, choose variant
1985 that expects double argument. Otherwise, expect
1986 long long int argument. */
1987 if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1988 instance_code = RS6000_BIF_VSIEDPF;
1989 else
1990 instance_code = RS6000_BIF_VSIEDP;
1991 }
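/* Editorial examples (not from the original sources): a "double" first
   argument (64-bit float mode) selects RS6000_BIF_VSIEDPF above and
   "unsigned long long" selects RS6000_BIF_VSIEDP, while "__ieee128" and
   "__int128" are wider than 64 bits and select RS6000_BIF_VSIEQPF and
   RS6000_BIF_VSIEQP respectively.  */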
1992
1993 tree call = find_instance (&unsupported_builtin, &instance,
1994 instance_code, fcode, types, args);
1995 if (call != error_mark_node)
1996 return call;
1997 break;
1998 }
1999 default:
2000 /* Standard overload processing. Look for an instance with compatible
2001 parameter types. If it is supported in the current context, resolve
2002 the overloaded call to that instance. */
2003 for (; instance != NULL; instance = instance->next)
2004 {
2005 bool mismatch = false;
2006 tree nextparm = TYPE_ARG_TYPES (instance->fntype);
2007
2008 for (unsigned int arg_i = 0;
2009 arg_i < nargs && nextparm != NULL;
2010 arg_i++)
2011 {
2012 tree parmtype = TREE_VALUE (nextparm);
2013 if (!rs6000_builtin_type_compatible (types[arg_i], parmtype))
2014 {
2015 mismatch = true;
2016 break;
2017 }
2018 nextparm = TREE_CHAIN (nextparm);
2019 }
2020
2021 if (mismatch)
2022 continue;
2023
2024 supported = rs6000_builtin_is_supported (instance->bifid);
2025 if (rs6000_builtin_decl (instance->bifid, false) != error_mark_node
2026 && supported)
2027 {
2028 tree fntype = rs6000_builtin_info[instance->bifid].fntype;
2029 tree ret_type = TREE_TYPE (instance->fntype);
2030 return altivec_build_resolved_builtin (args, nargs, fntype,
2031 ret_type, instance->bifid,
2032 fcode);
2033 }
2034 else
2035 {
2036 unsupported_builtin = true;
2037 break;
2038 }
2039 }
2040 }
2041
2042 if (unsupported_builtin)
2043 {
2044 const char *name = rs6000_overload_info[adj_fcode].ovld_name;
2045 if (!supported)
2046 {
2047 /* Indicate that the instantiation of the overloaded builtin
2048 name is not available with the target flags in effect. */
2049 rs6000_gen_builtins fcode = (rs6000_gen_builtins) instance->bifid;
2050 rs6000_invalid_builtin (fcode);
2051 /* Clarify the relationship between the overload and its
2052 instantiation. */
2053 const char *internal_name
2054 = rs6000_builtin_info[instance->bifid].bifname;
2055 rich_location richloc (line_table, input_location);
2056 inform (&richloc,
2057 "overloaded builtin %qs is implemented by builtin %qs",
2058 name, internal_name);
2059 }
2060 else
2061 error ("%qs is not supported in this compiler configuration", name);
2062
2063 return error_mark_node;
2064 }
2065
2066 /* If we fall through to here, there were no compatible instances. */
2067 const char *name = rs6000_overload_info[adj_fcode].ovld_name;
2068 error ("invalid parameter combination for AltiVec intrinsic %qs", name);
2069 return error_mark_node;
2070 }