/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h" /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;
/* Label number of the label created for -mrelocatable, which we call
   to get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use a variant of the AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
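
/* For example, -mrecip=divf maps to RECIP_SF_DIV | RECIP_V4SF_DIV, so only
   SFmode and V4SFmode divisions are open-coded with the reciprocal estimate
   instructions (fres/xvresp) plus Newton-Raphson refinement, while DFmode
   divisions still use a full divide.  (Illustrative note; the actual option
   parsing happens in rs6000_option_override_internal.)  */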

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
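
/* Usage sketch for the mapping above (do_work_power9 and do_work_generic
   are hypothetical names):

     if (__builtin_cpu_is ("power9"))
       do_work_power9 ();
     else
       do_work_generic ();

   The builtin compiles down to a comparison of the AT_PLATFORM value cached
   in the TCB against PPC_PLATFORM_POWER9 from this table.  */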

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
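
/* Usage sketch (vsx_path and fallback_path are hypothetical names):

     if (__builtin_cpu_supports ("vsx"))
       vsx_path ();
     else
       fallback_path ();

   The name is looked up in this table; id 0 selects the AT_HWCAP word and
   id 1 the AT_HWCAP2 word cached in the TCB, and the builtin tests the
   corresponding mask bit (PPC_FEATURE_HAS_VSX here).  */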

/* On PowerPC, we have a limited number of target clones that we care about,
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },					/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};

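/* This map backs the target_clones attribute.  A sketch (the function name
   is hypothetical):

     __attribute__ ((target_clones ("cpu=power9,default")))
     double sum (double *a, long n);

   compiles a default variant and a power9 variant; the runtime resolver
   picks the best one by checking __builtin_cpu_supports on the HWCAP names
   above, from the highest ISA down to the default.  */
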
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when determining
   whether an address is legitimate.  We only need to worry about GPR, FPR,
   and AltiVec registers here, along with an ANY field that is the OR of the
   three register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,	/* General purpose registers.  */
  RELOAD_REG_FPR,	/* Traditional floating point regs.  */
  RELOAD_REG_VMX,	/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,	/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
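
/* For instance, a mode that is valid in GPRs with both reg+offset and
   reg+reg addressing would have a GPR entry built up as

     addr_mask_type m = (RELOAD_REG_VALID
			 | RELOAD_REG_OFFSET
			 | RELOAD_REG_INDEXED);

   (illustrative values only; the real masks are computed per mode in
   rs6000_setup_reg_addr_masks).  */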

/* Per-mode addressing information: reload insn codes, fusion insn codes, and
   a mask of the valid addressing forms for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
  /* INSNs for fusing addi with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
  /* INSNs for fusing addis with loads
     or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

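/* For example, on ISA 3.0 the vector modes support DQ-form, so lxv/stxv can
   use a displacement that is a multiple of 16 (say 48(r9)), while an
   arbitrary displacement such as 20(r9) must instead be reloaded into a
   reg+reg (X-form) address.  (Illustrative note.)  */
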
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
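
/* With these definitions, each RS6000_BUILTIN_* line in rs6000-builtin.def
   expands into one initializer.  For instance, a hypothetical entry

     RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "__builtin_altivec_vaddubm",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST,
		       CODE_FOR_addv16qi3)

   would contribute

     { "__builtin_altivec_vaddubm", CODE_FOR_addv16qi3,
       RS6000_BTM_ALTIVEC, RS6000_BTC_CONST },

   to rs6000_builtin_info (the names above are for illustration; the real
   entries live in rs6000-builtin.def).  */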

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "ca",
  /* AltiVec registers.  */
   "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
   "8",  "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
    "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
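
/* E.g. ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (the %v0 bit),
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */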
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1671 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1672
1673 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1674 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1675 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1676 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1677 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1678 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1679 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1680 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1681
1682 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1683 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1684
1685 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1686 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1687 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1688 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1689 rs6000_builtin_support_vector_misalignment
1690 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1691 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1692 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1693 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1694 rs6000_builtin_vectorization_cost
1695 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1696 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1697 rs6000_preferred_simd_mode
1698 #undef TARGET_VECTORIZE_INIT_COST
1699 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1700 #undef TARGET_VECTORIZE_ADD_STMT_COST
1701 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1702 #undef TARGET_VECTORIZE_FINISH_COST
1703 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1704 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1705 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1706
1707 #undef TARGET_INIT_BUILTINS
1708 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1709 #undef TARGET_BUILTIN_DECL
1710 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1711
1712 #undef TARGET_FOLD_BUILTIN
1713 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1714 #undef TARGET_GIMPLE_FOLD_BUILTIN
1715 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1716
1717 #undef TARGET_EXPAND_BUILTIN
1718 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1719
1720 #undef TARGET_MANGLE_TYPE
1721 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1722
1723 #undef TARGET_INIT_LIBFUNCS
1724 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1725
1726 #if TARGET_MACHO
1727 #undef TARGET_BINDS_LOCAL_P
1728 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1729 #endif
1730
1731 #undef TARGET_MS_BITFIELD_LAYOUT_P
1732 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1733
1734 #undef TARGET_ASM_OUTPUT_MI_THUNK
1735 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1736
1737 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1738 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1739
1740 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1741 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1742
1743 #undef TARGET_REGISTER_MOVE_COST
1744 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1745 #undef TARGET_MEMORY_MOVE_COST
1746 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1747 #undef TARGET_CANNOT_COPY_INSN_P
1748 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1749 #undef TARGET_RTX_COSTS
1750 #define TARGET_RTX_COSTS rs6000_rtx_costs
1751 #undef TARGET_ADDRESS_COST
1752 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1753 #undef TARGET_INSN_COST
1754 #define TARGET_INSN_COST rs6000_insn_cost
1755
1756 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1757 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1758
1759 #undef TARGET_PROMOTE_FUNCTION_MODE
1760 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1761
1762 #undef TARGET_RETURN_IN_MEMORY
1763 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1764
1765 #undef TARGET_RETURN_IN_MSB
1766 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1767
1768 #undef TARGET_SETUP_INCOMING_VARARGS
1769 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1770
1771 /* Always strict argument naming on rs6000. */
1772 #undef TARGET_STRICT_ARGUMENT_NAMING
1773 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1774 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1775 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1776 #undef TARGET_SPLIT_COMPLEX_ARG
1777 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1778 #undef TARGET_MUST_PASS_IN_STACK
1779 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1780 #undef TARGET_PASS_BY_REFERENCE
1781 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1782 #undef TARGET_ARG_PARTIAL_BYTES
1783 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1784 #undef TARGET_FUNCTION_ARG_ADVANCE
1785 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1786 #undef TARGET_FUNCTION_ARG
1787 #define TARGET_FUNCTION_ARG rs6000_function_arg
1788 #undef TARGET_FUNCTION_ARG_PADDING
1789 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1790 #undef TARGET_FUNCTION_ARG_BOUNDARY
1791 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1792
1793 #undef TARGET_BUILD_BUILTIN_VA_LIST
1794 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1795
1796 #undef TARGET_EXPAND_BUILTIN_VA_START
1797 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1798
1799 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1800 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1801
1802 #undef TARGET_EH_RETURN_FILTER_MODE
1803 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1804
1805 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1806 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1807
1808 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1809 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1810
1811 #undef TARGET_FLOATN_MODE
1812 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1813
1814 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1815 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1816
1817 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1818 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1819
1820 #undef TARGET_MD_ASM_ADJUST
1821 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1822
1823 #undef TARGET_OPTION_OVERRIDE
1824 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1825
1826 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1827 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1828 rs6000_builtin_vectorized_function
1829
1830 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1831 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1832 rs6000_builtin_md_vectorized_function
1833
1834 #undef TARGET_STACK_PROTECT_GUARD
1835 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1836
1837 #if !TARGET_MACHO
1838 #undef TARGET_STACK_PROTECT_FAIL
1839 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1840 #endif
1841
1842 #ifdef HAVE_AS_TLS
1843 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1844 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1845 #endif
1846
1847 /* Use a 32-bit anchor range. This leads to sequences like:
1848
1849 addis tmp,anchor,high
1850 add dest,tmp,low
1851
1852 where tmp itself acts as an anchor, and can be shared between
1853 accesses to the same 64k page. */
1854 #undef TARGET_MIN_ANCHOR_OFFSET
1855 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
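/* Note: the minimum is written as -0x7fffffff - 1 instead of -0x80000000
   because the literal 0x80000000 does not fit in a signed 32-bit int, so
   negating it would operate on an unsigned value; -0x7fffffff - 1 produces
   the intended INT_MIN portably.  */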
1856 #undef TARGET_MAX_ANCHOR_OFFSET
1857 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1858 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1859 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1860 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1861 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1862
1863 #undef TARGET_BUILTIN_RECIPROCAL
1864 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1865
1866 #undef TARGET_SECONDARY_RELOAD
1867 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1868 #undef TARGET_SECONDARY_MEMORY_NEEDED
1869 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1870 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1871 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1872
1873 #undef TARGET_LEGITIMATE_ADDRESS_P
1874 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1875
1876 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1877 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1878
1879 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1880 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1881
1882 #undef TARGET_CAN_ELIMINATE
1883 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1884
1885 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1886 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1887
1888 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1889 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1890
1891 #undef TARGET_TRAMPOLINE_INIT
1892 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1893
1894 #undef TARGET_FUNCTION_VALUE
1895 #define TARGET_FUNCTION_VALUE rs6000_function_value
1896
1897 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1898 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1899
1900 #undef TARGET_OPTION_SAVE
1901 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1902
1903 #undef TARGET_OPTION_RESTORE
1904 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1905
1906 #undef TARGET_OPTION_PRINT
1907 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1908
1909 #undef TARGET_CAN_INLINE_P
1910 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1911
1912 #undef TARGET_SET_CURRENT_FUNCTION
1913 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1914
1915 #undef TARGET_LEGITIMATE_CONSTANT_P
1916 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1917
1918 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1919 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1920
1921 #undef TARGET_CAN_USE_DOLOOP_P
1922 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1923
1924 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1925 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1926
1927 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1928 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1929 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1930 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1931 #undef TARGET_UNWIND_WORD_MODE
1932 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1933
1934 #undef TARGET_OFFLOAD_OPTIONS
1935 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1936
1937 #undef TARGET_C_MODE_FOR_SUFFIX
1938 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1939
1940 #undef TARGET_INVALID_BINARY_OP
1941 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1942
1943 #undef TARGET_OPTAB_SUPPORTED_P
1944 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1945
1946 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1947 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1948
1949 #undef TARGET_COMPARE_VERSION_PRIORITY
1950 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1951
1952 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1953 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1954 rs6000_generate_version_dispatcher_body
1955
1956 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1957 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1958 rs6000_get_function_versions_dispatcher
1959
1960 #undef TARGET_OPTION_FUNCTION_VERSIONS
1961 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1962
1963 #undef TARGET_HARD_REGNO_NREGS
1964 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1965 #undef TARGET_HARD_REGNO_MODE_OK
1966 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1967
1968 #undef TARGET_MODES_TIEABLE_P
1969 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1970
1971 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1972 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1973 rs6000_hard_regno_call_part_clobbered
1974
1975 #undef TARGET_SLOW_UNALIGNED_ACCESS
1976 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1977
1978 #undef TARGET_CAN_CHANGE_MODE_CLASS
1979 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1980
1981 #undef TARGET_CONSTANT_ALIGNMENT
1982 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1983
1984 #undef TARGET_STARTING_FRAME_OFFSET
1985 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1986
1987 #if TARGET_ELF && RS6000_WEAK
1988 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1989 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1990 #endif
1991 \f
1992
1993 /* Processor table. */
1994 struct rs6000_ptt
1995 {
1996 const char *const name; /* Canonical processor name. */
1997 const enum processor_type processor; /* Processor type enum value. */
1998 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1999 };
2000
2001 static struct rs6000_ptt const processor_target_table[] =
2002 {
2003 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2004 #include "rs6000-cpus.def"
2005 #undef RS6000_CPU
2006 };
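/* Each RS6000_CPU line in rs6000-cpus.def expands into one initializer of
   the table above, roughly (an illustrative sketch, not verbatim from the
   .def file):

       RS6000_CPU ("power9", PROCESSOR_POWER9, <ISA flag mask>)
       => { "power9", PROCESSOR_POWER9, <ISA flag mask> },  */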
2007
2008 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2009 name is invalid. */
2010
2011 static int
2012 rs6000_cpu_name_lookup (const char *name)
2013 {
2014 size_t i;
2015
2016 if (name != NULL)
2017 {
2018 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2019 if (! strcmp (name, processor_target_table[i].name))
2020 return (int)i;
2021 }
2022
2023 return -1;
2024 }
2025
2026 \f
2027 /* Return number of consecutive hard regs needed starting at reg REGNO
2028 to hold something of mode MODE.
2029 This is ordinarily the length in words of a value of mode MODE
2030 but can be less for certain modes in special long registers.
2031
2032 POWER and PowerPC GPRs hold 32 bits worth;
2033    PowerPC64 GPRs and FPRs hold 64 bits worth. */
2034
2035 static int
2036 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2037 {
2038 unsigned HOST_WIDE_INT reg_size;
2039
2040 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2041 128-bit floating point that can go in vector registers, which has VSX
2042 memory addressing. */
2043 if (FP_REGNO_P (regno))
2044 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2045 ? UNITS_PER_VSX_WORD
2046 : UNITS_PER_FP_WORD);
2047
2048 else if (ALTIVEC_REGNO_P (regno))
2049 reg_size = UNITS_PER_ALTIVEC_WORD;
2050
2051 else
2052 reg_size = UNITS_PER_WORD;
2053
2054 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2055 }
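/* Worked examples (illustrative): DFmode (8 bytes) needs (8+4-1)/4 = 2
   32-bit GPRs but only (8+8-1)/8 = 1 FPR; V2DFmode (16 bytes) in a
   VSX-capable FPR needs (16+16-1)/16 = 1 register, since reg_size is
   UNITS_PER_VSX_WORD in that case.  */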
2056
2057 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2058 MODE. */
2059 static int
2060 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2061 {
2062 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2063
2064 if (COMPLEX_MODE_P (mode))
2065 mode = GET_MODE_INNER (mode);
2066
2067   /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
2068      register pairs, and PTImode is what we use when we need to deal with such
2069      operations.  Don't allow quad words in the argument or frame
2070      pointer registers, just registers 0..31. */
2071 if (mode == PTImode)
2072 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2074 && ((regno & 1) == 0));
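  /* For instance (illustrative), PTImode starting in r10 is accepted (r10 is
     even and r10/r11 are both GPRs), while r11 is rejected for being odd, and
     a quad word starting in r31 would run past the end of the GPR file.  */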
2075
2076 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2077 implementations. Don't allow an item to be split between a FP register
2078 and an Altivec register. Allow TImode in all VSX registers if the user
2079 asked for it. */
2080 if (TARGET_VSX && VSX_REGNO_P (regno)
2081 && (VECTOR_MEM_VSX_P (mode)
2082 || FLOAT128_VECTOR_P (mode)
2083 || reg_addr[mode].scalar_in_vmx_p
2084 || mode == TImode
2085 || (TARGET_VADDUQM && mode == V1TImode)))
2086 {
2087 if (FP_REGNO_P (regno))
2088 return FP_REGNO_P (last_regno);
2089
2090 if (ALTIVEC_REGNO_P (regno))
2091 {
2092 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2093 return 0;
2094
2095 return ALTIVEC_REGNO_P (last_regno);
2096 }
2097 }
2098
2099 /* The GPRs can hold any mode, but values bigger than one register
2100 cannot go past R31. */
2101 if (INT_REGNO_P (regno))
2102 return INT_REGNO_P (last_regno);
2103
2104 /* The float registers (except for VSX vector modes) can only hold floating
2105 modes and DImode. */
2106 if (FP_REGNO_P (regno))
2107 {
2108 if (FLOAT128_VECTOR_P (mode))
2109 return false;
2110
2111 if (SCALAR_FLOAT_MODE_P (mode)
2112 && (mode != TDmode || (regno % 2) == 0)
2113 && FP_REGNO_P (last_regno))
2114 return 1;
2115
2116 if (GET_MODE_CLASS (mode) == MODE_INT)
2117 {
2118 	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2119 return 1;
2120
2121 if (TARGET_P8_VECTOR && (mode == SImode))
2122 return 1;
2123
2124 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2125 return 1;
2126 }
2127
2128 return 0;
2129 }
2130
2131 /* The CR register can only hold CC modes. */
2132 if (CR_REGNO_P (regno))
2133 return GET_MODE_CLASS (mode) == MODE_CC;
2134
2135 if (CA_REGNO_P (regno))
2136 return mode == Pmode || mode == SImode;
2137
2138   /* AltiVec only in AltiVec registers. */
2139 if (ALTIVEC_REGNO_P (regno))
2140 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2141 || mode == V1TImode);
2142
2143   /* We cannot put non-VSX TImode or PTImode anywhere except a general register,
2144      and the value must fit within the register set. */
2145
2146 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2147 }
2148
2149 /* Implement TARGET_HARD_REGNO_NREGS. */
2150
2151 static unsigned int
2152 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2153 {
2154 return rs6000_hard_regno_nregs[mode][regno];
2155 }
2156
2157 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2158
2159 static bool
2160 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2161 {
2162 return rs6000_hard_regno_mode_ok_p[mode][regno];
2163 }
2164
2165 /* Implement TARGET_MODES_TIEABLE_P.
2166
2167 PTImode cannot tie with other modes because PTImode is restricted to even
2168 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2169 57744).
2170
2171 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2172 128-bit floating point on VSX systems ties with other vectors. */
2173
2174 static bool
2175 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2176 {
2177 if (mode1 == PTImode)
2178 return mode2 == PTImode;
2179 if (mode2 == PTImode)
2180 return false;
2181
2182 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2183 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2184 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2185 return false;
2186
2187 if (SCALAR_FLOAT_MODE_P (mode1))
2188 return SCALAR_FLOAT_MODE_P (mode2);
2189 if (SCALAR_FLOAT_MODE_P (mode2))
2190 return false;
2191
2192 if (GET_MODE_CLASS (mode1) == MODE_CC)
2193 return GET_MODE_CLASS (mode2) == MODE_CC;
2194 if (GET_MODE_CLASS (mode2) == MODE_CC)
2195 return false;
2196
2197 return true;
2198 }
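/* Examples of the checks above (illustrative): V4SFmode and V2DImode tie
   with each other, both being Altivec/VSX vector modes; PTImode ties only
   with PTImode; SImode and DFmode do not tie, since one is an integer mode
   and the other a scalar float mode.  */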
2199
2200 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2201
2202 static bool
2203 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2204 {
2205 if (TARGET_32BIT
2206 && TARGET_POWERPC64
2207 && GET_MODE_SIZE (mode) > 4
2208 && INT_REGNO_P (regno))
2209 return true;
2210
2211 if (TARGET_VSX
2212 && FP_REGNO_P (regno)
2213 && GET_MODE_SIZE (mode) > 8
2214 && !FLOAT128_2REG_P (mode))
2215 return true;
2216
2217 return false;
2218 }
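/* Example (illustrative): with -m32 on a 64-bit processor, a DImode value
   occupies one 64-bit GPR, but the 32-bit ABI only preserves the low 32
   bits of that register across calls, so it is partially clobbered.
   Similarly, a 16-byte vector in a traditional FPR under VSX only has its
   low 64 bits covered by the FP save/restore conventions.  */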
2219
2220 /* Print interesting facts about registers. */
2221 static void
2222 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2223 {
2224 int r, m;
2225
2226 for (r = first_regno; r <= last_regno; ++r)
2227 {
2228 const char *comma = "";
2229 int len;
2230
2231 if (first_regno == last_regno)
2232 fprintf (stderr, "%s:\t", reg_name);
2233 else
2234 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2235
2236 len = 8;
2237 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2238 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2239 {
2240 if (len > 70)
2241 {
2242 fprintf (stderr, ",\n\t");
2243 len = 8;
2244 comma = "";
2245 }
2246
2247 if (rs6000_hard_regno_nregs[m][r] > 1)
2248 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2249 rs6000_hard_regno_nregs[m][r]);
2250 else
2251 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2252
2253 comma = ", ";
2254 }
2255
2256 if (call_used_regs[r])
2257 {
2258 if (len > 70)
2259 {
2260 fprintf (stderr, ",\n\t");
2261 len = 8;
2262 comma = "";
2263 }
2264
2265 len += fprintf (stderr, "%s%s", comma, "call-used");
2266 comma = ", ";
2267 }
2268
2269 if (fixed_regs[r])
2270 {
2271 if (len > 70)
2272 {
2273 fprintf (stderr, ",\n\t");
2274 len = 8;
2275 comma = "";
2276 }
2277
2278 len += fprintf (stderr, "%s%s", comma, "fixed");
2279 comma = ", ";
2280 }
2281
2282 if (len > 70)
2283 {
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2286 }
2287
2288 len += fprintf (stderr, "%sreg-class = %s", comma,
2289 reg_class_names[(int)rs6000_regno_regclass[r]]);
2290 comma = ", ";
2291
2292 if (len > 70)
2293 {
2294 fprintf (stderr, ",\n\t");
2295 comma = "";
2296 }
2297
2298 fprintf (stderr, "%sregno = %d\n", comma, r);
2299 }
2300 }
2301
2302 static const char *
2303 rs6000_debug_vector_unit (enum rs6000_vector v)
2304 {
2305 const char *ret;
2306
2307 switch (v)
2308 {
2309 case VECTOR_NONE: ret = "none"; break;
2310 case VECTOR_ALTIVEC: ret = "altivec"; break;
2311 case VECTOR_VSX: ret = "vsx"; break;
2312 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2313 case VECTOR_OTHER: ret = "other"; break;
2314 default: ret = "unknown"; break;
2315 }
2316
2317 return ret;
2318 }
2319
2320 /* Inner function printing just the address mask for a particular reload
2321 register class. */
2322 DEBUG_FUNCTION char *
2323 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2324 {
2325 static char ret[8];
2326 char *p = ret;
2327
2328 if ((mask & RELOAD_REG_VALID) != 0)
2329 *p++ = 'v';
2330 else if (keep_spaces)
2331 *p++ = ' ';
2332
2333 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2334 *p++ = 'm';
2335 else if (keep_spaces)
2336 *p++ = ' ';
2337
2338 if ((mask & RELOAD_REG_INDEXED) != 0)
2339 *p++ = 'i';
2340 else if (keep_spaces)
2341 *p++ = ' ';
2342
2343 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2344 *p++ = 'O';
2345 else if ((mask & RELOAD_REG_OFFSET) != 0)
2346 *p++ = 'o';
2347 else if (keep_spaces)
2348 *p++ = ' ';
2349
2350 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2351 *p++ = '+';
2352 else if (keep_spaces)
2353 *p++ = ' ';
2354
2355 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2356 *p++ = '+';
2357 else if (keep_spaces)
2358 *p++ = ' ';
2359
2360 if ((mask & RELOAD_REG_AND_M16) != 0)
2361 *p++ = '&';
2362 else if (keep_spaces)
2363 *p++ = ' ';
2364
2365 *p = '\0';
2366
2367 return ret;
2368 }
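/* For example (illustrative), a mask with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED and RELOAD_REG_OFFSET set prints as "vio" without
   KEEP_SPACES, or as "v io" padded with blanks for the unset flags when
   KEEP_SPACES is true.  */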
2369
2370 /* Print the address masks in a human readable fashion. */
2371 DEBUG_FUNCTION void
2372 rs6000_debug_print_mode (ssize_t m)
2373 {
2374 ssize_t rc;
2375 int spaces = 0;
2376 bool fuse_extra_p;
2377
2378 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2379 for (rc = 0; rc < N_RELOAD_REG; rc++)
2380 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2381 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2382
2383 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2384 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2385 fprintf (stderr, " Reload=%c%c",
2386 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2387 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2388 else
2389 spaces += sizeof (" Reload=sl") - 1;
2390
2391 if (reg_addr[m].scalar_in_vmx_p)
2392 {
2393 fprintf (stderr, "%*s Upper=y", spaces, "");
2394 spaces = 0;
2395 }
2396 else
2397 spaces += sizeof (" Upper=y") - 1;
2398
2399 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2400 || reg_addr[m].fused_toc);
2401 if (!fuse_extra_p)
2402 {
2403 for (rc = 0; rc < N_RELOAD_REG; rc++)
2404 {
2405 if (rc != RELOAD_REG_ANY)
2406 {
2407 	      if (reg_addr[m].fusion_addi_ld[rc]  != CODE_FOR_nothing
2409 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2410 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2411 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2412 {
2413 fuse_extra_p = true;
2414 break;
2415 }
2416 }
2417 }
2418 }
2419
2420 if (fuse_extra_p)
2421 {
2422 fprintf (stderr, "%*s Fuse:", spaces, "");
2423 spaces = 0;
2424
2425 for (rc = 0; rc < N_RELOAD_REG; rc++)
2426 {
2427 if (rc != RELOAD_REG_ANY)
2428 {
2429 char load, store;
2430
2431 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2432 load = 'l';
2433 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2434 load = 'L';
2435 else
2436 load = '-';
2437
2438 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2439 store = 's';
2440 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2441 store = 'S';
2442 else
2443 store = '-';
2444
2445 if (load == '-' && store == '-')
2446 spaces += 5;
2447 else
2448 {
2449 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2450 reload_reg_map[rc].name[0], load, store);
2451 spaces = 0;
2452 }
2453 }
2454 }
2455
2456 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2457 {
2458 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2459 spaces = 0;
2460 }
2461 else
2462 spaces += sizeof (" P8gpr") - 1;
2463
2464 if (reg_addr[m].fused_toc)
2465 {
2466 fprintf (stderr, "%*sToc", (spaces + 1), "");
2467 spaces = 0;
2468 }
2469 else
2470 spaces += sizeof (" Toc") - 1;
2471 }
2472 else
2473 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2474
2475 if (rs6000_vector_unit[m] != VECTOR_NONE
2476 || rs6000_vector_mem[m] != VECTOR_NONE)
2477 {
2478 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2479 spaces, "",
2480 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2481 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2482 }
2483
2484 fputs ("\n", stderr);
2485 }
2486
2487 #define DEBUG_FMT_ID "%-32s= "
2488 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2489 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2490 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
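/* E.g. (illustrative) fprintf (stderr, DEBUG_FMT_D, "tls_size", 16)
   left-justifies the name in a 32-column field, printing
   "tls_size<24 spaces>= 16" followed by a newline.  */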
2491
2492 /* Print various interesting information with -mdebug=reg. */
2493 static void
2494 rs6000_debug_reg_global (void)
2495 {
2496 static const char *const tf[2] = { "false", "true" };
2497 const char *nl = (const char *)0;
2498 int m;
2499 size_t m1, m2, v;
2500 char costly_num[20];
2501 char nop_num[20];
2502 char flags_buffer[40];
2503 const char *costly_str;
2504 const char *nop_str;
2505 const char *trace_str;
2506 const char *abi_str;
2507 const char *cmodel_str;
2508 struct cl_target_option cl_opts;
2509
2510 /* Modes we want tieable information on. */
2511 static const machine_mode print_tieable_modes[] = {
2512 QImode,
2513 HImode,
2514 SImode,
2515 DImode,
2516 TImode,
2517 PTImode,
2518 SFmode,
2519 DFmode,
2520 TFmode,
2521 IFmode,
2522 KFmode,
2523 SDmode,
2524 DDmode,
2525 TDmode,
2526 V16QImode,
2527 V8HImode,
2528 V4SImode,
2529 V2DImode,
2530 V1TImode,
2531 V32QImode,
2532 V16HImode,
2533 V8SImode,
2534 V4DImode,
2535 V2TImode,
2536 V4SFmode,
2537 V2DFmode,
2538 V8SFmode,
2539 V4DFmode,
2540 CCmode,
2541 CCUNSmode,
2542 CCEQmode,
2543 };
2544
2545 /* Virtual regs we are interested in. */
2546   static const struct {
2547 int regno; /* register number. */
2548 const char *name; /* register name. */
2549 } virtual_regs[] = {
2550 { STACK_POINTER_REGNUM, "stack pointer:" },
2551 { TOC_REGNUM, "toc: " },
2552 { STATIC_CHAIN_REGNUM, "static chain: " },
2553 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2554 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2555 { ARG_POINTER_REGNUM, "arg pointer: " },
2556 { FRAME_POINTER_REGNUM, "frame pointer:" },
2557 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2558 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2559 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2560 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2561 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2562 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2563 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2564     { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2565 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2566 };
2567
2568 fputs ("\nHard register information:\n", stderr);
2569 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2570 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2571 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2572 LAST_ALTIVEC_REGNO,
2573 "vs");
2574 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2575 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2576 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2577 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2578 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2579 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2580
2581 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2582 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2583 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2584
2585 fprintf (stderr,
2586 "\n"
2587 "d reg_class = %s\n"
2588 "f reg_class = %s\n"
2589 "v reg_class = %s\n"
2590 "wa reg_class = %s\n"
2591 "wb reg_class = %s\n"
2592 "wd reg_class = %s\n"
2593 "we reg_class = %s\n"
2594 "wf reg_class = %s\n"
2595 "wg reg_class = %s\n"
2596 "wh reg_class = %s\n"
2597 "wi reg_class = %s\n"
2598 "wj reg_class = %s\n"
2599 "wk reg_class = %s\n"
2600 "wl reg_class = %s\n"
2601 "wm reg_class = %s\n"
2602 "wo reg_class = %s\n"
2603 "wp reg_class = %s\n"
2604 "wq reg_class = %s\n"
2605 "wr reg_class = %s\n"
2606 "ws reg_class = %s\n"
2607 "wt reg_class = %s\n"
2608 "wu reg_class = %s\n"
2609 "wv reg_class = %s\n"
2610 "ww reg_class = %s\n"
2611 "wx reg_class = %s\n"
2612 "wy reg_class = %s\n"
2613 "wz reg_class = %s\n"
2614 "wA reg_class = %s\n"
2615 "wH reg_class = %s\n"
2616 "wI reg_class = %s\n"
2617 "wJ reg_class = %s\n"
2618 "wK reg_class = %s\n"
2619 "\n",
2620 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2621 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2622 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2623 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2624 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2625 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2626 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2627 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2628 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2629 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2630 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2631 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2632 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2633 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2634 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2635 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2636 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2637 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2638 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2639 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2640 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2641 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2642 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2643 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2644 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2645 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2646 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2647 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2648 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2649 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2650 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2651 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2652
2653 nl = "\n";
2654 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2655 rs6000_debug_print_mode (m);
2656
2657 fputs ("\n", stderr);
2658
2659 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2660 {
2661 machine_mode mode1 = print_tieable_modes[m1];
2662 bool first_time = true;
2663
2664 nl = (const char *)0;
2665 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2666 {
2667 machine_mode mode2 = print_tieable_modes[m2];
2668 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2669 {
2670 if (first_time)
2671 {
2672 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2673 nl = "\n";
2674 first_time = false;
2675 }
2676
2677 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2678 }
2679 }
2680
2681 if (!first_time)
2682 fputs ("\n", stderr);
2683 }
2684
2685 if (nl)
2686 fputs (nl, stderr);
2687
2688 if (rs6000_recip_control)
2689 {
2690 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2691
2692 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2693 if (rs6000_recip_bits[m])
2694 {
2695 fprintf (stderr,
2696 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2697 GET_MODE_NAME (m),
2698 (RS6000_RECIP_AUTO_RE_P (m)
2699 ? "auto"
2700 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2701 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2702 ? "auto"
2703 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2704 }
2705
2706 fputs ("\n", stderr);
2707 }
2708
2709 if (rs6000_cpu_index >= 0)
2710 {
2711 const char *name = processor_target_table[rs6000_cpu_index].name;
2712 HOST_WIDE_INT flags
2713 = processor_target_table[rs6000_cpu_index].target_enable;
2714
2715 sprintf (flags_buffer, "-mcpu=%s flags", name);
2716 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2717 }
2718 else
2719 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2720
2721 if (rs6000_tune_index >= 0)
2722 {
2723 const char *name = processor_target_table[rs6000_tune_index].name;
2724 HOST_WIDE_INT flags
2725 = processor_target_table[rs6000_tune_index].target_enable;
2726
2727 sprintf (flags_buffer, "-mtune=%s flags", name);
2728 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2729 }
2730 else
2731 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2732
2733 cl_target_option_save (&cl_opts, &global_options);
2734 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2735 rs6000_isa_flags);
2736
2737 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2738 rs6000_isa_flags_explicit);
2739
2740 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2741 rs6000_builtin_mask);
2742
2743 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2744
2745 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2746 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2747
2748 switch (rs6000_sched_costly_dep)
2749 {
2750 case max_dep_latency:
2751 costly_str = "max_dep_latency";
2752 break;
2753
2754 case no_dep_costly:
2755 costly_str = "no_dep_costly";
2756 break;
2757
2758 case all_deps_costly:
2759 costly_str = "all_deps_costly";
2760 break;
2761
2762 case true_store_to_load_dep_costly:
2763 costly_str = "true_store_to_load_dep_costly";
2764 break;
2765
2766 case store_to_load_dep_costly:
2767 costly_str = "store_to_load_dep_costly";
2768 break;
2769
2770 default:
2771 costly_str = costly_num;
2772 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2773 break;
2774 }
2775
2776 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2777
2778 switch (rs6000_sched_insert_nops)
2779 {
2780 case sched_finish_regroup_exact:
2781 nop_str = "sched_finish_regroup_exact";
2782 break;
2783
2784 case sched_finish_pad_groups:
2785 nop_str = "sched_finish_pad_groups";
2786 break;
2787
2788 case sched_finish_none:
2789 nop_str = "sched_finish_none";
2790 break;
2791
2792 default:
2793 nop_str = nop_num;
2794 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2795 break;
2796 }
2797
2798 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2799
2800 switch (rs6000_sdata)
2801 {
2802 default:
2803 case SDATA_NONE:
2804 break;
2805
2806 case SDATA_DATA:
2807 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2808 break;
2809
2810 case SDATA_SYSV:
2811 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2812 break;
2813
2814 case SDATA_EABI:
2815 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2816 break;
2817
2818 }
2819
2820 switch (rs6000_traceback)
2821 {
2822 case traceback_default: trace_str = "default"; break;
2823 case traceback_none: trace_str = "none"; break;
2824 case traceback_part: trace_str = "part"; break;
2825 case traceback_full: trace_str = "full"; break;
2826 default: trace_str = "unknown"; break;
2827 }
2828
2829 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2830
2831 switch (rs6000_current_cmodel)
2832 {
2833 case CMODEL_SMALL: cmodel_str = "small"; break;
2834 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2835 case CMODEL_LARGE: cmodel_str = "large"; break;
2836 default: cmodel_str = "unknown"; break;
2837 }
2838
2839 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2840
2841 switch (rs6000_current_abi)
2842 {
2843 case ABI_NONE: abi_str = "none"; break;
2844 case ABI_AIX: abi_str = "aix"; break;
2845 case ABI_ELFv2: abi_str = "ELFv2"; break;
2846 case ABI_V4: abi_str = "V4"; break;
2847 case ABI_DARWIN: abi_str = "darwin"; break;
2848 default: abi_str = "unknown"; break;
2849 }
2850
2851 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2852
2853 if (rs6000_altivec_abi)
2854 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2855
2856 if (rs6000_darwin64_abi)
2857 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2858
2859 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2860 (TARGET_SOFT_FLOAT ? "true" : "false"));
2861
2862 if (TARGET_LINK_STACK)
2863 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2864
2865 if (TARGET_P8_FUSION)
2866 {
2867 char options[80];
2868
2869 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2870 if (TARGET_TOC_FUSION)
2871 strcat (options, ", toc");
2872
2873 if (TARGET_P8_FUSION_SIGN)
2874 strcat (options, ", sign");
2875
2876 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2877 }
2878
2879 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2880 TARGET_SECURE_PLT ? "secure" : "bss");
2881 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2882 aix_struct_return ? "aix" : "sysv");
2883 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2884 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2885 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2886 tf[!!rs6000_align_branch_targets]);
2887 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2888 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2889 rs6000_long_double_type_size);
2890 if (rs6000_long_double_type_size == 128)
2891 {
2892 fprintf (stderr, DEBUG_FMT_S, "long double type",
2893 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2894 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2895 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2896 }
2897 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2898 (int)rs6000_sched_restricted_insns_priority);
2899 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2900 (int)END_BUILTINS);
2901 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2902 (int)RS6000_BUILTIN_COUNT);
2903
2904 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2905 (int)TARGET_FLOAT128_ENABLE_TYPE);
2906
2907 if (TARGET_VSX)
2908 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2909 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2910
2911 if (TARGET_DIRECT_MOVE_128)
2912 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2913 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2914 }
2915
2916 \f
2917 /* Update the addr mask bits in reg_addr to help secondary reload and the
2918    legitimate address support (GO_IF_LEGITIMATE_ADDRESS) figure out the
2919    appropriate addressing to use. */
2920
2921 static void
2922 rs6000_setup_reg_addr_masks (void)
2923 {
2924 ssize_t rc, reg, m, nregs;
2925 addr_mask_type any_addr_mask, addr_mask;
2926
2927 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2928 {
2929 machine_mode m2 = (machine_mode) m;
2930 bool complex_p = false;
2931 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2932 size_t msize;
2933
2934 if (COMPLEX_MODE_P (m2))
2935 {
2936 complex_p = true;
2937 m2 = GET_MODE_INNER (m2);
2938 }
2939
2940 msize = GET_MODE_SIZE (m2);
2941
2942 /* SDmode is special in that we want to access it only via REG+REG
2943 addressing on power7 and above, since we want to use the LFIWZX and
2944 STFIWZX instructions to load it. */
2945 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2946
2947 any_addr_mask = 0;
2948 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2949 {
2950 addr_mask = 0;
2951 reg = reload_reg_map[rc].reg;
2952
2953 /* Can mode values go in the GPR/FPR/Altivec registers? */
2954 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2955 {
2956 bool small_int_vsx_p = (small_int_p
2957 && (rc == RELOAD_REG_FPR
2958 || rc == RELOAD_REG_VMX));
2959
2960 nregs = rs6000_hard_regno_nregs[m][reg];
2961 addr_mask |= RELOAD_REG_VALID;
2962
2963 /* Indicate if the mode takes more than 1 physical register. If
2964 it takes a single register, indicate it can do REG+REG
2965 addressing. Small integers in VSX registers can only do
2966 REG+REG addressing. */
2967 if (small_int_vsx_p)
2968 addr_mask |= RELOAD_REG_INDEXED;
2969 else if (nregs > 1 || m == BLKmode || complex_p)
2970 addr_mask |= RELOAD_REG_MULTIPLE;
2971 else
2972 addr_mask |= RELOAD_REG_INDEXED;
2973
2974 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2975 addressing. If we allow scalars into Altivec registers,
2976 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2977
2978 For VSX systems, we don't allow update addressing for
2979 DFmode/SFmode if those registers can go in both the
2980 traditional floating point registers and Altivec registers.
2981 The load/store instructions for the Altivec registers do not
2982 have update forms. If we allowed update addressing, it seems
2983 to break IV-OPT code using floating point if the index type is
2984 int instead of long (PR target/81550 and target/84042). */
2985
2986 if (TARGET_UPDATE
2987 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2988 && msize <= 8
2989 && !VECTOR_MODE_P (m2)
2990 && !FLOAT128_VECTOR_P (m2)
2991 && !complex_p
2992 && (m != E_DFmode || !TARGET_VSX)
2993 && (m != E_SFmode || !TARGET_P8_VECTOR)
2994 && !small_int_vsx_p)
2995 {
2996 addr_mask |= RELOAD_REG_PRE_INCDEC;
2997
2998 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2999 we don't allow PRE_MODIFY for some multi-register
3000 operations. */
3001 switch (m)
3002 {
3003 default:
3004 addr_mask |= RELOAD_REG_PRE_MODIFY;
3005 break;
3006
3007 case E_DImode:
3008 if (TARGET_POWERPC64)
3009 addr_mask |= RELOAD_REG_PRE_MODIFY;
3010 break;
3011
3012 case E_DFmode:
3013 case E_DDmode:
3014 if (TARGET_HARD_FLOAT)
3015 addr_mask |= RELOAD_REG_PRE_MODIFY;
3016 break;
3017 }
3018 }
3019 }
3020
3021 /* GPR and FPR registers can do REG+OFFSET addressing, except
3022 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
3023 for 64-bit scalars and 32-bit SFmode to altivec registers. */
3024 if ((addr_mask != 0) && !indexed_only_p
3025 && msize <= 8
3026 && (rc == RELOAD_REG_GPR
3027 || ((msize == 8 || m2 == SFmode)
3028 && (rc == RELOAD_REG_FPR
3029 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
3030 addr_mask |= RELOAD_REG_OFFSET;
3031
3032 	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
3033 	     instructions are enabled.  The offset for 128-bit VSX registers is
3034 	     only 12 bits.  While GPRs can handle the full offset range, VSX
3035 	     registers can only handle the restricted range. */
3036 else if ((addr_mask != 0) && !indexed_only_p
3037 && msize == 16 && TARGET_P9_VECTOR
3038 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
3039 || (m2 == TImode && TARGET_VSX)))
3040 {
3041 addr_mask |= RELOAD_REG_OFFSET;
3042 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
3043 addr_mask |= RELOAD_REG_QUAD_OFFSET;
3044 }
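	  /* Illustrative data point: when compiling for power9
	     (-mcpu=power9), V2DFmode therefore gets RELOAD_REG_OFFSET and
	     RELOAD_REG_QUAD_OFFSET in its FPR and VMX masks, which later
	     steers the choice between D-form and DQ-form (lxv/stxv)
	     addressing.  */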
3045
3046 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
3047 addressing on 128-bit types. */
3048 if (rc == RELOAD_REG_VMX && msize == 16
3049 && (addr_mask & RELOAD_REG_VALID) != 0)
3050 addr_mask |= RELOAD_REG_AND_M16;
3051
3052 reg_addr[m].addr_mask[rc] = addr_mask;
3053 any_addr_mask |= addr_mask;
3054 }
3055
3056 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
3057 }
3058 }
3059
3060 \f
3061 /* Initialize the various global tables that are based on register size. */
3062 static void
3063 rs6000_init_hard_regno_mode_ok (bool global_init_p)
3064 {
3065 ssize_t r, m, c;
3066 int align64;
3067 int align32;
3068
3069 /* Precalculate REGNO_REG_CLASS. */
3070 rs6000_regno_regclass[0] = GENERAL_REGS;
3071 for (r = 1; r < 32; ++r)
3072 rs6000_regno_regclass[r] = BASE_REGS;
3073
3074 for (r = 32; r < 64; ++r)
3075 rs6000_regno_regclass[r] = FLOAT_REGS;
3076
3077 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
3078 rs6000_regno_regclass[r] = NO_REGS;
3079
3080 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3081 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3082
3083 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3084 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3085 rs6000_regno_regclass[r] = CR_REGS;
3086
3087 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3088 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3089 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3090 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3091 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3092 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3093 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3094 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3095 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3096 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3097
3098 /* Precalculate register class to simpler reload register class. We don't
3099 need all of the register classes that are combinations of different
3100 classes, just the simple ones that have constraint letters. */
3101 for (c = 0; c < N_REG_CLASSES; c++)
3102 reg_class_to_reg_type[c] = NO_REG_TYPE;
3103
3104 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3105 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3106 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3107 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3108 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3109 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3110 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3111 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3112 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3113 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3114
3115 if (TARGET_VSX)
3116 {
3117 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3118 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3119 }
3120 else
3121 {
3122 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3123 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3124 }
3125
3126   /* Precalculate the valid memory formats as well as the vector information;
3127      this must be set up before the rs6000_hard_regno_nregs_internal calls
3128      below.  */
3129 gcc_assert ((int)VECTOR_NONE == 0);
3130 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3131 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3132
3133 gcc_assert ((int)CODE_FOR_nothing == 0);
3134 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3135
3136 gcc_assert ((int)NO_REGS == 0);
3137 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3138
3139   /* The VSX hardware allows native alignment for vectors; TARGET_VSX_ALIGN_128 controls
3140      whether the compiler uses that native alignment or still assumes 128-bit alignment. */
3141 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3142 {
3143 align64 = 64;
3144 align32 = 32;
3145 }
3146 else
3147 {
3148 align64 = 128;
3149 align32 = 128;
3150 }
3151
3152 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3153 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3154 if (TARGET_FLOAT128_TYPE)
3155 {
3156 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3157 rs6000_vector_align[KFmode] = 128;
3158
3159 if (FLOAT128_IEEE_P (TFmode))
3160 {
3161 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3162 rs6000_vector_align[TFmode] = 128;
3163 }
3164 }
3165
3166 /* V2DF mode, VSX only. */
3167 if (TARGET_VSX)
3168 {
3169 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3170 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3171 rs6000_vector_align[V2DFmode] = align64;
3172 }
3173
3174 /* V4SF mode, either VSX or Altivec. */
3175 if (TARGET_VSX)
3176 {
3177 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3178 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3179 rs6000_vector_align[V4SFmode] = align32;
3180 }
3181 else if (TARGET_ALTIVEC)
3182 {
3183 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3184 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3185 rs6000_vector_align[V4SFmode] = align32;
3186 }
3187
3188   /* V16QImode, V8HImode, V4SImode are Altivec only, but may use VSX loads
3189      and stores. */
3190 if (TARGET_ALTIVEC)
3191 {
3192 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3193 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3194 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3195 rs6000_vector_align[V4SImode] = align32;
3196 rs6000_vector_align[V8HImode] = align32;
3197 rs6000_vector_align[V16QImode] = align32;
3198
3199 if (TARGET_VSX)
3200 {
3201 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3202 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3203 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3204 }
3205 else
3206 {
3207 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3208 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3209 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3210 }
3211 }
3212
3213   /* V2DImode: full arithmetic support depends on the ISA 2.07 vector instructions.  Allow
3214      it under VSX to do insert/splat/extract.  Altivec doesn't have 64-bit integer support. */
3215 if (TARGET_VSX)
3216 {
3217 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3218 rs6000_vector_unit[V2DImode]
3219 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3220 rs6000_vector_align[V2DImode] = align64;
3221
3222 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3223 rs6000_vector_unit[V1TImode]
3224 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3225 rs6000_vector_align[V1TImode] = 128;
3226 }
3227
3228 /* DFmode, see if we want to use the VSX unit. Memory is handled
3229 differently, so don't set rs6000_vector_mem. */
3230 if (TARGET_VSX)
3231 {
3232 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3233 rs6000_vector_align[DFmode] = 64;
3234 }
3235
3236 /* SFmode, see if we want to use the VSX unit. */
3237 if (TARGET_P8_VECTOR)
3238 {
3239 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3240 rs6000_vector_align[SFmode] = 32;
3241 }
3242
3243 /* Allow TImode in VSX register and set the VSX memory macros. */
3244 if (TARGET_VSX)
3245 {
3246 rs6000_vector_mem[TImode] = VECTOR_VSX;
3247 rs6000_vector_align[TImode] = align64;
3248 }
3249
3250 /* Register class constraints for the constraints that depend on compile
3251 switches. When the VSX code was added, different constraints were added
3252 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3253 of the VSX registers are used. The register classes for scalar floating
3254 point types is set, based on whether we allow that type into the upper
3255 (Altivec) registers. GCC has register classes to target the Altivec
3256 registers for load/store operations, to select using a VSX memory
3257 operation instead of the traditional floating point operation. The
3258 constraints are:
3259
3260 d - Register class to use with traditional DFmode instructions.
3261 f - Register class to use with traditional SFmode instructions.
3262 v - Altivec register.
3263 wa - Any VSX register.
3264 wc - Reserved to represent individual CR bits (used in LLVM).
3265 wd - Preferred register class for V2DFmode.
3266 wf - Preferred register class for V4SFmode.
3267 wg - Float register for power6x move insns.
3268 wh - FP register for direct move instructions.
3269 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3270 wj - FP or VSX register to hold 64-bit integers for direct moves.
3271 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3272 wl - Float register if we can do 32-bit signed int loads.
3273 wm - VSX register for ISA 2.07 direct move operations.
3274 wn - always NO_REGS.
3275 wr - GPR if 64-bit mode is permitted.
3276 ws - Register class to do ISA 2.06 DF operations.
3277 wt - VSX register for TImode in VSX registers.
3278 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3279 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3280 ww - Register class to do SF conversions in with VSX operations.
3281 wx - Float register if we can do 32-bit int stores.
3282 wy - Register class to do ISA 2.07 SF operations.
3283 wz - Float register if we can do 32-bit unsigned int loads.
3284 wH - Altivec register if SImode is allowed in VSX registers.
3285 wI - VSX register if SImode is allowed in VSX registers.
3286 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3287 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
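
/* Illustrative aside (hypothetical user code, not part of the original
   file): these letters are visible to md patterns and to inline asm.
   A minimal sketch, assuming -mvsx, where "wa" lets the compiler pick
   any VSX register for a scalar double:

	double
	vsx_abs (double x)
	{
	  double r;
	  __asm__ ("xsabsdp %x0,%x1" : "=wa" (r) : "wa" (x));
	  return r;
	}

   The %x operand modifier prints the full VSX register number (0-63)
   rather than the FPR number.  */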
3288
3289 if (TARGET_HARD_FLOAT)
3290 {
3291 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3292 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3293 }
3294
3295 if (TARGET_VSX)
3296 {
3297 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3298 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3299 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3300 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3301 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3302 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3303 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3304 }
3305
3306 /* Add conditional constraints based on various options, to allow us to
3307 collapse multiple insn patterns. */
3308 if (TARGET_ALTIVEC)
3309 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3310
3311 if (TARGET_MFPGPR) /* DFmode */
3312 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3313
3314 if (TARGET_LFIWAX)
3315 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3316
3317 if (TARGET_DIRECT_MOVE)
3318 {
3319 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3320 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3321 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3322 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3323 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3324 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3325 }
3326
3327 if (TARGET_POWERPC64)
3328 {
3329 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3330 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3331 }
3332
3333 if (TARGET_P8_VECTOR) /* SFmode */
3334 {
3335 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3336 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3337 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3338 }
3339 else if (TARGET_VSX)
3340 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3341
3342 if (TARGET_STFIWX)
3343 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3344
3345 if (TARGET_LFIWZX)
3346 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3347
3348 if (TARGET_FLOAT128_TYPE)
3349 {
3350 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3351 if (FLOAT128_IEEE_P (TFmode))
3352 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3353 }
3354
3355 if (TARGET_P9_VECTOR)
3356 {
3357 /* Support for new D-form instructions. */
3358 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3359
3360 /* Support for ISA 3.0 (power9) vectors. */
3361 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3362 }
3363
3364 /* Support for new direct moves (ISA 3.0 + 64bit). */
3365 if (TARGET_DIRECT_MOVE_128)
3366 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3367
3368 /* Support small integers in VSX registers. */
3369 if (TARGET_P8_VECTOR)
3370 {
3371 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3372 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3373 if (TARGET_P9_VECTOR)
3374 {
3375 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3376 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3377 }
3378 }
3379
3380 /* Set up the reload helper and direct move functions. */
3381 if (TARGET_VSX || TARGET_ALTIVEC)
3382 {
3383 if (TARGET_64BIT)
3384 {
3385 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3386 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3387 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3388 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3389 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3390 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3391 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3392 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3393 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3394 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3395 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3396 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3397 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3398 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3399 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3400 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3401 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3402 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3403 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3404 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3405
3406 if (FLOAT128_VECTOR_P (KFmode))
3407 {
3408 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3409 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3410 }
3411
3412 if (FLOAT128_VECTOR_P (TFmode))
3413 {
3414 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3415 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3416 }
3417
3418 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3419 available. */
3420 if (TARGET_NO_SDMODE_STACK)
3421 {
3422 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3423 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3424 }
3425
3426 if (TARGET_VSX)
3427 {
3428 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3429 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3430 }
3431
3432 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3433 {
3434 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3435 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3436 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3437 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3438 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3439 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3440 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3441 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3442 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3443
3444 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3445 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3446 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3447 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3448 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3449 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3450 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3451 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3452 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3453
3454 if (FLOAT128_VECTOR_P (KFmode))
3455 {
3456 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3457 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3458 }
3459
3460 if (FLOAT128_VECTOR_P (TFmode))
3461 {
3462 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3463 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3464 }
3465 }
3466 }
3467 else
3468 {
3469 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3470 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3471 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3472 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3473 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3474 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3475 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3476 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3477 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3478 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3479 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3480 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3481 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3482 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3483 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3484 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3485 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3486 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3487 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3488 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3489
3490 if (FLOAT128_VECTOR_P (KFmode))
3491 {
3492 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3493 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3494 }
3495
3496 if (FLOAT128_IEEE_P (TFmode))
3497 {
3498 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3499 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3500 }
3501
3502 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3503 available. */
3504 if (TARGET_NO_SDMODE_STACK)
3505 {
3506 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3507 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3508 }
3509
3510 if (TARGET_VSX)
3511 {
3512 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3513 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3514 }
3515
3516 if (TARGET_DIRECT_MOVE)
3517 {
3518 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3519 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3520 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3521 }
3522 }
3523
3524 reg_addr[DFmode].scalar_in_vmx_p = true;
3525 reg_addr[DImode].scalar_in_vmx_p = true;
3526
3527 if (TARGET_P8_VECTOR)
3528 {
3529 reg_addr[SFmode].scalar_in_vmx_p = true;
3530 reg_addr[SImode].scalar_in_vmx_p = true;
3531
3532 if (TARGET_P9_VECTOR)
3533 {
3534 reg_addr[HImode].scalar_in_vmx_p = true;
3535 reg_addr[QImode].scalar_in_vmx_p = true;
3536 }
3537 }
3538 }
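
/* Illustrative aside (a conceptual sketch, not quoted from the md
   patterns): the reload_*_store/load entries above give reload a GPR
   scratch for addresses the vector unit cannot encode.  Storing an
   Altivec register to 16(r9) without VSX has no D-form encoding, so
   the handler emits roughly:

	addi  r10,r9,16		# scratch = base + offset
	stvx  v2,0,r10		# X-form store through the scratch

   rs6000_secondary_reload points reload at these CODE_FOR_* expanders.  */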
3539
3540 /* Set up the fusion operations. */
3541 if (TARGET_P8_FUSION)
3542 {
3543 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3544 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3545 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3546 if (TARGET_64BIT)
3547 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3548 }
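
/* Illustrative aside (assumed instruction pairing, not from this file):
   power8 fuses an addis with a dependent load into one operation, so
   the fusion_gpr_load_* patterns keep pairs like

	addis r10,r2,sym@toc@ha
	lwz   r9,sym@toc@l(r10)

   adjacent, letting the hardware dispatch them as a single fused op.  */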
3549
3550 if (TARGET_P9_FUSION)
3551 {
3552 struct fuse_insns {
3553 enum machine_mode mode; /* mode of the fused type. */
3554 enum machine_mode pmode; /* pointer mode. */
3555 enum rs6000_reload_reg_type rtype; /* register type. */
3556 enum insn_code load; /* load insn. */
3557 enum insn_code store; /* store insn. */
3558 };
3559
3560 static const struct fuse_insns addis_insns[] = {
3561 { E_SFmode, E_DImode, RELOAD_REG_FPR,
3562 CODE_FOR_fusion_vsx_di_sf_load,
3563 CODE_FOR_fusion_vsx_di_sf_store },
3564
3565 { E_SFmode, E_SImode, RELOAD_REG_FPR,
3566 CODE_FOR_fusion_vsx_si_sf_load,
3567 CODE_FOR_fusion_vsx_si_sf_store },
3568
3569 { E_DFmode, E_DImode, RELOAD_REG_FPR,
3570 CODE_FOR_fusion_vsx_di_df_load,
3571 CODE_FOR_fusion_vsx_di_df_store },
3572
3573 { E_DFmode, E_SImode, RELOAD_REG_FPR,
3574 CODE_FOR_fusion_vsx_si_df_load,
3575 CODE_FOR_fusion_vsx_si_df_store },
3576
3577 { E_DImode, E_DImode, RELOAD_REG_FPR,
3578 CODE_FOR_fusion_vsx_di_di_load,
3579 CODE_FOR_fusion_vsx_di_di_store },
3580
3581 { E_DImode, E_SImode, RELOAD_REG_FPR,
3582 CODE_FOR_fusion_vsx_si_di_load,
3583 CODE_FOR_fusion_vsx_si_di_store },
3584
3585 { E_QImode, E_DImode, RELOAD_REG_GPR,
3586 CODE_FOR_fusion_gpr_di_qi_load,
3587 CODE_FOR_fusion_gpr_di_qi_store },
3588
3589 { E_QImode, E_SImode, RELOAD_REG_GPR,
3590 CODE_FOR_fusion_gpr_si_qi_load,
3591 CODE_FOR_fusion_gpr_si_qi_store },
3592
3593 { E_HImode, E_DImode, RELOAD_REG_GPR,
3594 CODE_FOR_fusion_gpr_di_hi_load,
3595 CODE_FOR_fusion_gpr_di_hi_store },
3596
3597 { E_HImode, E_SImode, RELOAD_REG_GPR,
3598 CODE_FOR_fusion_gpr_si_hi_load,
3599 CODE_FOR_fusion_gpr_si_hi_store },
3600
3601 { E_SImode, E_DImode, RELOAD_REG_GPR,
3602 CODE_FOR_fusion_gpr_di_si_load,
3603 CODE_FOR_fusion_gpr_di_si_store },
3604
3605 { E_SImode, E_SImode, RELOAD_REG_GPR,
3606 CODE_FOR_fusion_gpr_si_si_load,
3607 CODE_FOR_fusion_gpr_si_si_store },
3608
3609 { E_SFmode, E_DImode, RELOAD_REG_GPR,
3610 CODE_FOR_fusion_gpr_di_sf_load,
3611 CODE_FOR_fusion_gpr_di_sf_store },
3612
3613 { E_SFmode, E_SImode, RELOAD_REG_GPR,
3614 CODE_FOR_fusion_gpr_si_sf_load,
3615 CODE_FOR_fusion_gpr_si_sf_store },
3616
3617 { E_DImode, E_DImode, RELOAD_REG_GPR,
3618 CODE_FOR_fusion_gpr_di_di_load,
3619 CODE_FOR_fusion_gpr_di_di_store },
3620
3621 { E_DFmode, E_DImode, RELOAD_REG_GPR,
3622 CODE_FOR_fusion_gpr_di_df_load,
3623 CODE_FOR_fusion_gpr_di_df_store },
3624 };
3625
3626 machine_mode cur_pmode = Pmode;
3627 size_t i;
3628
3629 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3630 {
3631 machine_mode xmode = addis_insns[i].mode;
3632 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3633
3634 if (addis_insns[i].pmode != cur_pmode)
3635 continue;
3636
3637 if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
3638 continue;
3639
3640 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3641 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3642
3643 if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
3644 {
3645 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3646 = addis_insns[i].load;
3647 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3648 = addis_insns[i].store;
3649 }
3650 }
3651 }
3652
3653 /* Note which types support fusing a TOC setup with a memory insn. We only
3654 do fused TOCs for medium/large code models. */
3655 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3656 && (TARGET_CMODEL != CMODEL_SMALL))
3657 {
3658 reg_addr[QImode].fused_toc = true;
3659 reg_addr[HImode].fused_toc = true;
3660 reg_addr[SImode].fused_toc = true;
3661 reg_addr[DImode].fused_toc = true;
3662 if (TARGET_HARD_FLOAT)
3663 {
3664 reg_addr[SFmode].fused_toc = true;
3665 reg_addr[DFmode].fused_toc = true;
3666 }
3667 }
3668
3669 /* Precalculate HARD_REGNO_NREGS. */
3670 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3671 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3672 rs6000_hard_regno_nregs[m][r]
3673 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3674
3675 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3676 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3677 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3678 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3679 rs6000_hard_regno_mode_ok_p[m][r] = true;
3680
3681 /* Precalculate CLASS_MAX_NREGS sizes. */
3682 for (c = 0; c < LIM_REG_CLASSES; ++c)
3683 {
3684 int reg_size;
3685
3686 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3687 reg_size = UNITS_PER_VSX_WORD;
3688
3689 else if (c == ALTIVEC_REGS)
3690 reg_size = UNITS_PER_ALTIVEC_WORD;
3691
3692 else if (c == FLOAT_REGS)
3693 reg_size = UNITS_PER_FP_WORD;
3694
3695 else
3696 reg_size = UNITS_PER_WORD;
3697
3698 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3699 {
3700 machine_mode m2 = (machine_mode)m;
3701 int reg_size2 = reg_size;
3702
3703 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3704 in VSX. */
3705 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3706 reg_size2 = UNITS_PER_FP_WORD;
3707
3708 rs6000_class_max_nregs[m][c]
3709 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3710 }
3711 }
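
/* Worked example (illustrative): the computation above is a ceiling
   division, nregs = (size + reg_size - 1) / reg_size.  With
   UNITS_PER_FP_WORD == 8, IBM 128-bit floating point (16 bytes) in
   FLOAT_REGS needs (16 + 8 - 1) / 8 = 2 registers, while V4SImode
   (16 bytes) in VSX_REGS needs (16 + 16 - 1) / 16 = 1 register.  */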
3712
3713 /* Calculate which modes to automatically generate code to use the
3714 reciprocal divide and square root instructions. In the future, possibly
3715 automatically generate the instructions even if the user did not specify
3716 -mrecip. On older machines, the double precision reciprocal sqrt estimate
3717 is not accurate enough. */
3718 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3719 if (TARGET_FRES)
3720 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3721 if (TARGET_FRE)
3722 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3723 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3724 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3725 if (VECTOR_UNIT_VSX_P (V2DFmode))
3726 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3727
3728 if (TARGET_FRSQRTES)
3729 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3730 if (TARGET_FRSQRTE)
3731 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3732 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3733 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3734 if (VECTOR_UNIT_VSX_P (V2DFmode))
3735 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3736
3737 if (rs6000_recip_control)
3738 {
3739 if (!flag_finite_math_only)
3740 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
3741 "-ffast-math");
3742 if (flag_trapping_math)
3743 warning (0, "%qs requires %qs or %qs", "-mrecip",
3744 "-fno-trapping-math", "-ffast-math");
3745 if (!flag_reciprocal_math)
3746 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3747 "-ffast-math");
3748 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3749 {
3750 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3751 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3752 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3753
3754 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3755 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3756 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3757
3758 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3759 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3760 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3761
3762 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3763 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3764 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3765
3766 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3767 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3768 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3769
3770 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3771 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3772 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3773
3774 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3775 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3776 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3777
3778 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3779 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3780 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3781 }
3782 }
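
/* Illustrative sketch (an assumption about what the expanders emit, not
   a quote of them): with -mrecip, a division a/b becomes a hardware
   estimate refined by Newton-Raphson iterations, roughly:

	e = fre (b);		   -- hardware reciprocal estimate
	e = e * (2.0 - b * e);	   -- each step roughly doubles
	e = e * (2.0 - b * e);	   -- the number of accurate bits
	q = a * e;

   and rsqrt similarly refines frsqrte with
   e = e * (1.5 - 0.5 * b * e * e).  */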
3783
3784 /* Update the addr mask bits in reg_addr to help secondary reload and the
3785 legitimate address support figure out the appropriate addressing to
3786 use. */
3787 rs6000_setup_reg_addr_masks ();
3788
3789 if (global_init_p || TARGET_DEBUG_TARGET)
3790 {
3791 if (TARGET_DEBUG_REG)
3792 rs6000_debug_reg_global ();
3793
3794 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3795 fprintf (stderr,
3796 "SImode variable mult cost = %d\n"
3797 "SImode constant mult cost = %d\n"
3798 "SImode short constant mult cost = %d\n"
3799 "DImode multipliciation cost = %d\n"
3800 "SImode division cost = %d\n"
3801 "DImode division cost = %d\n"
3802 "Simple fp operation cost = %d\n"
3803 "DFmode multiplication cost = %d\n"
3804 "SFmode division cost = %d\n"
3805 "DFmode division cost = %d\n"
3806 "cache line size = %d\n"
3807 "l1 cache size = %d\n"
3808 "l2 cache size = %d\n"
3809 "simultaneous prefetches = %d\n"
3810 "\n",
3811 rs6000_cost->mulsi,
3812 rs6000_cost->mulsi_const,
3813 rs6000_cost->mulsi_const9,
3814 rs6000_cost->muldi,
3815 rs6000_cost->divsi,
3816 rs6000_cost->divdi,
3817 rs6000_cost->fp,
3818 rs6000_cost->dmul,
3819 rs6000_cost->sdiv,
3820 rs6000_cost->ddiv,
3821 rs6000_cost->cache_line_size,
3822 rs6000_cost->l1_cache_size,
3823 rs6000_cost->l2_cache_size,
3824 rs6000_cost->simultaneous_prefetches);
3825 }
3826 }
3827
3828 #if TARGET_MACHO
3829 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3830
3831 static void
3832 darwin_rs6000_override_options (void)
3833 {
3834 /* The Darwin ABI always includes AltiVec, so it can't be (validly)
3835 turned off. */
3836 rs6000_altivec_abi = 1;
3837 TARGET_ALTIVEC_VRSAVE = 1;
3838 rs6000_current_abi = ABI_DARWIN;
3839
3840 if (DEFAULT_ABI == ABI_DARWIN
3841 && TARGET_64BIT)
3842 darwin_one_byte_bool = 1;
3843
3844 if (TARGET_64BIT && ! TARGET_POWERPC64)
3845 {
3846 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3847 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3848 }
3849 if (flag_mkernel)
3850 {
3851 rs6000_default_long_calls = 1;
3852 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3853 }
3854
3855 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3856 Altivec. */
3857 if (!flag_mkernel && !flag_apple_kext
3858 && TARGET_64BIT
3859 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3860 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3861
3862 /* Unless the user (not the configurer) has explicitly overridden
3863 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3864 G4 unless targeting the kernel. */
3865 if (!flag_mkernel
3866 && !flag_apple_kext
3867 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3868 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3869 && ! global_options_set.x_rs6000_cpu_index)
3870 {
3871 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3872 }
3873 }
3874 #endif
3875
3876 /* If not otherwise specified by a target, make 'long double' equivalent to
3877 'double'. */
3878
3879 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3880 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3881 #endif
3882
3883 /* Return the builtin mask of the various options used that could affect which
3884 builtins were used. In the past we used target_flags, but we've run out of
3885 bits, and some options are no longer in target_flags. */
3886
3887 HOST_WIDE_INT
3888 rs6000_builtin_mask_calculate (void)
3889 {
3890 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3891 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3892 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3893 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3894 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3895 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3896 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3897 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3898 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3899 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3900 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3901 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3902 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3903 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3904 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3905 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3906 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3907 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3908 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3909 | ((TARGET_LONG_DOUBLE_128
3910 && TARGET_HARD_FLOAT
3911 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3912 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3913 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3914 }
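
/* Illustrative usage (a sketch, not from this file): builtin expanders
   compare the features a builtin needs against this mask, e.g.

	HOST_WIDE_INT need = RS6000_BTM_ALTIVEC | RS6000_BTM_P8_VECTOR;
	if ((rs6000_builtin_mask_calculate () & need) == need)
	  ...			   -- the builtin is available

   so a #pragma GCC target change recomputes which builtins exist.  */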
3915
3916 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3917 to clobber the XER[CA] bit because clobbering that bit without telling
3918 the compiler worked just fine with versions of GCC before GCC 5, and
3919 breaking a lot of older code in ways that are hard to track down is
3920 not such a great idea. */
3921
3922 static rtx_insn *
3923 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3924 vec<const char *> &/*constraints*/,
3925 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3926 {
3927 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3928 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3929 return NULL;
3930 }
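
/* Illustrative aside (hypothetical user code): the implicit clobber
   matters for asm statements such as

	__asm__ ("addic %0,%1,-1\n\taddze %0,%0" : "=r" (y) : "r" (x));

   where addic writes XER[CA] and addze reads it without either being
   declared; the blanket clobber keeps such pre-GCC-5 code working.  */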
3931
3932 /* Override command line options.
3933
3934 Combine build-specific configuration information with options
3935 specified on the command line to set various state variables which
3936 influence code generation, optimization, and expansion of built-in
3937 functions. Ensure that command-line configuration preferences are
3938 compatible with each other and with the build configuration; issue
3939 warnings while adjusting configuration or error messages while
3940 rejecting configuration.
3941
3942 Upon entry to this function:
3943
3944 This function is called once at the beginning of
3945 compilation, and then again at the start and end of compiling
3946 each section of code that has a different configuration, as
3947 indicated, for example, by adding the
3948
3949 __attribute__((__target__("cpu=power9")))
3950
3951 qualifier to a function definition or, for example, by bracketing
3952 code between
3953
3954 #pragma GCC target("altivec")
3955
3956 and
3957
3958 #pragma GCC reset_options
3959
3960 directives. Parameter global_init_p is true for the initial
3961 invocation, which initializes global variables, and false for all
3962 subsequent invocations.
3963
3964
3965 Various global state information is assumed to be valid. This
3966 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3967 default CPU specified at build configure time, TARGET_DEFAULT,
3968 representing the default set of option flags for the default
3969 target, and global_options_set.x_rs6000_isa_flags, representing
3970 which options were requested on the command line.
3971
3972 Upon return from this function:
3973
3974 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3975 was set by name on the command line. Additionally, if certain
3976 attributes are automatically enabled or disabled by this function
3977 in order to assure compatibility between options and
3978 configuration, the flags associated with those attributes are
3979 also set. By setting these "explicit bits", we avoid the risk
3980 that other code might accidentally overwrite these particular
3981 attributes with "default values".
3982
3983 The various bits of rs6000_isa_flags are set to indicate the
3984 target options that have been selected for the most current
3985 compilation efforts. This has the effect of also turning on the
3986 associated TARGET_XXX values since these are macros which are
3987 generally defined to test the corresponding bit of the
3988 rs6000_isa_flags variable.
3989
3990 The variable rs6000_builtin_mask is set to represent the target
3991 options for the most current compilation efforts, consistent with
3992 the current contents of rs6000_isa_flags. This variable controls
3993 expansion of built-in functions.
3994
3995 Various other global variables and fields of global structures
3996 (over 50 in all) are initialized to reflect the desired options
3997 for the most current compilation efforts. */
3998
3999 static bool
4000 rs6000_option_override_internal (bool global_init_p)
4001 {
4002 bool ret = true;
4003
4004 HOST_WIDE_INT set_masks;
4005 HOST_WIDE_INT ignore_masks;
4006 int cpu_index = -1;
4007 int tune_index;
4008 struct cl_target_option *main_target_opt
4009 = ((global_init_p || target_option_default_node == NULL)
4010 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
4011
4012 /* Print defaults. */
4013 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
4014 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
4015
4016 /* Remember the explicit arguments. */
4017 if (global_init_p)
4018 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
4019
4020 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
4021 library functions, so warn about it. The flag may be useful for
4022 performance studies from time to time though, so don't disable it
4023 entirely. */
4024 if (global_options_set.x_rs6000_alignment_flags
4025 && rs6000_alignment_flags == MASK_ALIGN_POWER
4026 && DEFAULT_ABI == ABI_DARWIN
4027 && TARGET_64BIT)
4028 warning (0, "%qs is not supported for 64-bit Darwin;"
4029 " it is incompatible with the installed C and C++ libraries",
4030 "-malign-power");
4031
4032 /* Numerous experiments show that IRA-based loop pressure
4033 calculation works better for RTL loop invariant motion on targets
4034 with enough (>= 32) registers. It is an expensive optimization,
4035 so it is enabled only when optimizing for peak performance. */
4036 if (optimize >= 3 && global_init_p
4037 && !global_options_set.x_flag_ira_loop_pressure)
4038 flag_ira_loop_pressure = 1;
4039
4040 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
4041 for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
4042 option was already specified. */
4043 if (flag_sanitize & SANITIZE_USER_ADDRESS
4044 && !global_options_set.x_flag_asynchronous_unwind_tables)
4045 flag_asynchronous_unwind_tables = 1;
4046
4047 /* Set the pointer size. */
4048 if (TARGET_64BIT)
4049 {
4050 rs6000_pmode = DImode;
4051 rs6000_pointer_size = 64;
4052 }
4053 else
4054 {
4055 rs6000_pmode = SImode;
4056 rs6000_pointer_size = 32;
4057 }
4058
4059 /* Some OSs don't support saving the high part of 64-bit registers on context
4060 switch. Other OSs don't support saving Altivec registers. On those OSs,
4061 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
4062 if the user wants either, the user must explicitly specify them and we
4063 won't interfere with the user's specification. */
4064
4065 set_masks = POWERPC_MASKS;
4066 #ifdef OS_MISSING_POWERPC64
4067 if (OS_MISSING_POWERPC64)
4068 set_masks &= ~OPTION_MASK_POWERPC64;
4069 #endif
4070 #ifdef OS_MISSING_ALTIVEC
4071 if (OS_MISSING_ALTIVEC)
4072 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
4073 | OTHER_VSX_VECTOR_MASKS);
4074 #endif
4075
4076 /* Don't let the processor default override options given explicitly. */
4077 set_masks &= ~rs6000_isa_flags_explicit;
4078
4079 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
4080 the cpu in a target attribute or pragma, but did not specify a tuning
4081 option, use the cpu for the tuning option rather than the option specified
4082 with -mtune on the command line. Process a '--with-cpu' configuration
4083 request as an implicit -mcpu. */
4084 if (rs6000_cpu_index >= 0)
4085 cpu_index = rs6000_cpu_index;
4086 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
4087 cpu_index = main_target_opt->x_rs6000_cpu_index;
4088 else if (OPTION_TARGET_CPU_DEFAULT)
4089 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
4090
4091 if (cpu_index >= 0)
4092 {
4093 const char *unavailable_cpu = NULL;
4094 switch (processor_target_table[cpu_index].processor)
4095 {
4096 #ifndef HAVE_AS_POWER9
4097 case PROCESSOR_POWER9:
4098 unavailable_cpu = "power9";
4099 break;
4100 #endif
4101 #ifndef HAVE_AS_POWER8
4102 case PROCESSOR_POWER8:
4103 unavailable_cpu = "power8";
4104 break;
4105 #endif
4106 #ifndef HAVE_AS_POPCNTD
4107 case PROCESSOR_POWER7:
4108 unavailable_cpu = "power7";
4109 break;
4110 #endif
4111 #ifndef HAVE_AS_DFP
4112 case PROCESSOR_POWER6:
4113 unavailable_cpu = "power6";
4114 break;
4115 #endif
4116 #ifndef HAVE_AS_POPCNTB
4117 case PROCESSOR_POWER5:
4118 unavailable_cpu = "power5";
4119 break;
4120 #endif
4121 default:
4122 break;
4123 }
4124 if (unavailable_cpu)
4125 {
4126 cpu_index = -1;
4127 warning (0, "will not generate %qs instructions because "
4128 "assembler lacks %qs support", unavailable_cpu,
4129 unavailable_cpu);
4130 }
4131 }
4132
4133 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
4134 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4135 with those from the cpu, except for options that were explicitly set. If
4136 we don't have a cpu, do not override the target bits set in
4137 TARGET_DEFAULT. */
4138 if (cpu_index >= 0)
4139 {
4140 rs6000_cpu_index = cpu_index;
4141 rs6000_isa_flags &= ~set_masks;
4142 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4143 & set_masks);
4144 }
4145 else
4146 {
4147 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4148 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4149 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
4150 to using rs6000_isa_flags, we need to do the initialization here.
4151
4152 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4153 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4154 HOST_WIDE_INT flags;
4155 if (TARGET_DEFAULT)
4156 flags = TARGET_DEFAULT;
4157 else
4158 {
4159 /* PowerPC 64-bit LE requires at least ISA 2.07. */
4160 const char *default_cpu = (!TARGET_POWERPC64
4161 ? "powerpc"
4162 : (BYTES_BIG_ENDIAN
4163 ? "powerpc64"
4164 : "powerpc64le"));
4165 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
4166 flags = processor_target_table[default_cpu_index].target_enable;
4167 }
4168 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4169 }
4170
4171 if (rs6000_tune_index >= 0)
4172 tune_index = rs6000_tune_index;
4173 else if (cpu_index >= 0)
4174 rs6000_tune_index = tune_index = cpu_index;
4175 else
4176 {
4177 size_t i;
4178 enum processor_type tune_proc
4179 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4180
4181 tune_index = -1;
4182 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4183 if (processor_target_table[i].processor == tune_proc)
4184 {
4185 tune_index = i;
4186 break;
4187 }
4188 }
4189
4190 if (cpu_index >= 0)
4191 rs6000_cpu = processor_target_table[cpu_index].processor;
4192 else
4193 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
4194
4195 gcc_assert (tune_index >= 0);
4196 rs6000_tune = processor_target_table[tune_index].processor;
4197
4198 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4199 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4200 || rs6000_cpu == PROCESSOR_PPCE5500)
4201 {
4202 if (TARGET_ALTIVEC)
4203 error ("AltiVec not supported in this target");
4204 }
4205
4206 /* If we are optimizing big endian systems for space, use the load/store
4207 multiple instructions. */
4208 if (BYTES_BIG_ENDIAN && optimize_size)
4209 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
4210
4211 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
4212 because the hardware doesn't support the instructions used in little
4213 endian mode, and using them causes an alignment trap. The 750 does not
4214 cause an alignment trap (except when the access is unaligned). */
4215
4216 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
4217 {
4218 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4219 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4220 warning (0, "%qs is not supported on little endian systems",
4221 "-mmultiple");
4222 }
4223
4224 /* If little-endian, default to -mstrict-align on older processors.
4225 Testing for htm matches power8 and later. */
4226 if (!BYTES_BIG_ENDIAN
4227 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4228 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4229
4230 if (!rs6000_fold_gimple)
4231 fprintf (stderr,
4232 "gimple folding of rs6000 builtins has been disabled.\n");
4233
4234 /* Add some warnings for VSX. */
4235 if (TARGET_VSX)
4236 {
4237 const char *msg = NULL;
4238 if (!TARGET_HARD_FLOAT)
4239 {
4240 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4241 msg = N_("-mvsx requires hardware floating point");
4242 else
4243 {
4244 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4245 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4246 }
4247 }
4248 else if (TARGET_AVOID_XFORM > 0)
4249 msg = N_("-mvsx needs indexed addressing");
4250 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4251 & OPTION_MASK_ALTIVEC))
4252 {
4253 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4254 msg = N_("-mvsx and -mno-altivec are incompatible");
4255 else
4256 msg = N_("-mno-altivec disables vsx");
4257 }
4258
4259 if (msg)
4260 {
4261 warning (0, msg);
4262 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4263 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4264 }
4265 }
4266
4267 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4268 the -mcpu setting to enable options that conflict. */
4269 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4270 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4271 | OPTION_MASK_ALTIVEC
4272 | OPTION_MASK_VSX)) != 0)
4273 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4274 | OPTION_MASK_DIRECT_MOVE)
4275 & ~rs6000_isa_flags_explicit);
4276
4277 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4278 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4279
4280 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4281 off all of the options that depend on those flags. */
4282 ignore_masks = rs6000_disable_incompatible_switches ();
4283
4284 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4285 unless the user explicitly used the -mno-<option> to disable the code. */
4286 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4287 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4288 else if (TARGET_P9_MINMAX)
4289 {
4290 if (cpu_index >= 0)
4291 {
4292 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4293 {
4294 /* legacy behavior: allow -mcpu=power9 with certain
4295 capabilities explicitly disabled. */
4296 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4297 }
4298 else
4299 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4300 "for <xxx> less than power9", "-mcpu");
4301 }
4302 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4303 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4304 & rs6000_isa_flags_explicit))
4305 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4306 were explicitly cleared. */
4307 error ("%qs incompatible with explicitly disabled options",
4308 "-mpower9-minmax");
4309 else
4310 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4311 }
4312 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4313 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4314 else if (TARGET_VSX)
4315 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4316 else if (TARGET_POPCNTD)
4317 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4318 else if (TARGET_DFP)
4319 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4320 else if (TARGET_CMPB)
4321 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4322 else if (TARGET_FPRND)
4323 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4324 else if (TARGET_POPCNTB)
4325 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4326 else if (TARGET_ALTIVEC)
4327 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4328
4329 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4330 {
4331 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4332 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4333 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4334 }
4335
4336 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4337 {
4338 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4339 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4340 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4341 }
4342
4343 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4344 {
4345 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4346 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4347 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4348 }
4349
4350 if (TARGET_P8_VECTOR && !TARGET_VSX)
4351 {
4352 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4353 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4354 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4355 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4356 {
4357 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4358 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4359 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4360 }
4361 else
4362 {
4363 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4364 not explicit. */
4365 rs6000_isa_flags |= OPTION_MASK_VSX;
4366 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4367 }
4368 }
4369
4370 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4371 {
4372 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4373 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4374 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4375 }
4376
4377 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4378 silently turn off quad memory mode. */
4379 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4380 {
4381 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4382 warning (0, N_("-mquad-memory requires 64-bit mode"));
4383
4384 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4385 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4386
4387 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4388 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4389 }
4390
4391 /* Non-atomic quad memory load/store are disabled for little endian, since
4392 the words are reversed, but atomic operations can still be done by
4393 swapping the words. */
4394 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4395 {
4396 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4397 warning (0, N_("-mquad-memory is not available in little endian "
4398 "mode"));
4399
4400 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4401 }
4402
4403 /* Assume that if the user asked for normal quad memory instructions, they
4404 want the atomic versions as well, unless they explicitly told us not to
4405 use quad word atomic instructions. */
4406 if (TARGET_QUAD_MEMORY
4407 && !TARGET_QUAD_MEMORY_ATOMIC
4408 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4409 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
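
/* Illustrative aside (an assumed lowering, not quoted from the
   expanders): the atomic quad instructions allow a 128-bit
   compare-and-swap loop along the lines of

	retry:
	  lqarx  r10,0,r3	# load quad word and reserve
	  ...			# compare/compute into r10:r11
	  stqcx. r10,0,r3	# store quad word if still reserved
	  bne-   retry

   which is why 32-bit mode, lacking lqarx/stqcx., clears the masks.  */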
4410
4411 /* If we can shrink-wrap the TOC register save separately, then use
4412 -msave-toc-indirect unless explicitly disabled. */
4413 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4414 && flag_shrink_wrap_separate
4415 && optimize_function_for_speed_p (cfun))
4416 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4417
4418 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4419 generating power8 instructions. */
4420 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4421 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4422 & OPTION_MASK_P8_FUSION);
4423
4424 /* Setting additional fusion flags turns on base fusion. */
4425 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4426 {
4427 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4428 {
4429 if (TARGET_P8_FUSION_SIGN)
4430 error ("%qs requires %qs", "-mpower8-fusion-sign",
4431 "-mpower8-fusion");
4432
4433 if (TARGET_TOC_FUSION)
4434 error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");
4435
4436 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4437 }
4438 else
4439 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4440 }
4441
4442 /* Power9 fusion is a superset of power8 fusion. */
4443 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4444 {
4445 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4446 {
4447 /* We prefer to not mention undocumented options in
4448 error messages. However, if users have managed to select
4449 power9-fusion without selecting power8-fusion, they
4450 already know about undocumented flags. */
4451 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4452 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4453 }
4454 else
4455 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4456 }
4457
4458 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4459 generating power9 instructions. */
4460 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4461 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4462 & OPTION_MASK_P9_FUSION);
4463
4464 /* Power8 does not fuse sign extended loads with the addis. If we are
4465 optimizing at high levels for speed, convert a sign extended load into a
4466 zero extending load, and an explicit sign extension. */
4467 if (TARGET_P8_FUSION
4468 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4469 && optimize_function_for_speed_p (cfun)
4470 && optimize >= 3)
4471 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
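
/* Illustrative aside (assumed transformation): power8 fuses addis with
   lhz/lwz but not with the sign extending lha/lwa, so at -O3 a load

	addis r10,r2,var@toc@ha
	lha   r9,var@toc@l(r10)

   is emitted instead as a fusable zero extend plus an explicit extend:

	addis r10,r2,var@toc@ha
	lhz   r9,var@toc@l(r10)
	extsh r9,r9  */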
4472
4473 /* TOC fusion requires 64-bit and medium/large code model. */
4474 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4475 {
4476 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4477 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4478 warning (0, N_("-mtoc-fusion requires 64-bit"));
4479 }
4480
4481 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4482 {
4483 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4484 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4485 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4486 }
4487
4488 /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
4489 model. */
4490 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4491 && (TARGET_CMODEL != CMODEL_SMALL)
4492 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4493 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4494
4495 /* ISA 3.0 vector instructions include ISA 2.07. */
4496 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4497 {
4498 /* We prefer to not mention undocumented options in
4499 error messages. However, if users have managed to select
4500 power9-vector without selecting power8-vector, they
4501 already know about undocumented flags. */
4502 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4503 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4504 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4505 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4506 {
4507 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4508 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4509 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4510 }
4511 else
4512 {
4513 /* OPTION_MASK_P9_VECTOR is explicit and
4514 OPTION_MASK_P8_VECTOR is not explicit. */
4515 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4516 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4517 }
4518 }
4519
4520 /* Set -mallow-movmisalign explicitly to on if we have full ISA 2.07
4521 support. If we only have ISA 2.06 support, and the user did not specify
4522 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4523 but we don't enable the full vectorization support. */
4524 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4525 TARGET_ALLOW_MOVMISALIGN = 1;
4526
4527 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4528 {
4529 if (TARGET_ALLOW_MOVMISALIGN > 0
4530 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4531 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4532
4533 TARGET_ALLOW_MOVMISALIGN = 0;
4534 }
4535
4536 /* Determine when unaligned vector accesses are permitted, and when
4537 they are preferred over masked Altivec loads. Note that if
4538 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4539 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4540 not true. */
4541 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4542 {
4543 if (!TARGET_VSX)
4544 {
4545 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4546 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4547
4548 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4549 }
4550
4551 else if (!TARGET_ALLOW_MOVMISALIGN)
4552 {
4553 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4554 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4555 "-mallow-movmisalign");
4556
4557 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4558 }
4559 }
4560
4561 /* Set long double size before the IEEE 128-bit tests. */
4562 if (!global_options_set.x_rs6000_long_double_type_size)
4563 {
4564 if (main_target_opt != NULL
4565 && (main_target_opt->x_rs6000_long_double_type_size
4566 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4567 error ("target attribute or pragma changes long double size");
4568 else
4569 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4570 }
4571
4572 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4573 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4574 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4575 those systems will not pick up this default. Warn if the user changes the
4576 default unless either the user used the -Wno-psabi option, or the compiler
4577 was built to enable multilibs to switch between the two long double
4578 types. */
4579 if (!global_options_set.x_rs6000_ieeequad)
4580 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4581
4582 else if (!TARGET_IEEEQUAD_MULTILIB
4583 && rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT
4584 && TARGET_LONG_DOUBLE_128)
4585 {
4586 static bool warned_change_long_double;
4587 if (!warned_change_long_double)
4588 {
4589 warned_change_long_double = true;
4590 if (TARGET_IEEEQUAD)
4591 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4592 else
4593 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4594 }
4595 }
4596
4597 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4598 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4599 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4600 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4601 the keyword and the type. */
4602 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4603
4604 /* IEEE 128-bit floating point requires VSX support. */
4605 if (TARGET_FLOAT128_KEYWORD)
4606 {
4607 if (!TARGET_VSX)
4608 {
4609 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4610 error ("%qs requires VSX support", "-mfloat128");
4611
4612 TARGET_FLOAT128_TYPE = 0;
4613 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4614 | OPTION_MASK_FLOAT128_HW);
4615 }
4616 else if (!TARGET_FLOAT128_TYPE)
4617 {
4618 TARGET_FLOAT128_TYPE = 1;
4619 warning (0, "The -mfloat128 option may not be fully supported");
4620 }
4621 }
4622
4623 /* Enable the __float128 keyword under Linux by default. */
4624 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4625 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4626 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4627
4628 /* If we are supporting the float128 type and have full ISA 3.0 support,
4629 enable -mfloat128-hardware by default. However, don't enable the
4630 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4631 because sometimes the compiler wants to put things in an integer
4632 container, and if we don't have __int128 support, it is impossible. */
4633 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4634 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4635 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4636 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4637
4638 if (TARGET_FLOAT128_HW
4639 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4640 {
4641 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4642 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4643
4644 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4645 }
4646
4647 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4648 {
4649 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4650 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4651
4652 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4653 }
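
/* Illustrative usage (hypothetical user code): once these defaults are
   settled, -mfloat128 exposes the keyword and -mfloat128-hardware maps
   its arithmetic onto the ISA 3.0 quad-precision instructions:

	__float128
	square (__float128 x)
	{
	  return x * x;		-- xsmulqp with -mfloat128-hardware,
	}			-- a __mulkf3 libcall without it  */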
4654
4655 /* Print the options after updating the defaults. */
4656 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4657 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4658
4659 /* E500mc does "better" if we inline more aggressively. Respect the
4660 user's opinion, though. */
4661 if (rs6000_block_move_inline_limit == 0
4662 && (rs6000_tune == PROCESSOR_PPCE500MC
4663 || rs6000_tune == PROCESSOR_PPCE500MC64
4664 || rs6000_tune == PROCESSOR_PPCE5500
4665 || rs6000_tune == PROCESSOR_PPCE6500))
4666 rs6000_block_move_inline_limit = 128;
4667
4668 /* store_one_arg depends on expand_block_move to handle at least the
4669 size of reg_parm_stack_space. */
4670 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4671 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
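
/* Illustrative aside (assumed behavior): rs6000_block_move_inline_limit
   is the byte threshold below which expand_block_move expands a copy
   inline, so with the 64-bit floor of 64 bytes a memcpy (dst, src, 48)
   becomes a short load/store sequence rather than a libc call.  */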
4672
4673 if (global_init_p)
4674 {
4675 /* If the appropriate debug option is enabled, replace the target hooks
4676 with debug versions that call the real version and then prints
4677 debugging information. */
4678 if (TARGET_DEBUG_COST)
4679 {
4680 targetm.rtx_costs = rs6000_debug_rtx_costs;
4681 targetm.address_cost = rs6000_debug_address_cost;
4682 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4683 }
4684
4685 if (TARGET_DEBUG_ADDR)
4686 {
4687 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4688 targetm.legitimize_address = rs6000_debug_legitimize_address;
4689 rs6000_secondary_reload_class_ptr
4690 = rs6000_debug_secondary_reload_class;
4691 targetm.secondary_memory_needed
4692 = rs6000_debug_secondary_memory_needed;
4693 targetm.can_change_mode_class
4694 = rs6000_debug_can_change_mode_class;
4695 rs6000_preferred_reload_class_ptr
4696 = rs6000_debug_preferred_reload_class;
4697 rs6000_legitimize_reload_address_ptr
4698 = rs6000_debug_legitimize_reload_address;
4699 rs6000_mode_dependent_address_ptr
4700 = rs6000_debug_mode_dependent_address;
4701 }
4702
4703 if (rs6000_veclibabi_name)
4704 {
4705 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4706 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4707 else
4708 {
4709 error ("unknown vectorization library ABI type (%qs) for "
4710 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4711 ret = false;
4712 }
4713 }
4714 }
4715
4716 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4717 target attribute or pragma which automatically enables both options,
4718 unless the altivec ABI was set. This is set by default for 64-bit, but
4719 not for 32-bit. */
4720 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4721 {
4722 TARGET_FLOAT128_TYPE = 0;
4723 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4724 | OPTION_MASK_FLOAT128_KEYWORD)
4725 & ~rs6000_isa_flags_explicit);
4726 }
4727
4728 /* Enable Altivec ABI for AIX -maltivec. */
4729 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4730 {
4731 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4732 error ("target attribute or pragma changes AltiVec ABI");
4733 else
4734 rs6000_altivec_abi = 1;
4735 }
4736
4737 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4738 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4739 be explicitly overridden in either case. */
4740 if (TARGET_ELF)
4741 {
4742 if (!global_options_set.x_rs6000_altivec_abi
4743 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4744 {
4745 if (main_target_opt != NULL
4746 && !main_target_opt->x_rs6000_altivec_abi)
4747 error ("target attribute or pragma changes AltiVec ABI");
4748 else
4749 rs6000_altivec_abi = 1;
4750 }
4751 }
4752
4753 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4754 So far, the only darwin64 targets are also MACH-O. */
4755 if (TARGET_MACHO
4756 && DEFAULT_ABI == ABI_DARWIN
4757 && TARGET_64BIT)
4758 {
4759 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4760 error ("target attribute or pragma changes darwin64 ABI");
4761 else
4762 {
4763 rs6000_darwin64_abi = 1;
4764 /* Default to natural alignment, for better performance. */
4765 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4766 }
4767 }
4768
4769 /* Place FP constants in the constant pool instead of the TOC
4770 if section anchors are enabled. */
4771 if (flag_section_anchors
4772 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4773 TARGET_NO_FP_IN_TOC = 1;
4774
4775 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4776 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4777
4778 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4779 SUBTARGET_OVERRIDE_OPTIONS;
4780 #endif
4781 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4782 SUBSUBTARGET_OVERRIDE_OPTIONS;
4783 #endif
4784 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4785 SUB3TARGET_OVERRIDE_OPTIONS;
4786 #endif
4787
4788 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4789 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4790
4791 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4792 && rs6000_tune != PROCESSOR_POWER5
4793 && rs6000_tune != PROCESSOR_POWER6
4794 && rs6000_tune != PROCESSOR_POWER7
4795 && rs6000_tune != PROCESSOR_POWER8
4796 && rs6000_tune != PROCESSOR_POWER9
4797 && rs6000_tune != PROCESSOR_PPCA2
4798 && rs6000_tune != PROCESSOR_CELL
4799 && rs6000_tune != PROCESSOR_PPC476);
4800 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4801 || rs6000_tune == PROCESSOR_POWER5
4802 || rs6000_tune == PROCESSOR_POWER7
4803 || rs6000_tune == PROCESSOR_POWER8);
4804 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4805 || rs6000_tune == PROCESSOR_POWER5
4806 || rs6000_tune == PROCESSOR_POWER6
4807 || rs6000_tune == PROCESSOR_POWER7
4808 || rs6000_tune == PROCESSOR_POWER8
4809 || rs6000_tune == PROCESSOR_POWER9
4810 || rs6000_tune == PROCESSOR_PPCE500MC
4811 || rs6000_tune == PROCESSOR_PPCE500MC64
4812 || rs6000_tune == PROCESSOR_PPCE5500
4813 || rs6000_tune == PROCESSOR_PPCE6500);
4814
4815 /* Allow debug switches to override the above settings. These are set to -1
4816 in rs6000.opt to indicate the user hasn't directly set the switch. */
4817 if (TARGET_ALWAYS_HINT >= 0)
4818 rs6000_always_hint = TARGET_ALWAYS_HINT;
4819
4820 if (TARGET_SCHED_GROUPS >= 0)
4821 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4822
4823 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4824 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4825
4826 rs6000_sched_restricted_insns_priority
4827 = (rs6000_sched_groups ? 1 : 0);
4828
4829 /* Handle -msched-costly-dep option. */
4830 rs6000_sched_costly_dep
4831 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4832
4833 if (rs6000_sched_costly_dep_str)
4834 {
4835 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4836 rs6000_sched_costly_dep = no_dep_costly;
4837 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4838 rs6000_sched_costly_dep = all_deps_costly;
4839 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4840 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4841 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4842 rs6000_sched_costly_dep = store_to_load_dep_costly;
4843 else
4844 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4845 atoi (rs6000_sched_costly_dep_str));
4846 }
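  /* For illustration: -msched-costly-dep=true_store_to_load selects
     true_store_to_load_dep_costly above, while a bare number such as
     -msched-costly-dep=20 falls through to the atoi case and is used as a
     numeric cost threshold.  (These values are example inputs only.)  */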
4847
4848 /* Handle -minsert-sched-nops option. */
4849 rs6000_sched_insert_nops
4850 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4851
4852 if (rs6000_sched_insert_nops_str)
4853 {
4854 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4855 rs6000_sched_insert_nops = sched_finish_none;
4856 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4857 rs6000_sched_insert_nops = sched_finish_pad_groups;
4858 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4859 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4860 else
4861 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4862 atoi (rs6000_sched_insert_nops_str));
4863 }
4864
4865   /* Handle the stack protector.  */
4866 if (!global_options_set.x_rs6000_stack_protector_guard)
4867 #ifdef TARGET_THREAD_SSP_OFFSET
4868 rs6000_stack_protector_guard = SSP_TLS;
4869 #else
4870 rs6000_stack_protector_guard = SSP_GLOBAL;
4871 #endif
4872
4873 #ifdef TARGET_THREAD_SSP_OFFSET
4874 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4875 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4876 #endif
4877
4878 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4879 {
4880 char *endp;
4881 const char *str = rs6000_stack_protector_guard_offset_str;
4882
4883 errno = 0;
4884 long offset = strtol (str, &endp, 0);
4885 if (!*str || *endp || errno)
4886 error ("%qs is not a valid number in %qs", str,
4887 "-mstack-protector-guard-offset=");
4888
4889 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4890 || (TARGET_64BIT && (offset & 3)))
4891 error ("%qs is not a valid offset in %qs", str,
4892 "-mstack-protector-guard-offset=");
4893
4894 rs6000_stack_protector_guard_offset = offset;
4895 }
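  /* Worked example (hypothetical input): -mstack-protector-guard-offset=0x7010
     is accepted, since strtol is called with base 0 (so hex is allowed),
     0x7010 lies within the signed 16-bit range [-0x8000, 0x7fff], and
     0x7010 & 3 == 0 satisfies the 4-byte alignment check for 64-bit.  */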
4896
4897 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4898 {
4899 const char *str = rs6000_stack_protector_guard_reg_str;
4900 int reg = decode_reg_name (str);
4901
4902 if (!IN_RANGE (reg, 1, 31))
4903 error ("%qs is not a valid base register in %qs", str,
4904 "-mstack-protector-guard-reg=");
4905
4906 rs6000_stack_protector_guard_reg = reg;
4907 }
4908
4909 if (rs6000_stack_protector_guard == SSP_TLS
4910 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4911 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4912
4913 if (global_init_p)
4914 {
4915 #ifdef TARGET_REGNAMES
4916 /* If the user desires alternate register names, copy in the
4917 alternate names now. */
4918 if (TARGET_REGNAMES)
4919 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4920 #endif
4921
4922 /* Set aix_struct_return last, after the ABI is determined.
4923 If -maix-struct-return or -msvr4-struct-return was explicitly
4924 used, don't override with the ABI default. */
4925 if (!global_options_set.x_aix_struct_return)
4926 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4927
4928 #if 0
4929 /* IBM XL compiler defaults to unsigned bitfields. */
4930 if (TARGET_XL_COMPAT)
4931 flag_signed_bitfields = 0;
4932 #endif
4933
4934 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4935 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4936
4937 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4938
4939 /* We can only guarantee the availability of DI pseudo-ops when
4940 assembling for 64-bit targets. */
4941 if (!TARGET_64BIT)
4942 {
4943 targetm.asm_out.aligned_op.di = NULL;
4944 targetm.asm_out.unaligned_op.di = NULL;
4945 }
4946
4947
4948 /* Set branch target alignment, if not optimizing for size. */
4949 if (!optimize_size)
4950 {
4951 	  /* Cell wants to be aligned 8-byte for dual issue.  Titan wants to be
4952 	     aligned 8-byte to avoid misprediction by the branch predictor.  */
4953 if (rs6000_tune == PROCESSOR_TITAN
4954 || rs6000_tune == PROCESSOR_CELL)
4955 {
4956 if (align_functions <= 0)
4957 align_functions = 8;
4958 if (align_jumps <= 0)
4959 align_jumps = 8;
4960 if (align_loops <= 0)
4961 align_loops = 8;
4962 }
4963 if (rs6000_align_branch_targets)
4964 {
4965 if (align_functions <= 0)
4966 align_functions = 16;
4967 if (align_jumps <= 0)
4968 align_jumps = 16;
4969 if (align_loops <= 0)
4970 {
4971 can_override_loop_align = 1;
4972 align_loops = 16;
4973 }
4974 }
4975 if (align_jumps_max_skip <= 0)
4976 align_jumps_max_skip = 15;
4977 if (align_loops_max_skip <= 0)
4978 align_loops_max_skip = 15;
4979 }
4980
4981 /* Arrange to save and restore machine status around nested functions. */
4982 init_machine_status = rs6000_init_machine_status;
4983
4984 /* We should always be splitting complex arguments, but we can't break
4985 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4986 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4987 targetm.calls.split_complex_arg = NULL;
4988
4989 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4990 if (DEFAULT_ABI == ABI_AIX)
4991 targetm.calls.custom_function_descriptors = 0;
4992 }
4993
4994 /* Initialize rs6000_cost with the appropriate target costs. */
4995 if (optimize_size)
4996 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4997 else
4998 switch (rs6000_tune)
4999 {
5000 case PROCESSOR_RS64A:
5001 rs6000_cost = &rs64a_cost;
5002 break;
5003
5004 case PROCESSOR_MPCCORE:
5005 rs6000_cost = &mpccore_cost;
5006 break;
5007
5008 case PROCESSOR_PPC403:
5009 rs6000_cost = &ppc403_cost;
5010 break;
5011
5012 case PROCESSOR_PPC405:
5013 rs6000_cost = &ppc405_cost;
5014 break;
5015
5016 case PROCESSOR_PPC440:
5017 rs6000_cost = &ppc440_cost;
5018 break;
5019
5020 case PROCESSOR_PPC476:
5021 rs6000_cost = &ppc476_cost;
5022 break;
5023
5024 case PROCESSOR_PPC601:
5025 rs6000_cost = &ppc601_cost;
5026 break;
5027
5028 case PROCESSOR_PPC603:
5029 rs6000_cost = &ppc603_cost;
5030 break;
5031
5032 case PROCESSOR_PPC604:
5033 rs6000_cost = &ppc604_cost;
5034 break;
5035
5036 case PROCESSOR_PPC604e:
5037 rs6000_cost = &ppc604e_cost;
5038 break;
5039
5040 case PROCESSOR_PPC620:
5041 rs6000_cost = &ppc620_cost;
5042 break;
5043
5044 case PROCESSOR_PPC630:
5045 rs6000_cost = &ppc630_cost;
5046 break;
5047
5048 case PROCESSOR_CELL:
5049 rs6000_cost = &ppccell_cost;
5050 break;
5051
5052 case PROCESSOR_PPC750:
5053 case PROCESSOR_PPC7400:
5054 rs6000_cost = &ppc750_cost;
5055 break;
5056
5057 case PROCESSOR_PPC7450:
5058 rs6000_cost = &ppc7450_cost;
5059 break;
5060
5061 case PROCESSOR_PPC8540:
5062 case PROCESSOR_PPC8548:
5063 rs6000_cost = &ppc8540_cost;
5064 break;
5065
5066 case PROCESSOR_PPCE300C2:
5067 case PROCESSOR_PPCE300C3:
5068 rs6000_cost = &ppce300c2c3_cost;
5069 break;
5070
5071 case PROCESSOR_PPCE500MC:
5072 rs6000_cost = &ppce500mc_cost;
5073 break;
5074
5075 case PROCESSOR_PPCE500MC64:
5076 rs6000_cost = &ppce500mc64_cost;
5077 break;
5078
5079 case PROCESSOR_PPCE5500:
5080 rs6000_cost = &ppce5500_cost;
5081 break;
5082
5083 case PROCESSOR_PPCE6500:
5084 rs6000_cost = &ppce6500_cost;
5085 break;
5086
5087 case PROCESSOR_TITAN:
5088 rs6000_cost = &titan_cost;
5089 break;
5090
5091 case PROCESSOR_POWER4:
5092 case PROCESSOR_POWER5:
5093 rs6000_cost = &power4_cost;
5094 break;
5095
5096 case PROCESSOR_POWER6:
5097 rs6000_cost = &power6_cost;
5098 break;
5099
5100 case PROCESSOR_POWER7:
5101 rs6000_cost = &power7_cost;
5102 break;
5103
5104 case PROCESSOR_POWER8:
5105 rs6000_cost = &power8_cost;
5106 break;
5107
5108 case PROCESSOR_POWER9:
5109 rs6000_cost = &power9_cost;
5110 break;
5111
5112 case PROCESSOR_PPCA2:
5113 rs6000_cost = &ppca2_cost;
5114 break;
5115
5116 default:
5117 gcc_unreachable ();
5118 }
5119
5120 if (global_init_p)
5121 {
5122 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5123 rs6000_cost->simultaneous_prefetches,
5124 global_options.x_param_values,
5125 global_options_set.x_param_values);
5126 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5127 global_options.x_param_values,
5128 global_options_set.x_param_values);
5129 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5130 rs6000_cost->cache_line_size,
5131 global_options.x_param_values,
5132 global_options_set.x_param_values);
5133 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5134 global_options.x_param_values,
5135 global_options_set.x_param_values);
5136
5137 /* Increase loop peeling limits based on performance analysis. */
5138 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5139 global_options.x_param_values,
5140 global_options_set.x_param_values);
5141 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5142 global_options.x_param_values,
5143 global_options_set.x_param_values);
5144
5145 /* Use the 'model' -fsched-pressure algorithm by default. */
5146 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5147 SCHED_PRESSURE_MODEL,
5148 global_options.x_param_values,
5149 global_options_set.x_param_values);
5150
5151 /* If using typedef char *va_list, signal that
5152 __builtin_va_start (&ap, 0) can be optimized to
5153 ap = __builtin_next_arg (0). */
5154 if (DEFAULT_ABI != ABI_V4)
5155 targetm.expand_builtin_va_start = NULL;
5156 }
5157
5158 /* If not explicitly specified via option, decide whether to generate indexed
5159 load/store instructions. A value of -1 indicates that the
5160 initial value of this variable has not been overwritten. During
5161 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
5162 if (TARGET_AVOID_XFORM == -1)
5163 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5164      DERAT mispredict penalty.  However, the LVE and STVE altivec instructions
5165 need indexed accesses and the type used is the scalar type of the element
5166 being loaded or stored. */
5167 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
5168 && !TARGET_ALTIVEC);
5169
5170 /* Set the -mrecip options. */
5171 if (rs6000_recip_name)
5172 {
5173 char *p = ASTRDUP (rs6000_recip_name);
5174 char *q;
5175 unsigned int mask, i;
5176 bool invert;
5177
5178 while ((q = strtok (p, ",")) != NULL)
5179 {
5180 p = NULL;
5181 if (*q == '!')
5182 {
5183 invert = true;
5184 q++;
5185 }
5186 else
5187 invert = false;
5188
5189 if (!strcmp (q, "default"))
5190 mask = ((TARGET_RECIP_PRECISION)
5191 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5192 else
5193 {
5194 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5195 if (!strcmp (q, recip_options[i].string))
5196 {
5197 mask = recip_options[i].mask;
5198 break;
5199 }
5200
5201 if (i == ARRAY_SIZE (recip_options))
5202 {
5203 error ("unknown option for %<%s=%s%>", "-mrecip", q);
5204 invert = false;
5205 mask = 0;
5206 ret = false;
5207 }
5208 }
5209
5210 if (invert)
5211 rs6000_recip_control &= ~mask;
5212 else
5213 rs6000_recip_control |= mask;
5214 }
5215 }
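  /* For illustration (assuming recip_options contains an entry named
     "rsqrtd"): -mrecip=default,!rsqrtd first ORs in the default mask chosen
     above, then the '!' prefix clears the rsqrtd bit from
     rs6000_recip_control.  */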
5216
5217 /* Set the builtin mask of the various options used that could affect which
5218 builtins were used. In the past we used target_flags, but we've run out
5219 of bits, and some options are no longer in target_flags. */
5220 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5221 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5222 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5223 rs6000_builtin_mask);
5224
5225 /* Initialize all of the registers. */
5226 rs6000_init_hard_regno_mode_ok (global_init_p);
5227
5228   /* Save the initial options in case the user uses function-specific options.  */
5229 if (global_init_p)
5230 target_option_default_node = target_option_current_node
5231 = build_target_option_node (&global_options);
5232
5233 /* If not explicitly specified via option, decide whether to generate the
5234      extra blr's required to preserve the link stack on some cpus (e.g., 476).  */
5235 if (TARGET_LINK_STACK == -1)
5236 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
5237
5238 /* Deprecate use of -mno-speculate-indirect-jumps. */
5239 if (!rs6000_speculate_indirect_jumps)
5240 warning (0, "%qs is deprecated and not recommended in any circumstances",
5241 "-mno-speculate-indirect-jumps");
5242
5243 return ret;
5244 }
5245
5246 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5247 define the target cpu type. */
5248
5249 static void
5250 rs6000_option_override (void)
5251 {
5252 (void) rs6000_option_override_internal (true);
5253 }
5254
5255 \f
5256 /* Implement targetm.vectorize.builtin_mask_for_load. */
5257 static tree
5258 rs6000_builtin_mask_for_load (void)
5259 {
5260 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5261 if ((TARGET_ALTIVEC && !TARGET_VSX)
5262 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5263 return altivec_builtin_mask_for_load;
5264 else
5265 return 0;
5266 }
5267
5268 /* Implement LOOP_ALIGN. */
5269 int
5270 rs6000_loop_align (rtx label)
5271 {
5272 basic_block bb;
5273 int ninsns;
5274
5275 /* Don't override loop alignment if -falign-loops was specified. */
5276 if (!can_override_loop_align)
5277 return align_loops_log;
5278
5279 bb = BLOCK_FOR_INSN (label);
5280   ninsns = num_loop_insns (bb->loop_father);
5281
5282   /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default.  */
5283 if (ninsns > 4 && ninsns <= 8
5284 && (rs6000_tune == PROCESSOR_POWER4
5285 || rs6000_tune == PROCESSOR_POWER5
5286 || rs6000_tune == PROCESSOR_POWER6
5287 || rs6000_tune == PROCESSOR_POWER7
5288 || rs6000_tune == PROCESSOR_POWER8))
5289 return 5;
5290 else
5291 return align_loops_log;
5292 }
5293
5294 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5295 static int
5296 rs6000_loop_align_max_skip (rtx_insn *label)
5297 {
5298 return (1 << rs6000_loop_align (label)) - 1;
5299 }
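/* Worked example: a loop of 6 insns tuned for POWER7 gets an alignment of
   2^5 = 32 bytes from rs6000_loop_align, so rs6000_loop_align_max_skip allows
   up to (1 << 5) - 1 = 31 bytes of padding to reach that boundary.  */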
5300
5301 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5302    after applying N iterations.  This routine does not determine
5303    how many iterations are required to reach the desired alignment.  */
5304
5305 static bool
5306 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5307 {
5308 if (is_packed)
5309 return false;
5310
5311 if (TARGET_32BIT)
5312 {
5313 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5314 return true;
5315
5316 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5317 return true;
5318
5319 return false;
5320 }
5321 else
5322 {
5323 if (TARGET_MACHO)
5324 return false;
5325
5326       /* Assume that all other types are naturally aligned.  CHECKME!  */
5327 return true;
5328 }
5329 }
5330
5331 /* Return true if the vector misalignment factor is supported by the
5332 target. */
5333 static bool
5334 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5335 const_tree type,
5336 int misalignment,
5337 bool is_packed)
5338 {
5339 if (TARGET_VSX)
5340 {
5341 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5342 return true;
5343
5344       /* Return false if the movmisalign pattern is not supported for this mode.  */
5345 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5346 return false;
5347
5348 if (misalignment == -1)
5349 {
5350 /* Misalignment factor is unknown at compile time but we know
5351 it's word aligned. */
5352 if (rs6000_vector_alignment_reachable (type, is_packed))
5353 {
5354 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5355
5356 if (element_size == 64 || element_size == 32)
5357 return true;
5358 }
5359
5360 return false;
5361 }
5362
5363       /* VSX supports word-aligned vectors.  */
5364 if (misalignment % 4 == 0)
5365 return true;
5366 }
5367 return false;
5368 }
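/* For illustration: without TARGET_EFFICIENT_UNALIGNED_VSX, a known
   misalignment of 8 bytes is supported (8 % 4 == 0) while one of 6 bytes is
   not; with it, any misalignment is supported.  */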
5369
5370 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5371 static int
5372 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5373 tree vectype, int misalign)
5374 {
5375 unsigned elements;
5376 tree elem_type;
5377
5378 switch (type_of_cost)
5379 {
5380 case scalar_stmt:
5381 case scalar_load:
5382 case scalar_store:
5383 case vector_stmt:
5384 case vector_load:
5385 case vector_store:
5386 case vec_to_scalar:
5387 case scalar_to_vec:
5388 case cond_branch_not_taken:
5389 return 1;
5390
5391 case vec_perm:
5392 if (TARGET_VSX)
5393 return 3;
5394 else
5395 return 1;
5396
5397 case vec_promote_demote:
5398 if (TARGET_VSX)
5399 return 4;
5400 else
5401 return 1;
5402
5403 case cond_branch_taken:
5404 return 3;
5405
5406 case unaligned_load:
5407 case vector_gather_load:
5408 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5409 return 1;
5410
5411 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5412 {
5413 elements = TYPE_VECTOR_SUBPARTS (vectype);
5414 if (elements == 2)
5415 /* Double word aligned. */
5416 return 2;
5417
5418 if (elements == 4)
5419 {
5420 switch (misalign)
5421 {
5422 case 8:
5423 /* Double word aligned. */
5424 return 2;
5425
5426 case -1:
5427 /* Unknown misalignment. */
5428 case 4:
5429 case 12:
5430 /* Word aligned. */
5431 return 22;
5432
5433 default:
5434 gcc_unreachable ();
5435 }
5436 }
5437 }
5438
5439 if (TARGET_ALTIVEC)
5440 /* Misaligned loads are not supported. */
5441 gcc_unreachable ();
5442
5443 return 2;
5444
5445 case unaligned_store:
5446 case vector_scatter_store:
5447 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5448 return 1;
5449
5450 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5451 {
5452 elements = TYPE_VECTOR_SUBPARTS (vectype);
5453 if (elements == 2)
5454 /* Double word aligned. */
5455 return 2;
5456
5457 if (elements == 4)
5458 {
5459 switch (misalign)
5460 {
5461 case 8:
5462 /* Double word aligned. */
5463 return 2;
5464
5465 case -1:
5466 /* Unknown misalignment. */
5467 case 4:
5468 case 12:
5469 /* Word aligned. */
5470 return 23;
5471
5472 default:
5473 gcc_unreachable ();
5474 }
5475 }
5476 }
5477
5478 if (TARGET_ALTIVEC)
5479 /* Misaligned stores are not supported. */
5480 gcc_unreachable ();
5481
5482 return 2;
5483
5484 case vec_construct:
5485 /* This is a rough approximation assuming non-constant elements
5486 constructed into a vector via element insertion. FIXME:
5487 vec_construct is not granular enough for uniformly good
5488 decisions. If the initialization is a splat, this is
5489 cheaper than we estimate. Improve this someday. */
5490 elem_type = TREE_TYPE (vectype);
5491 /* 32-bit vectors loaded into registers are stored as double
5492 precision, so we need 2 permutes, 2 converts, and 1 merge
5493 to construct a vector of short floats from them. */
5494 if (SCALAR_FLOAT_TYPE_P (elem_type)
5495 && TYPE_PRECISION (elem_type) == 32)
5496 return 5;
5497 /* On POWER9, integer vector types are built up in GPRs and then
5498 use a direct move (2 cycles). For POWER8 this is even worse,
5499 as we need two direct moves and a merge, and the direct moves
5500 are five cycles. */
5501 else if (INTEGRAL_TYPE_P (elem_type))
5502 {
5503 if (TARGET_P9_VECTOR)
5504 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5505 else
5506 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5507 }
5508 else
5509 /* V2DFmode doesn't need a direct move. */
5510 return 2;
5511
5512 default:
5513 gcc_unreachable ();
5514 }
5515 }
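/* Worked example: an unaligned_load of a 4-element vector with a known
   misalignment of 4 bytes costs 22 under TARGET_ALLOW_MOVMISALIGN on VSX,
   but only 1 when TARGET_EFFICIENT_UNALIGNED_VSX is set.  */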
5516
5517 /* Implement targetm.vectorize.preferred_simd_mode. */
5518
5519 static machine_mode
5520 rs6000_preferred_simd_mode (scalar_mode mode)
5521 {
5522 if (TARGET_VSX)
5523 switch (mode)
5524 {
5525 case E_DFmode:
5526 return V2DFmode;
5527 default:;
5528 }
5529 if (TARGET_ALTIVEC || TARGET_VSX)
5530 switch (mode)
5531 {
5532 case E_SFmode:
5533 return V4SFmode;
5534 case E_TImode:
5535 return V1TImode;
5536 case E_DImode:
5537 return V2DImode;
5538 case E_SImode:
5539 return V4SImode;
5540 case E_HImode:
5541 return V8HImode;
5542 case E_QImode:
5543 return V16QImode;
5544 default:;
5545 }
5546 return word_mode;
5547 }
5548
5549 typedef struct _rs6000_cost_data
5550 {
5551 struct loop *loop_info;
5552 unsigned cost[3];
5553 } rs6000_cost_data;
5554
5555 /* Test for likely overcommitment of vector hardware resources. If a
5556 loop iteration is relatively large, and too large a percentage of
5557 instructions in the loop are vectorized, the cost model may not
5558 adequately reflect delays from unavailable vector resources.
5559 Penalize the loop body cost for this case. */
5560
5561 static void
5562 rs6000_density_test (rs6000_cost_data *data)
5563 {
5564 const int DENSITY_PCT_THRESHOLD = 85;
5565 const int DENSITY_SIZE_THRESHOLD = 70;
5566 const int DENSITY_PENALTY = 10;
5567 struct loop *loop = data->loop_info;
5568 basic_block *bbs = get_loop_body (loop);
5569 int nbbs = loop->num_nodes;
5570 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5571 int i, density_pct;
5572
5573 for (i = 0; i < nbbs; i++)
5574 {
5575 basic_block bb = bbs[i];
5576 gimple_stmt_iterator gsi;
5577
5578 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5579 {
5580 gimple *stmt = gsi_stmt (gsi);
5581 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5582
5583 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5584 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5585 not_vec_cost++;
5586 }
5587 }
5588
5589 free (bbs);
5590 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5591
5592 if (density_pct > DENSITY_PCT_THRESHOLD
5593 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5594 {
5595 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5596 if (dump_enabled_p ())
5597 dump_printf_loc (MSG_NOTE, vect_location,
5598 "density %d%%, cost %d exceeds threshold, penalizing "
5599 "loop body cost by %d%%", density_pct,
5600 vec_cost + not_vec_cost, DENSITY_PENALTY);
5601 }
5602 }
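/* Worked example (hypothetical counts): with vec_cost = 90 and
   not_vec_cost = 10, density_pct = 90 > 85 and the total size 100 > 70, so
   the vectorized body cost is scaled to 90 * 110 / 100 = 99.  */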
5603
5604 /* Implement targetm.vectorize.init_cost. */
5605
5606 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5607 instruction is needed by the vectorization. */
5608 static bool rs6000_vect_nonmem;
5609
5610 static void *
5611 rs6000_init_cost (struct loop *loop_info)
5612 {
5613 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5614 data->loop_info = loop_info;
5615 data->cost[vect_prologue] = 0;
5616 data->cost[vect_body] = 0;
5617 data->cost[vect_epilogue] = 0;
5618 rs6000_vect_nonmem = false;
5619 return data;
5620 }
5621
5622 /* Implement targetm.vectorize.add_stmt_cost. */
5623
5624 static unsigned
5625 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5626 struct _stmt_vec_info *stmt_info, int misalign,
5627 enum vect_cost_model_location where)
5628 {
5629 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5630 unsigned retval = 0;
5631
5632 if (flag_vect_cost_model)
5633 {
5634 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5635 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5636 misalign);
5637 /* Statements in an inner loop relative to the loop being
5638 vectorized are weighted more heavily. The value here is
5639 arbitrary and could potentially be improved with analysis. */
5640 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5641 count *= 50; /* FIXME. */
5642
5643 retval = (unsigned) (count * stmt_cost);
5644 cost_data->cost[where] += retval;
5645
5646 /* Check whether we're doing something other than just a copy loop.
5647 Not all such loops may be profitably vectorized; see
5648 rs6000_finish_cost. */
5649 if ((kind == vec_to_scalar || kind == vec_perm
5650 || kind == vec_promote_demote || kind == vec_construct
5651 || kind == scalar_to_vec)
5652 || (where == vect_body && kind == vector_stmt))
5653 rs6000_vect_nonmem = true;
5654 }
5655
5656 return retval;
5657 }
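/* For illustration: a vect_body statement of cost 1 with count == 2 that
   lies in an inner loop is charged 2 * 50 * 1 = 100 units, reflecting the
   arbitrary 50x inner-loop weight above.  */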
5658
5659 /* Implement targetm.vectorize.finish_cost. */
5660
5661 static void
5662 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5663 unsigned *body_cost, unsigned *epilogue_cost)
5664 {
5665 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5666
5667 if (cost_data->loop_info)
5668 rs6000_density_test (cost_data);
5669
5670 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5671 that require versioning for any reason. The vectorization is at
5672 best a wash inside the loop, and the versioning checks make
5673 profitability highly unlikely and potentially quite harmful. */
5674 if (cost_data->loop_info)
5675 {
5676 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5677 if (!rs6000_vect_nonmem
5678 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5679 && LOOP_REQUIRES_VERSIONING (vec_info))
5680 cost_data->cost[vect_body] += 10000;
5681 }
5682
5683 *prologue_cost = cost_data->cost[vect_prologue];
5684 *body_cost = cost_data->cost[vect_body];
5685 *epilogue_cost = cost_data->cost[vect_epilogue];
5686 }
5687
5688 /* Implement targetm.vectorize.destroy_cost_data. */
5689
5690 static void
5691 rs6000_destroy_cost_data (void *data)
5692 {
5693 free (data);
5694 }
5695
5696 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5697 library with vectorized intrinsics. */
5698
5699 static tree
5700 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5701 tree type_in)
5702 {
5703 char name[32];
5704 const char *suffix = NULL;
5705 tree fntype, new_fndecl, bdecl = NULL_TREE;
5706 int n_args = 1;
5707 const char *bname;
5708 machine_mode el_mode, in_mode;
5709 int n, in_n;
5710
5711 /* Libmass is suitable for unsafe math only as it does not correctly support
5712 parts of IEEE with the required precision such as denormals. Only support
5713 it if we have VSX to use the simd d2 or f4 functions.
5714 XXX: Add variable length support. */
5715 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5716 return NULL_TREE;
5717
5718 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5719 n = TYPE_VECTOR_SUBPARTS (type_out);
5720 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5721 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5722 if (el_mode != in_mode
5723 || n != in_n)
5724 return NULL_TREE;
5725
5726 switch (fn)
5727 {
5728 CASE_CFN_ATAN2:
5729 CASE_CFN_HYPOT:
5730 CASE_CFN_POW:
5731 n_args = 2;
5732 gcc_fallthrough ();
5733
5734 CASE_CFN_ACOS:
5735 CASE_CFN_ACOSH:
5736 CASE_CFN_ASIN:
5737 CASE_CFN_ASINH:
5738 CASE_CFN_ATAN:
5739 CASE_CFN_ATANH:
5740 CASE_CFN_CBRT:
5741 CASE_CFN_COS:
5742 CASE_CFN_COSH:
5743 CASE_CFN_ERF:
5744 CASE_CFN_ERFC:
5745 CASE_CFN_EXP2:
5746 CASE_CFN_EXP:
5747 CASE_CFN_EXPM1:
5748 CASE_CFN_LGAMMA:
5749 CASE_CFN_LOG10:
5750 CASE_CFN_LOG1P:
5751 CASE_CFN_LOG2:
5752 CASE_CFN_LOG:
5753 CASE_CFN_SIN:
5754 CASE_CFN_SINH:
5755 CASE_CFN_SQRT:
5756 CASE_CFN_TAN:
5757 CASE_CFN_TANH:
5758 if (el_mode == DFmode && n == 2)
5759 {
5760 bdecl = mathfn_built_in (double_type_node, fn);
5761 suffix = "d2"; /* pow -> powd2 */
5762 }
5763 else if (el_mode == SFmode && n == 4)
5764 {
5765 bdecl = mathfn_built_in (float_type_node, fn);
5766 suffix = "4"; /* powf -> powf4 */
5767 }
5768 else
5769 return NULL_TREE;
5770 if (!bdecl)
5771 return NULL_TREE;
5772 break;
5773
5774 default:
5775 return NULL_TREE;
5776 }
5777
5778 gcc_assert (suffix != NULL);
5779 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5780 if (!bname)
5781 return NULL_TREE;
5782
5783 strcpy (name, bname + sizeof ("__builtin_") - 1);
5784 strcat (name, suffix);
5785
5786 if (n_args == 1)
5787 fntype = build_function_type_list (type_out, type_in, NULL);
5788 else if (n_args == 2)
5789 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5790 else
5791 gcc_unreachable ();
5792
5793 /* Build a function declaration for the vectorized function. */
5794 new_fndecl = build_decl (BUILTINS_LOCATION,
5795 FUNCTION_DECL, get_identifier (name), fntype);
5796 TREE_PUBLIC (new_fndecl) = 1;
5797 DECL_EXTERNAL (new_fndecl) = 1;
5798 DECL_IS_NOVOPS (new_fndecl) = 1;
5799 TREE_READONLY (new_fndecl) = 1;
5800
5801 return new_fndecl;
5802 }
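/* For illustration: vectorizing pow over V2DFmode (el_mode == DFmode, n == 2)
   strips "__builtin_" from "__builtin_pow" and appends "d2", declaring an
   external function "powd2" of type V2DF (V2DF, V2DF) that the MASS library
   is expected to provide.  */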
5803
5804 /* Returns a function decl for a vectorized version of the builtin function
5805 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5806 if it is not available. */
5807
5808 static tree
5809 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5810 tree type_in)
5811 {
5812 machine_mode in_mode, out_mode;
5813 int in_n, out_n;
5814
5815 if (TARGET_DEBUG_BUILTIN)
5816 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5817 combined_fn_name (combined_fn (fn)),
5818 GET_MODE_NAME (TYPE_MODE (type_out)),
5819 GET_MODE_NAME (TYPE_MODE (type_in)));
5820
5821 if (TREE_CODE (type_out) != VECTOR_TYPE
5822 || TREE_CODE (type_in) != VECTOR_TYPE)
5823 return NULL_TREE;
5824
5825 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5826 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5827 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5828 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5829
5830 switch (fn)
5831 {
5832 CASE_CFN_COPYSIGN:
5833 if (VECTOR_UNIT_VSX_P (V2DFmode)
5834 && out_mode == DFmode && out_n == 2
5835 && in_mode == DFmode && in_n == 2)
5836 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5837 if (VECTOR_UNIT_VSX_P (V4SFmode)
5838 && out_mode == SFmode && out_n == 4
5839 && in_mode == SFmode && in_n == 4)
5840 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5841 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5842 && out_mode == SFmode && out_n == 4
5843 && in_mode == SFmode && in_n == 4)
5844 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5845 break;
5846 CASE_CFN_CEIL:
5847 if (VECTOR_UNIT_VSX_P (V2DFmode)
5848 && out_mode == DFmode && out_n == 2
5849 && in_mode == DFmode && in_n == 2)
5850 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5851 if (VECTOR_UNIT_VSX_P (V4SFmode)
5852 && out_mode == SFmode && out_n == 4
5853 && in_mode == SFmode && in_n == 4)
5854 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5855 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5856 && out_mode == SFmode && out_n == 4
5857 && in_mode == SFmode && in_n == 4)
5858 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5859 break;
5860 CASE_CFN_FLOOR:
5861 if (VECTOR_UNIT_VSX_P (V2DFmode)
5862 && out_mode == DFmode && out_n == 2
5863 && in_mode == DFmode && in_n == 2)
5864 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5865 if (VECTOR_UNIT_VSX_P (V4SFmode)
5866 && out_mode == SFmode && out_n == 4
5867 && in_mode == SFmode && in_n == 4)
5868 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5869 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5870 && out_mode == SFmode && out_n == 4
5871 && in_mode == SFmode && in_n == 4)
5872 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5873 break;
5874 CASE_CFN_FMA:
5875 if (VECTOR_UNIT_VSX_P (V2DFmode)
5876 && out_mode == DFmode && out_n == 2
5877 && in_mode == DFmode && in_n == 2)
5878 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5879 if (VECTOR_UNIT_VSX_P (V4SFmode)
5880 && out_mode == SFmode && out_n == 4
5881 && in_mode == SFmode && in_n == 4)
5882 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5883 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5884 && out_mode == SFmode && out_n == 4
5885 && in_mode == SFmode && in_n == 4)
5886 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5887 break;
5888 CASE_CFN_TRUNC:
5889 if (VECTOR_UNIT_VSX_P (V2DFmode)
5890 && out_mode == DFmode && out_n == 2
5891 && in_mode == DFmode && in_n == 2)
5892 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5893 if (VECTOR_UNIT_VSX_P (V4SFmode)
5894 && out_mode == SFmode && out_n == 4
5895 && in_mode == SFmode && in_n == 4)
5896 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5897 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5898 && out_mode == SFmode && out_n == 4
5899 && in_mode == SFmode && in_n == 4)
5900 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5901 break;
5902 CASE_CFN_NEARBYINT:
5903 if (VECTOR_UNIT_VSX_P (V2DFmode)
5904 && flag_unsafe_math_optimizations
5905 && out_mode == DFmode && out_n == 2
5906 && in_mode == DFmode && in_n == 2)
5907 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5908 if (VECTOR_UNIT_VSX_P (V4SFmode)
5909 && flag_unsafe_math_optimizations
5910 && out_mode == SFmode && out_n == 4
5911 && in_mode == SFmode && in_n == 4)
5912 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5913 break;
5914 CASE_CFN_RINT:
5915 if (VECTOR_UNIT_VSX_P (V2DFmode)
5916 && !flag_trapping_math
5917 && out_mode == DFmode && out_n == 2
5918 && in_mode == DFmode && in_n == 2)
5919 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5920 if (VECTOR_UNIT_VSX_P (V4SFmode)
5921 && !flag_trapping_math
5922 && out_mode == SFmode && out_n == 4
5923 && in_mode == SFmode && in_n == 4)
5924 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5925 break;
5926 default:
5927 break;
5928 }
5929
5930 /* Generate calls to libmass if appropriate. */
5931 if (rs6000_veclib_handler)
5932 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5933
5934 return NULL_TREE;
5935 }
5936
5937 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5938
5939 static tree
5940 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5941 tree type_in)
5942 {
5943 machine_mode in_mode, out_mode;
5944 int in_n, out_n;
5945
5946 if (TARGET_DEBUG_BUILTIN)
5947 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5948 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5949 GET_MODE_NAME (TYPE_MODE (type_out)),
5950 GET_MODE_NAME (TYPE_MODE (type_in)));
5951
5952 if (TREE_CODE (type_out) != VECTOR_TYPE
5953 || TREE_CODE (type_in) != VECTOR_TYPE)
5954 return NULL_TREE;
5955
5956 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5957 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5958 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5959 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5960
5961 enum rs6000_builtins fn
5962 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5963 switch (fn)
5964 {
5965 case RS6000_BUILTIN_RSQRTF:
5966 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5967 && out_mode == SFmode && out_n == 4
5968 && in_mode == SFmode && in_n == 4)
5969 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5970 break;
5971 case RS6000_BUILTIN_RSQRT:
5972 if (VECTOR_UNIT_VSX_P (V2DFmode)
5973 && out_mode == DFmode && out_n == 2
5974 && in_mode == DFmode && in_n == 2)
5975 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5976 break;
5977 case RS6000_BUILTIN_RECIPF:
5978 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5979 && out_mode == SFmode && out_n == 4
5980 && in_mode == SFmode && in_n == 4)
5981 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5982 break;
5983 case RS6000_BUILTIN_RECIP:
5984 if (VECTOR_UNIT_VSX_P (V2DFmode)
5985 && out_mode == DFmode && out_n == 2
5986 && in_mode == DFmode && in_n == 2)
5987 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5988 break;
5989 default:
5990 break;
5991 }
5992 return NULL_TREE;
5993 }
5994 \f
5995 /* Default CPU string for rs6000*_file_start functions. */
5996 static const char *rs6000_default_cpu;
5997
5998 /* Do anything needed at the start of the asm file. */
5999
6000 static void
6001 rs6000_file_start (void)
6002 {
6003 char buffer[80];
6004 const char *start = buffer;
6005 FILE *file = asm_out_file;
6006
6007 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6008
6009 default_file_start ();
6010
6011 if (flag_verbose_asm)
6012 {
6013 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6014
6015 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6016 {
6017 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6018 start = "";
6019 }
6020
6021 if (global_options_set.x_rs6000_cpu_index)
6022 {
6023 fprintf (file, "%s -mcpu=%s", start,
6024 processor_target_table[rs6000_cpu_index].name);
6025 start = "";
6026 }
6027
6028 if (global_options_set.x_rs6000_tune_index)
6029 {
6030 fprintf (file, "%s -mtune=%s", start,
6031 processor_target_table[rs6000_tune_index].name);
6032 start = "";
6033 }
6034
6035 if (PPC405_ERRATUM77)
6036 {
6037 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6038 start = "";
6039 }
6040
6041 #ifdef USING_ELFOS_H
6042 switch (rs6000_sdata)
6043 {
6044 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6045 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6046 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6047 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6048 }
6049
6050 if (rs6000_sdata && g_switch_value)
6051 {
6052 fprintf (file, "%s -G %d", start,
6053 g_switch_value);
6054 start = "";
6055 }
6056 #endif
6057
6058 if (*start == '\0')
6059 putc ('\n', file);
6060 }
6061
6062 #ifdef USING_ELFOS_H
6063 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6064 && !global_options_set.x_rs6000_cpu_index)
6065 {
6066 fputs ("\t.machine ", asm_out_file);
6067 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6068 fputs ("power9\n", asm_out_file);
6069 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6070 fputs ("power8\n", asm_out_file);
6071 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6072 fputs ("power7\n", asm_out_file);
6073 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6074 fputs ("power6\n", asm_out_file);
6075 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6076 fputs ("power5\n", asm_out_file);
6077 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6078 fputs ("power4\n", asm_out_file);
6079 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6080 fputs ("ppc64\n", asm_out_file);
6081 else
6082 fputs ("ppc\n", asm_out_file);
6083 }
6084 #endif
6085
6086 if (DEFAULT_ABI == ABI_ELFv2)
6087 fprintf (file, "\t.abiversion 2\n");
6088 }
6089
6090 \f
6091 /* Return nonzero if this function is known to have a null epilogue. */
6092
6093 int
6094 direct_return (void)
6095 {
6096 if (reload_completed)
6097 {
6098 rs6000_stack_t *info = rs6000_stack_info ();
6099
6100 if (info->first_gp_reg_save == 32
6101 && info->first_fp_reg_save == 64
6102 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6103 && ! info->lr_save_p
6104 && ! info->cr_save_p
6105 && info->vrsave_size == 0
6106 && ! info->push_p)
6107 return 1;
6108 }
6109
6110 return 0;
6111 }
6112
6113 /* Return the number of instructions it takes to form a constant in an
6114 integer register. */
6115
6116 int
6117 num_insns_constant_wide (HOST_WIDE_INT value)
6118 {
6119   /* Signed constant loadable with addi.  */
6120 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6121 return 1;
6122
6123   /* Constant loadable with addis.  */
6124 else if ((value & 0xffff) == 0
6125 && (value >> 31 == -1 || value >> 31 == 0))
6126 return 1;
6127
6128 else if (TARGET_POWERPC64)
6129 {
6130 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6131 HOST_WIDE_INT high = value >> 31;
6132
6133 if (high == 0 || high == -1)
6134 return 2;
6135
6136 high >>= 1;
6137
6138 if (low == 0)
6139 return num_insns_constant_wide (high) + 1;
6140 else if (high == 0)
6141 return num_insns_constant_wide (low) + 1;
6142 else
6143 return (num_insns_constant_wide (high)
6144 + num_insns_constant_wide (low) + 1);
6145 }
6146
6147 else
6148 return 2;
6149 }
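/* Worked examples: 0x7fff loads in 1 insn (addi/li); 0x12340000 in 1 insn
   (addis/lis); 0x12345678 needs addis + ori = 2 insns; and a 64-bit value
   such as 0x1234567800000000 forms 0x12345678 in 2 insns and shifts it,
   for 3 insns in total.  */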
6150
6151 int
6152 num_insns_constant (rtx op, machine_mode mode)
6153 {
6154 HOST_WIDE_INT low, high;
6155
6156 switch (GET_CODE (op))
6157 {
6158 case CONST_INT:
6159 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6160 && rs6000_is_valid_and_mask (op, mode))
6161 return 2;
6162 else
6163 return num_insns_constant_wide (INTVAL (op));
6164
6165 case CONST_WIDE_INT:
6166 {
6167 int i;
6168 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6169 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6170 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6171 return ins;
6172 }
6173
6174 case CONST_DOUBLE:
6175 if (mode == SFmode || mode == SDmode)
6176 {
6177 long l;
6178
6179 if (DECIMAL_FLOAT_MODE_P (mode))
6180 REAL_VALUE_TO_TARGET_DECIMAL32
6181 (*CONST_DOUBLE_REAL_VALUE (op), l);
6182 else
6183 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6184 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6185 }
6186
6187 long l[2];
6188 if (DECIMAL_FLOAT_MODE_P (mode))
6189 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6190 else
6191 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6192 high = l[WORDS_BIG_ENDIAN == 0];
6193 low = l[WORDS_BIG_ENDIAN != 0];
6194
6195 if (TARGET_32BIT)
6196 return (num_insns_constant_wide (low)
6197 + num_insns_constant_wide (high));
6198 else
6199 {
6200 if ((high == 0 && low >= 0)
6201 || (high == -1 && low < 0))
6202 return num_insns_constant_wide (low);
6203
6204 else if (rs6000_is_valid_and_mask (op, mode))
6205 return 2;
6206
6207 else if (low == 0)
6208 return num_insns_constant_wide (high) + 1;
6209
6210 else
6211 return (num_insns_constant_wide (high)
6212 + num_insns_constant_wide (low) + 1);
6213 }
6214
6215 default:
6216 gcc_unreachable ();
6217 }
6218 }
6219
6220 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6221 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6222 corresponding element of the vector, but for V4SFmode, the
6223 corresponding "float" is interpreted as an SImode integer. */
6224
6225 HOST_WIDE_INT
6226 const_vector_elt_as_int (rtx op, unsigned int elt)
6227 {
6228 rtx tmp;
6229
6230 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6231 gcc_assert (GET_MODE (op) != V2DImode
6232 && GET_MODE (op) != V2DFmode);
6233
6234 tmp = CONST_VECTOR_ELT (op, elt);
6235 if (GET_MODE (op) == V4SFmode)
6236 tmp = gen_lowpart (SImode, tmp);
6237 return INTVAL (tmp);
6238 }
6239
6240 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6241 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6242 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6243 all items are set to the same value and contain COPIES replicas of the
6244    vsplt's operand; if STEP > 1, one in every STEP elements is set to the vsplt's
6245 operand and the others are set to the value of the operand's msb. */
6246
6247 static bool
6248 vspltis_constant (rtx op, unsigned step, unsigned copies)
6249 {
6250 machine_mode mode = GET_MODE (op);
6251 machine_mode inner = GET_MODE_INNER (mode);
6252
6253 unsigned i;
6254 unsigned nunits;
6255 unsigned bitsize;
6256 unsigned mask;
6257
6258 HOST_WIDE_INT val;
6259 HOST_WIDE_INT splat_val;
6260 HOST_WIDE_INT msb_val;
6261
6262 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6263 return false;
6264
6265 nunits = GET_MODE_NUNITS (mode);
6266 bitsize = GET_MODE_BITSIZE (inner);
6267 mask = GET_MODE_MASK (inner);
6268
6269 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6270 splat_val = val;
6271 msb_val = val >= 0 ? 0 : -1;
6272
6273   /* Construct the value to be splatted, if possible.  If not, return false.  */
6274 for (i = 2; i <= copies; i *= 2)
6275 {
6276 HOST_WIDE_INT small_val;
6277 bitsize /= 2;
6278 small_val = splat_val >> bitsize;
6279 mask >>= bitsize;
6280 if (splat_val != ((HOST_WIDE_INT)
6281 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6282 | (small_val & mask)))
6283 return false;
6284 splat_val = small_val;
6285 }
6286
6287 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6288 if (EASY_VECTOR_15 (splat_val))
6289 ;
6290
6291 /* Also check if we can splat, and then add the result to itself. Do so if
6292      the value is positive, or if the splat instruction is using OP's mode;
6293 for splat_val < 0, the splat and the add should use the same mode. */
6294 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6295 && (splat_val >= 0 || (step == 1 && copies == 1)))
6296 ;
6297
6298   /* Also check if we are loading up the most significant bit, which can be
6299      done by loading up -1 and shifting the value left by -1.  */
6300 else if (EASY_VECTOR_MSB (splat_val, inner))
6301 ;
6302
6303 else
6304 return false;
6305
6306 /* Check if VAL is present in every STEP-th element, and the
6307 other elements are filled with its most significant bit. */
6308 for (i = 1; i < nunits; ++i)
6309 {
6310 HOST_WIDE_INT desired_val;
6311 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6312 if ((i & (step - 1)) == 0)
6313 desired_val = val;
6314 else
6315 desired_val = msb_val;
6316
6317 if (desired_val != const_vector_elt_as_int (op, elt))
6318 return false;
6319 }
6320
6321 return true;
6322 }
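/* For illustration: the V8HImode constant { 5, 5, 5, 5, 5, 5, 5, 5 } matches
   with step == 1 and copies == 1 (a plain vspltish 5), while a V4SImode
   constant with every element 0x00050005 matches with copies == 2, since a
   vspltish 5 reinterpreted as V4SImode yields exactly that bit pattern.  */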
6323
6324 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6325 instruction, filling in the bottom elements with 0 or -1.
6326
6327 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6328 for the number of zeroes to shift in, or negative for the number of 0xff
6329 bytes to shift in.
6330
6331 OP is a CONST_VECTOR. */
6332
6333 int
6334 vspltis_shifted (rtx op)
6335 {
6336 machine_mode mode = GET_MODE (op);
6337 machine_mode inner = GET_MODE_INNER (mode);
6338
6339 unsigned i, j;
6340 unsigned nunits;
6341 unsigned mask;
6342
6343 HOST_WIDE_INT val;
6344
6345 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6346     return 0;
6347
6348 /* We need to create pseudo registers to do the shift, so don't recognize
6349 shift vector constants after reload. */
6350 if (!can_create_pseudo_p ())
6351     return 0;
6352
6353 nunits = GET_MODE_NUNITS (mode);
6354 mask = GET_MODE_MASK (inner);
6355
6356 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6357
6358 /* Check if the value can really be the operand of a vspltis[bhw]. */
6359 if (EASY_VECTOR_15 (val))
6360 ;
6361
6362 /* Also check if we are loading up the most significant bit which can be done
6363 by loading up -1 and shifting the value left by -1. */
6364 else if (EASY_VECTOR_MSB (val, inner))
6365 ;
6366
6367 else
6368 return 0;
6369
6370 /* Check if VAL is present in every STEP-th element until we find elements
6371 that are 0 or all 1 bits. */
6372 for (i = 1; i < nunits; ++i)
6373 {
6374 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6375 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6376
6377 /* If the value isn't the splat value, check for the remaining elements
6378 being 0/-1. */
6379 if (val != elt_val)
6380 {
6381 if (elt_val == 0)
6382 {
6383 for (j = i+1; j < nunits; ++j)
6384 {
6385 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6386 if (const_vector_elt_as_int (op, elt2) != 0)
6387 return 0;
6388 }
6389
6390 return (nunits - i) * GET_MODE_SIZE (inner);
6391 }
6392
6393 else if ((elt_val & mask) == mask)
6394 {
6395 for (j = i+1; j < nunits; ++j)
6396 {
6397 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6398 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6399 return 0;
6400 }
6401
6402 return -((nunits - i) * GET_MODE_SIZE (inner));
6403 }
6404
6405 else
6406 return 0;
6407 }
6408 }
6409
6410   /* If all elements are equal, we don't need to do VSLDOI.  */
6411 return 0;
6412 }
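/* For illustration: on a big-endian target the V4SImode constant
   { 5, 5, 5, 0 } returns 4, i.e. one 4-byte zero element to shift in, so it
   can be built as a vspltisw of 5 followed by a VSLDOI.  */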
6413
6414
6415 /* Return true if OP is of the given MODE and can be synthesized
6416 with a vspltisb, vspltish or vspltisw. */
6417
6418 bool
6419 easy_altivec_constant (rtx op, machine_mode mode)
6420 {
6421 unsigned step, copies;
6422
6423 if (mode == VOIDmode)
6424 mode = GET_MODE (op);
6425 else if (mode != GET_MODE (op))
6426 return false;
6427
6428   /* V2DI/V2DF were added with VSX.  Only allow 0 and all 1's as easy
6429 constants. */
6430 if (mode == V2DFmode)
6431 return zero_constant (op, mode);
6432
6433 else if (mode == V2DImode)
6434 {
6435 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6436 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6437 return false;
6438
6439 if (zero_constant (op, mode))
6440 return true;
6441
6442 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6443 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6444 return true;
6445
6446 return false;
6447 }
6448
6449 /* V1TImode is a special container for TImode. Ignore for now. */
6450 else if (mode == V1TImode)
6451 return false;
6452
6453 /* Start with a vspltisw. */
6454 step = GET_MODE_NUNITS (mode) / 4;
6455 copies = 1;
6456
6457 if (vspltis_constant (op, step, copies))
6458 return true;
6459
6460 /* Then try with a vspltish. */
6461 if (step == 1)
6462 copies <<= 1;
6463 else
6464 step >>= 1;
6465
6466 if (vspltis_constant (op, step, copies))
6467 return true;
6468
6469 /* And finally a vspltisb. */
6470 if (step == 1)
6471 copies <<= 1;
6472 else
6473 step >>= 1;
6474
6475 if (vspltis_constant (op, step, copies))
6476 return true;
6477
6478 if (vspltis_shifted (op) != 0)
6479 return true;
6480
6481 return false;
6482 }
6483
6484 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6485 result is OP. Abort if it is not possible. */
6486
6487 rtx
6488 gen_easy_altivec_constant (rtx op)
6489 {
6490 machine_mode mode = GET_MODE (op);
6491 int nunits = GET_MODE_NUNITS (mode);
6492 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6493 unsigned step = nunits / 4;
6494 unsigned copies = 1;
6495
6496 /* Start with a vspltisw. */
6497 if (vspltis_constant (op, step, copies))
6498 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6499
6500 /* Then try with a vspltish. */
6501 if (step == 1)
6502 copies <<= 1;
6503 else
6504 step >>= 1;
6505
6506 if (vspltis_constant (op, step, copies))
6507 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6508
6509 /* And finally a vspltisb. */
6510 if (step == 1)
6511 copies <<= 1;
6512 else
6513 step >>= 1;
6514
6515 if (vspltis_constant (op, step, copies))
6516 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6517
6518 gcc_unreachable ();
6519 }
6520
6521 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6522 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6523
6524    Store the number of instructions needed (1 or 2) into the location
6525    pointed to by NUM_INSNS_PTR.
6526
6527    Store the constant that is being splatted via CONSTANT_PTR.  */
6528
6529 bool
6530 xxspltib_constant_p (rtx op,
6531 machine_mode mode,
6532 int *num_insns_ptr,
6533 int *constant_ptr)
6534 {
6535 size_t nunits = GET_MODE_NUNITS (mode);
6536 size_t i;
6537 HOST_WIDE_INT value;
6538 rtx element;
6539
6540   /* Set the returned values to out-of-bounds values.  */
6541 *num_insns_ptr = -1;
6542 *constant_ptr = 256;
6543
6544 if (!TARGET_P9_VECTOR)
6545 return false;
6546
6547 if (mode == VOIDmode)
6548 mode = GET_MODE (op);
6549
6550 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6551 return false;
6552
6553 /* Handle (vec_duplicate <constant>). */
6554 if (GET_CODE (op) == VEC_DUPLICATE)
6555 {
6556 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6557 && mode != V2DImode)
6558 return false;
6559
6560 element = XEXP (op, 0);
6561 if (!CONST_INT_P (element))
6562 return false;
6563
6564 value = INTVAL (element);
6565 if (!IN_RANGE (value, -128, 127))
6566 return false;
6567 }
6568
6569 /* Handle (const_vector [...]). */
6570 else if (GET_CODE (op) == CONST_VECTOR)
6571 {
6572 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6573 && mode != V2DImode)
6574 return false;
6575
6576 element = CONST_VECTOR_ELT (op, 0);
6577 if (!CONST_INT_P (element))
6578 return false;
6579
6580 value = INTVAL (element);
6581 if (!IN_RANGE (value, -128, 127))
6582 return false;
6583
6584 for (i = 1; i < nunits; i++)
6585 {
6586 element = CONST_VECTOR_ELT (op, i);
6587 if (!CONST_INT_P (element))
6588 return false;
6589
6590 if (value != INTVAL (element))
6591 return false;
6592 }
6593 }
6594
6595 /* Handle integer constants being loaded into the upper part of the VSX
6596 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6597      can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6598 else if (CONST_INT_P (op))
6599 {
6600 if (!SCALAR_INT_MODE_P (mode))
6601 return false;
6602
6603 value = INTVAL (op);
6604 if (!IN_RANGE (value, -128, 127))
6605 return false;
6606
6607 if (!IN_RANGE (value, -1, 0))
6608 {
6609 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6610 return false;
6611
6612 if (EASY_VECTOR_15 (value))
6613 return false;
6614 }
6615 }
6616
6617 else
6618 return false;
6619
6620 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6621 sign extend. Special case 0/-1 to allow getting any VSX register instead
6622 of an Altivec register. */
6623 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6624 && EASY_VECTOR_15 (value))
6625 return false;
6626
6627 /* Return # of instructions and the constant byte for XXSPLTIB. */
6628 if (mode == V16QImode)
6629 *num_insns_ptr = 1;
6630
6631 else if (IN_RANGE (value, -1, 0))
6632 *num_insns_ptr = 1;
6633
6634 else
6635 *num_insns_ptr = 2;
6636
6637 *constant_ptr = (int) value;
6638 return true;
6639 }
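/* For illustration: a V16QImode splat of 37 needs 1 insn (xxspltib 37); a
   V8HImode splat of 37 needs 2 insns (xxspltib plus a sign-extending unpack
   such as vupkhsb); and a V4SImode splat of 5 returns false, since
   EASY_VECTOR_15 holds and a single vspltisw is preferred.  */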
6640
6641 const char *
6642 output_vec_const_move (rtx *operands)
6643 {
6644 int shift;
6645 machine_mode mode;
6646 rtx dest, vec;
6647
6648 dest = operands[0];
6649 vec = operands[1];
6650 mode = GET_MODE (dest);
6651
6652 if (TARGET_VSX)
6653 {
6654 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6655 int xxspltib_value = 256;
6656 int num_insns = -1;
6657
6658 if (zero_constant (vec, mode))
6659 {
6660 if (TARGET_P9_VECTOR)
6661 return "xxspltib %x0,0";
6662
6663 else if (dest_vmx_p)
6664 return "vspltisw %0,0";
6665
6666 else
6667 return "xxlxor %x0,%x0,%x0";
6668 }
6669
6670 if (all_ones_constant (vec, mode))
6671 {
6672 if (TARGET_P9_VECTOR)
6673 return "xxspltib %x0,255";
6674
6675 else if (dest_vmx_p)
6676 return "vspltisw %0,-1";
6677
6678 else if (TARGET_P8_VECTOR)
6679 return "xxlorc %x0,%x0,%x0";
6680
6681 else
6682 gcc_unreachable ();
6683 }
6684
6685 if (TARGET_P9_VECTOR
6686 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6687 {
6688 if (num_insns == 1)
6689 {
6690 operands[2] = GEN_INT (xxspltib_value & 0xff);
6691 return "xxspltib %x0,%2";
6692 }
6693
6694 return "#";
6695 }
6696 }
6697
6698 if (TARGET_ALTIVEC)
6699 {
6700 rtx splat_vec;
6701
6702 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6703 if (zero_constant (vec, mode))
6704 return "vspltisw %0,0";
6705
6706 if (all_ones_constant (vec, mode))
6707 return "vspltisw %0,-1";
6708
6709 /* Do we need to construct a value using VSLDOI? */
6710 shift = vspltis_shifted (vec);
6711 if (shift != 0)
6712 return "#";
6713
6714 splat_vec = gen_easy_altivec_constant (vec);
6715 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6716 operands[1] = XEXP (splat_vec, 0);
6717 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6718 return "#";
6719
6720 switch (GET_MODE (splat_vec))
6721 {
6722 case E_V4SImode:
6723 return "vspltisw %0,%1";
6724
6725 case E_V8HImode:
6726 return "vspltish %0,%1";
6727
6728 case E_V16QImode:
6729 return "vspltisb %0,%1";
6730
6731 default:
6732 gcc_unreachable ();
6733 }
6734 }
6735
6736 gcc_unreachable ();
6737 }
6738
6739 /* Initialize vector TARGET to VALS. */
6740
6741 void
6742 rs6000_expand_vector_init (rtx target, rtx vals)
6743 {
6744 machine_mode mode = GET_MODE (target);
6745 machine_mode inner_mode = GET_MODE_INNER (mode);
6746 int n_elts = GET_MODE_NUNITS (mode);
6747 int n_var = 0, one_var = -1;
6748 bool all_same = true, all_const_zero = true;
6749 rtx x, mem;
6750 int i;
6751
6752 for (i = 0; i < n_elts; ++i)
6753 {
6754 x = XVECEXP (vals, 0, i);
6755 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6756 ++n_var, one_var = i;
6757 else if (x != CONST0_RTX (inner_mode))
6758 all_const_zero = false;
6759
6760 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6761 all_same = false;
6762 }
6763
6764 if (n_var == 0)
6765 {
6766 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6767 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6768 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6769 {
6770 /* Zero register. */
6771 emit_move_insn (target, CONST0_RTX (mode));
6772 return;
6773 }
6774 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6775 {
6776 /* Splat immediate. */
6777 emit_insn (gen_rtx_SET (target, const_vec));
6778 return;
6779 }
6780 else
6781 {
6782 /* Load from constant pool. */
6783 emit_move_insn (target, const_vec);
6784 return;
6785 }
6786 }
6787
6788 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6789 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6790 {
6791 rtx op[2];
6792 size_t i;
6793 size_t num_elements = all_same ? 1 : 2;
6794 for (i = 0; i < num_elements; i++)
6795 {
6796 op[i] = XVECEXP (vals, 0, i);
6797 /* Just in case there is a SUBREG with a smaller mode, do a
6798 conversion. */
6799 if (GET_MODE (op[i]) != inner_mode)
6800 {
6801 rtx tmp = gen_reg_rtx (inner_mode);
6802 convert_move (tmp, op[i], 0);
6803 op[i] = tmp;
6804 }
6805 /* Allow load with splat double word. */
6806 else if (MEM_P (op[i]))
6807 {
6808 if (!all_same)
6809 op[i] = force_reg (inner_mode, op[i]);
6810 }
6811 else if (!REG_P (op[i]))
6812 op[i] = force_reg (inner_mode, op[i]);
6813 }
6814
6815 if (all_same)
6816 {
6817 if (mode == V2DFmode)
6818 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6819 else
6820 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6821 }
6822 else
6823 {
6824 if (mode == V2DFmode)
6825 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6826 else
6827 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6828 }
6829 return;
6830 }
6831
6832 /* Special case initializing vector int if we are on 64-bit systems with
6833 direct move or we have the ISA 3.0 instructions. */
6834 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6835 && TARGET_DIRECT_MOVE_64BIT)
6836 {
6837 if (all_same)
6838 {
6839 rtx element0 = XVECEXP (vals, 0, 0);
6840 if (MEM_P (element0))
6841 element0 = rs6000_address_for_fpconvert (element0);
6842 else
6843 element0 = force_reg (SImode, element0);
6844
6845 if (TARGET_P9_VECTOR)
6846 emit_insn (gen_vsx_splat_v4si (target, element0));
6847 else
6848 {
6849 rtx tmp = gen_reg_rtx (DImode);
6850 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6851 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6852 }
6853 return;
6854 }
6855 else
6856 {
6857 rtx elements[4];
6858 size_t i;
6859
6860 for (i = 0; i < 4; i++)
6861 {
6862 elements[i] = XVECEXP (vals, 0, i);
6863 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
6864 elements[i] = copy_to_mode_reg (SImode, elements[i]);
6865 }
6866
6867 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6868 elements[2], elements[3]));
6869 return;
6870 }
6871 }
6872
6873 /* With single-precision floating point on VSX, we know that internally
6874 single precision is actually represented as a double. Either make two
6875 V2DF vectors and convert those vectors to single precision, or do one
6876 conversion and splat the result to the other elements. */
6877 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6878 {
6879 if (all_same)
6880 {
6881 rtx element0 = XVECEXP (vals, 0, 0);
6882
6883 if (TARGET_P9_VECTOR)
6884 {
6885 if (MEM_P (element0))
6886 element0 = rs6000_address_for_fpconvert (element0);
6887
6888 emit_insn (gen_vsx_splat_v4sf (target, element0));
6889 }
6890
6891 else
6892 {
6893 rtx freg = gen_reg_rtx (V4SFmode);
6894 rtx sreg = force_reg (SFmode, element0);
6895 rtx cvt = (TARGET_XSCVDPSPN
6896 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6897 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6898
6899 emit_insn (cvt);
6900 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6901 const0_rtx));
6902 }
6903 }
6904 else
6905 {
6906 rtx dbl_even = gen_reg_rtx (V2DFmode);
6907 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6908 rtx flt_even = gen_reg_rtx (V4SFmode);
6909 rtx flt_odd = gen_reg_rtx (V4SFmode);
6910 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6911 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6912 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6913 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6914
6915 /* Use VMRGEW if we can instead of doing a permute. */
6916 if (TARGET_P8_VECTOR)
6917 {
6918 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6919 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6920 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6921 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6922 if (BYTES_BIG_ENDIAN)
6923 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6924 else
6925 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6926 }
6927 else
6928 {
6929 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6930 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6931 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6932 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6933 rs6000_expand_extract_even (target, flt_even, flt_odd);
6934 }
6935 }
6936 return;
6937 }
6938
6939 /* Special case initializing vector short/char splats if we are on 64-bit
6940 systems with direct move. */
6941 if (all_same && TARGET_DIRECT_MOVE_64BIT
6942 && (mode == V16QImode || mode == V8HImode))
6943 {
6944 rtx op0 = XVECEXP (vals, 0, 0);
6945 rtx di_tmp = gen_reg_rtx (DImode);
6946
6947 if (!REG_P (op0))
6948 op0 = force_reg (GET_MODE_INNER (mode), op0);
6949
6950 if (mode == V16QImode)
6951 {
6952 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6953 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6954 return;
6955 }
6956
6957 if (mode == V8HImode)
6958 {
6959 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6960 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6961 return;
6962 }
6963 }
6964
6965 /* Store value to stack temp. Load vector element. Splat. However, splat
6966 of 64-bit items is not supported on Altivec. */
6967 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6968 {
6969 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6970 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6971 XVECEXP (vals, 0, 0));
6972 x = gen_rtx_UNSPEC (VOIDmode,
6973 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6974 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6975 gen_rtvec (2,
6976 gen_rtx_SET (target, mem),
6977 x)));
6978 x = gen_rtx_VEC_SELECT (inner_mode, target,
6979 gen_rtx_PARALLEL (VOIDmode,
6980 gen_rtvec (1, const0_rtx)));
6981 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6982 return;
6983 }
6984
6985 /* One field is non-constant. Load constant then overwrite
6986 varying field. */
6987 if (n_var == 1)
6988 {
6989 rtx copy = copy_rtx (vals);
6990
6991 /* Load constant part of vector, substitute neighboring value for
6992 varying element. */
6993 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6994 rs6000_expand_vector_init (target, copy);
6995
6996 /* Insert variable. */
6997 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6998 return;
6999 }
7000
7001 /* Construct the vector in memory one field at a time
7002 and load the whole vector. */
7003 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7004 for (i = 0; i < n_elts; i++)
7005 emit_move_insn (adjust_address_nv (mem, inner_mode,
7006 i * GET_MODE_SIZE (inner_mode)),
7007 XVECEXP (vals, 0, i));
7008 emit_move_insn (target, mem);
7009 }
7010
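/* A worked example of the N_VAR == 1 path above (illustrative only):
   initializing a V4SI from { 1, 2, X, 4 }, where X is in a register, first
   loads the constant vector { 1, 2, 4, 4 } (the varying element borrows its
   neighbor's value), then rs6000_expand_vector_set overwrites element 2
   with X.  */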
7011 /* Set field ELT of TARGET to VAL. */
7012
7013 void
7014 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7015 {
7016 machine_mode mode = GET_MODE (target);
7017 machine_mode inner_mode = GET_MODE_INNER (mode);
7018 rtx reg = gen_reg_rtx (mode);
7019 rtx mask, mem, x;
7020 int width = GET_MODE_SIZE (inner_mode);
7021 int i;
7022
7023 val = force_reg (GET_MODE (val), val);
7024
7025 if (VECTOR_MEM_VSX_P (mode))
7026 {
7027 rtx insn = NULL_RTX;
7028 rtx elt_rtx = GEN_INT (elt);
7029
7030 if (mode == V2DFmode)
7031 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7032
7033 else if (mode == V2DImode)
7034 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7035
7036 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
7037 {
7038 if (mode == V4SImode)
7039 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7040 else if (mode == V8HImode)
7041 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7042 else if (mode == V16QImode)
7043 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7044 else if (mode == V4SFmode)
7045 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
7046 }
7047
7048 if (insn)
7049 {
7050 emit_insn (insn);
7051 return;
7052 }
7053 }
7054
7055 /* Simplify setting single element vectors like V1TImode. */
7056 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7057 {
7058 emit_move_insn (target, gen_lowpart (mode, val));
7059 return;
7060 }
7061
7062 /* Load single variable value. */
7063 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7064 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7065 x = gen_rtx_UNSPEC (VOIDmode,
7066 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7067 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7068 gen_rtvec (2,
7069 gen_rtx_SET (reg, mem),
7070 x)));
7071
7072 /* Linear sequence. */
7073 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7074 for (i = 0; i < 16; ++i)
7075 XVECEXP (mask, 0, i) = GEN_INT (i);
7076
7077 /* Set permute mask to insert element into target. */
7078 for (i = 0; i < width; ++i)
7079 XVECEXP (mask, 0, elt*width + i)
7080 = GEN_INT (i + 0x10);
7081 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
7082
7083 if (BYTES_BIG_ENDIAN)
7084 x = gen_rtx_UNSPEC (mode,
7085 gen_rtvec (3, target, reg,
7086 force_reg (V16QImode, x)),
7087 UNSPEC_VPERM);
7088 else
7089 {
7090 if (TARGET_P9_VECTOR)
7091 x = gen_rtx_UNSPEC (mode,
7092 gen_rtvec (3, reg, target,
7093 force_reg (V16QImode, x)),
7094 UNSPEC_VPERMR);
7095 else
7096 {
7097 /* Invert selector. We prefer to generate VNAND on P8 so
7098 that future fusion opportunities can kick in, but must
7099 generate VNOR elsewhere. */
7100 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7101 rtx iorx = (TARGET_P8_VECTOR
7102 ? gen_rtx_IOR (V16QImode, notx, notx)
7103 : gen_rtx_AND (V16QImode, notx, notx));
7104 rtx tmp = gen_reg_rtx (V16QImode);
7105 emit_insn (gen_rtx_SET (tmp, iorx));
7106
7107 /* Permute with operands reversed and adjusted selector. */
7108 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7109 UNSPEC_VPERM);
7110 }
7111 }
7112
7113 emit_insn (gen_rtx_SET (target, x));
7114 }
7115
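/* A worked example of the permute mask built above (illustrative only):
   inserting element 1 of a V4SImode vector uses WIDTH = 4, so mask bytes
   4..7 become 0x10..0x13 and the mask is { 0, 1, 2, 3, 16, 17, 18, 19,
   8, 9, 10, 11, 12, 13, 14, 15 }, i.e. only bytes 4..7 are taken from the
   vector holding the new value.  */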
7116 /* Extract field ELT from VEC into TARGET. */
7117
7118 void
7119 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7120 {
7121 machine_mode mode = GET_MODE (vec);
7122 machine_mode inner_mode = GET_MODE_INNER (mode);
7123 rtx mem;
7124
7125 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7126 {
7127 switch (mode)
7128 {
7129 default:
7130 break;
7131 case E_V1TImode:
7132 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7133 emit_move_insn (target, gen_lowpart (TImode, vec));
7134 break;
7135 case E_V2DFmode:
7136 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7137 return;
7138 case E_V2DImode:
7139 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7140 return;
7141 case E_V4SFmode:
7142 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7143 return;
7144 case E_V16QImode:
7145 if (TARGET_DIRECT_MOVE_64BIT)
7146 {
7147 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7148 return;
7149 }
7150 else
7151 break;
7152 case E_V8HImode:
7153 if (TARGET_DIRECT_MOVE_64BIT)
7154 {
7155 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7156 return;
7157 }
7158 else
7159 break;
7160 case E_V4SImode:
7161 if (TARGET_DIRECT_MOVE_64BIT)
7162 {
7163 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7164 return;
7165 }
7166 break;
7167 }
7168 }
7169 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7170 && TARGET_DIRECT_MOVE_64BIT)
7171 {
7172 if (GET_MODE (elt) != DImode)
7173 {
7174 rtx tmp = gen_reg_rtx (DImode);
7175 convert_move (tmp, elt, 0);
7176 elt = tmp;
7177 }
7178 else if (!REG_P (elt))
7179 elt = force_reg (DImode, elt);
7180
7181 switch (mode)
7182 {
7183 case E_V2DFmode:
7184 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7185 return;
7186
7187 case E_V2DImode:
7188 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7189 return;
7190
7191 case E_V4SFmode:
7192 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7193 return;
7194
7195 case E_V4SImode:
7196 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7197 return;
7198
7199 case E_V8HImode:
7200 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7201 return;
7202
7203 case E_V16QImode:
7204 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7205 return;
7206
7207 default:
7208 gcc_unreachable ();
7209 }
7210 }
7211
7212 gcc_assert (CONST_INT_P (elt));
7213
7214 /* Allocate mode-sized buffer. */
7215 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7216
7217 emit_move_insn (mem, vec);
7218
7219 /* Add offset to field within buffer matching vector element. */
7220 mem = adjust_address_nv (mem, inner_mode,
7221 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7222
7223 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7224 }
7225
7226 /* Helper function to return the register number of an RTX. */
7227 static inline int
7228 regno_or_subregno (rtx op)
7229 {
7230 if (REG_P (op))
7231 return REGNO (op);
7232 else if (SUBREG_P (op))
7233 return subreg_regno (op);
7234 else
7235 gcc_unreachable ();
7236 }
7237
7238 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7239 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7240 temporary (BASE_TMP) to fix up the address. Return the new memory address
7241 that is valid for reads or writes to a given register (SCALAR_REG). */
7242
7243 rtx
7244 rs6000_adjust_vec_address (rtx scalar_reg,
7245 rtx mem,
7246 rtx element,
7247 rtx base_tmp,
7248 machine_mode scalar_mode)
7249 {
7250 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7251 rtx addr = XEXP (mem, 0);
7252 rtx element_offset;
7253 rtx new_addr;
7254 bool valid_addr_p;
7255
7256 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7257 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7258
7259 /* Calculate what we need to add to the address to get the element
7260 address. */
7261 if (CONST_INT_P (element))
7262 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7263 else
7264 {
7265 int byte_shift = exact_log2 (scalar_size);
7266 gcc_assert (byte_shift >= 0);
7267
7268 if (byte_shift == 0)
7269 element_offset = element;
7270
7271 else
7272 {
7273 if (TARGET_POWERPC64)
7274 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7275 else
7276 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7277
7278 element_offset = base_tmp;
7279 }
7280 }
7281
7282 /* Create the new address pointing to the element within the vector. If we
7283 are adding 0, we don't have to change the address. */
7284 if (element_offset == const0_rtx)
7285 new_addr = addr;
7286
7287 /* A simple indirect address can be converted into a reg + offset
7288 address. */
7289 else if (REG_P (addr) || SUBREG_P (addr))
7290 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7291
7292 /* Optimize D-FORM addresses with a constant offset and a constant element,
7293 folding the element offset directly into the address. */
7294 else if (GET_CODE (addr) == PLUS)
7295 {
7296 rtx op0 = XEXP (addr, 0);
7297 rtx op1 = XEXP (addr, 1);
7298 rtx insn;
7299
7300 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7301 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7302 {
7303 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7304 rtx offset_rtx = GEN_INT (offset);
7305
7306 if (IN_RANGE (offset, -32768, 32767)
7307 && (scalar_size < 8 || (offset & 0x3) == 0))
7308 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7309 else
7310 {
7311 emit_move_insn (base_tmp, offset_rtx);
7312 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7313 }
7314 }
7315 else
7316 {
7317 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7318 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7319
7320 /* Note, ADDI requires the register being added to be a base
7321 register. If the register was R0, load it up into the temporary
7322 and do the add. */
7323 if (op1_reg_p
7324 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7325 {
7326 insn = gen_add3_insn (base_tmp, op1, element_offset);
7327 gcc_assert (insn != NULL_RTX);
7328 emit_insn (insn);
7329 }
7330
7331 else if (ele_reg_p
7332 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7333 {
7334 insn = gen_add3_insn (base_tmp, element_offset, op1);
7335 gcc_assert (insn != NULL_RTX);
7336 emit_insn (insn);
7337 }
7338
7339 else
7340 {
7341 emit_move_insn (base_tmp, op1);
7342 emit_insn (gen_add2_insn (base_tmp, element_offset));
7343 }
7344
7345 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7346 }
7347 }
7348
7349 else
7350 {
7351 emit_move_insn (base_tmp, addr);
7352 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7353 }
7354
7355 /* If we have a PLUS, we need to see whether the particular register class
7356 allows for D-FORM or X-FORM addressing. */
7357 if (GET_CODE (new_addr) == PLUS)
7358 {
7359 rtx op1 = XEXP (new_addr, 1);
7360 addr_mask_type addr_mask;
7361 int scalar_regno = regno_or_subregno (scalar_reg);
7362
7363 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7364 if (INT_REGNO_P (scalar_regno))
7365 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7366
7367 else if (FP_REGNO_P (scalar_regno))
7368 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7369
7370 else if (ALTIVEC_REGNO_P (scalar_regno))
7371 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7372
7373 else
7374 gcc_unreachable ();
7375
7376 if (REG_P (op1) || SUBREG_P (op1))
7377 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7378 else
7379 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7380 }
7381
7382 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7383 valid_addr_p = true;
7384
7385 else
7386 valid_addr_p = false;
7387
7388 if (!valid_addr_p)
7389 {
7390 emit_move_insn (base_tmp, new_addr);
7391 new_addr = base_tmp;
7392 }
7393
7394 return change_address (mem, scalar_mode, new_addr);
7395 }
7396
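/* A worked example (illustrative only): for a V4SImode MEM whose address is
   (plus r9 48), extracting constant element 2 with SCALAR_MODE SImode gives
   ELEMENT_OFFSET 8; the two constants fold to (plus r9 56), which is in
   D-FORM range, so no temporary is needed, assuming the register class of
   SCALAR_REG supports offset addressing for SImode.  */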
7397 /* Split a variable vec_extract operation into the component instructions. */
7398
7399 void
7400 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7401 rtx tmp_altivec)
7402 {
7403 machine_mode mode = GET_MODE (src);
7404 machine_mode scalar_mode = GET_MODE (dest);
7405 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7406 int byte_shift = exact_log2 (scalar_size);
7407
7408 gcc_assert (byte_shift >= 0);
7409
7410 /* If we are given a memory address, optimize to load just the element. We
7411 don't have to adjust the vector element number on little endian
7412 systems. */
7413 if (MEM_P (src))
7414 {
7415 gcc_assert (REG_P (tmp_gpr));
7416 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7417 tmp_gpr, scalar_mode));
7418 return;
7419 }
7420
7421 else if (REG_P (src) || SUBREG_P (src))
7422 {
7423 int bit_shift = byte_shift + 3;
7424 rtx element2;
7425 int dest_regno = regno_or_subregno (dest);
7426 int src_regno = regno_or_subregno (src);
7427 int element_regno = regno_or_subregno (element);
7428
7429 gcc_assert (REG_P (tmp_gpr));
7430
7431 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7432 a general purpose register. */
7433 if (TARGET_P9_VECTOR
7434 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7435 && INT_REGNO_P (dest_regno)
7436 && ALTIVEC_REGNO_P (src_regno)
7437 && INT_REGNO_P (element_regno))
7438 {
7439 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7440 rtx element_si = gen_rtx_REG (SImode, element_regno);
7441
7442 if (mode == V16QImode)
7443 emit_insn (BYTES_BIG_ENDIAN
7444 ? gen_vextublx (dest_si, element_si, src)
7445 : gen_vextubrx (dest_si, element_si, src));
7446
7447 else if (mode == V8HImode)
7448 {
7449 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7450 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7451 emit_insn (BYTES_BIG_ENDIAN
7452 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7453 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7454 }
7455
7457 else
7458 {
7459 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7460 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7461 emit_insn (BYTES_BIG_ENDIAN
7462 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7463 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7464 }
7465
7466 return;
7467 }
7468
7470 gcc_assert (REG_P (tmp_altivec));
7471
7472 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7473 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7474 will shift the element into the upper position (adding 3 to convert a
7475 byte shift into a bit shift). */
7476 if (scalar_size == 8)
7477 {
7478 if (!BYTES_BIG_ENDIAN)
7479 {
7480 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7481 element2 = tmp_gpr;
7482 }
7483 else
7484 element2 = element;
7485
7486 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7487 bit. */
7488 emit_insn (gen_rtx_SET (tmp_gpr,
7489 gen_rtx_AND (DImode,
7490 gen_rtx_ASHIFT (DImode,
7491 element2,
7492 GEN_INT (6)),
7493 GEN_INT (64))));
7494 }
7495 else
7496 {
7497 if (!BYTES_BIG_ENDIAN)
7498 {
7499 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7500
7501 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7502 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7503 element2 = tmp_gpr;
7504 }
7505 else
7506 element2 = element;
7507
7508 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7509 }
7510
7511 /* Get the value into the lower byte of the Altivec register where VSLO
7512 expects it. */
7513 if (TARGET_P9_VECTOR)
7514 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7515 else if (can_create_pseudo_p ())
7516 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7517 else
7518 {
7519 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7520 emit_move_insn (tmp_di, tmp_gpr);
7521 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7522 }
7523
7524 /* Do the VSLO to get the value into the final location. */
7525 switch (mode)
7526 {
7527 case E_V2DFmode:
7528 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7529 return;
7530
7531 case E_V2DImode:
7532 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7533 return;
7534
7535 case E_V4SFmode:
7536 {
7537 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7538 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7539 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7540 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7541 tmp_altivec));
7542
7543 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7544 return;
7545 }
7546
7547 case E_V4SImode:
7548 case E_V8HImode:
7549 case E_V16QImode:
7550 {
7551 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7552 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7553 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7554 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7555 tmp_altivec));
7556 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7557 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7558 GEN_INT (64 - (8 * scalar_size))));
7559 return;
7560 }
7561
7562 default:
7563 gcc_unreachable ();
7564 }
7565
7566 return;
7567 }
7568 else
7569 gcc_unreachable ();
7570 }
7571
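/* A worked example of the little-endian adjustment above (illustrative
   only): extracting variable element 1 of a V4SImode vector computes
   3 - (1 & 3) = 2, then shifts by BIT_SHIFT = 5 to get 64, i.e. a 64-bit
   (8-byte) shift count placed where VSLO expects it, moving that word into
   the upper position.  */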
7572 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7573 two SImode values. */
7574
7575 static void
7576 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7577 {
7578 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7579
7580 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7581 {
7582 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7583 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7584
7585 emit_move_insn (dest, GEN_INT (const1 | const2));
7586 return;
7587 }
7588
7589 /* Put si1 into upper 32-bits of dest. */
7590 if (CONST_INT_P (si1))
7591 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7592 else
7593 {
7594 /* Generate RLDIC. */
7595 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7596 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7597 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7598 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7599 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7600 emit_insn (gen_rtx_SET (dest, and_rtx));
7601 }
7602
7603 /* Put si2 into the temporary. */
7604 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7605 if (CONST_INT_P (si2))
7606 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7607 else
7608 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7609
7610 /* Combine the two parts. */
7611 emit_insn (gen_iordi3 (dest, dest, tmp));
7612 return;
7613 }
7614
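/* A worked example (illustrative only): with SI1 = 0x12345678 and
   SI2 = 0x9abcdef0 both constant, the function emits a single move of
   (0x12345678 << 32) | 0x9abcdef0 = 0x123456789abcdef0 into DEST.  */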
7615 /* Split a V4SI initialization. */
7616
7617 void
7618 rs6000_split_v4si_init (rtx operands[])
7619 {
7620 rtx dest = operands[0];
7621
7622 /* Destination is a GPR, build up the two DImode parts in place. */
7623 if (REG_P (dest) || SUBREG_P (dest))
7624 {
7625 int d_regno = regno_or_subregno (dest);
7626 rtx scalar1 = operands[1];
7627 rtx scalar2 = operands[2];
7628 rtx scalar3 = operands[3];
7629 rtx scalar4 = operands[4];
7630 rtx tmp1 = operands[5];
7631 rtx tmp2 = operands[6];
7632
7633 /* Even though we only need one temporary (plus the destination, which
7634 has an early-clobber constraint), try to use two temporaries, one for
7635 each double word created. That way the second insn scheduling pass can
7636 rearrange things so the two parts are done in parallel. */
7637 if (BYTES_BIG_ENDIAN)
7638 {
7639 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7640 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7641 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7642 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7643 }
7644 else
7645 {
7646 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7647 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7648 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7649 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7650 }
7651 return;
7652 }
7653
7654 else
7655 gcc_unreachable ();
7656 }
7657
7658 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7659 selects whether the alignment is ABI-mandated, optional, or
7660 both ABI-mandated and optional alignment. */
7661
7662 unsigned int
7663 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7664 {
7665 if (how != align_opt)
7666 {
7667 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7668 align = 128;
7669 }
7670
7671 if (how != align_abi)
7672 {
7673 if (TREE_CODE (type) == ARRAY_TYPE
7674 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7675 {
7676 if (align < BITS_PER_WORD)
7677 align = BITS_PER_WORD;
7678 }
7679 }
7680
7681 return align;
7682 }
7683
7684 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7685 instructions simply ignore the low bits; VSX memory instructions
7686 are aligned to 4 or 8 bytes. */
7687
7688 static bool
7689 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7690 {
7691 return (STRICT_ALIGNMENT
7692 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7693 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7694 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7695 && (int) align < VECTOR_ALIGN (mode)))));
7696 }
7697
7698 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7699
7700 bool
7701 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7702 {
7703 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7704 {
7705 if (computed != 128)
7706 {
7707 static bool warned;
7708 if (!warned && warn_psabi)
7709 {
7710 warned = true;
7711 inform (input_location,
7712 "the layout of aggregates containing vectors with"
7713 " %d-byte alignment has changed in GCC 5",
7714 computed / BITS_PER_UNIT);
7715 }
7716 }
7717 /* In current GCC there is no special case. */
7718 return false;
7719 }
7720
7721 return false;
7722 }
7723
7724 /* AIX increases natural record alignment to doubleword if the first
7725 field is an FP double while the FP fields remain word aligned. */
7726
7727 unsigned int
7728 rs6000_special_round_type_align (tree type, unsigned int computed,
7729 unsigned int specified)
7730 {
7731 unsigned int align = MAX (computed, specified);
7732 tree field = TYPE_FIELDS (type);
7733
7734 /* Skip all non-field decls. */
7735 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7736 field = DECL_CHAIN (field);
7737
7738 if (field != NULL && field != type)
7739 {
7740 type = TREE_TYPE (field);
7741 while (TREE_CODE (type) == ARRAY_TYPE)
7742 type = TREE_TYPE (type);
7743
7744 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7745 align = MAX (align, 64);
7746 }
7747
7748 return align;
7749 }
7750
7751 /* Darwin increases record alignment to the natural alignment of
7752 the first field. */
7753
7754 unsigned int
7755 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7756 unsigned int specified)
7757 {
7758 unsigned int align = MAX (computed, specified);
7759
7760 if (TYPE_PACKED (type))
7761 return align;
7762
7763 /* Find the first field, looking down into aggregates. */
7764 do {
7765 tree field = TYPE_FIELDS (type);
7766 /* Skip all non-field decls. */
7767 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7768 field = DECL_CHAIN (field);
7769 if (! field)
7770 break;
7771 /* A packed field does not contribute any extra alignment. */
7772 if (DECL_PACKED (field))
7773 return align;
7774 type = TREE_TYPE (field);
7775 while (TREE_CODE (type) == ARRAY_TYPE)
7776 type = TREE_TYPE (type);
7777 } while (AGGREGATE_TYPE_P (type));
7778
7779 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7780 align = MAX (align, TYPE_ALIGN (type));
7781
7782 return align;
7783 }
7784
7785 /* Return 1 for an operand in small memory on V.4/eabi. */
7786
7787 int
7788 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7789 machine_mode mode ATTRIBUTE_UNUSED)
7790 {
7791 #if TARGET_ELF
7792 rtx sym_ref;
7793
7794 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7795 return 0;
7796
7797 if (DEFAULT_ABI != ABI_V4)
7798 return 0;
7799
7800 if (GET_CODE (op) == SYMBOL_REF)
7801 sym_ref = op;
7802
7803 else if (GET_CODE (op) != CONST
7804 || GET_CODE (XEXP (op, 0)) != PLUS
7805 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7806 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7807 return 0;
7808
7809 else
7810 {
7811 rtx sum = XEXP (op, 0);
7812 HOST_WIDE_INT summand;
7813
7814 /* We have to be careful here, because it is the referenced address
7815 that must be 32k from _SDA_BASE_, not just the symbol. */
7816 summand = INTVAL (XEXP (sum, 1));
7817 if (summand < 0 || summand > g_switch_value)
7818 return 0;
7819
7820 sym_ref = XEXP (sum, 0);
7821 }
7822
7823 return SYMBOL_REF_SMALL_P (sym_ref);
7824 #else
7825 return 0;
7826 #endif
7827 }
7828
7829 /* Return true if either operand is a general purpose register. */
7830
7831 bool
7832 gpr_or_gpr_p (rtx op0, rtx op1)
7833 {
7834 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7835 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7836 }
7837
7838 /* Return true if this is a move direct operation between GPR registers and
7839 floating point/VSX registers. */
7840
7841 bool
7842 direct_move_p (rtx op0, rtx op1)
7843 {
7844 int regno0, regno1;
7845
7846 if (!REG_P (op0) || !REG_P (op1))
7847 return false;
7848
7849 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7850 return false;
7851
7852 regno0 = REGNO (op0);
7853 regno1 = REGNO (op1);
7854 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7855 return false;
7856
7857 if (INT_REGNO_P (regno0))
7858 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7859
7860 else if (INT_REGNO_P (regno1))
7861 {
7862 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7863 return true;
7864
7865 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7866 return true;
7867 }
7868
7869 return false;
7870 }
7871
7872 /* Return true if the OFFSET is valid for the quad address instructions that
7873 use d-form (register + offset) addressing. */
7874
7875 static inline bool
7876 quad_address_offset_p (HOST_WIDE_INT offset)
7877 {
7878 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7879 }
7880
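/* For example (illustrative only): offsets 0, 16 and -32768 satisfy
   quad_address_offset_p, while 8 fails the 16-byte alignment test and
   32768 falls outside the signed 16-bit range.  */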
7881 /* Return true if ADDR is an acceptable address for a quad memory
7882 operation of mode MODE (either LQ/STQ for general purpose registers, or
7883 LXV/STXV for vector registers under ISA 3.0). If STRICT, require the
7884 base register to satisfy the strict (post-reload) validity rules. */
7886
7887 bool
7888 quad_address_p (rtx addr, machine_mode mode, bool strict)
7889 {
7890 rtx op0, op1;
7891
7892 if (GET_MODE_SIZE (mode) != 16)
7893 return false;
7894
7895 if (legitimate_indirect_address_p (addr, strict))
7896 return true;
7897
7898 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7899 return false;
7900
7901 if (GET_CODE (addr) != PLUS)
7902 return false;
7903
7904 op0 = XEXP (addr, 0);
7905 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7906 return false;
7907
7908 op1 = XEXP (addr, 1);
7909 if (!CONST_INT_P (op1))
7910 return false;
7911
7912 return quad_address_offset_p (INTVAL (op1));
7913 }
7914
7915 /* Return true if this is a load or store quad operation. This function does
7916 not handle the atomic quad memory instructions. */
7917
7918 bool
7919 quad_load_store_p (rtx op0, rtx op1)
7920 {
7921 bool ret;
7922
7923 if (!TARGET_QUAD_MEMORY)
7924 ret = false;
7925
7926 else if (REG_P (op0) && MEM_P (op1))
7927 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7928 && quad_memory_operand (op1, GET_MODE (op1))
7929 && !reg_overlap_mentioned_p (op0, op1));
7930
7931 else if (MEM_P (op0) && REG_P (op1))
7932 ret = (quad_memory_operand (op0, GET_MODE (op0))
7933 && quad_int_reg_operand (op1, GET_MODE (op1)));
7934
7935 else
7936 ret = false;
7937
7938 if (TARGET_DEBUG_ADDR)
7939 {
7940 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7941 ret ? "true" : "false");
7942 debug_rtx (gen_rtx_SET (op0, op1));
7943 }
7944
7945 return ret;
7946 }
7947
7948 /* Given an address, return a constant offset term if one exists. */
7949
7950 static rtx
7951 address_offset (rtx op)
7952 {
7953 if (GET_CODE (op) == PRE_INC
7954 || GET_CODE (op) == PRE_DEC)
7955 op = XEXP (op, 0);
7956 else if (GET_CODE (op) == PRE_MODIFY
7957 || GET_CODE (op) == LO_SUM)
7958 op = XEXP (op, 1);
7959
7960 if (GET_CODE (op) == CONST)
7961 op = XEXP (op, 0);
7962
7963 if (GET_CODE (op) == PLUS)
7964 op = XEXP (op, 1);
7965
7966 if (CONST_INT_P (op))
7967 return op;
7968
7969 return NULL_RTX;
7970 }
7971
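/* For example (illustrative only): (plus (reg 9) (const_int 16)) yields
   (const_int 16); a bare (reg 9) yields NULL_RTX; and a lo_sum whose
   constant operand is (const (plus (symbol_ref "x") (const_int 8))) yields
   (const_int 8).  */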
7972 /* Return true if the MEM operand is a memory operand suitable for use
7973 with a (full width, possibly multiple) gpr load/store. On
7974 powerpc64 this means the offset must be divisible by 4.
7975 Implements 'Y' constraint.
7976
7977 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7978 a constraint function we know the operand has satisfied a suitable
7979 memory predicate. Also accept some odd rtl generated by reload
7980 (see rs6000_legitimize_reload_address for various forms). It is
7981 important that reload rtl be accepted by appropriate constraints
7982 but not by the operand predicate.
7983
7984 Offsetting a lo_sum should not be allowed, except where we know by
7985 alignment that a 32k boundary is not crossed, but see the ???
7986 comment in rs6000_legitimize_reload_address. Note that by
7987 "offsetting" here we mean a further offset to access parts of the
7988 MEM. It's fine to have a lo_sum where the inner address is offset
7989 from a sym, since the same sym+offset will appear in the high part
7990 of the address calculation. */
7991
7992 bool
7993 mem_operand_gpr (rtx op, machine_mode mode)
7994 {
7995 unsigned HOST_WIDE_INT offset;
7996 int extra;
7997 rtx addr = XEXP (op, 0);
7998
7999 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
8000 if (TARGET_UPDATE
8001 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
8002 && mode_supports_pre_incdec_p (mode)
8003 && legitimate_indirect_address_p (XEXP (addr, 0), false))
8004 return true;
8005
8006 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
8007 if (!rs6000_offsettable_memref_p (op, mode, false))
8008 return false;
8009
8010 op = address_offset (addr);
8011 if (op == NULL_RTX)
8012 return true;
8013
8014 offset = INTVAL (op);
8015 if (TARGET_POWERPC64 && (offset & 3) != 0)
8016 return false;
8017
8018 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8019 if (extra < 0)
8020 extra = 0;
8021
8022 if (GET_CODE (addr) == LO_SUM)
8023 /* For lo_sum addresses, we must allow any offset except one that
8024 causes a wrap, so test only the low 16 bits. */
8025 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8026
8027 return offset + 0x8000 < 0x10000u - extra;
8028 }
8029
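/* A worked example of the final test above (illustrative only): for TImode
   on powerpc64, EXTRA = 8, so offset 32752 is accepted (65520 < 65528) but
   offset 32760 is rejected, because the access to the second doubleword at
   offset+8 would overflow the 16-bit displacement.  */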
8030 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8031 enforce an offset divisible by 4 even for 32-bit. */
8032
8033 bool
8034 mem_operand_ds_form (rtx op, machine_mode mode)
8035 {
8036 unsigned HOST_WIDE_INT offset;
8037 int extra;
8038 rtx addr = XEXP (op, 0);
8039
8040 if (!offsettable_address_p (false, mode, addr))
8041 return false;
8042
8043 op = address_offset (addr);
8044 if (op == NULL_RTX)
8045 return true;
8046
8047 offset = INTVAL (op);
8048 if ((offset & 3) != 0)
8049 return false;
8050
8051 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8052 if (extra < 0)
8053 extra = 0;
8054
8055 if (GET_CODE (addr) == LO_SUM)
8056 /* For lo_sum addresses, we must allow any offset except one that
8057 causes a wrap, so test only the low 16 bits. */
8058 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8059
8060 return offset + 0x8000 < 0x10000u - extra;
8061 }
8062 \f
8063 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8064
8065 static bool
8066 reg_offset_addressing_ok_p (machine_mode mode)
8067 {
8068 switch (mode)
8069 {
8070 case E_V16QImode:
8071 case E_V8HImode:
8072 case E_V4SFmode:
8073 case E_V4SImode:
8074 case E_V2DFmode:
8075 case E_V2DImode:
8076 case E_V1TImode:
8077 case E_TImode:
8078 case E_TFmode:
8079 case E_KFmode:
8080 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8081 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8082 a vector mode, if we want to use the VSX registers to move it around,
8083 we need to restrict ourselves to reg+reg addressing. Similarly for
8084 IEEE 128-bit floating point that is passed in a single vector
8085 register. */
8086 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8087 return mode_supports_dq_form (mode);
8088 break;
8089
8090 case E_SDmode:
8091 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8092 addressing for the LFIWZX and STFIWX instructions. */
8093 if (TARGET_NO_SDMODE_STACK)
8094 return false;
8095 break;
8096
8097 default:
8098 break;
8099 }
8100
8101 return true;
8102 }
8103
8104 static bool
8105 virtual_stack_registers_memory_p (rtx op)
8106 {
8107 int regnum;
8108
8109 if (GET_CODE (op) == REG)
8110 regnum = REGNO (op);
8111
8112 else if (GET_CODE (op) == PLUS
8113 && GET_CODE (XEXP (op, 0)) == REG
8114 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8115 regnum = REGNO (XEXP (op, 0));
8116
8117 else
8118 return false;
8119
8120 return (regnum >= FIRST_VIRTUAL_REGISTER
8121 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8122 }
8123
8124 /* Return true if a MODE-sized memory access to OP plus OFFSET
8125 is known to not straddle a 32k boundary. This function is used
8126 to determine whether -mcmodel=medium code can use TOC pointer
8127 relative addressing for OP. This means the alignment of the TOC
8128 pointer must also be taken into account, and unfortunately that is
8129 only 8 bytes. */
8130
8131 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8132 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8133 #endif
8134
8135 static bool
8136 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8137 machine_mode mode)
8138 {
8139 tree decl;
8140 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8141
8142 if (GET_CODE (op) != SYMBOL_REF)
8143 return false;
8144
8145 /* ISA 3.0 vector d-form addressing is restricted, don't allow
8146 SYMBOL_REF. */
8147 if (mode_supports_dq_form (mode))
8148 return false;
8149
8150 dsize = GET_MODE_SIZE (mode);
8151 decl = SYMBOL_REF_DECL (op);
8152 if (!decl)
8153 {
8154 if (dsize == 0)
8155 return false;
8156
8157 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8158 replacing memory addresses with an anchor plus offset. We
8159 could find the decl by rummaging around in the block->objects
8160 VEC for the given offset but that seems like too much work. */
8161 dalign = BITS_PER_UNIT;
8162 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8163 && SYMBOL_REF_ANCHOR_P (op)
8164 && SYMBOL_REF_BLOCK (op) != NULL)
8165 {
8166 struct object_block *block = SYMBOL_REF_BLOCK (op);
8167
8168 dalign = block->alignment;
8169 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8170 }
8171 else if (CONSTANT_POOL_ADDRESS_P (op))
8172 {
8173 /* It would be nice to have get_pool_align()... */
8174 machine_mode cmode = get_pool_mode (op);
8175
8176 dalign = GET_MODE_ALIGNMENT (cmode);
8177 }
8178 }
8179 else if (DECL_P (decl))
8180 {
8181 dalign = DECL_ALIGN (decl);
8182
8183 if (dsize == 0)
8184 {
8185 /* Allow BLKmode when the entire object is known to not
8186 cross a 32k boundary. */
8187 if (!DECL_SIZE_UNIT (decl))
8188 return false;
8189
8190 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8191 return false;
8192
8193 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8194 if (dsize > 32768)
8195 return false;
8196
8197 dalign /= BITS_PER_UNIT;
8198 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8199 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8200 return dalign >= dsize;
8201 }
8202 }
8203 else
8204 gcc_unreachable ();
8205
8206 /* Find how many bits of the alignment we know for this access. */
8207 dalign /= BITS_PER_UNIT;
8208 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8209 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8210 mask = dalign - 1;
8211 lsb = offset & -offset;
8212 mask &= lsb - 1;
8213 dalign = mask + 1;
8214
8215 return dalign >= dsize;
8216 }
8217
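/* A worked example of the bit twiddling above (illustrative only): with
   DALIGN capped at 8 (the TOC pointer alignment) and OFFSET = 20, LSB is
   20 & -20 = 4 and MASK becomes 7 & 3 = 3, so the known alignment of the
   access is 4 bytes: a 4-byte DSIZE passes, but an 8-byte DSIZE does
   not.  */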
8218 static bool
8219 constant_pool_expr_p (rtx op)
8220 {
8221 rtx base, offset;
8222
8223 split_const (op, &base, &offset);
8224 return (GET_CODE (base) == SYMBOL_REF
8225 && CONSTANT_POOL_ADDRESS_P (base)
8226 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8227 }
8228
8229 /* These are only used to pass through from print_operand/print_operand_address
8230 to rs6000_output_addr_const_extra over the intervening function
8231 output_addr_const, which is not target code. */
8232 static const_rtx tocrel_base_oac, tocrel_offset_oac;
8233
8234 /* Return true if OP is a toc pointer relative address (the output
8235 of create_TOC_reference). If STRICT, do not match non-split
8236 -mcmodel=large/medium toc pointer relative addresses. If the pointers
8237 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
8238 TOCREL_OFFSET_RET respectively. */
8239
8240 bool
8241 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
8242 const_rtx *tocrel_offset_ret)
8243 {
8244 if (!TARGET_TOC)
8245 return false;
8246
8247 if (TARGET_CMODEL != CMODEL_SMALL)
8248 {
8249 /* When strict ensure we have everything tidy. */
8250 if (strict
8251 && !(GET_CODE (op) == LO_SUM
8252 && REG_P (XEXP (op, 0))
8253 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8254 return false;
8255
8256 /* When not strict, allow non-split TOC addresses and also allow
8257 (lo_sum (high ..)) TOC addresses created during reload. */
8258 if (GET_CODE (op) == LO_SUM)
8259 op = XEXP (op, 1);
8260 }
8261
8262 const_rtx tocrel_base = op;
8263 const_rtx tocrel_offset = const0_rtx;
8264
8265 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8266 {
8267 tocrel_base = XEXP (op, 0);
8268 tocrel_offset = XEXP (op, 1);
8269 }
8270
8271 if (tocrel_base_ret)
8272 *tocrel_base_ret = tocrel_base;
8273 if (tocrel_offset_ret)
8274 *tocrel_offset_ret = tocrel_offset;
8275
8276 return (GET_CODE (tocrel_base) == UNSPEC
8277 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8278 }
8279
8280 /* Return true if X is a constant pool address, and also for cmodel=medium
8281 if X is a toc-relative address known to be offsettable within MODE. */
8282
8283 bool
8284 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8285 bool strict)
8286 {
8287 const_rtx tocrel_base, tocrel_offset;
8288 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
8289 && (TARGET_CMODEL != CMODEL_MEDIUM
8290 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8291 || mode == QImode
8292 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8293 INTVAL (tocrel_offset), mode)));
8294 }
8295
8296 static bool
8297 legitimate_small_data_p (machine_mode mode, rtx x)
8298 {
8299 return (DEFAULT_ABI == ABI_V4
8300 && !flag_pic && !TARGET_TOC
8301 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8302 && small_data_operand (x, mode));
8303 }
8304
8305 bool
8306 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8307 bool strict, bool worst_case)
8308 {
8309 unsigned HOST_WIDE_INT offset;
8310 unsigned int extra;
8311
8312 if (GET_CODE (x) != PLUS)
8313 return false;
8314 if (!REG_P (XEXP (x, 0)))
8315 return false;
8316 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8317 return false;
8318 if (mode_supports_dq_form (mode))
8319 return quad_address_p (x, mode, strict);
8320 if (!reg_offset_addressing_ok_p (mode))
8321 return virtual_stack_registers_memory_p (x);
8322 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8323 return true;
8324 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8325 return false;
8326
8327 offset = INTVAL (XEXP (x, 1));
8328 extra = 0;
8329 switch (mode)
8330 {
8331 case E_DFmode:
8332 case E_DDmode:
8333 case E_DImode:
8334 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8335 addressing. */
8336 if (VECTOR_MEM_VSX_P (mode))
8337 return false;
8338
8339 if (!worst_case)
8340 break;
8341 if (!TARGET_POWERPC64)
8342 extra = 4;
8343 else if (offset & 3)
8344 return false;
8345 break;
8346
8347 case E_TFmode:
8348 case E_IFmode:
8349 case E_KFmode:
8350 case E_TDmode:
8351 case E_TImode:
8352 case E_PTImode:
8353 extra = 8;
8354 if (!worst_case)
8355 break;
8356 if (!TARGET_POWERPC64)
8357 extra = 12;
8358 else if (offset & 3)
8359 return false;
8360 break;
8361
8362 default:
8363 break;
8364 }
8365
8366 offset += 0x8000;
8367 return offset < 0x10000 - extra;
8368 }
8369
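/* A worked example of the final range test above (illustrative only): for
   DImode in GPRs on 32-bit with WORST_CASE, EXTRA = 4 because the value
   takes two word loads; offset 32760 passes (65528 < 65532) while 32764
   does not, since the second word at offset+4 would be unreachable.  */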
8370 bool
8371 legitimate_indexed_address_p (rtx x, int strict)
8372 {
8373 rtx op0, op1;
8374
8375 if (GET_CODE (x) != PLUS)
8376 return false;
8377
8378 op0 = XEXP (x, 0);
8379 op1 = XEXP (x, 1);
8380
8381 return (REG_P (op0) && REG_P (op1)
8382 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8383 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8384 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8385 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8386 }
8387
8388 bool
8389 avoiding_indexed_address_p (machine_mode mode)
8390 {
8391 /* Avoid indexed addressing for modes that have non-indexed
8392 load/store instruction forms. */
8393 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8394 }
8395
8396 bool
8397 legitimate_indirect_address_p (rtx x, int strict)
8398 {
8399 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8400 }
8401
8402 bool
8403 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8404 {
8405 if (!TARGET_MACHO || !flag_pic
8406 || mode != SImode || GET_CODE (x) != MEM)
8407 return false;
8408 x = XEXP (x, 0);
8409
8410 if (GET_CODE (x) != LO_SUM)
8411 return false;
8412 if (GET_CODE (XEXP (x, 0)) != REG)
8413 return false;
8414 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8415 return false;
8416 x = XEXP (x, 1);
8417
8418 return CONSTANT_P (x);
8419 }
8420
8421 static bool
8422 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8423 {
8424 if (GET_CODE (x) != LO_SUM)
8425 return false;
8426 if (GET_CODE (XEXP (x, 0)) != REG)
8427 return false;
8428 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8429 return false;
8430 /* quad word addresses are restricted, and we can't use LO_SUM. */
8431 if (mode_supports_dq_form (mode))
8432 return false;
8433 x = XEXP (x, 1);
8434
8435 if (TARGET_ELF || TARGET_MACHO)
8436 {
8437 bool large_toc_ok;
8438
8439 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8440 return false;
8441 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as it usually calls
8442 push_reload from the reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8443 recognizes some LO_SUM addresses as valid although this
8444 function says the opposite. In most cases LRA can generate
8445 correct code for address reloads through its own transformations;
8446 it is only some LO_SUM cases that it cannot manage. So we need
8447 code here, analogous to that in rs6000_legitimize_reload_address
8448 for LO_SUM, saying that some addresses are still valid. */
8449 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8450 && small_toc_ref (x, VOIDmode));
8451 if (TARGET_TOC && ! large_toc_ok)
8452 return false;
8453 if (GET_MODE_NUNITS (mode) != 1)
8454 return false;
8455 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8456 && !(/* ??? Assume floating point reg based on mode? */
8457 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8458 return false;
8459
8460 return CONSTANT_P (x) || large_toc_ok;
8461 }
8462
8463 return false;
8464 }
8465
8466
8467 /* Try machine-dependent ways of modifying an illegitimate address
8468 to be legitimate. If we find one, return the new, valid address.
8469 This is used from only one place: `memory_address' in explow.c.
8470
8471 OLDX is the address as it was before break_out_memory_refs was
8472 called. In some cases it is useful to look at this to decide what
8473 needs to be done.
8474
8475 It is always safe for this function to do nothing. It exists to
8476 recognize opportunities to optimize the output.
8477
8478 On RS/6000, first check for the sum of a register with a constant
8479 integer that is out of range. If so, generate code to add the
8480 constant with the low-order 16 bits masked to the register and force
8481 this result into another register (this can be done with `cau').
8482 Then generate an address of REG+(CONST&0xffff), allowing for the
8483 possibility of bit 16 being a one.
8484
8485 Then check for the sum of a register and something not constant, try to
8486 load the other things into a register and return the sum. */
8487
8488 static rtx
8489 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8490 machine_mode mode)
8491 {
8492 unsigned int extra;
8493
8494 if (!reg_offset_addressing_ok_p (mode)
8495 || mode_supports_dq_form (mode))
8496 {
8497 if (virtual_stack_registers_memory_p (x))
8498 return x;
8499
8500 /* In theory we should not be seeing addresses of the form reg+0,
8501 but just in case it is generated, optimize it away. */
8502 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8503 return force_reg (Pmode, XEXP (x, 0));
8504
8505 /* For TImode with load/store quad, restrict addresses to just a single
8506 pointer, so it works with both GPRs and VSX registers. */
8507 /* Make sure both operands are registers. */
8508 else if (GET_CODE (x) == PLUS
8509 && (mode != TImode || !TARGET_VSX))
8510 return gen_rtx_PLUS (Pmode,
8511 force_reg (Pmode, XEXP (x, 0)),
8512 force_reg (Pmode, XEXP (x, 1)));
8513 else
8514 return force_reg (Pmode, x);
8515 }
8516 if (GET_CODE (x) == SYMBOL_REF)
8517 {
8518 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8519 if (model != 0)
8520 return rs6000_legitimize_tls_address (x, model);
8521 }
8522
8523 extra = 0;
8524 switch (mode)
8525 {
8526 case E_TFmode:
8527 case E_TDmode:
8528 case E_TImode:
8529 case E_PTImode:
8530 case E_IFmode:
8531 case E_KFmode:
8532 /* As in legitimate_offset_address_p we do not assume
8533 worst-case. The mode here is just a hint as to the registers
8534 used. A TImode is usually in gprs, but may actually be in
8535 fprs. Leave worst-case scenario for reload to handle via
8536 insn constraints. PTImode is only GPRs. */
8537 extra = 8;
8538 break;
8539 default:
8540 break;
8541 }
8542
8543 if (GET_CODE (x) == PLUS
8544 && GET_CODE (XEXP (x, 0)) == REG
8545 && GET_CODE (XEXP (x, 1)) == CONST_INT
8546 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8547 >= 0x10000 - extra))
8548 {
8549 HOST_WIDE_INT high_int, low_int;
8550 rtx sum;
8551 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8552 if (low_int >= 0x8000 - extra)
8553 low_int = 0;
8554 high_int = INTVAL (XEXP (x, 1)) - low_int;
8555 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8556 GEN_INT (high_int)), 0);
8557 return plus_constant (Pmode, sum, low_int);
8558 }
8559 else if (GET_CODE (x) == PLUS
8560 && GET_CODE (XEXP (x, 0)) == REG
8561 && GET_CODE (XEXP (x, 1)) != CONST_INT
8562 && GET_MODE_NUNITS (mode) == 1
8563 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8564 || (/* ??? Assume floating point reg based on mode? */
8565 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8566 && !avoiding_indexed_address_p (mode))
8567 {
8568 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8569 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8570 }
8571 else if ((TARGET_ELF
8572 #if TARGET_MACHO
8573 || !MACHO_DYNAMIC_NO_PIC_P
8574 #endif
8575 )
8576 && TARGET_32BIT
8577 && TARGET_NO_TOC
8578 && ! flag_pic
8579 && GET_CODE (x) != CONST_INT
8580 && GET_CODE (x) != CONST_WIDE_INT
8581 && GET_CODE (x) != CONST_DOUBLE
8582 && CONSTANT_P (x)
8583 && GET_MODE_NUNITS (mode) == 1
8584 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8585 || (/* ??? Assume floating point reg based on mode? */
8586 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8587 {
8588 rtx reg = gen_reg_rtx (Pmode);
8589 if (TARGET_ELF)
8590 emit_insn (gen_elf_high (reg, x));
8591 else
8592 emit_insn (gen_macho_high (reg, x));
8593 return gen_rtx_LO_SUM (Pmode, reg, x);
8594 }
8595 else if (TARGET_TOC
8596 && GET_CODE (x) == SYMBOL_REF
8597 && constant_pool_expr_p (x)
8598 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8599 return create_TOC_reference (x, NULL_RTX);
8600 else
8601 return x;
8602 }
8603
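/* A worked example of the high/low split above (illustrative only):
   legitimizing (plus r3 0x12345) computes LOW_INT = 0x2345 and
   HIGH_INT = 0x10000, forces r3 + 0x10000 into a new register (typically
   one ADDIS), and returns that register plus 0x2345 as a valid D-FORM
   address.  */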
8604 /* Debug version of rs6000_legitimize_address. */
8605 static rtx
8606 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8607 {
8608 rtx ret;
8609 rtx_insn *insns;
8610
8611 start_sequence ();
8612 ret = rs6000_legitimize_address (x, oldx, mode);
8613 insns = get_insns ();
8614 end_sequence ();
8615
8616 if (ret != x)
8617 {
8618 fprintf (stderr,
8619 "\nrs6000_legitimize_address: mode %s, old code %s, "
8620 "new code %s, modified\n",
8621 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8622 GET_RTX_NAME (GET_CODE (ret)));
8623
8624 fprintf (stderr, "Original address:\n");
8625 debug_rtx (x);
8626
8627 fprintf (stderr, "oldx:\n");
8628 debug_rtx (oldx);
8629
8630 fprintf (stderr, "New address:\n");
8631 debug_rtx (ret);
8632
8633 if (insns)
8634 {
8635 fprintf (stderr, "Insns added:\n");
8636 debug_rtx_list (insns, 20);
8637 }
8638 }
8639 else
8640 {
8641 fprintf (stderr,
8642 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8643 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8644
8645 debug_rtx (x);
8646 }
8647
8648 if (insns)
8649 emit_insn (insns);
8650
8651 return ret;
8652 }
8653
8654 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8655 We need to emit DTP-relative relocations. */
8656
8657 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8658 static void
8659 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8660 {
8661 switch (size)
8662 {
8663 case 4:
8664 fputs ("\t.long\t", file);
8665 break;
8666 case 8:
8667 fputs (DOUBLE_INT_ASM_OP, file);
8668 break;
8669 default:
8670 gcc_unreachable ();
8671 }
8672 output_addr_const (file, x);
8673 if (TARGET_ELF)
8674 fputs ("@dtprel+0x8000", file);
8675 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8676 {
8677 switch (SYMBOL_REF_TLS_MODEL (x))
8678 {
8679 case 0:
8680 break;
8681 case TLS_MODEL_LOCAL_EXEC:
8682 fputs ("@le", file);
8683 break;
8684 case TLS_MODEL_INITIAL_EXEC:
8685 fputs ("@ie", file);
8686 break;
8687 case TLS_MODEL_GLOBAL_DYNAMIC:
8688 case TLS_MODEL_LOCAL_DYNAMIC:
8689 fputs ("@m", file);
8690 break;
8691 default:
8692 gcc_unreachable ();
8693 }
8694 }
8695 }
8696
8697 /* Return true if X is a symbol that refers to real (rather than emulated)
8698 TLS. */
8699
8700 static bool
8701 rs6000_real_tls_symbol_ref_p (rtx x)
8702 {
8703 return (GET_CODE (x) == SYMBOL_REF
8704 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8705 }
8706
8707 /* In the name of slightly smaller debug output, and to cater to
8708 general assembler lossage, recognize various UNSPEC sequences
8709 and turn them back into a direct symbol reference. */
8710
8711 static rtx
8712 rs6000_delegitimize_address (rtx orig_x)
8713 {
8714 rtx x, y, offset;
8715
8716 orig_x = delegitimize_mem_from_attrs (orig_x);
8717 x = orig_x;
8718 if (MEM_P (x))
8719 x = XEXP (x, 0);
8720
8721 y = x;
8722 if (TARGET_CMODEL != CMODEL_SMALL
8723 && GET_CODE (y) == LO_SUM)
8724 y = XEXP (y, 1);
8725
8726 offset = NULL_RTX;
8727 if (GET_CODE (y) == PLUS
8728 && GET_MODE (y) == Pmode
8729 && CONST_INT_P (XEXP (y, 1)))
8730 {
8731 offset = XEXP (y, 1);
8732 y = XEXP (y, 0);
8733 }
8734
8735 if (GET_CODE (y) == UNSPEC
8736 && XINT (y, 1) == UNSPEC_TOCREL)
8737 {
8738 y = XVECEXP (y, 0, 0);
8739
8740 #ifdef HAVE_AS_TLS
8741 /* Do not associate thread-local symbols with the original
8742 constant pool symbol. */
8743 if (TARGET_XCOFF
8744 && GET_CODE (y) == SYMBOL_REF
8745 && CONSTANT_POOL_ADDRESS_P (y)
8746 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8747 return orig_x;
8748 #endif
8749
8750 if (offset != NULL_RTX)
8751 y = gen_rtx_PLUS (Pmode, y, offset);
8752 if (!MEM_P (orig_x))
8753 return y;
8754 else
8755 return replace_equiv_address_nv (orig_x, y);
8756 }
8757
8758 if (TARGET_MACHO
8759 && GET_CODE (orig_x) == LO_SUM
8760 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8761 {
8762 y = XEXP (XEXP (orig_x, 1), 0);
8763 if (GET_CODE (y) == UNSPEC
8764 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8765 return XVECEXP (y, 0, 0);
8766 }
8767
8768 return orig_x;
8769 }
8770
8771 /* Return true if X shouldn't be emitted into the debug info.
8772 The linker doesn't like .toc section references from
8773 .debug_* sections, so reject .toc section symbols. */
8774
8775 static bool
8776 rs6000_const_not_ok_for_debug_p (rtx x)
8777 {
8778 if (GET_CODE (x) == UNSPEC)
8779 return true;
8780 if (GET_CODE (x) == SYMBOL_REF
8781 && CONSTANT_POOL_ADDRESS_P (x))
8782 {
8783 rtx c = get_pool_constant (x);
8784 machine_mode cmode = get_pool_mode (x);
8785 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8786 return true;
8787 }
8788
8789 return false;
8790 }
8791
8792
8793 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8794
8795 static bool
8796 rs6000_legitimate_combined_insn (rtx_insn *insn)
8797 {
8798 int icode = INSN_CODE (insn);
8799
8800 /* Reject creating doloop insns. Combine should not be allowed
8801 to create these for a number of reasons:
8802 1) In a nested loop, if combine creates one of these in an
8803 outer loop and the register allocator happens to allocate ctr
8804 to the outer loop insn, then the inner loop can't use ctr.
8805 Inner loops ought to be more highly optimized.
8806 2) Combine often wants to create one of these from what was
8807 originally a three insn sequence, first combining the three
8808 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8809 allocated ctr, the splitter takes us back to the three insn
8810 sequence. It's better to stop combine at the two insn
8811 sequence.
8812 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8813 insns, the register allocator sometimes uses floating point
8814 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8815 jump insn and output reloads are not implemented for jumps,
8816 the ctrsi/ctrdi splitters need to handle all possible cases.
8817 That's a pain, and it gets to be seriously difficult when a
8818 splitter that runs after reload needs memory to transfer from
8819 a gpr to an fpr. See PR70098 and PR71763, which are not fixed
8820 for the difficult case. It's better to not create problems
8821 in the first place. */
8822 if (icode != CODE_FOR_nothing
8823 && (icode == CODE_FOR_bdz_si
8824 || icode == CODE_FOR_bdz_di
8825 || icode == CODE_FOR_bdnz_si
8826 || icode == CODE_FOR_bdnz_di
8827 || icode == CODE_FOR_bdztf_si
8828 || icode == CODE_FOR_bdztf_di
8829 || icode == CODE_FOR_bdnztf_si
8830 || icode == CODE_FOR_bdnztf_di))
8831 return false;
8832
8833 return true;
8834 }
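/* Note (a sketch, not from the original source): like the other target
   hooks in this file, the function above takes effect through the macro
   table that builds TARGET_INITIALIZER, roughly:

       #undef TARGET_LEGITIMATE_COMBINED_INSN
       #define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn
*/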
8835
8836 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8837
8838 static GTY(()) rtx rs6000_tls_symbol;
8839 static rtx
8840 rs6000_tls_get_addr (void)
8841 {
8842 if (!rs6000_tls_symbol)
8843 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8844
8845 return rs6000_tls_symbol;
8846 }
8847
8848 /* Construct the SYMBOL_REF for TLS GOT references. */
8849
8850 static GTY(()) rtx rs6000_got_symbol;
8851 static rtx
8852 rs6000_got_sym (void)
8853 {
8854 if (!rs6000_got_symbol)
8855 {
8856 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8857 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8858 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8859 }
8860
8861 return rs6000_got_symbol;
8862 }
8863
8864 /* AIX Thread-Local Address support. */
8865
8866 static rtx
8867 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8868 {
8869 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8870 const char *name;
8871 char *tlsname;
8872
8873 name = XSTR (addr, 0);
8874 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8875 or will be placed in the TLS private data section. */
8876 if (name[strlen (name) - 1] != ']'
8877 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8878 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8879 {
8880 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8881 strcpy (tlsname, name);
8882 strcat (tlsname,
8883 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8884 tlsaddr = copy_rtx (addr);
8885 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8886 }
8887 else
8888 tlsaddr = addr;
8889
8890 /* Place addr into TOC constant pool. */
8891 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8892
8893 /* Output the TOC entry and create the MEM referencing the value. */
8894 if (constant_pool_expr_p (XEXP (sym, 0))
8895 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8896 {
8897 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8898 mem = gen_const_mem (Pmode, tocref);
8899 set_mem_alias_set (mem, get_TOC_alias_set ());
8900 }
8901 else
8902 return sym;
8903
8904 /* Use global-dynamic for local-dynamic. */
8905 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8906 || model == TLS_MODEL_LOCAL_DYNAMIC)
8907 {
8908 /* Create new TOC reference for @m symbol. */
8909 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8910 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8911 strcpy (tlsname, "*LCM");
8912 strcat (tlsname, name + 3);
8913 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8914 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8915 tocref = create_TOC_reference (modaddr, NULL_RTX);
8916 rtx modmem = gen_const_mem (Pmode, tocref);
8917 set_mem_alias_set (modmem, get_TOC_alias_set ());
8918
8919 rtx modreg = gen_reg_rtx (Pmode);
8920 emit_insn (gen_rtx_SET (modreg, modmem));
8921
8922 tmpreg = gen_reg_rtx (Pmode);
8923 emit_insn (gen_rtx_SET (tmpreg, mem));
8924
8925 dest = gen_reg_rtx (Pmode);
8926 if (TARGET_32BIT)
8927 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8928 else
8929 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8930 return dest;
8931 }
8932 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8933 else if (TARGET_32BIT)
8934 {
8935 tlsreg = gen_reg_rtx (SImode);
8936 emit_insn (gen_tls_get_tpointer (tlsreg));
8937 }
8938 else
8939 tlsreg = gen_rtx_REG (DImode, 13);
8940
8941 /* Load the TOC value into temporary register. */
8942 tmpreg = gen_reg_rtx (Pmode);
8943 emit_insn (gen_rtx_SET (tmpreg, mem));
8944 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8945 gen_rtx_MINUS (Pmode, addr, tlsreg));
8946
8947 /* Add TOC symbol value to TLS pointer. */
8948 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8949
8950 return dest;
8951 }
8952
8953 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8954 this (thread-local) address. */
8955
8956 static rtx
8957 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8958 {
8959 rtx dest, insn;
8960
8961 if (TARGET_XCOFF)
8962 return rs6000_legitimize_tls_address_aix (addr, model);
8963
8964 dest = gen_reg_rtx (Pmode);
8965 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8966 {
8967 rtx tlsreg;
8968
8969 if (TARGET_64BIT)
8970 {
8971 tlsreg = gen_rtx_REG (Pmode, 13);
8972 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8973 }
8974 else
8975 {
8976 tlsreg = gen_rtx_REG (Pmode, 2);
8977 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8978 }
8979 emit_insn (insn);
8980 }
8981 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8982 {
8983 rtx tlsreg, tmp;
8984
8985 tmp = gen_reg_rtx (Pmode);
8986 if (TARGET_64BIT)
8987 {
8988 tlsreg = gen_rtx_REG (Pmode, 13);
8989 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8990 }
8991 else
8992 {
8993 tlsreg = gen_rtx_REG (Pmode, 2);
8994 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8995 }
8996 emit_insn (insn);
8997 if (TARGET_64BIT)
8998 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8999 else
9000 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9001 emit_insn (insn);
9002 }
9003 else
9004 {
9005 rtx r3, got, tga, tmp1, tmp2, call_insn;
9006
9007 /* We currently use relocations like @got@tlsgd for tls, which
9008 means the linker will handle allocation of tls entries, placing
9009 them in the .got section. So use a pointer to the .got section,
9010 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9011 or to secondary GOT sections used by 32-bit -fPIC. */
9012 if (TARGET_64BIT)
9013 got = gen_rtx_REG (Pmode, 2);
9014 else
9015 {
9016 if (flag_pic == 1)
9017 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9018 else
9019 {
9020 rtx gsym = rs6000_got_sym ();
9021 got = gen_reg_rtx (Pmode);
9022 if (flag_pic == 0)
9023 rs6000_emit_move (got, gsym, Pmode);
9024 else
9025 {
9026 rtx mem, lab;
9027
9028 tmp1 = gen_reg_rtx (Pmode);
9029 tmp2 = gen_reg_rtx (Pmode);
9030 mem = gen_const_mem (Pmode, tmp1);
9031 lab = gen_label_rtx ();
9032 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9033 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9034 if (TARGET_LINK_STACK)
9035 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9036 emit_move_insn (tmp2, mem);
9037 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9038 set_unique_reg_note (last, REG_EQUAL, gsym);
9039 }
9040 }
9041 }
9042
9043 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9044 {
9045 tga = rs6000_tls_get_addr ();
9046 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9047 const0_rtx, Pmode);
9048
9049 r3 = gen_rtx_REG (Pmode, 3);
9050 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9051 {
9052 if (TARGET_64BIT)
9053 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9054 else
9055 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9056 }
9057 else if (DEFAULT_ABI == ABI_V4)
9058 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9059 else
9060 gcc_unreachable ();
9061 call_insn = last_call_insn ();
9062 PATTERN (call_insn) = insn;
9063 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9064 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9065 pic_offset_table_rtx);
9066 }
9067 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9068 {
9069 tga = rs6000_tls_get_addr ();
9070 tmp1 = gen_reg_rtx (Pmode);
9071 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9072 const0_rtx, Pmode);
9073
9074 r3 = gen_rtx_REG (Pmode, 3);
9075 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9076 {
9077 if (TARGET_64BIT)
9078 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9079 else
9080 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9081 }
9082 else if (DEFAULT_ABI == ABI_V4)
9083 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9084 else
9085 gcc_unreachable ();
9086 call_insn = last_call_insn ();
9087 PATTERN (call_insn) = insn;
9088 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9089 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9090 pic_offset_table_rtx);
9091
9092 if (rs6000_tls_size == 16)
9093 {
9094 if (TARGET_64BIT)
9095 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9096 else
9097 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9098 }
9099 else if (rs6000_tls_size == 32)
9100 {
9101 tmp2 = gen_reg_rtx (Pmode);
9102 if (TARGET_64BIT)
9103 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9104 else
9105 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9106 emit_insn (insn);
9107 if (TARGET_64BIT)
9108 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9109 else
9110 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9111 }
9112 else
9113 {
9114 tmp2 = gen_reg_rtx (Pmode);
9115 if (TARGET_64BIT)
9116 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9117 else
9118 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9119 emit_insn (insn);
9120 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9121 }
9122 emit_insn (insn);
9123 }
9124 else
9125 {
9126 /* IE, or 64-bit offset LE. */
9127 tmp2 = gen_reg_rtx (Pmode);
9128 if (TARGET_64BIT)
9129 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9130 else
9131 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9132 emit_insn (insn);
9133 if (TARGET_64BIT)
9134 insn = gen_tls_tls_64 (dest, tmp2, addr);
9135 else
9136 insn = gen_tls_tls_32 (dest, tmp2, addr);
9137 emit_insn (insn);
9138 }
9139 }
9140
9141 return dest;
9142 }
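/* Illustrative note, not from the original source: on 64-bit ELF the
   cases above correspond roughly to the following sequences for a
   hypothetical thread-local symbol "x" (see the PowerPC ELF TLS ABI
   for the authoritative forms):

     local-exec, -mtls-size=16:  addi  rD,r13,x@tprel
     local-exec, -mtls-size=32:  addis rT,r13,x@tprel@ha
                                 addi  rD,rT,x@tprel@l
     initial-exec:               ld    rT,x@got@tprel(r2)
                                 add   rD,rT,x@tls
     global-dynamic:             addi  r3,r2,x@got@tlsgd
                                 bl    __tls_get_addr(x@tlsgd)  */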
9143
9144 /* Only create the global variable for the stack protect guard if we are using
9145 the global flavor of that guard. */
9146 static tree
9147 rs6000_init_stack_protect_guard (void)
9148 {
9149 if (rs6000_stack_protector_guard == SSP_GLOBAL)
9150 return default_stack_protect_guard ();
9151
9152 return NULL_TREE;
9153 }
9154
9155 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9156
9157 static bool
9158 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9159 {
9160 if (GET_CODE (x) == HIGH
9161 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9162 return true;
9163
9164 /* A TLS symbol in the TOC cannot contain a sum. */
9165 if (GET_CODE (x) == CONST
9166 && GET_CODE (XEXP (x, 0)) == PLUS
9167 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9168 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9169 return true;
9170
9171 /* Do not place an ELF TLS symbol in the constant pool. */
9172 return TARGET_ELF && tls_referenced_p (x);
9173 }
9174
9175 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9176 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9177 can be addressed relative to the toc pointer. */
9178
9179 static bool
9180 use_toc_relative_ref (rtx sym, machine_mode mode)
9181 {
9182 return ((constant_pool_expr_p (sym)
9183 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9184 get_pool_mode (sym)))
9185 || (TARGET_CMODEL == CMODEL_MEDIUM
9186 && SYMBOL_REF_LOCAL_P (sym)
9187 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9188 }
9189
9190 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9191 replace the input X, or the original X if no replacement is called for.
9192 The output parameter *WIN is 1 if the calling macro should goto WIN,
9193 0 if it should not.
9194
9195 For RS/6000, we wish to handle large displacements off a base
9196 register by splitting the addend across an addis and the mem insn.
9197 This cuts the number of extra insns needed from 3 to 1.
9198
9199 On Darwin, we use this to generate code for floating point constants.
9200 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9201 The Darwin code is inside #if TARGET_MACHO because only then are the
9202 machopic_* functions defined. */
9203 static rtx
9204 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9205 int opnum, int type,
9206 int ind_levels ATTRIBUTE_UNUSED, int *win)
9207 {
9208 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9209 bool quad_offset_p = mode_supports_dq_form (mode);
9210
9211 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9212 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9213 if (reg_offset_p
9214 && opnum == 1
9215 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9216 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9217 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9218 && TARGET_P9_VECTOR)
9219 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9220 && TARGET_P9_VECTOR)))
9221 reg_offset_p = false;
9222
9223 /* We must recognize output that we have already generated ourselves. */
9224 if (GET_CODE (x) == PLUS
9225 && GET_CODE (XEXP (x, 0)) == PLUS
9226 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9227 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9228 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9229 {
9230 if (TARGET_DEBUG_ADDR)
9231 {
9232 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9233 debug_rtx (x);
9234 }
9235 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9236 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9237 opnum, (enum reload_type) type);
9238 *win = 1;
9239 return x;
9240 }
9241
9242 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9243 if (GET_CODE (x) == LO_SUM
9244 && GET_CODE (XEXP (x, 0)) == HIGH)
9245 {
9246 if (TARGET_DEBUG_ADDR)
9247 {
9248 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9249 debug_rtx (x);
9250 }
9251 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9252 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9253 opnum, (enum reload_type) type);
9254 *win = 1;
9255 return x;
9256 }
9257
9258 #if TARGET_MACHO
9259 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9260 && GET_CODE (x) == LO_SUM
9261 && GET_CODE (XEXP (x, 0)) == PLUS
9262 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9263 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9264 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9265 && machopic_operand_p (XEXP (x, 1)))
9266 {
9267 /* Result of previous invocation of this function on Darwin
9268 floating point constant. */
9269 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9270 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9271 opnum, (enum reload_type) type);
9272 *win = 1;
9273 return x;
9274 }
9275 #endif
9276
9277 if (TARGET_CMODEL != CMODEL_SMALL
9278 && reg_offset_p
9279 && !quad_offset_p
9280 && small_toc_ref (x, VOIDmode))
9281 {
9282 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9283 x = gen_rtx_LO_SUM (Pmode, hi, x);
9284 if (TARGET_DEBUG_ADDR)
9285 {
9286 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9287 debug_rtx (x);
9288 }
9289 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9290 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9291 opnum, (enum reload_type) type);
9292 *win = 1;
9293 return x;
9294 }
9295
9296 if (GET_CODE (x) == PLUS
9297 && REG_P (XEXP (x, 0))
9298 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9299 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9300 && CONST_INT_P (XEXP (x, 1))
9301 && reg_offset_p
9302 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9303 {
9304 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9305 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9306 HOST_WIDE_INT high
9307 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9308
9309 /* Check for 32-bit overflow or quad addresses with one of the
9310 four least significant bits set. */
9311 if (high + low != val
9312 || (quad_offset_p && (low & 0xf)))
9313 {
9314 *win = 0;
9315 return x;
9316 }
9317
9318 /* Reload the high part into a base reg; leave the low part
9319 in the mem directly. */
9320
9321 x = gen_rtx_PLUS (GET_MODE (x),
9322 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9323 GEN_INT (high)),
9324 GEN_INT (low));
9325
9326 if (TARGET_DEBUG_ADDR)
9327 {
9328 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9329 debug_rtx (x);
9330 }
9331 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9332 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9333 opnum, (enum reload_type) type);
9334 *win = 1;
9335 return x;
9336 }
9337
9338 if (GET_CODE (x) == SYMBOL_REF
9339 && reg_offset_p
9340 && !quad_offset_p
9341 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9342 #if TARGET_MACHO
9343 && DEFAULT_ABI == ABI_DARWIN
9344 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9345 && machopic_symbol_defined_p (x)
9346 #else
9347 && DEFAULT_ABI == ABI_V4
9348 && !flag_pic
9349 #endif
9350 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9351 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9352 without fprs.
9353 ??? Assume floating point reg based on mode? This assumption is
9354 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9355 where reload ends up doing a DFmode load of a constant from
9356 mem using two gprs. Unfortunately, at this point reload
9357 hasn't yet selected regs so poking around in reload data
9358 won't help and even if we could figure out the regs reliably,
9359 we'd still want to allow this transformation when the mem is
9360 naturally aligned. Since we say the address is good here, we
9361 can't disable offsets from LO_SUMs in mem_operand_gpr.
9362 FIXME: Allow offset from lo_sum for other modes too, when
9363 mem is sufficiently aligned.
9364
9365 Also disallow this if the type can go in VMX/Altivec registers, since
9366 those registers do not have d-form (reg+offset) address modes. */
9367 && !reg_addr[mode].scalar_in_vmx_p
9368 && mode != TFmode
9369 && mode != TDmode
9370 && mode != IFmode
9371 && mode != KFmode
9372 && (mode != TImode || !TARGET_VSX)
9373 && mode != PTImode
9374 && (mode != DImode || TARGET_POWERPC64)
9375 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9376 || TARGET_HARD_FLOAT))
9377 {
9378 #if TARGET_MACHO
9379 if (flag_pic)
9380 {
9381 rtx offset = machopic_gen_offset (x);
9382 x = gen_rtx_LO_SUM (GET_MODE (x),
9383 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9384 gen_rtx_HIGH (Pmode, offset)), offset);
9385 }
9386 else
9387 #endif
9388 x = gen_rtx_LO_SUM (GET_MODE (x),
9389 gen_rtx_HIGH (Pmode, x), x);
9390
9391 if (TARGET_DEBUG_ADDR)
9392 {
9393 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9394 debug_rtx (x);
9395 }
9396 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9397 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9398 opnum, (enum reload_type) type);
9399 *win = 1;
9400 return x;
9401 }
9402
9403 /* Reload an offset address wrapped by an AND that represents the
9404 masking of the lower bits. Strip the outer AND and let reload
9405 convert the offset address into an indirect address. For VSX,
9406 force reload to create the address with an AND in a separate
9407 register, because we can't guarantee an altivec register will
9408 be used. */
9409 if (VECTOR_MEM_ALTIVEC_P (mode)
9410 && GET_CODE (x) == AND
9411 && GET_CODE (XEXP (x, 0)) == PLUS
9412 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9413 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9414 && GET_CODE (XEXP (x, 1)) == CONST_INT
9415 && INTVAL (XEXP (x, 1)) == -16)
9416 {
9417 x = XEXP (x, 0);
9418 *win = 1;
9419 return x;
9420 }
9421
9422 if (TARGET_TOC
9423 && reg_offset_p
9424 && !quad_offset_p
9425 && GET_CODE (x) == SYMBOL_REF
9426 && use_toc_relative_ref (x, mode))
9427 {
9428 x = create_TOC_reference (x, NULL_RTX);
9429 if (TARGET_CMODEL != CMODEL_SMALL)
9430 {
9431 if (TARGET_DEBUG_ADDR)
9432 {
9433 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9434 debug_rtx (x);
9435 }
9436 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9437 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9438 opnum, (enum reload_type) type);
9439 }
9440 *win = 1;
9441 return x;
9442 }
9443 *win = 0;
9444 return x;
9445 }
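/* A minimal standalone sketch, not used anywhere, of the high/low
   splitting arithmetic in the PLUS case above, including the check that
   rejects displacements which overflow 32 bits when recombined.  */

static bool ATTRIBUTE_UNUSED
rs6000_sketch_split_reload_offset (HOST_WIDE_INT val,
                                   HOST_WIDE_INT *high, HOST_WIDE_INT *low)
{
  /* Sign-extend the low 16 bits, then fold the remainder into *HIGH,
     sign-extending it as a 32-bit quantity.  */
  *low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  *high = (((val - *low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

  /* If the parts no longer sum to VAL, the displacement overflowed
     32 bits and must not be split this way.  */
  return *high + *low == val;
}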
9446
9447 /* Debug version of rs6000_legitimize_reload_address. */
9448 static rtx
9449 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9450 int opnum, int type,
9451 int ind_levels, int *win)
9452 {
9453 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9454 ind_levels, win);
9455 fprintf (stderr,
9456 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9457 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9458 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9459 debug_rtx (x);
9460
9461 if (x == ret)
9462 fprintf (stderr, "Same address returned\n");
9463 else if (!ret)
9464 fprintf (stderr, "NULL returned\n");
9465 else
9466 {
9467 fprintf (stderr, "New address:\n");
9468 debug_rtx (ret);
9469 }
9470
9471 return ret;
9472 }
9473
9474 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9475 that is a valid memory address for an instruction.
9476 The MODE argument is the machine mode for the MEM expression
9477 that wants to use this address.
9478
9479 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
9480 refers to a constant pool entry of an address (or the sum of it
9481 plus a constant), a short (16-bit signed) constant plus a register,
9482 the sum of two registers, or a register indirect, possibly with an
9483 auto-increment. For DFmode, DDmode and DImode with a constant plus
9484 register, we must ensure that both words are addressable, or on
9485 PowerPC64 that the offset is word aligned.
9486
9487 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9488 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9489 because adjacent memory cells are accessed by adding word-sized offsets
9490 during assembly output. */
9491 static bool
9492 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9493 {
9494 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9495 bool quad_offset_p = mode_supports_dq_form (mode);
9496
9497 /* If this is an unaligned lvx/stvx type address, discard the outer AND. */
9498 if (VECTOR_MEM_ALTIVEC_P (mode)
9499 && GET_CODE (x) == AND
9500 && GET_CODE (XEXP (x, 1)) == CONST_INT
9501 && INTVAL (XEXP (x, 1)) == -16)
9502 x = XEXP (x, 0);
9503
9504 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9505 return 0;
9506 if (legitimate_indirect_address_p (x, reg_ok_strict))
9507 return 1;
9508 if (TARGET_UPDATE
9509 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9510 && mode_supports_pre_incdec_p (mode)
9511 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9512 return 1;
9513 /* Handle restricted vector d-form offsets in ISA 3.0. */
9514 if (quad_offset_p)
9515 {
9516 if (quad_address_p (x, mode, reg_ok_strict))
9517 return 1;
9518 }
9519 else if (virtual_stack_registers_memory_p (x))
9520 return 1;
9521
9522 else if (reg_offset_p)
9523 {
9524 if (legitimate_small_data_p (mode, x))
9525 return 1;
9526 if (legitimate_constant_pool_address_p (x, mode,
9527 reg_ok_strict || lra_in_progress))
9528 return 1;
9529 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9530 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9531 return 1;
9532 }
9533
9534 /* For TImode, if we have TImode in VSX registers, only allow register
9535 indirect addresses. This will allow the values to go in either GPRs
9536 or VSX registers without reloading. The vector types would tend to
9537 go into VSX registers, so we allow REG+REG, while TImode seems
9538 somewhat split, in that some uses are GPR based, and some VSX based. */
9539 /* FIXME: We could loosen this by changing the following to
9540 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9541 but currently we cannot allow REG+REG addressing for TImode. See
9542 PR72827 for complete details on how this ends up hoodwinking DSE. */
9543 if (mode == TImode && TARGET_VSX)
9544 return 0;
9545 /* If not REG_OK_STRICT (before reload), allow any stack offset. */
9546 if (! reg_ok_strict
9547 && reg_offset_p
9548 && GET_CODE (x) == PLUS
9549 && GET_CODE (XEXP (x, 0)) == REG
9550 && (XEXP (x, 0) == virtual_stack_vars_rtx
9551 || XEXP (x, 0) == arg_pointer_rtx)
9552 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9553 return 1;
9554 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9555 return 1;
9556 if (!FLOAT128_2REG_P (mode)
9557 && (TARGET_HARD_FLOAT
9558 || TARGET_POWERPC64
9559 || (mode != DFmode && mode != DDmode))
9560 && (TARGET_POWERPC64 || mode != DImode)
9561 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9562 && mode != PTImode
9563 && !avoiding_indexed_address_p (mode)
9564 && legitimate_indexed_address_p (x, reg_ok_strict))
9565 return 1;
9566 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9567 && mode_supports_pre_modify_p (mode)
9568 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9569 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9570 reg_ok_strict, false)
9571 || (!avoiding_indexed_address_p (mode)
9572 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9573 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9574 return 1;
9575 if (reg_offset_p && !quad_offset_p
9576 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9577 return 1;
9578 return 0;
9579 }
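/* Illustrative note, not from the original source: the main address
   shapes accepted above are, in RTL form, roughly

     (reg rB)                          ; register indirect
     (plus (reg rB) (const_int d))     ; 16-bit signed offset (d-form)
     (plus (reg rB) (reg rX))          ; indexed (x-form)
     (pre_inc (reg rB))                ; and pre_dec/pre_modify, with
                                       ; TARGET_UPDATE
     (lo_sum (reg rB) sym)             ; small data / TOC references

   each subject to the mode-specific restrictions checked above.  */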
9580
9581 /* Debug version of rs6000_legitimate_address_p. */
9582 static bool
9583 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9584 bool reg_ok_strict)
9585 {
9586 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9587 fprintf (stderr,
9588 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9589 "strict = %d, reload = %s, code = %s\n",
9590 ret ? "true" : "false",
9591 GET_MODE_NAME (mode),
9592 reg_ok_strict,
9593 (reload_completed ? "after" : "before"),
9594 GET_RTX_NAME (GET_CODE (x)));
9595 debug_rtx (x);
9596
9597 return ret;
9598 }
9599
9600 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9601
9602 static bool
9603 rs6000_mode_dependent_address_p (const_rtx addr,
9604 addr_space_t as ATTRIBUTE_UNUSED)
9605 {
9606 return rs6000_mode_dependent_address_ptr (addr);
9607 }
9608
9609 /* Go to LABEL if ADDR (a legitimate address expression)
9610 has an effect that depends on the machine mode it is used for.
9611
9612 On the RS/6000 this is true of all integral offsets (since AltiVec
9613 and VSX modes don't allow them) and of pre-increment and pre-decrement.
9614
9615 ??? Except that due to conceptual problems in offsettable_address_p
9616 we can't really report the problems of integral offsets. So leave
9617 this assuming that the adjustable offset must be valid for the
9618 sub-words of a TFmode operand, which is what we had before. */
9619
9620 static bool
9621 rs6000_mode_dependent_address (const_rtx addr)
9622 {
9623 switch (GET_CODE (addr))
9624 {
9625 case PLUS:
9626 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9627 is considered a legitimate address before reload, so there
9628 are no offset restrictions in that case. Note that this
9629 condition is safe in strict mode because any address involving
9630 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9631 been rejected as illegitimate. */
9632 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9633 && XEXP (addr, 0) != arg_pointer_rtx
9634 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9635 {
9636 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9637 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9638 }
9639 break;
9640
9641 case LO_SUM:
9642 /* Anything in the constant pool is sufficiently aligned that
9643 all bytes have the same high part address. */
9644 return !legitimate_constant_pool_address_p (addr, QImode, false);
9645
9646 /* Auto-increment cases are now treated generically in recog.c. */
9647 case PRE_MODIFY:
9648 return TARGET_UPDATE;
9649
9650 /* AND is only allowed in Altivec loads. */
9651 case AND:
9652 return true;
9653
9654 default:
9655 break;
9656 }
9657
9658 return false;
9659 }
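/* An equivalent signed-range form of the unsigned PLUS test above
   (a sketch, not used anywhere): an offset VAL makes the address mode
   dependent when it lies outside [-0x8000, 0x7fff - extra], with extra
   bytes reserved for accessing the sub-words of a worst-case operand.  */

static bool ATTRIBUTE_UNUSED
rs6000_sketch_offset_mode_dependent_p (HOST_WIDE_INT val)
{
  HOST_WIDE_INT extra = TARGET_POWERPC64 ? 8 : 12;
  return val < -0x8000 || val > 0x7fff - extra;
}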
9660
9661 /* Debug version of rs6000_mode_dependent_address. */
9662 static bool
9663 rs6000_debug_mode_dependent_address (const_rtx addr)
9664 {
9665 bool ret = rs6000_mode_dependent_address (addr);
9666
9667 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9668 ret ? "true" : "false");
9669 debug_rtx (addr);
9670
9671 return ret;
9672 }
9673
9674 /* Implement FIND_BASE_TERM. */
9675
9676 rtx
9677 rs6000_find_base_term (rtx op)
9678 {
9679 rtx base;
9680
9681 base = op;
9682 if (GET_CODE (base) == CONST)
9683 base = XEXP (base, 0);
9684 if (GET_CODE (base) == PLUS)
9685 base = XEXP (base, 0);
9686 if (GET_CODE (base) == UNSPEC)
9687 switch (XINT (base, 1))
9688 {
9689 case UNSPEC_TOCREL:
9690 case UNSPEC_MACHOPIC_OFFSET:
9691 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9692 for aliasing purposes. */
9693 return XVECEXP (base, 0, 0);
9694 }
9695
9696 return op;
9697 }
9698
9699 /* More elaborate version of recog's offsettable_memref_p predicate
9700 that works around the ??? note of rs6000_mode_dependent_address.
9701 In particular it accepts
9702
9703 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9704
9705 in 32-bit mode, which the recog predicate rejects. */
9706
9707 static bool
9708 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9709 {
9710 bool worst_case;
9711
9712 if (!MEM_P (op))
9713 return false;
9714
9715 /* First mimic offsettable_memref_p. */
9716 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9717 return true;
9718
9719 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9720 the latter predicate knows nothing about the mode of the memory
9721 reference and, therefore, assumes that it is the largest supported
9722 mode (TFmode). As a consequence, legitimate offsettable memory
9723 references are rejected. rs6000_legitimate_offset_address_p contains
9724 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9725 at least with a little bit of help here given that we know the
9726 actual registers used. */
9727 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9728 || GET_MODE_SIZE (reg_mode) == 4);
9729 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9730 strict, worst_case);
9731 }
9732
9733 /* Determine the reassociation width to be used in reassociate_bb.
9734 This takes into account how many parallel operations we
9735 can actually do of a given type, and also the latency.
9736 P8:
9737 int add/sub 6/cycle
9738 mul 2/cycle
9739 vect add/sub/mul 2/cycle
9740 fp add/sub/mul 2/cycle
9741 dfp 1/cycle
9742 */
9743
9744 static int
9745 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9746 machine_mode mode)
9747 {
9748 switch (rs6000_tune)
9749 {
9750 case PROCESSOR_POWER8:
9751 case PROCESSOR_POWER9:
9752 if (DECIMAL_FLOAT_MODE_P (mode))
9753 return 1;
9754 if (VECTOR_MODE_P (mode))
9755 return 4;
9756 if (INTEGRAL_MODE_P (mode))
9757 return 1;
9758 if (FLOAT_MODE_P (mode))
9759 return 4;
9760 break;
9761 default:
9762 break;
9763 }
9764 return 1;
9765 }
9766
9767 /* Change register usage conditional on target flags. */
9768 static void
9769 rs6000_conditional_register_usage (void)
9770 {
9771 int i;
9772
9773 if (TARGET_DEBUG_TARGET)
9774 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9775
9776 /* Set MQ register fixed (already call_used) so that it will not be
9777 allocated. */
9778 fixed_regs[64] = 1;
9779
9780 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9781 if (TARGET_64BIT)
9782 fixed_regs[13] = call_used_regs[13]
9783 = call_really_used_regs[13] = 1;
9784
9785 /* Conditionally disable FPRs. */
9786 if (TARGET_SOFT_FLOAT)
9787 for (i = 32; i < 64; i++)
9788 fixed_regs[i] = call_used_regs[i]
9789 = call_really_used_regs[i] = 1;
9790
9791 /* The TOC register is not killed across calls in a way that is
9792 visible to the compiler. */
9793 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9794 call_really_used_regs[2] = 0;
9795
9796 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9797 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9798
9799 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9800 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9801 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9802 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9803
9804 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9805 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9806 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9807 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9808
9809 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9810 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9811 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9812
9813 if (!TARGET_ALTIVEC && !TARGET_VSX)
9814 {
9815 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9816 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9817 call_really_used_regs[VRSAVE_REGNO] = 1;
9818 }
9819
9820 if (TARGET_ALTIVEC || TARGET_VSX)
9821 global_regs[VSCR_REGNO] = 1;
9822
9823 if (TARGET_ALTIVEC_ABI)
9824 {
9825 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9826 call_used_regs[i] = call_really_used_regs[i] = 1;
9827
9828 /* AIX reserves VR20:31 in non-extended ABI mode. */
9829 if (TARGET_XCOFF)
9830 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9831 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9832 }
9833 }
9834
9835 \f
9836 /* Output insns to set DEST equal to the constant SOURCE as a series of
9837 lis, ori and shl instructions and return TRUE. */
9838
9839 bool
9840 rs6000_emit_set_const (rtx dest, rtx source)
9841 {
9842 machine_mode mode = GET_MODE (dest);
9843 rtx temp, set;
9844 rtx_insn *insn;
9845 HOST_WIDE_INT c;
9846
9847 gcc_checking_assert (CONST_INT_P (source));
9848 c = INTVAL (source);
9849 switch (mode)
9850 {
9851 case E_QImode:
9852 case E_HImode:
9853 emit_insn (gen_rtx_SET (dest, source));
9854 return true;
9855
9856 case E_SImode:
9857 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9858
9859 emit_insn (gen_rtx_SET (copy_rtx (temp),
9860 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9861 emit_insn (gen_rtx_SET (dest,
9862 gen_rtx_IOR (SImode, copy_rtx (temp),
9863 GEN_INT (c & 0xffff))));
9864 break;
9865
9866 case E_DImode:
9867 if (!TARGET_POWERPC64)
9868 {
9869 rtx hi, lo;
9870
9871 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9872 DImode);
9873 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9874 DImode);
9875 emit_move_insn (hi, GEN_INT (c >> 32));
9876 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9877 emit_move_insn (lo, GEN_INT (c));
9878 }
9879 else
9880 rs6000_emit_set_long_const (dest, c);
9881 break;
9882
9883 default:
9884 gcc_unreachable ();
9885 }
9886
9887 insn = get_last_insn ();
9888 set = single_set (insn);
9889 if (! CONSTANT_P (SET_SRC (set)))
9890 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9891
9892 return true;
9893 }
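/* Illustrative note, not from the original source: for SImode the two
   sets above are the classic lis/ori idiom, e.g. for c = 0x12345678:

       lis r9,0x1234          # temp = c & ~0xffff
       ori r9,r9,0x5678       # dest = temp | (c & 0xffff)

   with r9 standing in for the pseudo (or for dest itself when pseudos
   cannot be created).  */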
9894
9895 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9896 Output insns to set DEST equal to the constant C as a series of
9897 lis, ori and shl instructions. */
9898
9899 static void
9900 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9901 {
9902 rtx temp;
9903 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9904
9905 ud1 = c & 0xffff;
9906 c = c >> 16;
9907 ud2 = c & 0xffff;
9908 c = c >> 16;
9909 ud3 = c & 0xffff;
9910 c = c >> 16;
9911 ud4 = c & 0xffff;
9912
9913 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9914 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9915 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9916
9917 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9918 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9919 {
9920 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9921
9922 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9923 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9924 if (ud1 != 0)
9925 emit_move_insn (dest,
9926 gen_rtx_IOR (DImode, copy_rtx (temp),
9927 GEN_INT (ud1)));
9928 }
9929 else if (ud3 == 0 && ud4 == 0)
9930 {
9931 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9932
9933 gcc_assert (ud2 & 0x8000);
9934 emit_move_insn (copy_rtx (temp),
9935 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9936 if (ud1 != 0)
9937 emit_move_insn (copy_rtx (temp),
9938 gen_rtx_IOR (DImode, copy_rtx (temp),
9939 GEN_INT (ud1)));
9940 emit_move_insn (dest,
9941 gen_rtx_ZERO_EXTEND (DImode,
9942 gen_lowpart (SImode,
9943 copy_rtx (temp))));
9944 }
9945 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9946 || (ud4 == 0 && ! (ud3 & 0x8000)))
9947 {
9948 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9949
9950 emit_move_insn (copy_rtx (temp),
9951 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9952 if (ud2 != 0)
9953 emit_move_insn (copy_rtx (temp),
9954 gen_rtx_IOR (DImode, copy_rtx (temp),
9955 GEN_INT (ud2)));
9956 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9957 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9958 GEN_INT (16)));
9959 if (ud1 != 0)
9960 emit_move_insn (dest,
9961 gen_rtx_IOR (DImode, copy_rtx (temp),
9962 GEN_INT (ud1)));
9963 }
9964 else
9965 {
9966 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9967
9968 emit_move_insn (copy_rtx (temp),
9969 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9970 if (ud3 != 0)
9971 emit_move_insn (copy_rtx (temp),
9972 gen_rtx_IOR (DImode, copy_rtx (temp),
9973 GEN_INT (ud3)));
9974
9975 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9976 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9977 GEN_INT (32)));
9978 if (ud2 != 0)
9979 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9980 gen_rtx_IOR (DImode, copy_rtx (temp),
9981 GEN_INT (ud2 << 16)));
9982 if (ud1 != 0)
9983 emit_move_insn (dest,
9984 gen_rtx_IOR (DImode, copy_rtx (temp),
9985 GEN_INT (ud1)));
9986 }
9987 }
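/* Illustrative note, not from the original source: the final case above,
   with all four halfwords significant, is the classic five-insn build,
   e.g. for c = 0x123456789abcdef0:

       lis  r9,0x1234         # ud4
       ori  r9,r9,0x5678      # ud3
       sldi r9,r9,32
       oris r9,r9,0x9abc      # ud2
       ori  r9,r9,0xdef0      # ud1  */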
9988
9989 /* Helper for rs6000_emit_move below. Get rid of [r+r] memory refs
9990 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9991
9992 static void
9993 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9994 {
9995 if (GET_CODE (operands[0]) == MEM
9996 && GET_CODE (XEXP (operands[0], 0)) != REG
9997 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9998 GET_MODE (operands[0]), false))
9999 operands[0]
10000 = replace_equiv_address (operands[0],
10001 copy_addr_to_reg (XEXP (operands[0], 0)));
10002
10003 if (GET_CODE (operands[1]) == MEM
10004 && GET_CODE (XEXP (operands[1], 0)) != REG
10005 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10006 GET_MODE (operands[1]), false))
10007 operands[1]
10008 = replace_equiv_address (operands[1],
10009 copy_addr_to_reg (XEXP (operands[1], 0)));
10010 }
10011
10012 /* Generate a vector of constants to permute MODE for a little-endian
10013 storage operation by swapping the two halves of a vector. */
10014 static rtvec
10015 rs6000_const_vec (machine_mode mode)
10016 {
10017 int i, subparts;
10018 rtvec v;
10019
10020 switch (mode)
10021 {
10022 case E_V1TImode:
10023 subparts = 1;
10024 break;
10025 case E_V2DFmode:
10026 case E_V2DImode:
10027 subparts = 2;
10028 break;
10029 case E_V4SFmode:
10030 case E_V4SImode:
10031 subparts = 4;
10032 break;
10033 case E_V8HImode:
10034 subparts = 8;
10035 break;
10036 case E_V16QImode:
10037 subparts = 16;
10038 break;
10039 default:
10040 gcc_unreachable();
10041 }
10042
10043 v = rtvec_alloc (subparts);
10044
10045 for (i = 0; i < subparts / 2; ++i)
10046 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10047 for (i = subparts / 2; i < subparts; ++i)
10048 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10049
10050 return v;
10051 }
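/* Illustrative note, not from the original source: for V4SImode the
   vector built above is {2, 3, 0, 1}, and for V16QImode it is
   {8, ..., 15, 0, ..., 7}; in each case the permutation swaps the two
   64-bit halves of the vector.  */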
10052
10053 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
10054 store operation. */
10055 void
10056 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
10057 {
10058 /* Scalar permutations are easier to express in integer modes than in
10059 floating-point modes, so cast them here. We use V1TImode instead
10060 of TImode to ensure that the values don't go through GPRs. */
10061 if (FLOAT128_VECTOR_P (mode))
10062 {
10063 dest = gen_lowpart (V1TImode, dest);
10064 source = gen_lowpart (V1TImode, source);
10065 mode = V1TImode;
10066 }
10067
10068 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
10069 scalar. */
10070 if (mode == TImode || mode == V1TImode)
10071 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
10072 GEN_INT (64))));
10073 else
10074 {
10075 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10076 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
10077 }
10078 }
10079
10080 /* Emit a little-endian load from vector memory location SOURCE to VSX
10081 register DEST in mode MODE. The load is done with two permuting
10082 insns that represent an lxvd2x and an xxpermdi. */
10083 void
10084 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10085 {
10086 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10087 V1TImode). */
10088 if (mode == TImode || mode == V1TImode)
10089 {
10090 mode = V2DImode;
10091 dest = gen_lowpart (V2DImode, dest);
10092 source = adjust_address (source, V2DImode, 0);
10093 }
10094
10095 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10096 rs6000_emit_le_vsx_permute (tmp, source, mode);
10097 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10098 }
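/* Illustrative note, not from the original source: the first permute
   above stands for the lxvd2x itself, which loads the two doublewords
   in big-endian doubleword order, and the second for the xxpermdi that
   swaps them back, leaving DEST with the correct little-endian element
   layout.  */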
10099
10100 /* Emit a little-endian store to vector memory location DEST from VSX
10101 register SOURCE in mode MODE. The store is done with two permuting
10102 insns that represent an xxpermdi and an stxvd2x. */
10103 void
10104 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10105 {
10106 /* This should never be called during or after LRA, because it does
10107 not re-permute the source register. It is intended only for use
10108 during expand. */
10109 gcc_assert (!lra_in_progress && !reload_completed);
10110
10111 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10112 V1TImode). */
10113 if (mode == TImode || mode == V1TImode)
10114 {
10115 mode = V2DImode;
10116 dest = adjust_address (dest, V2DImode, 0);
10117 source = gen_lowpart (V2DImode, source);
10118 }
10119
10120 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10121 rs6000_emit_le_vsx_permute (tmp, source, mode);
10122 rs6000_emit_le_vsx_permute (dest, tmp, mode);
10123 }
10124
10125 /* Emit a sequence representing a little-endian VSX load or store,
10126 moving data from SOURCE to DEST in mode MODE. This is done
10127 separately from rs6000_emit_move to ensure it is called only
10128 during expand. LE VSX loads and stores introduced later are
10129 handled with a split. The expand-time RTL generation allows
10130 us to optimize away redundant pairs of register-permutes. */
10131 void
10132 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10133 {
10134 gcc_assert (!BYTES_BIG_ENDIAN
10135 && VECTOR_MEM_VSX_P (mode)
10136 && !TARGET_P9_VECTOR
10137 && !gpr_or_gpr_p (dest, source)
10138 && (MEM_P (source) ^ MEM_P (dest)));
10139
10140 if (MEM_P (source))
10141 {
10142 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10143 rs6000_emit_le_vsx_load (dest, source, mode);
10144 }
10145 else
10146 {
10147 if (!REG_P (source))
10148 source = force_reg (mode, source);
10149 rs6000_emit_le_vsx_store (dest, source, mode);
10150 }
10151 }
10152
10153 /* Return whether an SFmode or SImode move can be done without converting
10154 one mode to another. This arises when we have:
10155
10156 (SUBREG:SF (REG:SI ...))
10157 (SUBREG:SI (REG:SF ...))
10158
10159 and one of the values is in a floating point/vector register, where SFmode
10160 scalars are stored in DFmode format. */
10161
10162 bool
10163 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10164 {
10165 if (TARGET_ALLOW_SF_SUBREG)
10166 return true;
10167
10168 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10169 return true;
10170
10171 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10172 return true;
10173
10174 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10175 if (SUBREG_P (dest))
10176 {
10177 rtx dest_subreg = SUBREG_REG (dest);
10178 rtx src_subreg = SUBREG_REG (src);
10179 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10180 }
10181
10182 return false;
10183 }
10184
10185
10186 /* Helper function to change moves with:
10187
10188 (SUBREG:SF (REG:SI)) and
10189 (SUBREG:SI (REG:SF))
10190
10191 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10192 values are stored as DFmode values in the VSX registers. We need to convert
10193 the bits before we can use a direct move or operate on the bits in the
10194 vector register as an integer type.
10195
10196 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10197
10198 static bool
10199 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10200 {
10201 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
10202 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10203 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10204 {
10205 rtx inner_source = SUBREG_REG (source);
10206 machine_mode inner_mode = GET_MODE (inner_source);
10207
10208 if (mode == SImode && inner_mode == SFmode)
10209 {
10210 emit_insn (gen_movsi_from_sf (dest, inner_source));
10211 return true;
10212 }
10213
10214 if (mode == SFmode && inner_mode == SImode)
10215 {
10216 emit_insn (gen_movsf_from_si (dest, inner_source));
10217 return true;
10218 }
10219 }
10220
10221 return false;
10222 }
10223
10224 /* Emit a move from SOURCE to DEST in mode MODE. */
10225 void
10226 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10227 {
10228 rtx operands[2];
10229 operands[0] = dest;
10230 operands[1] = source;
10231
10232 if (TARGET_DEBUG_ADDR)
10233 {
10234 fprintf (stderr,
10235 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
10236 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10237 GET_MODE_NAME (mode),
10238 lra_in_progress,
10239 reload_completed,
10240 can_create_pseudo_p ());
10241 debug_rtx (dest);
10242 fprintf (stderr, "source:\n");
10243 debug_rtx (source);
10244 }
10245
10246 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
10247 if (CONST_WIDE_INT_P (operands[1])
10248 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10249 {
10250 /* This should be fixed with the introduction of CONST_WIDE_INT. */
10251 gcc_unreachable ();
10252 }
10253
10254 #ifdef HAVE_AS_GNU_ATTRIBUTE
10255 /* If we use a long double type, set the flags in .gnu_attribute that say
10256 what the long double type is. This is to allow the linker's warning
10257 message for the wrong long double to be useful, even if the function does
10258 not do a call (for example, doing a 128-bit add on power9 if the long
10259 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
10260 are used when they aren't the default long double type. */
10261 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
10262 {
10263 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
10264 rs6000_passes_float = rs6000_passes_long_double = true;
10265
10266 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
10267 rs6000_passes_float = rs6000_passes_long_double = true;
10268 }
10269 #endif
10270
10271 /* See if we need to special case SImode/SFmode SUBREG moves. */
10272 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10273 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10274 return;
10275
10276 /* Check if GCC is setting up a block move that will end up using FP
10277 registers as temporaries. We must make sure this is acceptable. */
10278 if (GET_CODE (operands[0]) == MEM
10279 && GET_CODE (operands[1]) == MEM
10280 && mode == DImode
10281 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10282 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10283 && ! (rs6000_slow_unaligned_access (SImode,
10284 (MEM_ALIGN (operands[0]) > 32
10285 ? 32 : MEM_ALIGN (operands[0])))
10286 || rs6000_slow_unaligned_access (SImode,
10287 (MEM_ALIGN (operands[1]) > 32
10288 ? 32 : MEM_ALIGN (operands[1]))))
10289 && ! MEM_VOLATILE_P (operands [0])
10290 && ! MEM_VOLATILE_P (operands [1]))
10291 {
10292 emit_move_insn (adjust_address (operands[0], SImode, 0),
10293 adjust_address (operands[1], SImode, 0));
10294 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10295 adjust_address (copy_rtx (operands[1]), SImode, 4));
10296 return;
10297 }
10298
10299 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10300 && !gpc_reg_operand (operands[1], mode))
10301 operands[1] = force_reg (mode, operands[1]);
10302
10303 /* Recognize the case where operand[1] is a reference to thread-local
10304 data and load its address to a register. */
10305 if (tls_referenced_p (operands[1]))
10306 {
10307 enum tls_model model;
10308 rtx tmp = operands[1];
10309 rtx addend = NULL;
10310
10311 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10312 {
10313 addend = XEXP (XEXP (tmp, 0), 1);
10314 tmp = XEXP (XEXP (tmp, 0), 0);
10315 }
10316
10317 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10318 model = SYMBOL_REF_TLS_MODEL (tmp);
10319 gcc_assert (model != 0);
10320
10321 tmp = rs6000_legitimize_tls_address (tmp, model);
10322 if (addend)
10323 {
10324 tmp = gen_rtx_PLUS (mode, tmp, addend);
10325 tmp = force_operand (tmp, operands[0]);
10326 }
10327 operands[1] = tmp;
10328 }
10329
10330 /* 128-bit constant floating-point values on Darwin should really be loaded
10331 as two parts. However, this premature splitting is a problem when DFmode
10332 values can go into Altivec registers. */
10333 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10334 && GET_CODE (operands[1]) == CONST_DOUBLE)
10335 {
10336 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10337 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10338 DFmode);
10339 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10340 GET_MODE_SIZE (DFmode)),
10341 simplify_gen_subreg (DFmode, operands[1], mode,
10342 GET_MODE_SIZE (DFmode)),
10343 DFmode);
10344 return;
10345 }
10346
10347 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10348 p1:SD) if p1 is not of floating point class and p0 is spilled, as
10349 we can have no analogous movsd_store for this. */
10350 if (lra_in_progress && mode == DDmode
10351 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10352 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10353 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10354 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10355 {
10356 enum reg_class cl;
10357 int regno = REGNO (SUBREG_REG (operands[1]));
10358
10359 if (regno >= FIRST_PSEUDO_REGISTER)
10360 {
10361 cl = reg_preferred_class (regno);
10362 regno = reg_renumber[regno];
10363 if (regno < 0)
10364 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10365 }
10366 if (regno >= 0 && ! FP_REGNO_P (regno))
10367 {
10368 mode = SDmode;
10369 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10370 operands[1] = SUBREG_REG (operands[1]);
10371 }
10372 }
10373 if (lra_in_progress
10374 && mode == SDmode
10375 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10376 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10377 && (REG_P (operands[1])
10378 || (GET_CODE (operands[1]) == SUBREG
10379 && REG_P (SUBREG_REG (operands[1])))))
10380 {
10381 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10382 ? SUBREG_REG (operands[1]) : operands[1]);
10383 enum reg_class cl;
10384
10385 if (regno >= FIRST_PSEUDO_REGISTER)
10386 {
10387 cl = reg_preferred_class (regno);
10388 gcc_assert (cl != NO_REGS);
10389 regno = reg_renumber[regno];
10390 if (regno < 0)
10391 regno = ira_class_hard_regs[cl][0];
10392 }
10393 if (FP_REGNO_P (regno))
10394 {
10395 if (GET_MODE (operands[0]) != DDmode)
10396 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10397 emit_insn (gen_movsd_store (operands[0], operands[1]));
10398 }
10399 else if (INT_REGNO_P (regno))
10400 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10401 else
10402 	gcc_unreachable ();
10403 return;
10404 }
10405   /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10406      p1:DD)) if p0 is not of floating-point class and p1 is spilled,
10407      since there is no analogous movsd_load pattern for that case.  */
10408 if (lra_in_progress && mode == DDmode
10409 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10410 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10411 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10412 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10413 {
10414 enum reg_class cl;
10415 int regno = REGNO (SUBREG_REG (operands[0]));
10416
10417 if (regno >= FIRST_PSEUDO_REGISTER)
10418 {
10419 cl = reg_preferred_class (regno);
10420 regno = reg_renumber[regno];
10421 if (regno < 0)
10422 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10423 }
10424 if (regno >= 0 && ! FP_REGNO_P (regno))
10425 {
10426 mode = SDmode;
10427 operands[0] = SUBREG_REG (operands[0]);
10428 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10429 }
10430 }
10431 if (lra_in_progress
10432 && mode == SDmode
10433 && (REG_P (operands[0])
10434 || (GET_CODE (operands[0]) == SUBREG
10435 && REG_P (SUBREG_REG (operands[0]))))
10436 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10437 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10438 {
10439 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10440 ? SUBREG_REG (operands[0]) : operands[0]);
10441 enum reg_class cl;
10442
10443 if (regno >= FIRST_PSEUDO_REGISTER)
10444 {
10445 cl = reg_preferred_class (regno);
10446 gcc_assert (cl != NO_REGS);
10447 regno = reg_renumber[regno];
10448 if (regno < 0)
10449 regno = ira_class_hard_regs[cl][0];
10450 }
10451 if (FP_REGNO_P (regno))
10452 {
10453 if (GET_MODE (operands[1]) != DDmode)
10454 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10455 emit_insn (gen_movsd_load (operands[0], operands[1]));
10456 }
10457 else if (INT_REGNO_P (regno))
10458 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10459 else
10460 	gcc_unreachable ();
10461 return;
10462 }
10463
10464 /* FIXME: In the long term, this switch statement should go away
10465 and be replaced by a sequence of tests based on things like
10466 mode == Pmode. */
10467 switch (mode)
10468 {
10469 case E_HImode:
10470 case E_QImode:
10471 if (CONSTANT_P (operands[1])
10472 && GET_CODE (operands[1]) != CONST_INT)
10473 operands[1] = force_const_mem (mode, operands[1]);
10474 break;
10475
10476 case E_TFmode:
10477 case E_TDmode:
10478 case E_IFmode:
10479 case E_KFmode:
10480 if (FLOAT128_2REG_P (mode))
10481 rs6000_eliminate_indexed_memrefs (operands);
10482 /* fall through */
10483
10484 case E_DFmode:
10485 case E_DDmode:
10486 case E_SFmode:
10487 case E_SDmode:
10488 if (CONSTANT_P (operands[1])
10489 && ! easy_fp_constant (operands[1], mode))
10490 operands[1] = force_const_mem (mode, operands[1]);
10491 break;
10492
10493 case E_V16QImode:
10494 case E_V8HImode:
10495 case E_V4SFmode:
10496 case E_V4SImode:
10497 case E_V2DFmode:
10498 case E_V2DImode:
10499 case E_V1TImode:
10500 if (CONSTANT_P (operands[1])
10501 && !easy_vector_constant (operands[1], mode))
10502 operands[1] = force_const_mem (mode, operands[1]);
10503 break;
10504
10505 case E_SImode:
10506 case E_DImode:
10507       /* Use the default pattern for the address of ELF small data.  */
10508 if (TARGET_ELF
10509 && mode == Pmode
10510 && DEFAULT_ABI == ABI_V4
10511 && (GET_CODE (operands[1]) == SYMBOL_REF
10512 || GET_CODE (operands[1]) == CONST)
10513 && small_data_operand (operands[1], mode))
10514 {
10515 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10516 return;
10517 }
10518
10519 if (DEFAULT_ABI == ABI_V4
10520 && mode == Pmode && mode == SImode
10521 && flag_pic == 1 && got_operand (operands[1], mode))
10522 {
10523 emit_insn (gen_movsi_got (operands[0], operands[1]));
10524 return;
10525 }
10526
10527 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10528 && TARGET_NO_TOC
10529 && ! flag_pic
10530 && mode == Pmode
10531 && CONSTANT_P (operands[1])
10532 && GET_CODE (operands[1]) != HIGH
10533 && GET_CODE (operands[1]) != CONST_INT)
10534 {
10535 rtx target = (!can_create_pseudo_p ()
10536 ? operands[0]
10537 : gen_reg_rtx (mode));
10538
10539 /* If this is a function address on -mcall-aixdesc,
10540 convert it to the address of the descriptor. */
10541 if (DEFAULT_ABI == ABI_AIX
10542 && GET_CODE (operands[1]) == SYMBOL_REF
10543 && XSTR (operands[1], 0)[0] == '.')
10544 {
10545 const char *name = XSTR (operands[1], 0);
10546 rtx new_ref;
10547 while (*name == '.')
10548 name++;
10549 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10550 CONSTANT_POOL_ADDRESS_P (new_ref)
10551 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10552 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10553 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10554 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10555 operands[1] = new_ref;
10556 }
10557
10558 if (DEFAULT_ABI == ABI_DARWIN)
10559 {
10560 #if TARGET_MACHO
10561 if (MACHO_DYNAMIC_NO_PIC_P)
10562 {
10563 /* Take care of any required data indirection. */
10564 operands[1] = rs6000_machopic_legitimize_pic_address (
10565 operands[1], mode, operands[0]);
10566 if (operands[0] != operands[1])
10567 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10568 return;
10569 }
10570 #endif
10571 emit_insn (gen_macho_high (target, operands[1]));
10572 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10573 return;
10574 }
10575
10576 emit_insn (gen_elf_high (target, operands[1]));
10577 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10578 return;
10579 }
10580
10581 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10582 and we have put it in the TOC, we just need to make a TOC-relative
10583 reference to it. */
10584 if (TARGET_TOC
10585 && GET_CODE (operands[1]) == SYMBOL_REF
10586 && use_toc_relative_ref (operands[1], mode))
10587 operands[1] = create_TOC_reference (operands[1], operands[0]);
10588 else if (mode == Pmode
10589 && CONSTANT_P (operands[1])
10590 && GET_CODE (operands[1]) != HIGH
10591 && ((GET_CODE (operands[1]) != CONST_INT
10592 && ! easy_fp_constant (operands[1], mode))
10593 || (GET_CODE (operands[1]) == CONST_INT
10594 && (num_insns_constant (operands[1], mode)
10595 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10596 || (GET_CODE (operands[0]) == REG
10597 && FP_REGNO_P (REGNO (operands[0]))))
10598 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10599 && (TARGET_CMODEL == CMODEL_SMALL
10600 || can_create_pseudo_p ()
10601 || (REG_P (operands[0])
10602 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10603 {
10604
10605 #if TARGET_MACHO
10606 /* Darwin uses a special PIC legitimizer. */
10607 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10608 {
10609 operands[1] =
10610 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10611 operands[0]);
10612 if (operands[0] != operands[1])
10613 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10614 return;
10615 }
10616 #endif
10617
10618 /* If we are to limit the number of things we put in the TOC and
10619 this is a symbol plus a constant we can add in one insn,
10620 just put the symbol in the TOC and add the constant. */
10621 if (GET_CODE (operands[1]) == CONST
10622 && TARGET_NO_SUM_IN_TOC
10623 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10624 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10625 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10626 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10627 && ! side_effects_p (operands[0]))
10628 {
10629 rtx sym =
10630 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10631 rtx other = XEXP (XEXP (operands[1], 0), 1);
10632
10633 sym = force_reg (mode, sym);
10634 emit_insn (gen_add3_insn (operands[0], sym, other));
10635 return;
10636 }
10637
10638 operands[1] = force_const_mem (mode, operands[1]);
10639
10640 if (TARGET_TOC
10641 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10642 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10643 {
10644 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10645 operands[0]);
10646 operands[1] = gen_const_mem (mode, tocref);
10647 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10648 }
10649 }
10650 break;
10651
10652 case E_TImode:
10653 if (!VECTOR_MEM_VSX_P (TImode))
10654 rs6000_eliminate_indexed_memrefs (operands);
10655 break;
10656
10657 case E_PTImode:
10658 rs6000_eliminate_indexed_memrefs (operands);
10659 break;
10660
10661 default:
10662 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10663 }
10664
10665 /* Above, we may have called force_const_mem which may have returned
10666 an invalid address. If we can, fix this up; otherwise, reload will
10667 have to deal with it. */
10668 if (GET_CODE (operands[1]) == MEM)
10669 operands[1] = validize_mem (operands[1]);
10670
10671 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10672 }
10673 \f
10674 /* Nonzero if we can use a floating-point register to pass this arg. */
10675 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10676 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10677 && (CUM)->fregno <= FP_ARG_MAX_REG \
10678 && TARGET_HARD_FLOAT)
10679
10680 /* Nonzero if we can use an AltiVec register to pass this arg. */
10681 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10682 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10683 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10684 && TARGET_ALTIVEC_ABI \
10685 && (NAMED))
10686
10687 /* Walk down the type tree of TYPE counting consecutive base elements.
10688 If *MODEP is VOIDmode, then set it to the first valid floating point
10689 or vector type. If a non-floating point or vector type is found, or
10690 if a floating point or vector type that doesn't match a non-VOIDmode
10691 *MODEP is found, then return -1, otherwise return the count in the
10692 sub-tree. */
10693
10694 static int
10695 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10696 {
10697 machine_mode mode;
10698 HOST_WIDE_INT size;
10699
10700 switch (TREE_CODE (type))
10701 {
10702 case REAL_TYPE:
10703 mode = TYPE_MODE (type);
10704 if (!SCALAR_FLOAT_MODE_P (mode))
10705 return -1;
10706
10707 if (*modep == VOIDmode)
10708 *modep = mode;
10709
10710 if (*modep == mode)
10711 return 1;
10712
10713 break;
10714
10715 case COMPLEX_TYPE:
10716 mode = TYPE_MODE (TREE_TYPE (type));
10717 if (!SCALAR_FLOAT_MODE_P (mode))
10718 return -1;
10719
10720 if (*modep == VOIDmode)
10721 *modep = mode;
10722
10723 if (*modep == mode)
10724 return 2;
10725
10726 break;
10727
10728 case VECTOR_TYPE:
10729 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10730 return -1;
10731
10732 /* Use V4SImode as representative of all 128-bit vector types. */
10733 size = int_size_in_bytes (type);
10734 switch (size)
10735 {
10736 case 16:
10737 mode = V4SImode;
10738 break;
10739 default:
10740 return -1;
10741 }
10742
10743 if (*modep == VOIDmode)
10744 *modep = mode;
10745
10746 /* Vector modes are considered to be opaque: two vectors are
10747 equivalent for the purposes of being homogeneous aggregates
10748 if they are the same size. */
10749 if (*modep == mode)
10750 return 1;
10751
10752 break;
10753
10754 case ARRAY_TYPE:
10755 {
10756 int count;
10757 tree index = TYPE_DOMAIN (type);
10758
10759 /* Can't handle incomplete types nor sizes that are not
10760 fixed. */
10761 if (!COMPLETE_TYPE_P (type)
10762 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10763 return -1;
10764
10765 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10766 if (count == -1
10767 || !index
10768 || !TYPE_MAX_VALUE (index)
10769 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10770 || !TYPE_MIN_VALUE (index)
10771 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10772 || count < 0)
10773 return -1;
10774
10775 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10776 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10777
10778 /* There must be no padding. */
10779 if (wi::to_wide (TYPE_SIZE (type))
10780 != count * GET_MODE_BITSIZE (*modep))
10781 return -1;
10782
10783 return count;
10784 }
10785
10786 case RECORD_TYPE:
10787 {
10788 int count = 0;
10789 int sub_count;
10790 tree field;
10791
10792 /* Can't handle incomplete types nor sizes that are not
10793 fixed. */
10794 if (!COMPLETE_TYPE_P (type)
10795 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10796 return -1;
10797
10798 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10799 {
10800 if (TREE_CODE (field) != FIELD_DECL)
10801 continue;
10802
10803 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10804 if (sub_count < 0)
10805 return -1;
10806 count += sub_count;
10807 }
10808
10809 /* There must be no padding. */
10810 if (wi::to_wide (TYPE_SIZE (type))
10811 != count * GET_MODE_BITSIZE (*modep))
10812 return -1;
10813
10814 return count;
10815 }
10816
10817 case UNION_TYPE:
10818 case QUAL_UNION_TYPE:
10819 {
10820 /* These aren't very interesting except in a degenerate case. */
10821 int count = 0;
10822 int sub_count;
10823 tree field;
10824
10825 /* Can't handle incomplete types nor sizes that are not
10826 fixed. */
10827 if (!COMPLETE_TYPE_P (type)
10828 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10829 return -1;
10830
10831 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10832 {
10833 if (TREE_CODE (field) != FIELD_DECL)
10834 continue;
10835
10836 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10837 if (sub_count < 0)
10838 return -1;
10839 count = count > sub_count ? count : sub_count;
10840 }
10841
10842 /* There must be no padding. */
10843 if (wi::to_wide (TYPE_SIZE (type))
10844 != count * GET_MODE_BITSIZE (*modep))
10845 return -1;
10846
10847 return count;
10848 }
10849
10850 default:
10851 break;
10852 }
10853
10854 return -1;
10855 }
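/* Illustrative examples of the above (assuming hard float, and
   TARGET_ALTIVEC for the vector case):

	double d[4];			counts as 4 x DFmode
	_Complex double c;		counts as 2 x DFmode
	struct { double x, y; } s;	counts as 2 x DFmode
	vector int v;			counts as 1 x V4SImode
	struct { double x; float y; }	rejected (-1): mixed element modes  */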
10856
10857 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10858 float or vector aggregate that shall be passed in FP/vector registers
10859 according to the ELFv2 ABI, return the homogeneous element mode in
10860 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10861
10862 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10863
10864 static bool
10865 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10866 machine_mode *elt_mode,
10867 int *n_elts)
10868 {
10869 /* Note that we do not accept complex types at the top level as
10870 homogeneous aggregates; these types are handled via the
10871 targetm.calls.split_complex_arg mechanism. Complex types
10872 can be elements of homogeneous aggregates, however. */
10873 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10874 && AGGREGATE_TYPE_P (type))
10875 {
10876 machine_mode field_mode = VOIDmode;
10877 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10878
10879 if (field_count > 0)
10880 {
10881 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
10882 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
10883
10884 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10885 up to AGGR_ARG_NUM_REG registers. */
10886 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
10887 {
10888 if (elt_mode)
10889 *elt_mode = field_mode;
10890 if (n_elts)
10891 *n_elts = field_count;
10892 return true;
10893 }
10894 }
10895 }
10896
10897 if (elt_mode)
10898 *elt_mode = mode;
10899 if (n_elts)
10900 *n_elts = 1;
10901 return false;
10902 }
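/* A sketch of the net effect of the above under the ELFv2 ABI: for

	struct { float f[3]; };

   it returns TRUE with *ELT_MODE == SFmode and *N_ELTS == 3, since each
   SFmode element needs a single FP register and 3 <= AGGR_ARG_NUM_REG.
   A "struct { double d[9]; }" would need nine registers and is rejected,
   as is any aggregate when the ABI is not ELFv2.  */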
10903
10904 /* Return a nonzero value to say to return the function value in
10905 memory, just as large structures are always returned. TYPE will be
10906 the data type of the value, and FNTYPE will be the type of the
10907 function doing the returning, or @code{NULL} for libcalls.
10908
10909 The AIX ABI for the RS/6000 specifies that all structures are
10910 returned in memory. The Darwin ABI does the same.
10911
10912 For the Darwin 64 Bit ABI, a function result can be returned in
10913 registers or in memory, depending on the size of the return data
10914 type. If it is returned in registers, the value occupies the same
10915 registers as it would if it were the first and only function
10916 argument. Otherwise, the function places its result in memory at
10917 the location pointed to by GPR3.
10918
10919 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10920 but a draft put them in memory, and GCC used to implement the draft
10921 instead of the final standard. Therefore, aix_struct_return
10922 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10923 compatibility can change DRAFT_V4_STRUCT_RET to override the
10924 default, and -m switches get the final word. See
10925 rs6000_option_override_internal for more details.
10926
10927 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10928 long double support is enabled. These values are returned in memory.
10929
10930    int_size_in_bytes returns -1 for variable-size objects, which always
10931    go in memory.  The cast to unsigned makes -1 > 8.  */
10932
10933 static bool
10934 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10935 {
10936 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10937 if (TARGET_MACHO
10938 && rs6000_darwin64_abi
10939 && TREE_CODE (type) == RECORD_TYPE
10940 && int_size_in_bytes (type) > 0)
10941 {
10942 CUMULATIVE_ARGS valcum;
10943 rtx valret;
10944
10945 valcum.words = 0;
10946 valcum.fregno = FP_ARG_MIN_REG;
10947 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10948 /* Do a trial code generation as if this were going to be passed
10949 as an argument; if any part goes in memory, we return NULL. */
10950 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10951 if (valret)
10952 return false;
10953 /* Otherwise fall through to more conventional ABI rules. */
10954 }
10955
10956   /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
10957 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10958 NULL, NULL))
10959 return false;
10960
10961   /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
10962 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10963 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10964 return false;
10965
10966 if (AGGREGATE_TYPE_P (type)
10967 && (aix_struct_return
10968 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10969 return true;
10970
10971 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10972 modes only exist for GCC vector types if -maltivec. */
10973 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10974 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10975 return false;
10976
10977 /* Return synthetic vectors in memory. */
10978 if (TREE_CODE (type) == VECTOR_TYPE
10979 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10980 {
10981 static bool warned_for_return_big_vectors = false;
10982 if (!warned_for_return_big_vectors)
10983 {
10984 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10985 "non-standard ABI extension with no compatibility "
10986 "guarantee");
10987 warned_for_return_big_vectors = true;
10988 }
10989 return true;
10990 }
10991
10992 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10993 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10994 return true;
10995
10996 return false;
10997 }
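/* Worked examples for the above under the ELFv2 ABI (illustrative):

	struct { double x, y; }		homogeneous aggregate: returned in
					FP registers, not in memory
	struct { char c[16]; }		<= 16 bytes: returned in GPRs
	struct { char c[17]; }		> 16 bytes (and > 8): returned in
					memory

   Under the AIX ABI, aix_struct_return sends all three to memory.  */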
10998
10999 /* Specify whether values returned in registers should be at the most
11000 significant end of a register. We want aggregates returned by
11001 value to match the way aggregates are passed to functions. */
11002
11003 static bool
11004 rs6000_return_in_msb (const_tree valtype)
11005 {
11006 return (DEFAULT_ABI == ABI_ELFv2
11007 && BYTES_BIG_ENDIAN
11008 && AGGREGATE_TYPE_P (valtype)
11009 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
11010 == PAD_UPWARD));
11011 }
11012
11013 #ifdef HAVE_AS_GNU_ATTRIBUTE
11014 /* Return TRUE if a call to function FNDECL may affect the function
11015    calling ABI of the object file.  */
11016
11017 static bool
11018 call_ABI_of_interest (tree fndecl)
11019 {
11020 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11021 {
11022 struct cgraph_node *c_node;
11023
11024 /* Libcalls are always interesting. */
11025 if (fndecl == NULL_TREE)
11026 return true;
11027
11028 /* Any call to an external function is interesting. */
11029 if (DECL_EXTERNAL (fndecl))
11030 return true;
11031
11032 /* Interesting functions that we are emitting in this object file. */
11033 c_node = cgraph_node::get (fndecl);
11034 c_node = c_node->ultimate_alias_target ();
11035 return !c_node->only_called_directly_p ();
11036 }
11037 return false;
11038 }
11039 #endif
11040
11041 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11042 for a call to a function whose data type is FNTYPE.
11043 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
11044
11045 For incoming args we set the number of arguments in the prototype large
11046 so we never return a PARALLEL. */
11047
11048 void
11049 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11050 rtx libname ATTRIBUTE_UNUSED, int incoming,
11051 int libcall, int n_named_args,
11052 tree fndecl ATTRIBUTE_UNUSED,
11053 machine_mode return_mode ATTRIBUTE_UNUSED)
11054 {
11055 static CUMULATIVE_ARGS zero_cumulative;
11056
11057 *cum = zero_cumulative;
11058 cum->words = 0;
11059 cum->fregno = FP_ARG_MIN_REG;
11060 cum->vregno = ALTIVEC_ARG_MIN_REG;
11061 cum->prototype = (fntype && prototype_p (fntype));
11062 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11063 ? CALL_LIBCALL : CALL_NORMAL);
11064 cum->sysv_gregno = GP_ARG_MIN_REG;
11065 cum->stdarg = stdarg_p (fntype);
11066 cum->libcall = libcall;
11067
11068 cum->nargs_prototype = 0;
11069 if (incoming || cum->prototype)
11070 cum->nargs_prototype = n_named_args;
11071
11072 /* Check for a longcall attribute. */
11073 if ((!fntype && rs6000_default_long_calls)
11074 || (fntype
11075 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11076 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11077 cum->call_cookie |= CALL_LONG;
11078
11079 if (TARGET_DEBUG_ARG)
11080 {
11081 fprintf (stderr, "\ninit_cumulative_args:");
11082 if (fntype)
11083 {
11084 tree ret_type = TREE_TYPE (fntype);
11085 fprintf (stderr, " ret code = %s,",
11086 get_tree_code_name (TREE_CODE (ret_type)));
11087 }
11088
11089 if (cum->call_cookie & CALL_LONG)
11090 fprintf (stderr, " longcall,");
11091
11092 fprintf (stderr, " proto = %d, nargs = %d\n",
11093 cum->prototype, cum->nargs_prototype);
11094 }
11095
11096 #ifdef HAVE_AS_GNU_ATTRIBUTE
11097 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11098 {
11099 cum->escapes = call_ABI_of_interest (fndecl);
11100 if (cum->escapes)
11101 {
11102 tree return_type;
11103
11104 if (fntype)
11105 {
11106 return_type = TREE_TYPE (fntype);
11107 return_mode = TYPE_MODE (return_type);
11108 }
11109 else
11110 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11111
11112 if (return_type != NULL)
11113 {
11114 if (TREE_CODE (return_type) == RECORD_TYPE
11115 && TYPE_TRANSPARENT_AGGR (return_type))
11116 {
11117 return_type = TREE_TYPE (first_field (return_type));
11118 return_mode = TYPE_MODE (return_type);
11119 }
11120 if (AGGREGATE_TYPE_P (return_type)
11121 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11122 <= 8))
11123 rs6000_returns_struct = true;
11124 }
11125 if (SCALAR_FLOAT_MODE_P (return_mode))
11126 {
11127 rs6000_passes_float = true;
11128 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11129 && (FLOAT128_IBM_P (return_mode)
11130 || FLOAT128_IEEE_P (return_mode)
11131 || (return_type != NULL
11132 && (TYPE_MAIN_VARIANT (return_type)
11133 == long_double_type_node))))
11134 rs6000_passes_long_double = true;
11135
11136 	      /* Note if we pass or return an IEEE 128-bit type.  We changed
11137 the mangling for these types, and we may need to make an alias
11138 with the old mangling. */
11139 if (FLOAT128_IEEE_P (return_mode))
11140 rs6000_passes_ieee128 = true;
11141 }
11142 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
11143 rs6000_passes_vector = true;
11144 }
11145 }
11146 #endif
11147
11148 if (fntype
11149 && !TARGET_ALTIVEC
11150 && TARGET_ALTIVEC_ABI
11151 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11152 {
11153 error ("cannot return value in vector register because"
11154 " altivec instructions are disabled, use %qs"
11155 " to enable them", "-maltivec");
11156 }
11157 }
11158 \f
11159 /* The mode the ABI uses for a word. This is not the same as word_mode
11160 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11161
11162 static scalar_int_mode
11163 rs6000_abi_word_mode (void)
11164 {
11165 return TARGET_32BIT ? SImode : DImode;
11166 }
11167
11168 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11169 static char *
11170 rs6000_offload_options (void)
11171 {
11172 if (TARGET_64BIT)
11173 return xstrdup ("-foffload-abi=lp64");
11174 else
11175 return xstrdup ("-foffload-abi=ilp32");
11176 }
11177
11178 /* On rs6000, function arguments are promoted, as are function return
11179 values. */
11180
11181 static machine_mode
11182 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11183 machine_mode mode,
11184 int *punsignedp ATTRIBUTE_UNUSED,
11185 const_tree, int)
11186 {
11187 PROMOTE_MODE (mode, *punsignedp, type);
11188
11189 return mode;
11190 }
11191
11192 /* Return true if TYPE must be passed on the stack and not in registers. */
11193
11194 static bool
11195 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11196 {
11197 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11198 return must_pass_in_stack_var_size (mode, type);
11199 else
11200 return must_pass_in_stack_var_size_or_pad (mode, type);
11201 }
11202
11203 static inline bool
11204 is_complex_IBM_long_double (machine_mode mode)
11205 {
11206 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
11207 }
11208
11209 /* Whether ABI_V4 passes MODE args to a function in floating point
11210 registers. */
11211
11212 static bool
11213 abi_v4_pass_in_fpr (machine_mode mode, bool named)
11214 {
11215 if (!TARGET_HARD_FLOAT)
11216 return false;
11217 if (mode == DFmode)
11218 return true;
11219 if (mode == SFmode && named)
11220 return true;
11221 /* ABI_V4 passes complex IBM long double in 8 gprs.
11222 Stupid, but we can't change the ABI now. */
11223 if (is_complex_IBM_long_double (mode))
11224 return false;
11225 if (FLOAT128_2REG_P (mode))
11226 return true;
11227 if (DECIMAL_FLOAT_MODE_P (mode))
11228 return true;
11229 return false;
11230 }
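/* For instance (assuming hard float): DFmode always goes in an FPR;
   SFmode only when the argument is named; ICmode (complex IBM long
   double) never does, per the 8-GPR convention noted above; and decimal
   modes such as DDmode and TDmode also qualify.  */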
11231
11232 /* Implement TARGET_FUNCTION_ARG_PADDING.
11233
11234 For the AIX ABI structs are always stored left shifted in their
11235 argument slot. */
11236
11237 static pad_direction
11238 rs6000_function_arg_padding (machine_mode mode, const_tree type)
11239 {
11240 #ifndef AGGREGATE_PADDING_FIXED
11241 #define AGGREGATE_PADDING_FIXED 0
11242 #endif
11243 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11244 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11245 #endif
11246
11247 if (!AGGREGATE_PADDING_FIXED)
11248 {
11249 /* GCC used to pass structures of the same size as integer types as
11250 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
11251 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11252 passed padded downward, except that -mstrict-align further
11253 muddied the water in that multi-component structures of 2 and 4
11254 bytes in size were passed padded upward.
11255
11256 The following arranges for best compatibility with previous
11257 versions of gcc, but removes the -mstrict-align dependency. */
11258 if (BYTES_BIG_ENDIAN)
11259 {
11260 HOST_WIDE_INT size = 0;
11261
11262 if (mode == BLKmode)
11263 {
11264 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11265 size = int_size_in_bytes (type);
11266 }
11267 else
11268 size = GET_MODE_SIZE (mode);
11269
11270 if (size == 1 || size == 2 || size == 4)
11271 return PAD_DOWNWARD;
11272 }
11273 return PAD_UPWARD;
11274 }
11275
11276 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11277 {
11278 if (type != 0 && AGGREGATE_TYPE_P (type))
11279 return PAD_UPWARD;
11280 }
11281
11282 /* Fall back to the default. */
11283 return default_function_arg_padding (mode, type);
11284 }
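/* For example, on a big-endian target with !AGGREGATE_PADDING_FIXED, a
   BLKmode struct of 1, 2 or 4 bytes is padded downward (it occupies the
   most significant end of its slot, like a small integer), while a
   3-byte or 5-byte struct is padded upward.  */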
11285
11286 /* If defined, a C expression that gives the alignment boundary, in bits,
11287 of an argument with the specified mode and type. If it is not defined,
11288 PARM_BOUNDARY is used for all arguments.
11289
11290 V.4 wants long longs and doubles to be double word aligned. Just
11291 testing the mode size is a boneheaded way to do this as it means
11292 that other types such as complex int are also double word aligned.
11293 However, we're stuck with this because changing the ABI might break
11294 existing library interfaces.
11295
11296 Quadword align Altivec/VSX vectors.
11297 Quadword align large synthetic vector types. */
11298
11299 static unsigned int
11300 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11301 {
11302 machine_mode elt_mode;
11303 int n_elts;
11304
11305 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11306
11307 if (DEFAULT_ABI == ABI_V4
11308 && (GET_MODE_SIZE (mode) == 8
11309 || (TARGET_HARD_FLOAT
11310 && !is_complex_IBM_long_double (mode)
11311 && FLOAT128_2REG_P (mode))))
11312 return 64;
11313 else if (FLOAT128_VECTOR_P (mode))
11314 return 128;
11315 else if (type && TREE_CODE (type) == VECTOR_TYPE
11316 && int_size_in_bytes (type) >= 8
11317 && int_size_in_bytes (type) < 16)
11318 return 64;
11319 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11320 || (type && TREE_CODE (type) == VECTOR_TYPE
11321 && int_size_in_bytes (type) >= 16))
11322 return 128;
11323
11324 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11325 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11326 -mcompat-align-parm is used. */
11327 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11328 || DEFAULT_ABI == ABI_ELFv2)
11329 && type && TYPE_ALIGN (type) > 64)
11330 {
11331 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11332 or homogeneous float/vector aggregates here. We already handled
11333 vector aggregates above, but still need to check for float here. */
11334 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11335 && !SCALAR_FLOAT_MODE_P (elt_mode));
11336
11337 /* We used to check for BLKmode instead of the above aggregate type
11338 check. Warn when this results in any difference to the ABI. */
11339 if (aggregate_p != (mode == BLKmode))
11340 {
11341 static bool warned;
11342 if (!warned && warn_psabi)
11343 {
11344 warned = true;
11345 inform (input_location,
11346 "the ABI of passing aggregates with %d-byte alignment"
11347 " has changed in GCC 5",
11348 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11349 }
11350 }
11351
11352 if (aggregate_p)
11353 return 128;
11354 }
11355
11356 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11357 implement the "aggregate type" check as a BLKmode check here; this
11358 means certain aggregate types are in fact not aligned. */
11359 if (TARGET_MACHO && rs6000_darwin64_abi
11360 && mode == BLKmode
11361 && type && TYPE_ALIGN (type) > 64)
11362 return 128;
11363
11364 return PARM_BOUNDARY;
11365 }
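/* Some concrete results of the above (illustrative): under ABI_V4,
   DImode and DFmode arguments (size 8) align to 64 bits; Altivec/VSX
   vector modes and IEEE 128-bit floats align to 128 bits; and under
   ELFv2 an aggregate declared with 16-byte alignment, e.g.

	struct s { int a; } __attribute__ ((aligned (16)));

   also aligns to 128 bits in the parameter save area.  */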
11366
11367 /* The offset in words to the start of the parameter save area. */
11368
11369 static unsigned int
11370 rs6000_parm_offset (void)
11371 {
11372 return (DEFAULT_ABI == ABI_V4 ? 2
11373 : DEFAULT_ABI == ABI_ELFv2 ? 4
11374 : 6);
11375 }
11376
11377 /* For a function parm of MODE and TYPE, return the starting word in
11378 the parameter area. NWORDS of the parameter area are already used. */
11379
11380 static unsigned int
11381 rs6000_parm_start (machine_mode mode, const_tree type,
11382 unsigned int nwords)
11383 {
11384 unsigned int align;
11385
11386 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11387 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11388 }
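/* Worked example (ELFv2 64-bit, so rs6000_parm_offset () == 4 and
   PARM_BOUNDARY == 64): for a 16-byte-aligned argument arriving when
   NWORDS == 3, ALIGN == 128 / 64 - 1 == 1, and the result is

	3 + (-(4 + 3) & 1) == 3 + 1 == 4

   i.e. one padding word is skipped so that the argument starts at word
   4 + 4 == 8 of the frame, which is a 16-byte boundary.  */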
11389
11390 /* Compute the size (in words) of a function argument. */
11391
11392 static unsigned long
11393 rs6000_arg_size (machine_mode mode, const_tree type)
11394 {
11395 unsigned long size;
11396
11397 if (mode != BLKmode)
11398 size = GET_MODE_SIZE (mode);
11399 else
11400 size = int_size_in_bytes (type);
11401
11402 if (TARGET_32BIT)
11403 return (size + 3) >> 2;
11404 else
11405 return (size + 7) >> 3;
11406 }
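/* E.g. TFmode (16 bytes) occupies 4 words with TARGET_32BIT and 2
   doublewords otherwise; a 10-byte BLKmode struct rounds up to 3 words
   or 2 doublewords respectively.  */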
11407 \f
11408 /* Use this to flush pending int fields. */
11409
11410 static void
11411 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11412 HOST_WIDE_INT bitpos, int final)
11413 {
11414 unsigned int startbit, endbit;
11415 int intregs, intoffset;
11416
11417 /* Handle the situations where a float is taking up the first half
11418 of the GPR, and the other half is empty (typically due to
11419      alignment restrictions).  We can detect this by an 8-byte-aligned
11420 int field, or by seeing that this is the final flush for this
11421 argument. Count the word and continue on. */
11422 if (cum->floats_in_gpr == 1
11423 && (cum->intoffset % 64 == 0
11424 || (cum->intoffset == -1 && final)))
11425 {
11426 cum->words++;
11427 cum->floats_in_gpr = 0;
11428 }
11429
11430 if (cum->intoffset == -1)
11431 return;
11432
11433 intoffset = cum->intoffset;
11434 cum->intoffset = -1;
11435 cum->floats_in_gpr = 0;
11436
11437 if (intoffset % BITS_PER_WORD != 0)
11438 {
11439 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11440 if (!int_mode_for_size (bits, 0).exists ())
11441 {
11442 /* We couldn't find an appropriate mode, which happens,
11443 e.g., in packed structs when there are 3 bytes to load.
11444 	     Move intoffset back to the beginning of the word in this
11445 case. */
11446 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11447 }
11448 }
11449
11450 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11451 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11452 intregs = (endbit - startbit) / BITS_PER_WORD;
11453 cum->words += intregs;
11454 /* words should be unsigned. */
11455 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11456 {
11457 int pad = (endbit/BITS_PER_WORD) - cum->words;
11458 cum->words += pad;
11459 }
11460 }
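/* Worked example (illustrative): flushing int fields that start at
   intoffset == 0 up to bitpos == 96 on a 64-bit target gives
   startbit == 0 and endbit == ROUND_UP (96, 64) == 128, so
   intregs == 2 and cum->words advances by two.  */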
11461
11462 /* The darwin64 ABI calls for us to recurse down through structs,
11463 looking for elements passed in registers. Unfortunately, we have
11464 to track int register count here also because of misalignments
11465 in powerpc alignment mode. */
11466
11467 static void
11468 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11469 const_tree type,
11470 HOST_WIDE_INT startbitpos)
11471 {
11472 tree f;
11473
11474 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11475 if (TREE_CODE (f) == FIELD_DECL)
11476 {
11477 HOST_WIDE_INT bitpos = startbitpos;
11478 tree ftype = TREE_TYPE (f);
11479 machine_mode mode;
11480 if (ftype == error_mark_node)
11481 continue;
11482 mode = TYPE_MODE (ftype);
11483
11484 if (DECL_SIZE (f) != 0
11485 && tree_fits_uhwi_p (bit_position (f)))
11486 bitpos += int_bit_position (f);
11487
11488 /* ??? FIXME: else assume zero offset. */
11489
11490 if (TREE_CODE (ftype) == RECORD_TYPE)
11491 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11492 else if (USE_FP_FOR_ARG_P (cum, mode))
11493 {
11494 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11495 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11496 cum->fregno += n_fpregs;
11497 /* Single-precision floats present a special problem for
11498 us, because they are smaller than an 8-byte GPR, and so
11499 the structure-packing rules combined with the standard
11500 varargs behavior mean that we want to pack float/float
11501 and float/int combinations into a single register's
11502 space. This is complicated by the arg advance flushing,
11503 which works on arbitrarily large groups of int-type
11504 fields. */
11505 if (mode == SFmode)
11506 {
11507 if (cum->floats_in_gpr == 1)
11508 {
11509 /* Two floats in a word; count the word and reset
11510 the float count. */
11511 cum->words++;
11512 cum->floats_in_gpr = 0;
11513 }
11514 else if (bitpos % 64 == 0)
11515 {
11516 /* A float at the beginning of an 8-byte word;
11517 count it and put off adjusting cum->words until
11518 		 we see if an arg advance flush is going to do it
11519 for us. */
11520 cum->floats_in_gpr++;
11521 }
11522 else
11523 {
11524 /* The float is at the end of a word, preceded
11525 by integer fields, so the arg advance flush
11526 just above has already set cum->words and
11527 everything is taken care of. */
11528 }
11529 }
11530 else
11531 cum->words += n_fpregs;
11532 }
11533 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11534 {
11535 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11536 cum->vregno++;
11537 cum->words += 2;
11538 }
11539 else if (cum->intoffset == -1)
11540 cum->intoffset = bitpos;
11541 }
11542 }
11543
11544 /* Check for an item that needs to be considered specially under the Darwin
11545    64-bit ABI.  These are record types where the mode is BLKmode or the
11546    structure is 8 bytes in size.  */
11547 static int
11548 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11549 {
11550 return rs6000_darwin64_abi
11551 && ((mode == BLKmode
11552 && TREE_CODE (type) == RECORD_TYPE
11553 && int_size_in_bytes (type) > 0)
11554 || (type && TREE_CODE (type) == RECORD_TYPE
11555 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11556 }
11557
11558 /* Update the data in CUM to advance over an argument
11559 of mode MODE and data type TYPE.
11560 (TYPE is null for libcalls where that information may not be available.)
11561
11562 Note that for args passed by reference, function_arg will be called
11563 with MODE and TYPE set to that of the pointer to the arg, not the arg
11564 itself. */
11565
11566 static void
11567 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11568 const_tree type, bool named, int depth)
11569 {
11570 machine_mode elt_mode;
11571 int n_elts;
11572
11573 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11574
11575 /* Only tick off an argument if we're not recursing. */
11576 if (depth == 0)
11577 cum->nargs_prototype--;
11578
11579 #ifdef HAVE_AS_GNU_ATTRIBUTE
11580 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11581 && cum->escapes)
11582 {
11583 if (SCALAR_FLOAT_MODE_P (mode))
11584 {
11585 rs6000_passes_float = true;
11586 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11587 && (FLOAT128_IBM_P (mode)
11588 || FLOAT128_IEEE_P (mode)
11589 || (type != NULL
11590 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11591 rs6000_passes_long_double = true;
11592
11593 	  /* Note if we pass or return an IEEE 128-bit type.  We changed the
11594 mangling for these types, and we may need to make an alias with
11595 the old mangling. */
11596 if (FLOAT128_IEEE_P (mode))
11597 rs6000_passes_ieee128 = true;
11598 }
11599 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11600 rs6000_passes_vector = true;
11601 }
11602 #endif
11603
11604 if (TARGET_ALTIVEC_ABI
11605 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11606 || (type && TREE_CODE (type) == VECTOR_TYPE
11607 && int_size_in_bytes (type) == 16)))
11608 {
11609 bool stack = false;
11610
11611 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11612 {
11613 cum->vregno += n_elts;
11614
11615 if (!TARGET_ALTIVEC)
11616 error ("cannot pass argument in vector register because"
11617 " altivec instructions are disabled, use %qs"
11618 " to enable them", "-maltivec");
11619
11620 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11621 even if it is going to be passed in a vector register.
11622 Darwin does the same for variable-argument functions. */
11623 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11624 && TARGET_64BIT)
11625 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11626 stack = true;
11627 }
11628 else
11629 stack = true;
11630
11631 if (stack)
11632 {
11633 int align;
11634
11635 /* Vector parameters must be 16-byte aligned. In 32-bit
11636 mode this means we need to take into account the offset
11637 to the parameter save area. In 64-bit mode, they just
11638 have to start on an even word, since the parameter save
11639 area is 16-byte aligned. */
11640 if (TARGET_32BIT)
11641 align = -(rs6000_parm_offset () + cum->words) & 3;
11642 else
11643 align = cum->words & 1;
11644 cum->words += align + rs6000_arg_size (mode, type);
11645
11646 if (TARGET_DEBUG_ARG)
11647 {
11648 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11649 cum->words, align);
11650 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11651 cum->nargs_prototype, cum->prototype,
11652 GET_MODE_NAME (mode));
11653 }
11654 }
11655 }
11656 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11657 {
11658 int size = int_size_in_bytes (type);
11659 /* Variable sized types have size == -1 and are
11660 treated as if consisting entirely of ints.
11661 Pad to 16 byte boundary if needed. */
11662 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11663 && (cum->words % 2) != 0)
11664 cum->words++;
11665 /* For varargs, we can just go up by the size of the struct. */
11666 if (!named)
11667 cum->words += (size + 7) / 8;
11668 else
11669 {
11670 /* It is tempting to say int register count just goes up by
11671 sizeof(type)/8, but this is wrong in a case such as
11672 { int; double; int; } [powerpc alignment]. We have to
11673 grovel through the fields for these too. */
11674 cum->intoffset = 0;
11675 cum->floats_in_gpr = 0;
11676 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11677 rs6000_darwin64_record_arg_advance_flush (cum,
11678 size * BITS_PER_UNIT, 1);
11679 }
11680 if (TARGET_DEBUG_ARG)
11681 {
11682 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11683 cum->words, TYPE_ALIGN (type), size);
11684 fprintf (stderr,
11685 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11686 cum->nargs_prototype, cum->prototype,
11687 GET_MODE_NAME (mode));
11688 }
11689 }
11690 else if (DEFAULT_ABI == ABI_V4)
11691 {
11692 if (abi_v4_pass_in_fpr (mode, named))
11693 {
11694 /* _Decimal128 must use an even/odd register pair. This assumes
11695 that the register number is odd when fregno is odd. */
11696 if (mode == TDmode && (cum->fregno % 2) == 1)
11697 cum->fregno++;
11698
11699 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11700 <= FP_ARG_V4_MAX_REG)
11701 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11702 else
11703 {
11704 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11705 if (mode == DFmode || FLOAT128_IBM_P (mode)
11706 || mode == DDmode || mode == TDmode)
11707 cum->words += cum->words & 1;
11708 cum->words += rs6000_arg_size (mode, type);
11709 }
11710 }
11711 else
11712 {
11713 int n_words = rs6000_arg_size (mode, type);
11714 int gregno = cum->sysv_gregno;
11715
11716 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11717 As does any other 2 word item such as complex int due to a
11718 historical mistake. */
11719 if (n_words == 2)
11720 gregno += (1 - gregno) & 1;
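	  /* I.e. (1 - gregno) & 1 is 1 exactly when gregno is even, so
	     e.g. gregno == 4 (r4) is bumped to 5 (r5) while gregno == 3
	     stays put, giving the register pairs listed above.  */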
11721
11722 /* Multi-reg args are not split between registers and stack. */
11723 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11724 {
11725 /* Long long is aligned on the stack. So are other 2 word
11726 items such as complex int due to a historical mistake. */
11727 if (n_words == 2)
11728 cum->words += cum->words & 1;
11729 cum->words += n_words;
11730 }
11731
11732 	  /* Note: we continue to accumulate gregno even after we have
11733 	     started spilling to the stack; this is how
11734 	     expand_builtin_saveregs learns that spilling has begun.  */
11735 cum->sysv_gregno = gregno + n_words;
11736 }
11737
11738 if (TARGET_DEBUG_ARG)
11739 {
11740 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11741 cum->words, cum->fregno);
11742 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11743 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11744 fprintf (stderr, "mode = %4s, named = %d\n",
11745 GET_MODE_NAME (mode), named);
11746 }
11747 }
11748 else
11749 {
11750 int n_words = rs6000_arg_size (mode, type);
11751 int start_words = cum->words;
11752 int align_words = rs6000_parm_start (mode, type, start_words);
11753
11754 cum->words = align_words + n_words;
11755
11756 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11757 {
11758 /* _Decimal128 must be passed in an even/odd float register pair.
11759 This assumes that the register number is odd when fregno is
11760 odd. */
11761 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11762 cum->fregno++;
11763 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11764 }
11765
11766 if (TARGET_DEBUG_ARG)
11767 {
11768 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11769 cum->words, cum->fregno);
11770 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11771 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11772 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11773 named, align_words - start_words, depth);
11774 }
11775 }
11776 }
11777
11778 static void
11779 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11780 const_tree type, bool named)
11781 {
11782 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11783 0);
11784 }
11785
11786 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11787 structure between cum->intoffset and bitpos to integer registers. */
11788
11789 static void
11790 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11791 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11792 {
11793 machine_mode mode;
11794 unsigned int regno;
11795 unsigned int startbit, endbit;
11796 int this_regno, intregs, intoffset;
11797 rtx reg;
11798
11799 if (cum->intoffset == -1)
11800 return;
11801
11802 intoffset = cum->intoffset;
11803 cum->intoffset = -1;
11804
11805 /* If this is the trailing part of a word, try to only load that
11806 much into the register. Otherwise load the whole register. Note
11807      that in the latter case we may pick up unwanted bits.  It's not a
11808      problem at the moment, but we may wish to revisit this.  */
11809
11810 if (intoffset % BITS_PER_WORD != 0)
11811 {
11812 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11813 if (!int_mode_for_size (bits, 0).exists (&mode))
11814 {
11815 /* We couldn't find an appropriate mode, which happens,
11816 e.g., in packed structs when there are 3 bytes to load.
11817 	     Move intoffset back to the beginning of the word in this
11818 case. */
11819 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11820 mode = word_mode;
11821 }
11822 }
11823 else
11824 mode = word_mode;
11825
11826 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11827 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11828 intregs = (endbit - startbit) / BITS_PER_WORD;
11829 this_regno = cum->words + intoffset / BITS_PER_WORD;
11830
11831 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11832 cum->use_stack = 1;
11833
11834 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11835 if (intregs <= 0)
11836 return;
11837
11838 intoffset /= BITS_PER_UNIT;
11839 do
11840 {
11841 regno = GP_ARG_MIN_REG + this_regno;
11842 reg = gen_rtx_REG (mode, regno);
11843 rvec[(*k)++] =
11844 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11845
11846 this_regno += 1;
11847 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11848 mode = word_mode;
11849 intregs -= 1;
11850 }
11851 while (intregs > 0);
11852 }
11853
11854 /* Recursive workhorse for the following. */
11855
11856 static void
11857 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11858 HOST_WIDE_INT startbitpos, rtx rvec[],
11859 int *k)
11860 {
11861 tree f;
11862
11863 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11864 if (TREE_CODE (f) == FIELD_DECL)
11865 {
11866 HOST_WIDE_INT bitpos = startbitpos;
11867 tree ftype = TREE_TYPE (f);
11868 machine_mode mode;
11869 if (ftype == error_mark_node)
11870 continue;
11871 mode = TYPE_MODE (ftype);
11872
11873 if (DECL_SIZE (f) != 0
11874 && tree_fits_uhwi_p (bit_position (f)))
11875 bitpos += int_bit_position (f);
11876
11877 /* ??? FIXME: else assume zero offset. */
11878
11879 if (TREE_CODE (ftype) == RECORD_TYPE)
11880 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11881 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11882 {
11883 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11884 #if 0
11885 switch (mode)
11886 {
11887 case E_SCmode: mode = SFmode; break;
11888 case E_DCmode: mode = DFmode; break;
11889 case E_TCmode: mode = TFmode; break;
11890 default: break;
11891 }
11892 #endif
11893 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11894 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11895 {
11896 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11897 && (mode == TFmode || mode == TDmode));
11898 /* Long double or _Decimal128 split over regs and memory. */
11899 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11900 	      cum->use_stack = 1;
11901 }
11902 rvec[(*k)++]
11903 = gen_rtx_EXPR_LIST (VOIDmode,
11904 gen_rtx_REG (mode, cum->fregno++),
11905 GEN_INT (bitpos / BITS_PER_UNIT));
11906 if (FLOAT128_2REG_P (mode))
11907 cum->fregno++;
11908 }
11909 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11910 {
11911 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11912 rvec[(*k)++]
11913 = gen_rtx_EXPR_LIST (VOIDmode,
11914 gen_rtx_REG (mode, cum->vregno++),
11915 GEN_INT (bitpos / BITS_PER_UNIT));
11916 }
11917 else if (cum->intoffset == -1)
11918 cum->intoffset = bitpos;
11919 }
11920 }
11921
11922 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11923 the register(s) to be used for each field and subfield of a struct
11924 being passed by value, along with the offset of where the
11925 register's value may be found in the block. FP fields go in FP
11926 register, vector fields go in vector registers, and everything
11927 else goes in int registers, packed as in memory.
11928
11929 This code is also used for function return values. RETVAL indicates
11930 whether this is the case.
11931
11932 Much of this is taken from the SPARC V9 port, which has a similar
11933 calling convention. */
11934
11935 static rtx
11936 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11937 bool named, bool retval)
11938 {
11939 rtx rvec[FIRST_PSEUDO_REGISTER];
11940 int k = 1, kbase = 1;
11941 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11942 /* This is a copy; modifications are not visible to our caller. */
11943 CUMULATIVE_ARGS copy_cum = *orig_cum;
11944 CUMULATIVE_ARGS *cum = &copy_cum;
11945
11946 /* Pad to 16 byte boundary if needed. */
11947 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11948 && (cum->words % 2) != 0)
11949 cum->words++;
11950
11951 cum->intoffset = 0;
11952 cum->use_stack = 0;
11953 cum->named = named;
11954
11955 /* Put entries into rvec[] for individual FP and vector fields, and
11956 for the chunks of memory that go in int regs. Note we start at
11957 element 1; 0 is reserved for an indication of using memory, and
11958 may or may not be filled in below. */
11959   rs6000_darwin64_record_arg_recurse (cum, type, /* startbitpos= */ 0, rvec, &k);
11960 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11961
11962 /* If any part of the struct went on the stack put all of it there.
11963 This hack is because the generic code for
11964 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11965 parts of the struct are not at the beginning. */
11966 if (cum->use_stack)
11967 {
11968 if (retval)
11969 return NULL_RTX; /* doesn't go in registers at all */
11970 kbase = 0;
11971 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11972 }
11973 if (k > 1 || cum->use_stack)
11974 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11975 else
11976 return NULL_RTX;
11977 }
11978
11979 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11980
11981 static rtx
11982 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11983 int align_words)
11984 {
11985 int n_units;
11986 int i, k;
11987 rtx rvec[GP_ARG_NUM_REG + 1];
11988
11989 if (align_words >= GP_ARG_NUM_REG)
11990 return NULL_RTX;
11991
11992 n_units = rs6000_arg_size (mode, type);
11993
11994 /* Optimize the simple case where the arg fits in one gpr, except in
11995 the case of BLKmode due to assign_parms assuming that registers are
11996 BITS_PER_WORD wide. */
11997 if (n_units == 0
11998 || (n_units == 1 && mode != BLKmode))
11999 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12000
12001 k = 0;
12002 if (align_words + n_units > GP_ARG_NUM_REG)
12003 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12004 using a magic NULL_RTX component.
12005 This is not strictly correct. Only some of the arg belongs in
12006 memory, not all of it. However, the normal scheme using
12007 function_arg_partial_nregs can result in unusual subregs, eg.
12008 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12009 store the whole arg to memory is often more efficient than code
12010 to store pieces, and we know that space is available in the right
12011 place for the whole arg. */
12012 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12013
12014 i = 0;
12015 do
12016 {
12017 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12018 rtx off = GEN_INT (i++ * 4);
12019 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12020 }
12021 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12022
12023 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12024 }
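/* A sketch of the result (illustrative): for a DFmode argument with
   ALIGN_WORDS == 7 under -m32 -mpowerpc64, n_units == 2 but only r10
   remains, so this builds

	(parallel:DF [(expr_list (nil) (const_int 0))
		      (expr_list (reg:SI 10) (const_int 0))])

   i.e. the first four bytes go in r10 and the argument is also stored
   in full to the parameter save area.  */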
12025
12026 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12027 but must also be copied into the parameter save area starting at
12028 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12029 to the GPRs and/or memory. Return the number of elements used. */
12030
12031 static int
12032 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12033 int align_words, rtx *rvec)
12034 {
12035 int k = 0;
12036
12037 if (align_words < GP_ARG_NUM_REG)
12038 {
12039 int n_words = rs6000_arg_size (mode, type);
12040
12041 if (align_words + n_words > GP_ARG_NUM_REG
12042 || mode == BLKmode
12043 || (TARGET_32BIT && TARGET_POWERPC64))
12044 {
12045 /* If this is partially on the stack, then we only
12046 include the portion actually in registers here. */
12047 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12048 int i = 0;
12049
12050 if (align_words + n_words > GP_ARG_NUM_REG)
12051 {
12052 /* Not all of the arg fits in gprs. Say that it goes in memory
12053 too, using a magic NULL_RTX component. Also see comment in
12054 rs6000_mixed_function_arg for why the normal
12055 function_arg_partial_nregs scheme doesn't work in this case. */
12056 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12057 }
12058
12059 do
12060 {
12061 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12062 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12063 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12064 }
12065 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12066 }
12067 else
12068 {
12069 /* The whole arg fits in gprs. */
12070 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12071 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12072 }
12073 }
12074 else
12075 {
12076 /* It's entirely in memory. */
12077 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12078 }
12079
12080 return k;
12081 }
12082
12083 /* RVEC is a vector of K components of an argument of mode MODE.
12084 Construct the final function_arg return value from it. */
12085
12086 static rtx
12087 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12088 {
12089 gcc_assert (k >= 1);
12090
12091 /* Avoid returning a PARALLEL in the trivial cases. */
12092 if (k == 1)
12093 {
12094 if (XEXP (rvec[0], 0) == NULL_RTX)
12095 return NULL_RTX;
12096
12097 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12098 return XEXP (rvec[0], 0);
12099 }
12100
12101 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12102 }
12103
12104 /* Determine where to put an argument to a function.
12105 Value is zero to push the argument on the stack,
12106 or a hard register in which to store the argument.
12107
12108 MODE is the argument's machine mode.
12109 TYPE is the data type of the argument (as a tree).
12110 This is null for libcalls where that information may
12111 not be available.
12112 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12113 the preceding args and about the function being called. It is
12114 not modified in this routine.
12115 NAMED is nonzero if this argument is a named parameter
12116 (otherwise it is an extra parameter matching an ellipsis).
12117
12118 On RS/6000 the first eight words of non-FP are normally in registers
12119 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12120 Under V.4, the first 8 FP args are in registers.
12121
12122 If this is floating-point and no prototype is specified, we use
12123 both an FP and integer register (or possibly FP reg and stack). Library
12124 functions (when CALL_LIBCALL is set) always have the proper types for args,
12125 so we can pass the FP value just in one register. emit_library_function
12126 doesn't support PARALLEL anyway.
12127
12128 Note that for args passed by reference, function_arg will be called
12129 with MODE and TYPE set to that of the pointer to the arg, not the arg
12130 itself. */
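
/* For example (an illustrative sketch; foo is a hypothetical
   unprototyped callee): given no prototype in scope on AIX,

     double d = 1.0;
     foo (d);

   the caller cannot know whether foo expects d in an FPR or in GPRs,
   so the PARALLEL built below names both f1 and the matching GPR
   words, and the value travels in both places.  */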
12131
12132 static rtx
12133 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12134 const_tree type, bool named)
12135 {
12136 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12137 enum rs6000_abi abi = DEFAULT_ABI;
12138 machine_mode elt_mode;
12139 int n_elts;
12140
12141 /* Return a marker to indicate whether the call needs to set or clear
12142 the CR1 bit that V.4 uses to say fp args were passed in registers.
12143 Assume that we don't need the marker for software floating point,
12144 or compiler generated library calls. */
12145 if (mode == VOIDmode)
12146 {
12147 if (abi == ABI_V4
12148 && (cum->call_cookie & CALL_LIBCALL) == 0
12149 && (cum->stdarg
12150 || (cum->nargs_prototype < 0
12151 && (cum->prototype || TARGET_NO_PROTOTYPE)))
12152 && TARGET_HARD_FLOAT)
12153 return GEN_INT (cum->call_cookie
12154 | ((cum->fregno == FP_ARG_MIN_REG)
12155 ? CALL_V4_SET_FP_ARGS
12156 : CALL_V4_CLEAR_FP_ARGS));
12157
12158 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12159 }
12160
12161 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12162
12163 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12164 {
12165 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12166 if (rslt != NULL_RTX)
12167 return rslt;
12168 /* Else fall through to usual handling. */
12169 }
12170
12171 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12172 {
12173 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12174 rtx r, off;
12175 int i, k = 0;
12176
12177 /* Do we also need to pass this argument in the parameter save area?
12178 Library support functions for IEEE 128-bit are assumed to not need the
12179 value passed both in GPRs and in vector registers. */
12180 if (TARGET_64BIT && !cum->prototype
12181 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12182 {
12183 int align_words = ROUND_UP (cum->words, 2);
12184 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12185 }
12186
12187 /* Describe where this argument goes in the vector registers. */
12188 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12189 {
12190 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12191 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12192 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12193 }
12194
12195 return rs6000_finish_function_arg (mode, rvec, k);
12196 }
12197 else if (TARGET_ALTIVEC_ABI
12198 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12199 || (type && TREE_CODE (type) == VECTOR_TYPE
12200 && int_size_in_bytes (type) == 16)))
12201 {
12202 if (named || abi == ABI_V4)
12203 return NULL_RTX;
12204 else
12205 {
12206 /* Vector parameters to varargs functions under AIX or Darwin
12207 get passed in memory and possibly also in GPRs. */
12208 int align, align_words, n_words;
12209 machine_mode part_mode;
12210
12211 /* Vector parameters must be 16-byte aligned. In 32-bit
12212 mode this means we need to take into account the offset
12213 to the parameter save area. In 64-bit mode, they just
12214 have to start on an even word, since the parameter save
12215 area is 16-byte aligned. */
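/* Worked example (illustrative, assuming rs6000_parm_offset ()
   yields 6 words as on 32-bit AIX): a vector arriving when
   cum->words == 3 gets align = -(6 + 3) & 3 == 3, hence
   align_words == 6, which places the value back on a 16-byte
   boundary of the parameter save area.  */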
12216 if (TARGET_32BIT)
12217 align = -(rs6000_parm_offset () + cum->words) & 3;
12218 else
12219 align = cum->words & 1;
12220 align_words = cum->words + align;
12221
12222 /* Out of registers? Memory, then. */
12223 if (align_words >= GP_ARG_NUM_REG)
12224 return NULL_RTX;
12225
12226 if (TARGET_32BIT && TARGET_POWERPC64)
12227 return rs6000_mixed_function_arg (mode, type, align_words);
12228
12229 /* The vector value goes in GPRs. Only the part of the
12230 value in GPRs is reported here. */
12231 part_mode = mode;
12232 n_words = rs6000_arg_size (mode, type);
12233 if (align_words + n_words > GP_ARG_NUM_REG)
12234 /* Fortunately, there are only two possibilities, the value
12235 is either wholly in GPRs or half in GPRs and half not. */
12236 part_mode = DImode;
12237
12238 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12239 }
12240 }
12241
12242 else if (abi == ABI_V4)
12243 {
12244 if (abi_v4_pass_in_fpr (mode, named))
12245 {
12246 /* _Decimal128 must use an even/odd register pair. This assumes
12247 that the register number is odd when fregno is odd. */
12248 if (mode == TDmode && (cum->fregno % 2) == 1)
12249 cum->fregno++;
12250
12251 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12252 <= FP_ARG_V4_MAX_REG)
12253 return gen_rtx_REG (mode, cum->fregno);
12254 else
12255 return NULL_RTX;
12256 }
12257 else
12258 {
12259 int n_words = rs6000_arg_size (mode, type);
12260 int gregno = cum->sysv_gregno;
12261
12262 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
12263 As is any other 2-word item such as complex int, due to a
12264 historical mistake. */
12265 if (n_words == 2)
12266 gregno += (1 - gregno) & 1;
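/* E.g. (illustrative): with GP_ARG_MIN_REG == 3, gregno == 4 (r4)
   becomes 5 (r5) while gregno == 3 (r3) is unchanged, so two-word
   items always start in r3, r5, r7 or r9.  */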
12267
12268 /* Multi-reg args are not split between registers and stack. */
12269 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12270 return NULL_RTX;
12271
12272 if (TARGET_32BIT && TARGET_POWERPC64)
12273 return rs6000_mixed_function_arg (mode, type,
12274 gregno - GP_ARG_MIN_REG);
12275 return gen_rtx_REG (mode, gregno);
12276 }
12277 }
12278 else
12279 {
12280 int align_words = rs6000_parm_start (mode, type, cum->words);
12281
12282 /* _Decimal128 must be passed in an even/odd float register pair.
12283 This assumes that the register number is odd when fregno is odd. */
12284 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12285 cum->fregno++;
12286
12287 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12288 {
12289 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12290 rtx r, off;
12291 int i, k = 0;
12292 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12293 int fpr_words;
12294
12295 /* Do we also need to pass this argument in the parameter
12296 save area? */
12297 if (type && (cum->nargs_prototype <= 0
12298 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12299 && TARGET_XL_COMPAT
12300 && align_words >= GP_ARG_NUM_REG)))
12301 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12302
12303 /* Describe where this argument goes in the fprs. */
12304 for (i = 0; i < n_elts
12305 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12306 {
12307 /* Check if the argument is split over registers and memory.
12308 This can only ever happen for long double or _Decimal128;
12309 complex types are handled via split_complex_arg. */
12310 machine_mode fmode = elt_mode;
12311 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12312 {
12313 gcc_assert (FLOAT128_2REG_P (fmode));
12314 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12315 }
12316
12317 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12318 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12319 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12320 }
12321
12322 /* If there were not enough FPRs to hold the argument, the rest
12323 usually goes into memory. However, if the current position
12324 is still within the register parameter area, a portion may
12325 actually have to go into GPRs.
12326
12327 Note that it may happen that the portion of the argument
12328 passed in the first "half" of the first GPR was already
12329 passed in the last FPR as well.
12330
12331 For unnamed arguments, we already set up GPRs to cover the
12332 whole argument in rs6000_psave_function_arg, so there is
12333 nothing further to do at this point. */
12334 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12335 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12336 && cum->nargs_prototype > 0)
12337 {
12338 static bool warned;
12339
12340 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12341 int n_words = rs6000_arg_size (mode, type);
12342
12343 align_words += fpr_words;
12344 n_words -= fpr_words;
12345
12346 do
12347 {
12348 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12349 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12350 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12351 }
12352 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12353
12354 if (!warned && warn_psabi)
12355 {
12356 warned = true;
12357 inform (input_location,
12358 "the ABI of passing homogeneous float aggregates"
12359 " has changed in GCC 5");
12360 }
12361 }
12362
12363 return rs6000_finish_function_arg (mode, rvec, k);
12364 }
12365 else if (align_words < GP_ARG_NUM_REG)
12366 {
12367 if (TARGET_32BIT && TARGET_POWERPC64)
12368 return rs6000_mixed_function_arg (mode, type, align_words);
12369
12370 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12371 }
12372 else
12373 return NULL_RTX;
12374 }
12375 }
12376 \f
12377 /* For an arg passed partly in registers and partly in memory, this is
12378 the number of bytes passed in registers. For args passed entirely in
12379 registers or entirely in memory, zero. When an arg is described by a
12380 PARALLEL, perhaps using more than one register type, this function
12381 returns the number of bytes used by the first element of the PARALLEL. */
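
/* For instance (an illustrative sketch): on a 64-bit target, a four
   doubleword aggregate arriving at align_words == 6 has two doublewords
   in r9/r10 and two in memory, so the GPR code below yields
   (8 - 6) * 8 == 16 bytes.  */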
12382
12383 static int
12384 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12385 tree type, bool named)
12386 {
12387 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12388 bool passed_in_gprs = true;
12389 int ret = 0;
12390 int align_words;
12391 machine_mode elt_mode;
12392 int n_elts;
12393
12394 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12395
12396 if (DEFAULT_ABI == ABI_V4)
12397 return 0;
12398
12399 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12400 {
12401 /* If we are passing this arg in the fixed parameter save area (gprs or
12402 memory) as well as VRs, we do not use the partial bytes mechanism;
12403 instead, rs6000_function_arg will return a PARALLEL including a memory
12404 element as necessary. Library support functions for IEEE 128-bit are
12405 assumed to not need the value passed both in GPRs and in vector
12406 registers. */
12407 if (TARGET_64BIT && !cum->prototype
12408 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12409 return 0;
12410
12411 /* Otherwise, we pass in VRs only. Check for partial copies. */
12412 passed_in_gprs = false;
12413 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12414 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12415 }
12416
12417 /* In this complicated case we just disable the partial_nregs code. */
12418 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12419 return 0;
12420
12421 align_words = rs6000_parm_start (mode, type, cum->words);
12422
12423 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12424 {
12425 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12426
12427 /* If we are passing this arg in the fixed parameter save area
12428 (gprs or memory) as well as FPRs, we do not use the partial
12429 bytes mechanism; instead, rs6000_function_arg will return a
12430 PARALLEL including a memory element as necessary. */
12431 if (type
12432 && (cum->nargs_prototype <= 0
12433 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12434 && TARGET_XL_COMPAT
12435 && align_words >= GP_ARG_NUM_REG)))
12436 return 0;
12437
12438 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12439 passed_in_gprs = false;
12440 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12441 {
12442 /* Compute number of bytes / words passed in FPRs. If there
12443 is still space available in the register parameter area
12444 *after* that amount, a part of the argument will be passed
12445 in GPRs. In that case, the total amount passed in any
12446 registers is equal to the amount that would have been passed
12447 in GPRs if everything were passed there, so we fall back to
12448 the GPR code below to compute the appropriate value. */
12449 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12450 * MIN (8, GET_MODE_SIZE (elt_mode)));
12451 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12452
12453 if (align_words + fpr_words < GP_ARG_NUM_REG)
12454 passed_in_gprs = true;
12455 else
12456 ret = fpr;
12457 }
12458 }
12459
12460 if (passed_in_gprs
12461 && align_words < GP_ARG_NUM_REG
12462 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12463 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12464
12465 if (ret != 0 && TARGET_DEBUG_ARG)
12466 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12467
12468 return ret;
12469 }
12470 \f
12471 /* A C expression that indicates when an argument must be passed by
12472 reference. If nonzero for an argument, a copy of that argument is
12473 made in memory and a pointer to the argument is passed instead of
12474 the argument itself. The pointer is passed in whatever way is
12475 appropriate for passing a pointer to that type.
12476
12477 Under V.4, aggregates and long double are passed by reference.
12478
12479 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12480 reference unless the AltiVec vector extension ABI is in force.
12481
12482 As an extension to all ABIs, variable sized types are passed by
12483 reference. */
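
/* E.g. (illustrative): under V.4,

     struct big { int a[10]; };
     void f (struct big b);

   receives a pointer to a caller-made copy of b rather than b itself.  */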
12484
12485 static bool
12486 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12487 machine_mode mode, const_tree type,
12488 bool named ATTRIBUTE_UNUSED)
12489 {
12490 if (!type)
12491 return 0;
12492
12493 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12494 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12495 {
12496 if (TARGET_DEBUG_ARG)
12497 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12498 return 1;
12499 }
12500
12501 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12502 {
12503 if (TARGET_DEBUG_ARG)
12504 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12505 return 1;
12506 }
12507
12508 if (int_size_in_bytes (type) < 0)
12509 {
12510 if (TARGET_DEBUG_ARG)
12511 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12512 return 1;
12513 }
12514
12515 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12516 modes only exist for GCC vector types if -maltivec. */
12517 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12518 {
12519 if (TARGET_DEBUG_ARG)
12520 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12521 return 1;
12522 }
12523
12524 /* Pass synthetic vectors in memory. */
12525 if (TREE_CODE (type) == VECTOR_TYPE
12526 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12527 {
12528 static bool warned_for_pass_big_vectors = false;
12529 if (TARGET_DEBUG_ARG)
12530 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12531 if (!warned_for_pass_big_vectors)
12532 {
12533 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12534 "non-standard ABI extension with no compatibility "
12535 "guarantee");
12536 warned_for_pass_big_vectors = true;
12537 }
12538 return 1;
12539 }
12540
12541 return 0;
12542 }
12543
12544 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12545 already processed. Return true if the parameter must be passed
12546 (fully or partially) on the stack. */
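
/* E.g. (an illustrative sketch): on ELFv2,

     void f (double a, double b);

   places both parameters in FPRs, so this returns false for each of
   them and rs6000_reg_parm_stack_space below may omit the register
   parameter area entirely.  */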
12547
12548 static bool
12549 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12550 {
12551 machine_mode mode;
12552 int unsignedp;
12553 rtx entry_parm;
12554
12555 /* Catch errors. */
12556 if (type == NULL || type == error_mark_node)
12557 return true;
12558
12559 /* Handle types with no storage requirement. */
12560 if (TYPE_MODE (type) == VOIDmode)
12561 return false;
12562
12563 /* Handle complex types. */
12564 if (TREE_CODE (type) == COMPLEX_TYPE)
12565 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12566 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12567
12568 /* Handle transparent aggregates. */
12569 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12570 && TYPE_TRANSPARENT_AGGR (type))
12571 type = TREE_TYPE (first_field (type));
12572
12573 /* See if this arg was passed by invisible reference. */
12574 if (pass_by_reference (get_cumulative_args (args_so_far),
12575 TYPE_MODE (type), type, true))
12576 type = build_pointer_type (type);
12577
12578 /* Find mode as it is passed by the ABI. */
12579 unsignedp = TYPE_UNSIGNED (type);
12580 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12581
12582 /* If we must pass in stack, we need a stack. */
12583 if (rs6000_must_pass_in_stack (mode, type))
12584 return true;
12585
12586 /* If there is no incoming register, we need a stack. */
12587 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12588 if (entry_parm == NULL)
12589 return true;
12590
12591 /* Likewise if we need to pass both in registers and on the stack. */
12592 if (GET_CODE (entry_parm) == PARALLEL
12593 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12594 return true;
12595
12596 /* Also true if we're partially in registers and partially not. */
12597 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12598 return true;
12599
12600 /* Update info on where next arg arrives in registers. */
12601 rs6000_function_arg_advance (args_so_far, mode, type, true);
12602 return false;
12603 }
12604
12605 /* Return true if FUN has no prototype, has a variable argument
12606 list, or passes any parameter in memory. */
12607
12608 static bool
12609 rs6000_function_parms_need_stack (tree fun, bool incoming)
12610 {
12611 tree fntype, result;
12612 CUMULATIVE_ARGS args_so_far_v;
12613 cumulative_args_t args_so_far;
12614
12615 if (!fun)
12616 /* Must be a libcall, all of which only use reg parms. */
12617 return false;
12618
12619 fntype = fun;
12620 if (!TYPE_P (fun))
12621 fntype = TREE_TYPE (fun);
12622
12623 /* Varargs functions need the parameter save area. */
12624 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12625 return true;
12626
12627 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12628 args_so_far = pack_cumulative_args (&args_so_far_v);
12629
12630 /* When incoming, we will have been passed the function decl.
12631 It is necessary to use the decl to handle K&R style functions,
12632 where TYPE_ARG_TYPES may not be available. */
12633 if (incoming)
12634 {
12635 gcc_assert (DECL_P (fun));
12636 result = DECL_RESULT (fun);
12637 }
12638 else
12639 result = TREE_TYPE (fntype);
12640
12641 if (result && aggregate_value_p (result, fntype))
12642 {
12643 if (!TYPE_P (result))
12644 result = TREE_TYPE (result);
12645 result = build_pointer_type (result);
12646 rs6000_parm_needs_stack (args_so_far, result);
12647 }
12648
12649 if (incoming)
12650 {
12651 tree parm;
12652
12653 for (parm = DECL_ARGUMENTS (fun);
12654 parm && parm != void_list_node;
12655 parm = TREE_CHAIN (parm))
12656 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12657 return true;
12658 }
12659 else
12660 {
12661 function_args_iterator args_iter;
12662 tree arg_type;
12663
12664 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12665 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12666 return true;
12667 }
12668
12669 return false;
12670 }
12671
12672 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12673 usually a constant depending on the ABI. However, in the ELFv2 ABI
12674 the register parameter area is optional when calling a function that
12675 has a prototype in scope, has no variable argument list, and passes
12676 all parameters in registers. */
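
/* E.g. (illustrative): a prototyped 64-bit ELFv2 call that passes all
   of its arguments in registers gets 0 here, whereas 64-bit AIX always
   reserves 64 bytes (8 doublewords) regardless of the arguments.  */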
12677
12678 int
12679 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12680 {
12681 int reg_parm_stack_space;
12682
12683 switch (DEFAULT_ABI)
12684 {
12685 default:
12686 reg_parm_stack_space = 0;
12687 break;
12688
12689 case ABI_AIX:
12690 case ABI_DARWIN:
12691 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12692 break;
12693
12694 case ABI_ELFv2:
12695 /* ??? Recomputing this every time is a bit expensive. Is there
12696 a place to cache this information? */
12697 if (rs6000_function_parms_need_stack (fun, incoming))
12698 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12699 else
12700 reg_parm_stack_space = 0;
12701 break;
12702 }
12703
12704 return reg_parm_stack_space;
12705 }
12706
12707 static void
12708 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12709 {
12710 int i;
12711 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12712
12713 if (nregs == 0)
12714 return;
12715
12716 for (i = 0; i < nregs; i++)
12717 {
12718 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12719 if (reload_completed)
12720 {
12721 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12722 tem = NULL_RTX;
12723 else
12724 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12725 i * GET_MODE_SIZE (reg_mode));
12726 }
12727 else
12728 tem = replace_equiv_address (tem, XEXP (tem, 0));
12729
12730 gcc_assert (tem);
12731
12732 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12733 }
12734 }
12735 \f
12736 /* Perform any actions needed for a function that is receiving a
12737 variable number of arguments.
12738
12739 CUM is as above.
12740
12741 MODE and TYPE are the mode and type of the current parameter.
12742
12743 PRETEND_SIZE is a variable that should be set to the amount of stack
12744 that must be pushed by the prolog to pretend that our caller pushed
12745 it.
12746
12747 Normally, this macro will push all remaining incoming registers on the
12748 stack and set PRETEND_SIZE to the length of the registers pushed. */
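
/* E.g. (an illustrative sketch): for the V.4 function

     void f (int a, ...);

   the named parameter a consumes r3, so first_reg_offset == 1 and the
   remaining GPRs r4..r10 are dumped into the register save area, ready
   to be fetched back by va_arg.  */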
12749
12750 static void
12751 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12752 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12753 int no_rtl)
12754 {
12755 CUMULATIVE_ARGS next_cum;
12756 int reg_size = TARGET_32BIT ? 4 : 8;
12757 rtx save_area = NULL_RTX, mem;
12758 int first_reg_offset;
12759 alias_set_type set;
12760
12761 /* Skip the last named argument. */
12762 next_cum = *get_cumulative_args (cum);
12763 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12764
12765 if (DEFAULT_ABI == ABI_V4)
12766 {
12767 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12768
12769 if (! no_rtl)
12770 {
12771 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12772 HOST_WIDE_INT offset = 0;
12773
12774 /* Try to optimize the size of the varargs save area.
12775 The ABI requires that ap.reg_save_area is doubleword
12776 aligned, but we don't need to allocate space for all
12777 the bytes, only those to which we actually will save
12778 anything. */
12779 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12780 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12781 if (TARGET_HARD_FLOAT
12782 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12783 && cfun->va_list_fpr_size)
12784 {
12785 if (gpr_reg_num)
12786 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12787 * UNITS_PER_FP_WORD;
12788 if (cfun->va_list_fpr_size
12789 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12790 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12791 else
12792 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12793 * UNITS_PER_FP_WORD;
12794 }
12795 if (gpr_reg_num)
12796 {
12797 offset = -((first_reg_offset * reg_size) & ~7);
12798 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12799 {
12800 gpr_reg_num = cfun->va_list_gpr_size;
12801 if (reg_size == 4 && (first_reg_offset & 1))
12802 gpr_reg_num++;
12803 }
12804 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12805 }
12806 else if (fpr_size)
12807 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12808 * UNITS_PER_FP_WORD
12809 - (int) (GP_ARG_NUM_REG * reg_size);
12810
12811 if (gpr_size + fpr_size)
12812 {
12813 rtx reg_save_area
12814 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12815 gcc_assert (GET_CODE (reg_save_area) == MEM);
12816 reg_save_area = XEXP (reg_save_area, 0);
12817 if (GET_CODE (reg_save_area) == PLUS)
12818 {
12819 gcc_assert (XEXP (reg_save_area, 0)
12820 == virtual_stack_vars_rtx);
12821 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12822 offset += INTVAL (XEXP (reg_save_area, 1));
12823 }
12824 else
12825 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12826 }
12827
12828 cfun->machine->varargs_save_offset = offset;
12829 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12830 }
12831 }
12832 else
12833 {
12834 first_reg_offset = next_cum.words;
12835 save_area = crtl->args.internal_arg_pointer;
12836
12837 if (targetm.calls.must_pass_in_stack (mode, type))
12838 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12839 }
12840
12841 set = get_varargs_alias_set ();
12842 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12843 && cfun->va_list_gpr_size)
12844 {
12845 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12846
12847 if (va_list_gpr_counter_field)
12848 /* V4 va_list_gpr_size counts number of registers needed. */
12849 n_gpr = cfun->va_list_gpr_size;
12850 else
12851 /* char * va_list instead counts number of bytes needed. */
12852 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12853
12854 if (nregs > n_gpr)
12855 nregs = n_gpr;
12856
12857 mem = gen_rtx_MEM (BLKmode,
12858 plus_constant (Pmode, save_area,
12859 first_reg_offset * reg_size));
12860 MEM_NOTRAP_P (mem) = 1;
12861 set_mem_alias_set (mem, set);
12862 set_mem_align (mem, BITS_PER_WORD);
12863
12864 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12865 nregs);
12866 }
12867
12868 /* Save FP registers if needed. */
12869 if (DEFAULT_ABI == ABI_V4
12870 && TARGET_HARD_FLOAT
12871 && ! no_rtl
12872 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12873 && cfun->va_list_fpr_size)
12874 {
12875 int fregno = next_cum.fregno, nregs;
12876 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12877 rtx lab = gen_label_rtx ();
12878 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12879 * UNITS_PER_FP_WORD);
12880
12881 emit_jump_insn
12882 (gen_rtx_SET (pc_rtx,
12883 gen_rtx_IF_THEN_ELSE (VOIDmode,
12884 gen_rtx_NE (VOIDmode, cr1,
12885 const0_rtx),
12886 gen_rtx_LABEL_REF (VOIDmode, lab),
12887 pc_rtx)));
12888
12889 for (nregs = 0;
12890 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12891 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12892 {
12893 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12894 plus_constant (Pmode, save_area, off));
12895 MEM_NOTRAP_P (mem) = 1;
12896 set_mem_alias_set (mem, set);
12897 set_mem_align (mem, GET_MODE_ALIGNMENT (TARGET_HARD_FLOAT
12898 ? DFmode : SFmode));
12899 emit_move_insn (mem, gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
12900 fregno));
12901 }
12902
12903 emit_label (lab);
12904 }
12905 }
12906
12907 /* Create the va_list data type. */
12908
12909 static tree
12910 rs6000_build_builtin_va_list (void)
12911 {
12912 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12913
12914 /* For AIX, prefer 'char *' because that's what the system
12915 header files like. */
12916 if (DEFAULT_ABI != ABI_V4)
12917 return build_pointer_type (char_type_node);
12918
12919 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12920 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12921 get_identifier ("__va_list_tag"), record);
12922
12923 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12924 unsigned_char_type_node);
12925 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12926 unsigned_char_type_node);
12927 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12928 every user file. */
12929 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12930 get_identifier ("reserved"), short_unsigned_type_node);
12931 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12932 get_identifier ("overflow_arg_area"),
12933 ptr_type_node);
12934 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12935 get_identifier ("reg_save_area"),
12936 ptr_type_node);
12937
12938 va_list_gpr_counter_field = f_gpr;
12939 va_list_fpr_counter_field = f_fpr;
12940
12941 DECL_FIELD_CONTEXT (f_gpr) = record;
12942 DECL_FIELD_CONTEXT (f_fpr) = record;
12943 DECL_FIELD_CONTEXT (f_res) = record;
12944 DECL_FIELD_CONTEXT (f_ovf) = record;
12945 DECL_FIELD_CONTEXT (f_sav) = record;
12946
12947 TYPE_STUB_DECL (record) = type_decl;
12948 TYPE_NAME (record) = type_decl;
12949 TYPE_FIELDS (record) = f_gpr;
12950 DECL_CHAIN (f_gpr) = f_fpr;
12951 DECL_CHAIN (f_fpr) = f_res;
12952 DECL_CHAIN (f_res) = f_ovf;
12953 DECL_CHAIN (f_ovf) = f_sav;
12954
12955 layout_type (record);
12956
12957 /* The correct type is an array type of one element. */
12958 return build_array_type (record, build_index_type (size_zero_node));
12959 }
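
/* An illustrative C rendering of the record built above (a sketch for
   reference only; the compiler constructs it directly as trees, and
   va_list_sketch is a hypothetical name):

     typedef struct __va_list_tag
     {
       unsigned char gpr;          GPR argument registers consumed
       unsigned char fpr;          FPR argument registers consumed
       unsigned short reserved;    the named padding described above
       void *overflow_arg_area;    next argument passed on the stack
       void *reg_save_area;        base of the saved argument registers
     } va_list_sketch[1];          array of one element, as returned
*/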
12960
12961 /* Implement va_start. */
12962
12963 static void
12964 rs6000_va_start (tree valist, rtx nextarg)
12965 {
12966 HOST_WIDE_INT words, n_gpr, n_fpr;
12967 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12968 tree gpr, fpr, ovf, sav, t;
12969
12970 /* Only SVR4 needs something special. */
12971 if (DEFAULT_ABI != ABI_V4)
12972 {
12973 std_expand_builtin_va_start (valist, nextarg);
12974 return;
12975 }
12976
12977 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12978 f_fpr = DECL_CHAIN (f_gpr);
12979 f_res = DECL_CHAIN (f_fpr);
12980 f_ovf = DECL_CHAIN (f_res);
12981 f_sav = DECL_CHAIN (f_ovf);
12982
12983 valist = build_simple_mem_ref (valist);
12984 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12985 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12986 f_fpr, NULL_TREE);
12987 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12988 f_ovf, NULL_TREE);
12989 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12990 f_sav, NULL_TREE);
12991
12992 /* Count number of gp and fp argument registers used. */
12993 words = crtl->args.info.words;
12994 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12995 GP_ARG_NUM_REG);
12996 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12997 FP_ARG_NUM_REG);
12998
12999 if (TARGET_DEBUG_ARG)
13000 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13001 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13002 words, n_gpr, n_fpr);
13003
13004 if (cfun->va_list_gpr_size)
13005 {
13006 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13007 build_int_cst (NULL_TREE, n_gpr));
13008 TREE_SIDE_EFFECTS (t) = 1;
13009 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13010 }
13011
13012 if (cfun->va_list_fpr_size)
13013 {
13014 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13015 build_int_cst (NULL_TREE, n_fpr));
13016 TREE_SIDE_EFFECTS (t) = 1;
13017 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13018
13019 #ifdef HAVE_AS_GNU_ATTRIBUTE
13020 if (call_ABI_of_interest (cfun->decl))
13021 rs6000_passes_float = true;
13022 #endif
13023 }
13024
13025 /* Find the overflow area. */
13026 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13027 if (words != 0)
13028 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13029 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13030 TREE_SIDE_EFFECTS (t) = 1;
13031 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13032
13033 /* If there were no va_arg invocations, don't set up the register
13034 save area. */
13035 if (!cfun->va_list_gpr_size
13036 && !cfun->va_list_fpr_size
13037 && n_gpr < GP_ARG_NUM_REG
13038 && n_fpr < FP_ARG_V4_MAX_REG)
13039 return;
13040
13041 /* Find the register save area. */
13042 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13043 if (cfun->machine->varargs_save_offset)
13044 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13045 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13046 TREE_SIDE_EFFECTS (t) = 1;
13047 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13048 }
13049
13050 /* Implement va_arg. */
13051
13052 static tree
13053 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13054 gimple_seq *post_p)
13055 {
13056 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13057 tree gpr, fpr, ovf, sav, reg, t, u;
13058 int size, rsize, n_reg, sav_ofs, sav_scale;
13059 tree lab_false, lab_over, addr;
13060 int align;
13061 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13062 int regalign = 0;
13063 gimple *stmt;
13064
13065 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13066 {
13067 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13068 return build_va_arg_indirect_ref (t);
13069 }
13070
13071 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13072 earlier version of gcc, with the property that it always applied alignment
13073 adjustments to the va-args (even for zero-sized types). The cheapest way
13074 to deal with this is to replicate the effect of the part of
13075 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13076 of relevance.
13077 We don't need to check for pass-by-reference because of the test above.
13078 We can return a simplified answer, since we know there's no offset to add. */
13079
13080 if (((TARGET_MACHO
13081 && rs6000_darwin64_abi)
13082 || DEFAULT_ABI == ABI_ELFv2
13083 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13084 && integer_zerop (TYPE_SIZE (type)))
13085 {
13086 unsigned HOST_WIDE_INT align, boundary;
13087 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13088 align = PARM_BOUNDARY / BITS_PER_UNIT;
13089 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13090 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13091 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13092 boundary /= BITS_PER_UNIT;
13093 if (boundary > align)
13094 {
13095 tree t;
13096 /* This updates arg ptr by the amount that would be necessary
13097 to align the zero-sized (but not zero-alignment) item. */
13098 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13099 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13100 gimplify_and_add (t, pre_p);
13101
13102 t = fold_convert (sizetype, valist_tmp);
13103 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13104 fold_convert (TREE_TYPE (valist),
13105 fold_build2 (BIT_AND_EXPR, sizetype, t,
13106 size_int (-boundary))));
13107 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13108 gimplify_and_add (t, pre_p);
13109 }
13110 /* Since it is zero-sized there's no increment for the item itself. */
13111 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13112 return build_va_arg_indirect_ref (valist_tmp);
13113 }
13114
13115 if (DEFAULT_ABI != ABI_V4)
13116 {
13117 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13118 {
13119 tree elem_type = TREE_TYPE (type);
13120 machine_mode elem_mode = TYPE_MODE (elem_type);
13121 int elem_size = GET_MODE_SIZE (elem_mode);
13122
13123 if (elem_size < UNITS_PER_WORD)
13124 {
13125 tree real_part, imag_part;
13126 gimple_seq post = NULL;
13127
13128 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13129 &post);
13130 /* Copy the value into a temporary, lest the formal temporary
13131 be reused out from under us. */
13132 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13133 gimple_seq_add_seq (pre_p, post);
13134
13135 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13136 post_p);
13137
13138 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13139 }
13140 }
13141
13142 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13143 }
13144
13145 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13146 f_fpr = DECL_CHAIN (f_gpr);
13147 f_res = DECL_CHAIN (f_fpr);
13148 f_ovf = DECL_CHAIN (f_res);
13149 f_sav = DECL_CHAIN (f_ovf);
13150
13151 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13152 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13153 f_fpr, NULL_TREE);
13154 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13155 f_ovf, NULL_TREE);
13156 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13157 f_sav, NULL_TREE);
13158
13159 size = int_size_in_bytes (type);
13160 rsize = (size + 3) / 4;
13161 int pad = 4 * rsize - size;
13162 align = 1;
13163
13164 machine_mode mode = TYPE_MODE (type);
13165 if (abi_v4_pass_in_fpr (mode, false))
13166 {
13167 /* FP args go in FP registers, if present. */
13168 reg = fpr;
13169 n_reg = (size + 7) / 8;
13170 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
13171 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
13172 if (mode != SFmode && mode != SDmode)
13173 align = 8;
13174 }
13175 else
13176 {
13177 /* Otherwise into GP registers. */
13178 reg = gpr;
13179 n_reg = rsize;
13180 sav_ofs = 0;
13181 sav_scale = 4;
13182 if (n_reg == 2)
13183 align = 8;
13184 }
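
/* Illustrative values (a sketch): on 32-bit V.4 hard-float, a double
   has size == 8 and takes the FPR branch with n_reg == 1, sav_ofs == 32
   (the eight 4-byte GPR slots precede the FPRs in the save area) and
   sav_scale == 8; a long long takes the GPR branch with
   rsize == n_reg == 2 and align == 8.  */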
13185
13186 /* Pull the value out of the saved registers.... */
13187
13188 lab_over = NULL;
13189 addr = create_tmp_var (ptr_type_node, "addr");
13190
13191 /* AltiVec vectors never go in registers when -mabi=altivec. */
13192 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13193 align = 16;
13194 else
13195 {
13196 lab_false = create_artificial_label (input_location);
13197 lab_over = create_artificial_label (input_location);
13198
13199 /* Long long is aligned in the registers, as is any other 2-gpr
13200 item such as complex int, due to a historical mistake. */
13201 u = reg;
13202 if (n_reg == 2 && reg == gpr)
13203 {
13204 regalign = 1;
13205 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13206 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13207 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13208 unshare_expr (reg), u);
13209 }
13210 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13211 reg number is 0 for f1, so we want to make it odd. */
13212 else if (reg == fpr && mode == TDmode)
13213 {
13214 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13215 build_int_cst (TREE_TYPE (reg), 1));
13216 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13217 }
13218
13219 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13220 t = build2 (GE_EXPR, boolean_type_node, u, t);
13221 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13222 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13223 gimplify_and_add (t, pre_p);
13224
13225 t = sav;
13226 if (sav_ofs)
13227 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13228
13229 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13230 build_int_cst (TREE_TYPE (reg), n_reg));
13231 u = fold_convert (sizetype, u);
13232 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13233 t = fold_build_pointer_plus (t, u);
13234
13235 /* _Decimal32 varargs are located in the second word of the 64-bit
13236 FP register for 32-bit binaries. */
13237 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
13238 t = fold_build_pointer_plus_hwi (t, size);
13239
13240 /* Args are passed right-aligned. */
13241 if (BYTES_BIG_ENDIAN)
13242 t = fold_build_pointer_plus_hwi (t, pad);
13243
13244 gimplify_assign (addr, t, pre_p);
13245
13246 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13247
13248 stmt = gimple_build_label (lab_false);
13249 gimple_seq_add_stmt (pre_p, stmt);
13250
13251 if ((n_reg == 2 && !regalign) || n_reg > 2)
13252 {
13253 /* Ensure that we don't find any more args in regs.
13254 Alignment has been taken care of for the special cases. */
13255 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13256 }
13257 }
13258
13259 /* ... otherwise out of the overflow area. */
13260
13261 /* Care for on-stack alignment if needed. */
13262 t = ovf;
13263 if (align != 1)
13264 {
13265 t = fold_build_pointer_plus_hwi (t, align - 1);
13266 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13267 build_int_cst (TREE_TYPE (t), -align));
13268 }
13269
13270 /* Args are passed right-aligned. */
13271 if (BYTES_BIG_ENDIAN)
13272 t = fold_build_pointer_plus_hwi (t, pad);
13273
13274 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13275
13276 gimplify_assign (unshare_expr (addr), t, pre_p);
13277
13278 t = fold_build_pointer_plus_hwi (t, size);
13279 gimplify_assign (unshare_expr (ovf), t, pre_p);
13280
13281 if (lab_over)
13282 {
13283 stmt = gimple_build_label (lab_over);
13284 gimple_seq_add_stmt (pre_p, stmt);
13285 }
13286
13287 if (STRICT_ALIGNMENT
13288 && (TYPE_ALIGN (type)
13289 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13290 {
13291 /* The value (of type complex double, for example) may not be
13292 aligned in memory in the saved registers, so copy via a
13293 temporary. (This is the same code as used for SPARC.) */
13294 tree tmp = create_tmp_var (type, "va_arg_tmp");
13295 tree dest_addr = build_fold_addr_expr (tmp);
13296
13297 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13298 3, dest_addr, addr, size_int (rsize * 4));
13299 TREE_ADDRESSABLE (tmp) = 1;
13300
13301 gimplify_and_add (copy, pre_p);
13302 addr = dest_addr;
13303 }
13304
13305 addr = fold_convert (ptrtype, addr);
13306 return build_va_arg_indirect_ref (addr);
13307 }
13308
13309 /* Builtins. */
13310
13311 static void
13312 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13313 {
13314 tree t;
13315 unsigned classify = rs6000_builtin_info[(int)code].attr;
13316 const char *attr_string = "";
13317
13318 gcc_assert (name != NULL);
13319 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13320
13321 if (rs6000_builtin_decls[(int)code])
13322 fatal_error (input_location,
13323 "internal error: builtin function %qs already processed",
13324 name);
13325
13326 rs6000_builtin_decls[(int)code] = t =
13327 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13328
13329 /* Set any special attributes. */
13330 if ((classify & RS6000_BTC_CONST) != 0)
13331 {
13332 /* const function, function only depends on the inputs. */
13333 TREE_READONLY (t) = 1;
13334 TREE_NOTHROW (t) = 1;
13335 attr_string = ", const";
13336 }
13337 else if ((classify & RS6000_BTC_PURE) != 0)
13338 {
13339 /* pure function, function can read global memory, but does not set any
13340 external state. */
13341 DECL_PURE_P (t) = 1;
13342 TREE_NOTHROW (t) = 1;
13343 attr_string = ", pure";
13344 }
13345 else if ((classify & RS6000_BTC_FP) != 0)
13346 {
13347 /* Function is a math function. If rounding mode is on, then treat the
13348 function as not reading global memory, but it can have arbitrary side
13349 effects. If it is off, then assume the function is a const function.
13350 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13351 builtin-attribute.def that is used for the math functions. */
13352 TREE_NOTHROW (t) = 1;
13353 if (flag_rounding_math)
13354 {
13355 DECL_PURE_P (t) = 1;
13356 DECL_IS_NOVOPS (t) = 1;
13357 attr_string = ", fp, pure";
13358 }
13359 else
13360 {
13361 TREE_READONLY (t) = 1;
13362 attr_string = ", fp, const";
13363 }
13364 }
13365 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13366 gcc_unreachable ();
13367
13368 if (TARGET_DEBUG_BUILTIN)
13369 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13370 (int)code, name, attr_string);
13371 }
13372
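/* The tables below are produced with the X-macro pattern:
   rs6000-builtin.def is #included once per table, each time with
   exactly one RS6000_BUILTIN_* macro defined to emit an initializer
   and the others defined to emit nothing.  Illustratively, a
   hypothetical line in rs6000-builtin.def such as

     RS6000_BUILTIN_2 (FOO_BUILTIN, "__builtin_foo", MASK, ATTR,
                       CODE_FOR_foo)

   would contribute { MASK, CODE_FOR_foo, "__builtin_foo", FOO_BUILTIN }
   to bdesc_2arg below and nothing to the other tables.  */
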
13373 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13374
13375 #undef RS6000_BUILTIN_0
13376 #undef RS6000_BUILTIN_1
13377 #undef RS6000_BUILTIN_2
13378 #undef RS6000_BUILTIN_3
13379 #undef RS6000_BUILTIN_A
13380 #undef RS6000_BUILTIN_D
13381 #undef RS6000_BUILTIN_H
13382 #undef RS6000_BUILTIN_P
13383 #undef RS6000_BUILTIN_X
13384
13385 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13386 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13387 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13388 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13389 { MASK, ICODE, NAME, ENUM },
13390
13391 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13392 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13393 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13394 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13395 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13396
13397 static const struct builtin_description bdesc_3arg[] =
13398 {
13399 #include "rs6000-builtin.def"
13400 };
13401
13402 /* DST operations: void foo (void *, const int, const char). */
13403
13404 #undef RS6000_BUILTIN_0
13405 #undef RS6000_BUILTIN_1
13406 #undef RS6000_BUILTIN_2
13407 #undef RS6000_BUILTIN_3
13408 #undef RS6000_BUILTIN_A
13409 #undef RS6000_BUILTIN_D
13410 #undef RS6000_BUILTIN_H
13411 #undef RS6000_BUILTIN_P
13412 #undef RS6000_BUILTIN_X
13413
13414 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13415 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13416 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13417 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13418 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13419 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13420 { MASK, ICODE, NAME, ENUM },
13421
13422 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13423 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13424 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13425
13426 static const struct builtin_description bdesc_dst[] =
13427 {
13428 #include "rs6000-builtin.def"
13429 };
13430
13431 /* Simple binary operations: VECc = foo (VECa, VECb). */
13432
13433 #undef RS6000_BUILTIN_0
13434 #undef RS6000_BUILTIN_1
13435 #undef RS6000_BUILTIN_2
13436 #undef RS6000_BUILTIN_3
13437 #undef RS6000_BUILTIN_A
13438 #undef RS6000_BUILTIN_D
13439 #undef RS6000_BUILTIN_H
13440 #undef RS6000_BUILTIN_P
13441 #undef RS6000_BUILTIN_X
13442
13443 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13444 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13445 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13446 { MASK, ICODE, NAME, ENUM },
13447
13448 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13449 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13450 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13451 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13452 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13453 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13454
13455 static const struct builtin_description bdesc_2arg[] =
13456 {
13457 #include "rs6000-builtin.def"
13458 };
13459
13460 #undef RS6000_BUILTIN_0
13461 #undef RS6000_BUILTIN_1
13462 #undef RS6000_BUILTIN_2
13463 #undef RS6000_BUILTIN_3
13464 #undef RS6000_BUILTIN_A
13465 #undef RS6000_BUILTIN_D
13466 #undef RS6000_BUILTIN_H
13467 #undef RS6000_BUILTIN_P
13468 #undef RS6000_BUILTIN_X
13469
13470 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13471 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13472 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13473 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13474 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13475 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13476 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13477 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13478 { MASK, ICODE, NAME, ENUM },
13479
13480 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13481
13482 /* AltiVec predicates. */
13483
13484 static const struct builtin_description bdesc_altivec_preds[] =
13485 {
13486 #include "rs6000-builtin.def"
13487 };
13488
13489 /* ABS* operations. */
13490
13491 #undef RS6000_BUILTIN_0
13492 #undef RS6000_BUILTIN_1
13493 #undef RS6000_BUILTIN_2
13494 #undef RS6000_BUILTIN_3
13495 #undef RS6000_BUILTIN_A
13496 #undef RS6000_BUILTIN_D
13497 #undef RS6000_BUILTIN_H
13498 #undef RS6000_BUILTIN_P
13499 #undef RS6000_BUILTIN_X
13500
13501 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13502 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13503 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13504 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13505 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13506 { MASK, ICODE, NAME, ENUM },
13507
13508 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13509 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13510 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13511 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13512
13513 static const struct builtin_description bdesc_abs[] =
13514 {
13515 #include "rs6000-builtin.def"
13516 };
13517
13518 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13519 foo (VECa). */
13520
13521 #undef RS6000_BUILTIN_0
13522 #undef RS6000_BUILTIN_1
13523 #undef RS6000_BUILTIN_2
13524 #undef RS6000_BUILTIN_3
13525 #undef RS6000_BUILTIN_A
13526 #undef RS6000_BUILTIN_D
13527 #undef RS6000_BUILTIN_H
13528 #undef RS6000_BUILTIN_P
13529 #undef RS6000_BUILTIN_X
13530
13531 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13532 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13533 { MASK, ICODE, NAME, ENUM },
13534
13535 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13536 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13537 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13538 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13539 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13540 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13541 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13542
13543 static const struct builtin_description bdesc_1arg[] =
13544 {
13545 #include "rs6000-builtin.def"
13546 };
13547
13548 /* Simple no-argument operations: result = __builtin_darn_32 () */
13549
13550 #undef RS6000_BUILTIN_0
13551 #undef RS6000_BUILTIN_1
13552 #undef RS6000_BUILTIN_2
13553 #undef RS6000_BUILTIN_3
13554 #undef RS6000_BUILTIN_A
13555 #undef RS6000_BUILTIN_D
13556 #undef RS6000_BUILTIN_H
13557 #undef RS6000_BUILTIN_P
13558 #undef RS6000_BUILTIN_X
13559
13560 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13561 { MASK, ICODE, NAME, ENUM },
13562
13563 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13564 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13565 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13566 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13567 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13568 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13569 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13570 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13571
13572 static const struct builtin_description bdesc_0arg[] =
13573 {
13574 #include "rs6000-builtin.def"
13575 };
13576
13577 /* HTM builtins. */
13578 #undef RS6000_BUILTIN_0
13579 #undef RS6000_BUILTIN_1
13580 #undef RS6000_BUILTIN_2
13581 #undef RS6000_BUILTIN_3
13582 #undef RS6000_BUILTIN_A
13583 #undef RS6000_BUILTIN_D
13584 #undef RS6000_BUILTIN_H
13585 #undef RS6000_BUILTIN_P
13586 #undef RS6000_BUILTIN_X
13587
13588 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13589 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13590 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13591 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13592 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13593 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13594 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13595 { MASK, ICODE, NAME, ENUM },
13596
13597 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13598 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13599
13600 static const struct builtin_description bdesc_htm[] =
13601 {
13602 #include "rs6000-builtin.def"
13603 };
13604
13605 #undef RS6000_BUILTIN_0
13606 #undef RS6000_BUILTIN_1
13607 #undef RS6000_BUILTIN_2
13608 #undef RS6000_BUILTIN_3
13609 #undef RS6000_BUILTIN_A
13610 #undef RS6000_BUILTIN_D
13611 #undef RS6000_BUILTIN_H
13612 #undef RS6000_BUILTIN_P
13613
13614 /* Return true if a builtin function is overloaded. */
13615 bool
13616 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13617 {
13618 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13619 }
13620
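/* Return the name of the overloaded builtin function FNCODE.  */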
13621 const char *
13622 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13623 {
13624 return rs6000_builtin_info[(int)fncode].name;
13625 }
13626
13627 /* Expand an expression EXP that calls a builtin without arguments. */
13628 static rtx
13629 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13630 {
13631 rtx pat;
13632 machine_mode tmode = insn_data[icode].operand[0].mode;
13633
13634 if (icode == CODE_FOR_nothing)
13635 /* Builtin not supported on this processor. */
13636 return 0;
13637
13638 if (target == 0
13639 || GET_MODE (target) != tmode
13640 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13641 target = gen_reg_rtx (tmode);
13642
13643 pat = GEN_FCN (icode) (target);
13644 if (! pat)
13645 return 0;
13646 emit_insn (pat);
13647
13648 return target;
13649 }
13650
13651
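/* Expand a call to a builtin such as __builtin_mtfsf that takes an
   8-bit literal field mask and a second operand and produces no
   result (a descriptive sketch inferred from the checks below).  */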
13652 static rtx
13653 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13654 {
13655 rtx pat;
13656 tree arg0 = CALL_EXPR_ARG (exp, 0);
13657 tree arg1 = CALL_EXPR_ARG (exp, 1);
13658 rtx op0 = expand_normal (arg0);
13659 rtx op1 = expand_normal (arg1);
13660 machine_mode mode0 = insn_data[icode].operand[0].mode;
13661 machine_mode mode1 = insn_data[icode].operand[1].mode;
13662
13663 if (icode == CODE_FOR_nothing)
13664 /* Builtin not supported on this processor. */
13665 return 0;
13666
13667 /* If we got invalid arguments bail out before generating bad rtl. */
13668 if (arg0 == error_mark_node || arg1 == error_mark_node)
13669 return const0_rtx;
13670
13671 if (GET_CODE (op0) != CONST_INT
13672 || INTVAL (op0) > 255
13673 || INTVAL (op0) < 0)
13674 {
13675 error ("argument 1 must be an 8-bit field value");
13676 return const0_rtx;
13677 }
13678
13679 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13680 op0 = copy_to_mode_reg (mode0, op0);
13681
13682 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13683 op1 = copy_to_mode_reg (mode1, op1);
13684
13685 pat = GEN_FCN (icode) (op0, op1);
13686 if (! pat)
13687 return const0_rtx;
13688 emit_insn (pat);
13689
13690 return NULL_RTX;
13691 }
13692
13693 static rtx
13694 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13695 {
13696 rtx pat;
13697 tree arg0 = CALL_EXPR_ARG (exp, 0);
13698 rtx op0 = expand_normal (arg0);
13699 machine_mode tmode = insn_data[icode].operand[0].mode;
13700 machine_mode mode0 = insn_data[icode].operand[1].mode;
13701
13702 if (icode == CODE_FOR_nothing)
13703 /* Builtin not supported on this processor. */
13704 return 0;
13705
13706 /* If we got invalid arguments bail out before generating bad rtl. */
13707 if (arg0 == error_mark_node)
13708 return const0_rtx;
13709
13710 if (icode == CODE_FOR_altivec_vspltisb
13711 || icode == CODE_FOR_altivec_vspltish
13712 || icode == CODE_FOR_altivec_vspltisw)
13713 {
13714 /* Only allow 5-bit *signed* literals. */
13715 if (GET_CODE (op0) != CONST_INT
13716 || INTVAL (op0) > 15
13717 || INTVAL (op0) < -16)
13718 {
13719 error ("argument 1 must be a 5-bit signed literal");
13720 return CONST0_RTX (tmode);
13721 }
13722 }
13723
13724 if (target == 0
13725 || GET_MODE (target) != tmode
13726 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13727 target = gen_reg_rtx (tmode);
13728
13729 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13730 op0 = copy_to_mode_reg (mode0, op0);
13731
13732 pat = GEN_FCN (icode) (target, op0);
13733 if (! pat)
13734 return 0;
13735 emit_insn (pat);
13736
13737 return target;
13738 }
13739
13740 static rtx
13741 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13742 {
13743 rtx pat, scratch1, scratch2;
13744 tree arg0 = CALL_EXPR_ARG (exp, 0);
13745 rtx op0 = expand_normal (arg0);
13746 machine_mode tmode = insn_data[icode].operand[0].mode;
13747 machine_mode mode0 = insn_data[icode].operand[1].mode;
13748
13749 /* If we have invalid arguments, bail out before generating bad rtl. */
13750 if (arg0 == error_mark_node)
13751 return const0_rtx;
13752
13753 if (target == 0
13754 || GET_MODE (target) != tmode
13755 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13756 target = gen_reg_rtx (tmode);
13757
13758 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13759 op0 = copy_to_mode_reg (mode0, op0);
13760
13761 scratch1 = gen_reg_rtx (mode0);
13762 scratch2 = gen_reg_rtx (mode0);
13763
13764 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13765 if (! pat)
13766 return 0;
13767 emit_insn (pat);
13768
13769 return target;
13770 }
13771
13772 static rtx
13773 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13774 {
13775 rtx pat;
13776 tree arg0 = CALL_EXPR_ARG (exp, 0);
13777 tree arg1 = CALL_EXPR_ARG (exp, 1);
13778 rtx op0 = expand_normal (arg0);
13779 rtx op1 = expand_normal (arg1);
13780 machine_mode tmode = insn_data[icode].operand[0].mode;
13781 machine_mode mode0 = insn_data[icode].operand[1].mode;
13782 machine_mode mode1 = insn_data[icode].operand[2].mode;
13783
13784 if (icode == CODE_FOR_nothing)
13785 /* Builtin not supported on this processor. */
13786 return 0;
13787
13788 /* If we got invalid arguments, bail out before generating bad rtl. */
13789 if (arg0 == error_mark_node || arg1 == error_mark_node)
13790 return const0_rtx;
13791
13792 if (icode == CODE_FOR_altivec_vcfux
13793 || icode == CODE_FOR_altivec_vcfsx
13794 || icode == CODE_FOR_altivec_vctsxs
13795 || icode == CODE_FOR_altivec_vctuxs
13796 || icode == CODE_FOR_altivec_vspltb
13797 || icode == CODE_FOR_altivec_vsplth
13798 || icode == CODE_FOR_altivec_vspltw)
13799 {
13800 /* Only allow 5-bit unsigned literals. */
13801 STRIP_NOPS (arg1);
13802 if (TREE_CODE (arg1) != INTEGER_CST
13803 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13804 {
13805 error ("argument 2 must be a 5-bit unsigned literal");
13806 return CONST0_RTX (tmode);
13807 }
13808 }
13809 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13810 || icode == CODE_FOR_dfptstsfi_lt_dd
13811 || icode == CODE_FOR_dfptstsfi_gt_dd
13812 || icode == CODE_FOR_dfptstsfi_unordered_dd
13813 || icode == CODE_FOR_dfptstsfi_eq_td
13814 || icode == CODE_FOR_dfptstsfi_lt_td
13815 || icode == CODE_FOR_dfptstsfi_gt_td
13816 || icode == CODE_FOR_dfptstsfi_unordered_td)
13817 {
13818 /* Only allow 6-bit unsigned literals. */
13819 STRIP_NOPS (arg0);
13820 if (TREE_CODE (arg0) != INTEGER_CST
13821 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13822 {
13823 error ("argument 1 must be a 6-bit unsigned literal");
13824 return CONST0_RTX (tmode);
13825 }
13826 }
13827 else if (icode == CODE_FOR_xststdcqp_kf
13828 || icode == CODE_FOR_xststdcqp_tf
13829 || icode == CODE_FOR_xststdcdp
13830 || icode == CODE_FOR_xststdcsp
13831 || icode == CODE_FOR_xvtstdcdp
13832 || icode == CODE_FOR_xvtstdcsp)
13833 {
13834 /* Only allow 7-bit unsigned literals. */
13835 STRIP_NOPS (arg1);
13836 if (TREE_CODE (arg1) != INTEGER_CST
13837 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13838 {
13839 error ("argument 2 must be a 7-bit unsigned literal");
13840 return CONST0_RTX (tmode);
13841 }
13842 }
13843 else if (icode == CODE_FOR_unpackv1ti
13844 || icode == CODE_FOR_unpackkf
13845 || icode == CODE_FOR_unpacktf
13846 || icode == CODE_FOR_unpackif
13847 || icode == CODE_FOR_unpacktd)
13848 {
13849 /* Only allow 1-bit unsigned literals. */
13850 STRIP_NOPS (arg1);
13851 if (TREE_CODE (arg1) != INTEGER_CST
13852 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13853 {
13854 error ("argument 2 must be a 1-bit unsigned literal");
13855 return CONST0_RTX (tmode);
13856 }
13857 }
13858
13859 if (target == 0
13860 || GET_MODE (target) != tmode
13861 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13862 target = gen_reg_rtx (tmode);
13863
13864 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13865 op0 = copy_to_mode_reg (mode0, op0);
13866 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13867 op1 = copy_to_mode_reg (mode1, op1);
13868
13869 pat = GEN_FCN (icode) (target, op0, op1);
13870 if (! pat)
13871 return 0;
13872 emit_insn (pat);
13873
13874 return target;
13875 }
13876
13877 static rtx
13878 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13879 {
13880 rtx pat, scratch;
13881 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13882 tree arg0 = CALL_EXPR_ARG (exp, 1);
13883 tree arg1 = CALL_EXPR_ARG (exp, 2);
13884 rtx op0 = expand_normal (arg0);
13885 rtx op1 = expand_normal (arg1);
13886 machine_mode tmode = SImode;
13887 machine_mode mode0 = insn_data[icode].operand[1].mode;
13888 machine_mode mode1 = insn_data[icode].operand[2].mode;
13889 int cr6_form_int;
13890
13891 if (TREE_CODE (cr6_form) != INTEGER_CST)
13892 {
13893 error ("argument 1 of %qs must be a constant",
13894 "__builtin_altivec_predicate");
13895 return const0_rtx;
13896 }
13897 else
13898 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13899
13900 gcc_assert (mode0 == mode1);
13901
13902 /* If we have invalid arguments, bail out before generating bad rtl. */
13903 if (arg0 == error_mark_node || arg1 == error_mark_node)
13904 return const0_rtx;
13905
13906 if (target == 0
13907 || GET_MODE (target) != tmode
13908 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13909 target = gen_reg_rtx (tmode);
13910
13911 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13912 op0 = copy_to_mode_reg (mode0, op0);
13913 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13914 op1 = copy_to_mode_reg (mode1, op1);
13915
13916 /* Note that for many of the relevant operations (e.g. cmpne or
13917 cmpeq) with float or double operands, it makes more sense for the
13918 mode of the allocated scratch register to select a vector of
13919 integers. But the choice to copy the mode of operand 0 was made
13920 long ago and there are no plans to change it. */
13921 scratch = gen_reg_rtx (mode0);
13922
13923 pat = GEN_FCN (icode) (scratch, op0, op1);
13924 if (! pat)
13925 return 0;
13926 emit_insn (pat);
13927
13928 /* The vec_any* and vec_all* predicates use the same opcodes for two
13929 different operations, but the bits in CR6 will be different
13930 depending on what information we want. So we have to play tricks
13931 with CR6 to get the right bits out.
13932
13933 If you think this is disgusting, look at the specs for the
13934 AltiVec predicates. */
13935
13936 switch (cr6_form_int)
13937 {
13938 case 0:
13939 emit_insn (gen_cr6_test_for_zero (target));
13940 break;
13941 case 1:
13942 emit_insn (gen_cr6_test_for_zero_reverse (target));
13943 break;
13944 case 2:
13945 emit_insn (gen_cr6_test_for_lt (target));
13946 break;
13947 case 3:
13948 emit_insn (gen_cr6_test_for_lt_reverse (target));
13949 break;
13950 default:
13951 error ("argument 1 of %qs is out of range",
13952 "__builtin_altivec_predicate");
13953 break;
13954 }
13955
13956 return target;
13957 }
13958
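/* Sketch of how the cr6_form selector above is used at the source
   level (the constants come from altivec.h; shown schematically):

       vec_all_eq (a, b)    emits a vcmpequ*. compare, then tests the
                            CR6 "all elements matched" bit
       vec_any_eq (a, b)    emits the same compare, but tests the
                            complement of the "no elements matched" bit

   Both predicates use the identical compare instruction; only the CR6
   test emitted by the switch above differs.  */
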
13959 rtx
13960 swap_endian_selector_for_mode (machine_mode mode)
13961 {
13962 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13963 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13964 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13965 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13966
13967 unsigned int *swaparray, i;
13968 rtx perm[16];
13969
13970 switch (mode)
13971 {
13972 case E_V1TImode:
13973 swaparray = swap1;
13974 break;
13975 case E_V2DFmode:
13976 case E_V2DImode:
13977 swaparray = swap2;
13978 break;
13979 case E_V4SFmode:
13980 case E_V4SImode:
13981 swaparray = swap4;
13982 break;
13983 case E_V8HImode:
13984 swaparray = swap8;
13985 break;
13986 default:
13987 gcc_unreachable ();
13988 }
13989
13990 for (i = 0; i < 16; ++i)
13991 perm[i] = GEN_INT (swaparray[i]);
13992
13993 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13994 gen_rtvec_v (16, perm)));
13995 }
13996
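/* For example, for V4SImode the selector built above is the byte
   permutation {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}; applying it
   with a vperm-style permute reverses the bytes within each 32-bit
   element, i.e. it swaps each element between big- and little-endian
   byte order.  */
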
13997 static rtx
13998 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13999 {
14000 rtx pat, addr;
14001 tree arg0 = CALL_EXPR_ARG (exp, 0);
14002 tree arg1 = CALL_EXPR_ARG (exp, 1);
14003 machine_mode tmode = insn_data[icode].operand[0].mode;
14004 machine_mode mode0 = Pmode;
14005 machine_mode mode1 = Pmode;
14006 rtx op0 = expand_normal (arg0);
14007 rtx op1 = expand_normal (arg1);
14008
14009 if (icode == CODE_FOR_nothing)
14010 /* Builtin not supported on this processor. */
14011 return 0;
14012
14013 /* If we got invalid arguments, bail out before generating bad rtl. */
14014 if (arg0 == error_mark_node || arg1 == error_mark_node)
14015 return const0_rtx;
14016
14017 if (target == 0
14018 || GET_MODE (target) != tmode
14019 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14020 target = gen_reg_rtx (tmode);
14021
14022 op1 = copy_to_mode_reg (mode1, op1);
14023
14024 /* For LVX, express the RTL accurately by ANDing the address with -16.
14025 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14026 so the raw address is fine. */
14027 if (icode == CODE_FOR_altivec_lvx_v1ti
14028 || icode == CODE_FOR_altivec_lvx_v2df
14029 || icode == CODE_FOR_altivec_lvx_v2di
14030 || icode == CODE_FOR_altivec_lvx_v4sf
14031 || icode == CODE_FOR_altivec_lvx_v4si
14032 || icode == CODE_FOR_altivec_lvx_v8hi
14033 || icode == CODE_FOR_altivec_lvx_v16qi)
14034 {
14035 rtx rawaddr;
14036 if (op0 == const0_rtx)
14037 rawaddr = op1;
14038 else
14039 {
14040 op0 = copy_to_mode_reg (mode0, op0);
14041 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14042 }
14043 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14044 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14045
14046 emit_insn (gen_rtx_SET (target, addr));
14047 }
14048 else
14049 {
14050 if (op0 == const0_rtx)
14051 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14052 else
14053 {
14054 op0 = copy_to_mode_reg (mode0, op0);
14055 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14056 gen_rtx_PLUS (Pmode, op1, op0));
14057 }
14058
14059 pat = GEN_FCN (icode) (target, addr);
14060 if (! pat)
14061 return 0;
14062 emit_insn (pat);
14063 }
14064
14065 return target;
14066 }
14067
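/* Illustrative source-level behavior of the LVX path above: the lvx
   instruction ignores the low four address bits, so

       vector int v = vec_ld (idx, ptr);

   is expanded as a load from (ptr + idx) & -16, making the implicit
   16-byte alignment explicit in the RTL instead of hiding it behind an
   UNSPEC as the LVXL/LVE*X paths do.  */
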
14068 static rtx
14069 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14070 {
14071 rtx pat;
14072 tree arg0 = CALL_EXPR_ARG (exp, 0);
14073 tree arg1 = CALL_EXPR_ARG (exp, 1);
14074 tree arg2 = CALL_EXPR_ARG (exp, 2);
14075 rtx op0 = expand_normal (arg0);
14076 rtx op1 = expand_normal (arg1);
14077 rtx op2 = expand_normal (arg2);
14078 machine_mode mode0 = insn_data[icode].operand[0].mode;
14079 machine_mode mode1 = insn_data[icode].operand[1].mode;
14080 machine_mode mode2 = insn_data[icode].operand[2].mode;
14081
14082 if (icode == CODE_FOR_nothing)
14083 /* Builtin not supported on this processor. */
14084 return NULL_RTX;
14085
14086 /* If we got invalid arguments, bail out before generating bad rtl. */
14087 if (arg0 == error_mark_node
14088 || arg1 == error_mark_node
14089 || arg2 == error_mark_node)
14090 return NULL_RTX;
14091
14092 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14093 op0 = copy_to_mode_reg (mode0, op0);
14094 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14095 op1 = copy_to_mode_reg (mode1, op1);
14096 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14097 op2 = copy_to_mode_reg (mode2, op2);
14098
14099 pat = GEN_FCN (icode) (op0, op1, op2);
14100 if (pat)
14101 emit_insn (pat);
14102
14103 return NULL_RTX;
14104 }
14105
14106 static rtx
14107 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14108 {
14109 tree arg0 = CALL_EXPR_ARG (exp, 0);
14110 tree arg1 = CALL_EXPR_ARG (exp, 1);
14111 tree arg2 = CALL_EXPR_ARG (exp, 2);
14112 rtx op0 = expand_normal (arg0);
14113 rtx op1 = expand_normal (arg1);
14114 rtx op2 = expand_normal (arg2);
14115 rtx pat, addr, rawaddr;
14116 machine_mode tmode = insn_data[icode].operand[0].mode;
14117 machine_mode smode = insn_data[icode].operand[1].mode;
14118 machine_mode mode1 = Pmode;
14119 machine_mode mode2 = Pmode;
14120
14121 /* If we got invalid arguments, bail out before generating bad rtl. */
14122 if (arg0 == error_mark_node
14123 || arg1 == error_mark_node
14124 || arg2 == error_mark_node)
14125 return const0_rtx;
14126
14127 op2 = copy_to_mode_reg (mode2, op2);
14128
14129 /* For STVX, express the RTL accurately by ANDing the address with -16.
14130 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14131 so the raw address is fine. */
14132 if (icode == CODE_FOR_altivec_stvx_v2df
14133 || icode == CODE_FOR_altivec_stvx_v2di
14134 || icode == CODE_FOR_altivec_stvx_v4sf
14135 || icode == CODE_FOR_altivec_stvx_v4si
14136 || icode == CODE_FOR_altivec_stvx_v8hi
14137 || icode == CODE_FOR_altivec_stvx_v16qi)
14138 {
14139 if (op1 == const0_rtx)
14140 rawaddr = op2;
14141 else
14142 {
14143 op1 = copy_to_mode_reg (mode1, op1);
14144 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14145 }
14146
14147 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14148 addr = gen_rtx_MEM (tmode, addr);
14149
14150 op0 = copy_to_mode_reg (tmode, op0);
14151
14152 emit_insn (gen_rtx_SET (addr, op0));
14153 }
14154 else
14155 {
14156 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14157 op0 = copy_to_mode_reg (smode, op0);
14158
14159 if (op1 == const0_rtx)
14160 addr = gen_rtx_MEM (tmode, op2);
14161 else
14162 {
14163 op1 = copy_to_mode_reg (mode1, op1);
14164 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14165 }
14166
14167 pat = GEN_FCN (icode) (addr, op0);
14168 if (pat)
14169 emit_insn (pat);
14170 }
14171
14172 return NULL_RTX;
14173 }
14174
14175 /* Return the appropriate SPR number associated with the given builtin. */
14176 static inline HOST_WIDE_INT
14177 htm_spr_num (enum rs6000_builtins code)
14178 {
14179 if (code == HTM_BUILTIN_GET_TFHAR
14180 || code == HTM_BUILTIN_SET_TFHAR)
14181 return TFHAR_SPR;
14182 else if (code == HTM_BUILTIN_GET_TFIAR
14183 || code == HTM_BUILTIN_SET_TFIAR)
14184 return TFIAR_SPR;
14185 else if (code == HTM_BUILTIN_GET_TEXASR
14186 || code == HTM_BUILTIN_SET_TEXASR)
14187 return TEXASR_SPR;
14188 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14189 || code == HTM_BUILTIN_SET_TEXASRU);
14190 return TEXASRU_SPR;
14191 }
14192
14193 /* Return the appropriate SPR regno associated with the given builtin. */
14194 static inline HOST_WIDE_INT
14195 htm_spr_regno (enum rs6000_builtins code)
14196 {
14197 if (code == HTM_BUILTIN_GET_TFHAR
14198 || code == HTM_BUILTIN_SET_TFHAR)
14199 return TFHAR_REGNO;
14200 else if (code == HTM_BUILTIN_GET_TFIAR
14201 || code == HTM_BUILTIN_SET_TFIAR)
14202 return TFIAR_REGNO;
14203 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14204 || code == HTM_BUILTIN_SET_TEXASR
14205 || code == HTM_BUILTIN_GET_TEXASRU
14206 || code == HTM_BUILTIN_SET_TEXASRU);
14207 return TEXASR_REGNO;
14208 }
14209
14210 /* Return the correct ICODE value depending on whether we are
14211 setting or reading the HTM SPRs. */
14212 static inline enum insn_code
14213 rs6000_htm_spr_icode (bool nonvoid)
14214 {
14215 if (nonvoid)
14216 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14217 else
14218 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14219 }
14220
14221 /* Expand the HTM builtin in EXP and store the result in TARGET.
14222 Store true in *EXPANDEDP if we found a builtin to expand. */
14223 static rtx
14224 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14225 {
14226 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14227 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14228 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14229 const struct builtin_description *d;
14230 size_t i;
14231
14232 *expandedp = true;
14233
14234 if (!TARGET_POWERPC64
14235 && (fcode == HTM_BUILTIN_TABORTDC
14236 || fcode == HTM_BUILTIN_TABORTDCI))
14237 {
14238 size_t uns_fcode = (size_t)fcode;
14239 const char *name = rs6000_builtin_info[uns_fcode].name;
14240 error ("builtin %qs is only valid in 64-bit mode", name);
14241 return const0_rtx;
14242 }
14243
14244 /* Expand the HTM builtins. */
14245 d = bdesc_htm;
14246 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14247 if (d->code == fcode)
14248 {
14249 rtx op[MAX_HTM_OPERANDS], pat;
14250 int nopnds = 0;
14251 tree arg;
14252 call_expr_arg_iterator iter;
14253 unsigned attr = rs6000_builtin_info[fcode].attr;
14254 enum insn_code icode = d->icode;
14255 const struct insn_operand_data *insn_op;
14256 bool uses_spr = (attr & RS6000_BTC_SPR);
14257 rtx cr = NULL_RTX;
14258
14259 if (uses_spr)
14260 icode = rs6000_htm_spr_icode (nonvoid);
14261 insn_op = &insn_data[icode].operand[0];
14262
14263 if (nonvoid)
14264 {
14265 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14266 if (!target
14267 || GET_MODE (target) != tmode
14268 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14269 target = gen_reg_rtx (tmode);
14270 if (uses_spr)
14271 op[nopnds++] = target;
14272 }
14273
14274 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14275 {
14276 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14277 return const0_rtx;
14278
14279 insn_op = &insn_data[icode].operand[nopnds];
14280
14281 op[nopnds] = expand_normal (arg);
14282
14283 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14284 {
14285 if (!strcmp (insn_op->constraint, "n"))
14286 {
14287 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14288 if (!CONST_INT_P (op[nopnds]))
14289 error ("argument %d must be an unsigned literal", arg_num);
14290 else
14291 error ("argument %d is an unsigned literal that is "
14292 "out of range", arg_num);
14293 return const0_rtx;
14294 }
14295 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14296 }
14297
14298 nopnds++;
14299 }
14300
14301 /* Handle the builtins for extended mnemonics. These accept
14302 no arguments, but map to builtins that take arguments. */
14303 switch (fcode)
14304 {
14305 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14306 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14307 op[nopnds++] = GEN_INT (1);
14308 if (flag_checking)
14309 attr |= RS6000_BTC_UNARY;
14310 break;
14311 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14312 op[nopnds++] = GEN_INT (0);
14313 if (flag_checking)
14314 attr |= RS6000_BTC_UNARY;
14315 break;
14316 default:
14317 break;
14318 }
14319
14320 /* If this builtin accesses SPRs, then pass in the appropriate
14321 SPR number and SPR regno as the last two operands. */
14322 if (uses_spr)
14323 {
14324 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14325 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14326 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14327 }
14328 /* If this builtin accesses a CR, then pass in a scratch
14329 CR as the last operand. */
14330 else if (attr & RS6000_BTC_CR)
14331 {
14331 cr = gen_reg_rtx (CCmode);
14332 op[nopnds++] = cr;
14333 }
14334
14335 if (flag_checking)
14336 {
14337 int expected_nopnds = 0;
14338 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14339 expected_nopnds = 1;
14340 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14341 expected_nopnds = 2;
14342 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14343 expected_nopnds = 3;
14344 if (!(attr & RS6000_BTC_VOID))
14345 expected_nopnds += 1;
14346 if (uses_spr)
14347 expected_nopnds += 2;
14348
14349 gcc_assert (nopnds == expected_nopnds
14350 && nopnds <= MAX_HTM_OPERANDS);
14351 }
14352
14353 switch (nopnds)
14354 {
14355 case 1:
14356 pat = GEN_FCN (icode) (op[0]);
14357 break;
14358 case 2:
14359 pat = GEN_FCN (icode) (op[0], op[1]);
14360 break;
14361 case 3:
14362 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14363 break;
14364 case 4:
14365 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14366 break;
14367 default:
14368 gcc_unreachable ();
14369 }
14370 if (!pat)
14371 return NULL_RTX;
14372 emit_insn (pat);
14373
14374 if (attr & RS6000_BTC_CR)
14375 {
14376 if (fcode == HTM_BUILTIN_TBEGIN)
14377 {
14378 /* Emit code to set TARGET to true or false depending on
14379 whether the tbegin. instruction succeeded or failed
14380 to start a transaction. We do this by placing the one's
14381 complement of CR's EQ bit into TARGET. */
14382 rtx scratch = gen_reg_rtx (SImode);
14383 emit_insn (gen_rtx_SET (scratch,
14384 gen_rtx_EQ (SImode, cr,
14385 const0_rtx)));
14386 emit_insn (gen_rtx_SET (target,
14387 gen_rtx_XOR (SImode, scratch,
14388 GEN_INT (1))));
14389 }
14390 else
14391 {
14392 /* Emit code to copy the 4-bit condition register field
14393 CR into the least significant end of register TARGET. */
14394 rtx scratch1 = gen_reg_rtx (SImode);
14395 rtx scratch2 = gen_reg_rtx (SImode);
14396 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14397 emit_insn (gen_movcc (subreg, cr));
14398 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14399 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14400 }
14401 }
14402
14403 if (nonvoid)
14404 return target;
14405 return const0_rtx;
14406 }
14407
14408 *expandedp = false;
14409 return NULL_RTX;
14410 }
14411
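/* Typical source-level use of the tbegin path above (a sketch; the
   transaction body is application code):

       if (__builtin_tbegin (0))
         {
           ... transactional code ...
           __builtin_tend (0);
         }
       else
         ... fallback path ...

   __builtin_tbegin returns nonzero when the transaction starts, which
   is why the expansion above places the complement of CR's EQ bit in
   TARGET.  */
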
14412 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14413
14414 static rtx
14415 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14416 rtx target)
14417 {
14418 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14419 if (fcode == RS6000_BUILTIN_CPU_INIT)
14420 return const0_rtx;
14421
14422 if (target == 0 || GET_MODE (target) != SImode)
14423 target = gen_reg_rtx (SImode);
14424
14425 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14426 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14427 /* The target clones pass creates an ARRAY_REF instead of a STRING_CST;
14428 convert it back to a STRING_CST. */
14429 if (TREE_CODE (arg) == ARRAY_REF
14430 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14431 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14432 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14433 arg = TREE_OPERAND (arg, 0);
14434
14435 if (TREE_CODE (arg) != STRING_CST)
14436 {
14437 error ("builtin %qs only accepts a string argument",
14438 rs6000_builtin_info[(size_t) fcode].name);
14439 return const0_rtx;
14440 }
14441
14442 if (fcode == RS6000_BUILTIN_CPU_IS)
14443 {
14444 const char *cpu = TREE_STRING_POINTER (arg);
14445 rtx cpuid = NULL_RTX;
14446 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14447 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14448 {
14449 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14450 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14451 break;
14452 }
14453 if (cpuid == NULL_RTX)
14454 {
14455 /* Invalid CPU argument. */
14456 error ("cpu %qs is an invalid argument to builtin %qs",
14457 cpu, rs6000_builtin_info[(size_t) fcode].name);
14458 return const0_rtx;
14459 }
14460
14461 rtx platform = gen_reg_rtx (SImode);
14462 rtx tcbmem = gen_const_mem (SImode,
14463 gen_rtx_PLUS (Pmode,
14464 gen_rtx_REG (Pmode, TLS_REGNUM),
14465 GEN_INT (TCB_PLATFORM_OFFSET)));
14466 emit_move_insn (platform, tcbmem);
14467 emit_insn (gen_eqsi3 (target, platform, cpuid));
14468 }
14469 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14470 {
14471 const char *hwcap = TREE_STRING_POINTER (arg);
14472 rtx mask = NULL_RTX;
14473 int hwcap_offset;
14474 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14475 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14476 {
14477 mask = GEN_INT (cpu_supports_info[i].mask);
14478 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14479 break;
14480 }
14481 if (mask == NULL_RTX)
14482 {
14483 /* Invalid HWCAP argument. */
14484 error ("%s %qs is an invalid argument to builtin %qs",
14485 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14486 return const0_rtx;
14487 }
14488
14489 rtx tcb_hwcap = gen_reg_rtx (SImode);
14490 rtx tcbmem = gen_const_mem (SImode,
14491 gen_rtx_PLUS (Pmode,
14492 gen_rtx_REG (Pmode, TLS_REGNUM),
14493 GEN_INT (hwcap_offset)));
14494 emit_move_insn (tcb_hwcap, tcbmem);
14495 rtx scratch1 = gen_reg_rtx (SImode);
14496 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14497 rtx scratch2 = gen_reg_rtx (SImode);
14498 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14499 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14500 }
14501 else
14502 gcc_unreachable ();
14503
14504 /* Record that we have expanded a CPU builtin, so that we can later
14505 emit a reference to the special symbol exported by LIBC to ensure we
14506 do not link against an old LIBC that doesn't support this feature. */
14507 cpu_builtin_p = true;
14508
14509 #else
14510 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14511 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14512
14513 /* For old LIBCs, always return FALSE. */
14514 emit_move_insn (target, GEN_INT (0));
14515 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14516
14517 return target;
14518 }
14519
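/* Source-level sketch of what the expansion above produces (with a
   GLIBC 2.23 or newer that exports the HWCAP words in the TCB):

       if (__builtin_cpu_is ("power9"))
         ...                          compares the TCB platform word
       if (__builtin_cpu_supports ("vsx"))
         ...                          tests a bit in a TCB hwcap word

   Both expand to a TLS-relative load plus a compare or mask; no
   function call is made at run time.  */
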
14520 static rtx
14521 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14522 {
14523 rtx pat;
14524 tree arg0 = CALL_EXPR_ARG (exp, 0);
14525 tree arg1 = CALL_EXPR_ARG (exp, 1);
14526 tree arg2 = CALL_EXPR_ARG (exp, 2);
14527 rtx op0 = expand_normal (arg0);
14528 rtx op1 = expand_normal (arg1);
14529 rtx op2 = expand_normal (arg2);
14530 machine_mode tmode = insn_data[icode].operand[0].mode;
14531 machine_mode mode0 = insn_data[icode].operand[1].mode;
14532 machine_mode mode1 = insn_data[icode].operand[2].mode;
14533 machine_mode mode2 = insn_data[icode].operand[3].mode;
14534
14535 if (icode == CODE_FOR_nothing)
14536 /* Builtin not supported on this processor. */
14537 return 0;
14538
14539 /* If we got invalid arguments, bail out before generating bad rtl. */
14540 if (arg0 == error_mark_node
14541 || arg1 == error_mark_node
14542 || arg2 == error_mark_node)
14543 return const0_rtx;
14544
14545 /* Check and prepare argument depending on the instruction code.
14546
14547 Note that a switch statement instead of the sequence of tests
14548 would be incorrect as many of the CODE_FOR values could be
14549 CODE_FOR_nothing and that would yield multiple alternatives
14550 with identical values. We'd never reach here at runtime in
14551 this case. */
14552 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14553 || icode == CODE_FOR_altivec_vsldoi_v2df
14554 || icode == CODE_FOR_altivec_vsldoi_v4si
14555 || icode == CODE_FOR_altivec_vsldoi_v8hi
14556 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14557 {
14558 /* Only allow 4-bit unsigned literals. */
14559 STRIP_NOPS (arg2);
14560 if (TREE_CODE (arg2) != INTEGER_CST
14561 || TREE_INT_CST_LOW (arg2) & ~0xf)
14562 {
14563 error ("argument 3 must be a 4-bit unsigned literal");
14564 return CONST0_RTX (tmode);
14565 }
14566 }
14567 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14568 || icode == CODE_FOR_vsx_xxpermdi_v2di
14569 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14570 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14571 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14572 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14573 || icode == CODE_FOR_vsx_xxpermdi_v4si
14574 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14575 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14576 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14577 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14578 || icode == CODE_FOR_vsx_xxsldwi_v4si
14579 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14580 || icode == CODE_FOR_vsx_xxsldwi_v2di
14581 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14582 {
14583 /* Only allow 2-bit unsigned literals. */
14584 STRIP_NOPS (arg2);
14585 if (TREE_CODE (arg2) != INTEGER_CST
14586 || TREE_INT_CST_LOW (arg2) & ~0x3)
14587 {
14588 error ("argument 3 must be a 2-bit unsigned literal");
14589 return CONST0_RTX (tmode);
14590 }
14591 }
14592 else if (icode == CODE_FOR_vsx_set_v2df
14593 || icode == CODE_FOR_vsx_set_v2di
14594 || icode == CODE_FOR_bcdadd
14595 || icode == CODE_FOR_bcdadd_lt
14596 || icode == CODE_FOR_bcdadd_eq
14597 || icode == CODE_FOR_bcdadd_gt
14598 || icode == CODE_FOR_bcdsub
14599 || icode == CODE_FOR_bcdsub_lt
14600 || icode == CODE_FOR_bcdsub_eq
14601 || icode == CODE_FOR_bcdsub_gt)
14602 {
14603 /* Only allow 1-bit unsigned literals. */
14604 STRIP_NOPS (arg2);
14605 if (TREE_CODE (arg2) != INTEGER_CST
14606 || TREE_INT_CST_LOW (arg2) & ~0x1)
14607 {
14608 error ("argument 3 must be a 1-bit unsigned literal");
14609 return CONST0_RTX (tmode);
14610 }
14611 }
14612 else if (icode == CODE_FOR_dfp_ddedpd_dd
14613 || icode == CODE_FOR_dfp_ddedpd_td)
14614 {
14615 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14616 STRIP_NOPS (arg0);
14617 if (TREE_CODE (arg0) != INTEGER_CST
14618 || TREE_INT_CST_LOW (arg0) & ~0x3)
14619 {
14620 error ("argument 1 must be 0 or 2");
14621 return CONST0_RTX (tmode);
14622 }
14623 }
14624 else if (icode == CODE_FOR_dfp_denbcd_dd
14625 || icode == CODE_FOR_dfp_denbcd_td)
14626 {
14627 /* Only allow 1-bit unsigned literals. */
14628 STRIP_NOPS (arg0);
14629 if (TREE_CODE (arg0) != INTEGER_CST
14630 || TREE_INT_CST_LOW (arg0) & ~0x1)
14631 {
14632 error ("argument 1 must be a 1-bit unsigned literal");
14633 return CONST0_RTX (tmode);
14634 }
14635 }
14636 else if (icode == CODE_FOR_dfp_dscli_dd
14637 || icode == CODE_FOR_dfp_dscli_td
14638 || icode == CODE_FOR_dfp_dscri_dd
14639 || icode == CODE_FOR_dfp_dscri_td)
14640 {
14641 /* Only allow 6-bit unsigned literals. */
14642 STRIP_NOPS (arg1);
14643 if (TREE_CODE (arg1) != INTEGER_CST
14644 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14645 {
14646 error ("argument 2 must be a 6-bit unsigned literal");
14647 return CONST0_RTX (tmode);
14648 }
14649 }
14650 else if (icode == CODE_FOR_crypto_vshasigmaw
14651 || icode == CODE_FOR_crypto_vshasigmad)
14652 {
14653 /* Check that the 2nd and 3rd arguments are integer constants in range,
14654 and prepare the arguments. */
14655 STRIP_NOPS (arg1);
14656 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14657 {
14658 error ("argument 2 must be 0 or 1");
14659 return CONST0_RTX (tmode);
14660 }
14661
14662 STRIP_NOPS (arg2);
14663 if (TREE_CODE (arg2) != INTEGER_CST
14664 || wi::geu_p (wi::to_wide (arg2), 16))
14665 {
14666 error ("argument 3 must be in the range 0..15");
14667 return CONST0_RTX (tmode);
14668 }
14669 }
14670
14671 if (target == 0
14672 || GET_MODE (target) != tmode
14673 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14674 target = gen_reg_rtx (tmode);
14675
14676 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14677 op0 = copy_to_mode_reg (mode0, op0);
14678 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14679 op1 = copy_to_mode_reg (mode1, op1);
14680 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14681 op2 = copy_to_mode_reg (mode2, op2);
14682
14683 pat = GEN_FCN (icode) (target, op0, op1, op2);
14684 if (! pat)
14685 return 0;
14686 emit_insn (pat);
14687
14688 return target;
14689 }
14690
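/* Example of the literal checking above for the shift/permute forms
   (illustrative; vec_sld is the altivec.h spelling that reaches the
   vsldoi case):

       vector int r = vec_sld (a, b, 3);     OK: 4-bit literal
       vector int s = vec_sld (a, b, 16);    error: "argument 3 must be
                                             a 4-bit unsigned literal"  */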
14691
14692 /* Expand the dst builtins. */
14693 static rtx
14694 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14695 bool *expandedp)
14696 {
14697 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14698 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14699 tree arg0, arg1, arg2;
14700 machine_mode mode0, mode1;
14701 rtx pat, op0, op1, op2;
14702 const struct builtin_description *d;
14703 size_t i;
14704
14705 *expandedp = false;
14706
14707 /* Handle DST variants. */
14708 d = bdesc_dst;
14709 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14710 if (d->code == fcode)
14711 {
14712 arg0 = CALL_EXPR_ARG (exp, 0);
14713 arg1 = CALL_EXPR_ARG (exp, 1);
14714 arg2 = CALL_EXPR_ARG (exp, 2);
14715 op0 = expand_normal (arg0);
14716 op1 = expand_normal (arg1);
14717 op2 = expand_normal (arg2);
14718 mode0 = insn_data[d->icode].operand[0].mode;
14719 mode1 = insn_data[d->icode].operand[1].mode;
14720
14721 /* Invalid arguments, bail out before generating bad rtl. */
14722 if (arg0 == error_mark_node
14723 || arg1 == error_mark_node
14724 || arg2 == error_mark_node)
14725 return const0_rtx;
14726
14727 *expandedp = true;
14728 STRIP_NOPS (arg2);
14729 if (TREE_CODE (arg2) != INTEGER_CST
14730 || TREE_INT_CST_LOW (arg2) & ~0x3)
14731 {
14732 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14733 return const0_rtx;
14734 }
14735
14736 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14737 op0 = copy_to_mode_reg (Pmode, op0);
14738 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14739 op1 = copy_to_mode_reg (mode1, op1);
14740
14741 pat = GEN_FCN (d->icode) (op0, op1, op2);
14742 if (pat != 0)
14743 emit_insn (pat);
14744
14745 return NULL_RTX;
14746 }
14747
14748 return NULL_RTX;
14749 }
14750
14751 /* Expand vec_init builtin. */
14752 static rtx
14753 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14754 {
14755 machine_mode tmode = TYPE_MODE (type);
14756 machine_mode inner_mode = GET_MODE_INNER (tmode);
14757 int i, n_elt = GET_MODE_NUNITS (tmode);
14758
14759 gcc_assert (VECTOR_MODE_P (tmode));
14760 gcc_assert (n_elt == call_expr_nargs (exp));
14761
14762 if (!target || !register_operand (target, tmode))
14763 target = gen_reg_rtx (tmode);
14764
14765 /* If we have a vector composed of a single element, such as V1TImode, do
14766 the initialization directly. */
14767 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14768 {
14769 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14770 emit_move_insn (target, gen_lowpart (tmode, x));
14771 }
14772 else
14773 {
14774 rtvec v = rtvec_alloc (n_elt);
14775
14776 for (i = 0; i < n_elt; ++i)
14777 {
14778 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14779 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14780 }
14781
14782 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14783 }
14784
14785 return target;
14786 }
14787
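/* Sketch of the contract of the expander above: the call carries one
   argument per vector element (n_elt == call_expr_nargs), so e.g. a
   V4SI init arrives as a four-argument call.  A single-element vector
   such as V1TImode short-circuits to a plain move; everything else is
   routed through rs6000_expand_vector_init on a PARALLEL of the
   elements.  */
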
14788 /* Return the integer constant in ARG. Constrain it to be in the range
14789 of the subparts of VEC_TYPE; issue an error if not. */
14790
14791 static int
14792 get_element_number (tree vec_type, tree arg)
14793 {
14794 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14795
14796 if (!tree_fits_uhwi_p (arg)
14797 || (elt = tree_to_uhwi (arg), elt > max))
14798 {
14799 error ("selector must be an integer constant in the range 0..%wi", max);
14800 return 0;
14801 }
14802
14803 return elt;
14804 }
14805
14806 /* Expand vec_set builtin. */
14807 static rtx
14808 altivec_expand_vec_set_builtin (tree exp)
14809 {
14810 machine_mode tmode, mode1;
14811 tree arg0, arg1, arg2;
14812 int elt;
14813 rtx op0, op1;
14814
14815 arg0 = CALL_EXPR_ARG (exp, 0);
14816 arg1 = CALL_EXPR_ARG (exp, 1);
14817 arg2 = CALL_EXPR_ARG (exp, 2);
14818
14819 tmode = TYPE_MODE (TREE_TYPE (arg0));
14820 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14821 gcc_assert (VECTOR_MODE_P (tmode));
14822
14823 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14824 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14825 elt = get_element_number (TREE_TYPE (arg0), arg2);
14826
14827 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14828 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14829
14830 op0 = force_reg (tmode, op0);
14831 op1 = force_reg (mode1, op1);
14832
14833 rs6000_expand_vector_set (op0, op1, elt);
14834
14835 return op0;
14836 }
14837
14838 /* Expand vec_ext builtin. */
14839 static rtx
14840 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14841 {
14842 machine_mode tmode, mode0;
14843 tree arg0, arg1;
14844 rtx op0;
14845 rtx op1;
14846
14847 arg0 = CALL_EXPR_ARG (exp, 0);
14848 arg1 = CALL_EXPR_ARG (exp, 1);
14849
14850 op0 = expand_normal (arg0);
14851 op1 = expand_normal (arg1);
14852
14853 /* Call get_element_number to validate arg1 if it is a constant. */
14854 if (TREE_CODE (arg1) == INTEGER_CST)
14855 (void) get_element_number (TREE_TYPE (arg0), arg1);
14856
14857 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14858 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14859 gcc_assert (VECTOR_MODE_P (mode0));
14860
14861 op0 = force_reg (mode0, op0);
14862
14863 if (optimize || !target || !register_operand (target, tmode))
14864 target = gen_reg_rtx (tmode);
14865
14866 rs6000_expand_vector_extract (target, op0, op1);
14867
14868 return target;
14869 }
14870
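/* Illustrative use of the extract path above:

       vector int v = ...;
       int x = vec_extract (v, 2);    constant selector: range-checked
                                      against the number of subparts
       int y = vec_extract (v, i);    variable selector: passed through
                                      to rs6000_expand_vector_extract

   Only a constant selector is validated here; a variable one is
   handled entirely by the generic extract expansion.  */
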
14871 /* Expand the builtin in EXP and store the result in TARGET. Store
14872 true in *EXPANDEDP if we found a builtin to expand. */
14873 static rtx
14874 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14875 {
14876 const struct builtin_description *d;
14877 size_t i;
14878 enum insn_code icode;
14879 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14880 tree arg0, arg1, arg2;
14881 rtx op0, pat;
14882 machine_mode tmode, mode0;
14883 enum rs6000_builtins fcode
14884 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14885
14886 if (rs6000_overloaded_builtin_p (fcode))
14887 {
14888 *expandedp = true;
14889 error ("unresolved overload for Altivec builtin %qF", fndecl);
14890
14891 /* Given it is invalid, just generate a normal call. */
14892 return expand_call (exp, target, false);
14893 }
14894
14895 target = altivec_expand_dst_builtin (exp, target, expandedp);
14896 if (*expandedp)
14897 return target;
14898
14899 *expandedp = true;
14900
14901 switch (fcode)
14902 {
14903 case ALTIVEC_BUILTIN_STVX_V2DF:
14904 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14905 case ALTIVEC_BUILTIN_STVX_V2DI:
14906 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14907 case ALTIVEC_BUILTIN_STVX_V4SF:
14908 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14909 case ALTIVEC_BUILTIN_STVX:
14910 case ALTIVEC_BUILTIN_STVX_V4SI:
14911 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14912 case ALTIVEC_BUILTIN_STVX_V8HI:
14913 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14914 case ALTIVEC_BUILTIN_STVX_V16QI:
14915 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14916 case ALTIVEC_BUILTIN_STVEBX:
14917 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14918 case ALTIVEC_BUILTIN_STVEHX:
14919 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14920 case ALTIVEC_BUILTIN_STVEWX:
14921 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14922 case ALTIVEC_BUILTIN_STVXL_V2DF:
14923 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14924 case ALTIVEC_BUILTIN_STVXL_V2DI:
14925 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14926 case ALTIVEC_BUILTIN_STVXL_V4SF:
14927 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14928 case ALTIVEC_BUILTIN_STVXL:
14929 case ALTIVEC_BUILTIN_STVXL_V4SI:
14930 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14931 case ALTIVEC_BUILTIN_STVXL_V8HI:
14932 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14933 case ALTIVEC_BUILTIN_STVXL_V16QI:
14934 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14935
14936 case ALTIVEC_BUILTIN_STVLX:
14937 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14938 case ALTIVEC_BUILTIN_STVLXL:
14939 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14940 case ALTIVEC_BUILTIN_STVRX:
14941 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14942 case ALTIVEC_BUILTIN_STVRXL:
14943 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14944
14945 case P9V_BUILTIN_STXVL:
14946 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14947
14948 case P9V_BUILTIN_XST_LEN_R:
14949 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14950
14951 case VSX_BUILTIN_STXVD2X_V1TI:
14952 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14953 case VSX_BUILTIN_STXVD2X_V2DF:
14954 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14955 case VSX_BUILTIN_STXVD2X_V2DI:
14956 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14957 case VSX_BUILTIN_STXVW4X_V4SF:
14958 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14959 case VSX_BUILTIN_STXVW4X_V4SI:
14960 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14961 case VSX_BUILTIN_STXVW4X_V8HI:
14962 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14963 case VSX_BUILTIN_STXVW4X_V16QI:
14964 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14965
14966 /* For the following on big endian, it's ok to use any appropriate
14967 unaligned-supporting store, so use a generic expander. For
14968 little-endian, the exact element-reversing instruction must
14969 be used. */
14970 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14971 {
14972 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14973 : CODE_FOR_vsx_st_elemrev_v1ti);
14974 return altivec_expand_stv_builtin (code, exp);
14975 }
14976 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14977 {
14978 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14979 : CODE_FOR_vsx_st_elemrev_v2df);
14980 return altivec_expand_stv_builtin (code, exp);
14981 }
14982 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14983 {
14984 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14985 : CODE_FOR_vsx_st_elemrev_v2di);
14986 return altivec_expand_stv_builtin (code, exp);
14987 }
14988 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14989 {
14990 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14991 : CODE_FOR_vsx_st_elemrev_v4sf);
14992 return altivec_expand_stv_builtin (code, exp);
14993 }
14994 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14995 {
14996 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14997 : CODE_FOR_vsx_st_elemrev_v4si);
14998 return altivec_expand_stv_builtin (code, exp);
14999 }
15000 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15001 {
15002 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15003 : CODE_FOR_vsx_st_elemrev_v8hi);
15004 return altivec_expand_stv_builtin (code, exp);
15005 }
15006 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15007 {
15008 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15009 : CODE_FOR_vsx_st_elemrev_v16qi);
15010 return altivec_expand_stv_builtin (code, exp);
15011 }
15012
15013 case ALTIVEC_BUILTIN_MFVSCR:
15014 icode = CODE_FOR_altivec_mfvscr;
15015 tmode = insn_data[icode].operand[0].mode;
15016
15017 if (target == 0
15018 || GET_MODE (target) != tmode
15019 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15020 target = gen_reg_rtx (tmode);
15021
15022 pat = GEN_FCN (icode) (target);
15023 if (! pat)
15024 return 0;
15025 emit_insn (pat);
15026 return target;
15027
15028 case ALTIVEC_BUILTIN_MTVSCR:
15029 icode = CODE_FOR_altivec_mtvscr;
15030 arg0 = CALL_EXPR_ARG (exp, 0);
15031 op0 = expand_normal (arg0);
15032 mode0 = insn_data[icode].operand[0].mode;
15033
15034 /* If we got invalid arguments, bail out before generating bad rtl. */
15035 if (arg0 == error_mark_node)
15036 return const0_rtx;
15037
15038 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15039 op0 = copy_to_mode_reg (mode0, op0);
15040
15041 pat = GEN_FCN (icode) (op0);
15042 if (pat)
15043 emit_insn (pat);
15044 return NULL_RTX;
15045
15046 case ALTIVEC_BUILTIN_DSSALL:
15047 emit_insn (gen_altivec_dssall ());
15048 return NULL_RTX;
15049
15050 case ALTIVEC_BUILTIN_DSS:
15051 icode = CODE_FOR_altivec_dss;
15052 arg0 = CALL_EXPR_ARG (exp, 0);
15053 STRIP_NOPS (arg0);
15054 op0 = expand_normal (arg0);
15055 mode0 = insn_data[icode].operand[0].mode;
15056
15057 /* If we got invalid arguments, bail out before generating bad rtl. */
15058 if (arg0 == error_mark_node)
15059 return const0_rtx;
15060
15061 if (TREE_CODE (arg0) != INTEGER_CST
15062 || TREE_INT_CST_LOW (arg0) & ~0x3)
15063 {
15064 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15065 return const0_rtx;
15066 }
15067
15068 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15069 op0 = copy_to_mode_reg (mode0, op0);
15070
15071 emit_insn (gen_altivec_dss (op0));
15072 return NULL_RTX;
15073
15074 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15075 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15076 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15077 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15078 case VSX_BUILTIN_VEC_INIT_V2DF:
15079 case VSX_BUILTIN_VEC_INIT_V2DI:
15080 case VSX_BUILTIN_VEC_INIT_V1TI:
15081 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15082
15083 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15084 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15085 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15086 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15087 case VSX_BUILTIN_VEC_SET_V2DF:
15088 case VSX_BUILTIN_VEC_SET_V2DI:
15089 case VSX_BUILTIN_VEC_SET_V1TI:
15090 return altivec_expand_vec_set_builtin (exp);
15091
15092 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15093 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15094 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15095 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15096 case VSX_BUILTIN_VEC_EXT_V2DF:
15097 case VSX_BUILTIN_VEC_EXT_V2DI:
15098 case VSX_BUILTIN_VEC_EXT_V1TI:
15099 return altivec_expand_vec_ext_builtin (exp, target);
15100
15101 case P9V_BUILTIN_VEC_EXTRACT4B:
15102 arg1 = CALL_EXPR_ARG (exp, 1);
15103 STRIP_NOPS (arg1);
15104
15105 /* Generate a normal call if it is invalid. */
15106 if (arg1 == error_mark_node)
15107 return expand_call (exp, target, false);
15108
15109 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15110 {
15111 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15112 return expand_call (exp, target, false);
15113 }
15114 break;
15115
15116 case P9V_BUILTIN_VEC_INSERT4B:
15117 arg2 = CALL_EXPR_ARG (exp, 2);
15118 STRIP_NOPS (arg2);
15119
15120 /* Generate a normal call if it is invalid. */
15121 if (arg2 == error_mark_node)
15122 return expand_call (exp, target, false);
15123
15124 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15125 {
15126 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15127 return expand_call (exp, target, false);
15128 }
15129 break;
15130
15131 default:
15132 break;
15134 }
15135
15136 /* Expand abs* operations. */
15137 d = bdesc_abs;
15138 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15139 if (d->code == fcode)
15140 return altivec_expand_abs_builtin (d->icode, exp, target);
15141
15142 /* Expand the AltiVec predicates. */
15143 d = bdesc_altivec_preds;
15144 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15145 if (d->code == fcode)
15146 return altivec_expand_predicate_builtin (d->icode, exp, target);
15147
15148 /* The LV* built-ins are initialized differently from the others, so expand them separately here. */
15149 switch (fcode)
15150 {
15151 case ALTIVEC_BUILTIN_LVSL:
15152 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15153 exp, target, false);
15154 case ALTIVEC_BUILTIN_LVSR:
15155 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15156 exp, target, false);
15157 case ALTIVEC_BUILTIN_LVEBX:
15158 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15159 exp, target, false);
15160 case ALTIVEC_BUILTIN_LVEHX:
15161 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15162 exp, target, false);
15163 case ALTIVEC_BUILTIN_LVEWX:
15164 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15165 exp, target, false);
15166 case ALTIVEC_BUILTIN_LVXL_V2DF:
15167 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15168 exp, target, false);
15169 case ALTIVEC_BUILTIN_LVXL_V2DI:
15170 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15171 exp, target, false);
15172 case ALTIVEC_BUILTIN_LVXL_V4SF:
15173 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15174 exp, target, false);
15175 case ALTIVEC_BUILTIN_LVXL:
15176 case ALTIVEC_BUILTIN_LVXL_V4SI:
15177 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15178 exp, target, false);
15179 case ALTIVEC_BUILTIN_LVXL_V8HI:
15180 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15181 exp, target, false);
15182 case ALTIVEC_BUILTIN_LVXL_V16QI:
15183 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15184 exp, target, false);
15185 case ALTIVEC_BUILTIN_LVX_V1TI:
15186 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15187 exp, target, false);
15188 case ALTIVEC_BUILTIN_LVX_V2DF:
15189 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15190 exp, target, false);
15191 case ALTIVEC_BUILTIN_LVX_V2DI:
15192 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15193 exp, target, false);
15194 case ALTIVEC_BUILTIN_LVX_V4SF:
15195 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15196 exp, target, false);
15197 case ALTIVEC_BUILTIN_LVX:
15198 case ALTIVEC_BUILTIN_LVX_V4SI:
15199 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15200 exp, target, false);
15201 case ALTIVEC_BUILTIN_LVX_V8HI:
15202 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15203 exp, target, false);
15204 case ALTIVEC_BUILTIN_LVX_V16QI:
15205 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15206 exp, target, false);
15207 case ALTIVEC_BUILTIN_LVLX:
15208 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15209 exp, target, true);
15210 case ALTIVEC_BUILTIN_LVLXL:
15211 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15212 exp, target, true);
15213 case ALTIVEC_BUILTIN_LVRX:
15214 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15215 exp, target, true);
15216 case ALTIVEC_BUILTIN_LVRXL:
15217 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15218 exp, target, true);
15219 case VSX_BUILTIN_LXVD2X_V1TI:
15220 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15221 exp, target, false);
15222 case VSX_BUILTIN_LXVD2X_V2DF:
15223 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15224 exp, target, false);
15225 case VSX_BUILTIN_LXVD2X_V2DI:
15226 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15227 exp, target, false);
15228 case VSX_BUILTIN_LXVW4X_V4SF:
15229 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15230 exp, target, false);
15231 case VSX_BUILTIN_LXVW4X_V4SI:
15232 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15233 exp, target, false);
15234 case VSX_BUILTIN_LXVW4X_V8HI:
15235 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15236 exp, target, false);
15237 case VSX_BUILTIN_LXVW4X_V16QI:
15238 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15239 exp, target, false);
15240 /* For the following on big endian, it's ok to use any appropriate
15241 unaligned-supporting load, so use a generic expander. For
15242 little-endian, the exact element-reversing instruction must
15243 be used. */
15244 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15245 {
15246 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15247 : CODE_FOR_vsx_ld_elemrev_v2df);
15248 return altivec_expand_lv_builtin (code, exp, target, false);
15249 }
15250 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15251 {
15252 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15253 : CODE_FOR_vsx_ld_elemrev_v1ti);
15254 return altivec_expand_lv_builtin (code, exp, target, false);
15255 }
15256 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15257 {
15258 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15259 : CODE_FOR_vsx_ld_elemrev_v2di);
15260 return altivec_expand_lv_builtin (code, exp, target, false);
15261 }
15262 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15263 {
15264 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15265 : CODE_FOR_vsx_ld_elemrev_v4sf);
15266 return altivec_expand_lv_builtin (code, exp, target, false);
15267 }
15268 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15269 {
15270 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15271 : CODE_FOR_vsx_ld_elemrev_v4si);
15272 return altivec_expand_lv_builtin (code, exp, target, false);
15273 }
15274 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15275 {
15276 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15277 : CODE_FOR_vsx_ld_elemrev_v8hi);
15278 return altivec_expand_lv_builtin (code, exp, target, false);
15279 }
15280 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15281 {
15282 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15283 : CODE_FOR_vsx_ld_elemrev_v16qi);
15284 return altivec_expand_lv_builtin (code, exp, target, false);
15285 }
15286 break;
15287 default:
15288 break;
15290 }
15291
15292 *expandedp = false;
15293 return NULL_RTX;
15294 }
15295
15296 /* Check whether a builtin function is supported in this target
15297 configuration. */
15298 bool
15299 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15300 {
15301 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15302 return (fnmask & rs6000_builtin_mask) == fnmask;
15306 }
15307
15308 /* Raise an error message for a builtin function that is called without the
15309 appropriate target options being set. */
15310
15311 static void
15312 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15313 {
15314 size_t uns_fncode = (size_t) fncode;
15315 const char *name = rs6000_builtin_info[uns_fncode].name;
15316 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15317
15318 gcc_assert (name != NULL);
15319 if ((fnmask & RS6000_BTM_CELL) != 0)
15320 error ("builtin function %qs is only valid for the cell processor", name);
15321 else if ((fnmask & RS6000_BTM_VSX) != 0)
15322 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15323 else if ((fnmask & RS6000_BTM_HTM) != 0)
15324 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15325 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15326 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15327 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15328 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15329 error ("builtin function %qs requires the %qs and %qs options",
15330 name, "-mhard-dfp", "-mpower8-vector");
15331 else if ((fnmask & RS6000_BTM_DFP) != 0)
15332 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15333 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15334 error ("builtin function %qs requires the %qs option", name,
15335 "-mpower8-vector");
15336 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15337 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15338 error ("builtin function %qs requires the %qs and %qs options",
15339 name, "-mcpu=power9", "-m64");
15340 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15341 error ("builtin function %qs requires the %qs option", name,
15342 "-mcpu=power9");
15343 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15344 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15345 error ("builtin function %qs requires the %qs and %qs options",
15346 name, "-mcpu=power9", "-m64");
15347 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15348 error ("builtin function %qs requires the %qs option", name,
15349 "-mcpu=power9");
15350 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15351 {
15352 if (!TARGET_HARD_FLOAT)
15353 error ("builtin function %qs requires the %qs option", name,
15354 "-mhard-float");
15355 else
15356 error ("builtin function %qs requires the %qs option", name,
15357 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15358 }
15359 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15360 error ("builtin function %qs requires the %qs option", name,
15361 "-mhard-float");
15362 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15363 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15364 name);
15365 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15366 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15367 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15368 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15369 error ("builtin function %qs requires the %qs (or newer), and "
15370 "%qs or %qs options",
15371 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15372 else
15373 error ("builtin function %qs is not supported with the current options",
15374 name);
15375 }
15376
15377 /* Target hook for early folding of built-ins, shamelessly stolen
15378 from ia64.c. */
15379
15380 static tree
15381 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15382 int n_args ATTRIBUTE_UNUSED,
15383 tree *args ATTRIBUTE_UNUSED,
15384 bool ignore ATTRIBUTE_UNUSED)
15385 {
15386 #ifdef SUBTARGET_FOLD_BUILTIN
15387 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15388 #else
15389 return NULL_TREE;
15390 #endif
15391 }
15392
15393 /* Helper function to sort out which built-ins may be valid without having
15394 an LHS. */
15395 static bool
15396 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15397 {
15398 switch (fn_code)
15399 {
15400 case ALTIVEC_BUILTIN_STVX_V16QI:
15401 case ALTIVEC_BUILTIN_STVX_V8HI:
15402 case ALTIVEC_BUILTIN_STVX_V4SI:
15403 case ALTIVEC_BUILTIN_STVX_V4SF:
15404 case ALTIVEC_BUILTIN_STVX_V2DI:
15405 case ALTIVEC_BUILTIN_STVX_V2DF:
15406 return true;
15407 default:
15408 return false;
15409 }
15410 }
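/* Illustrative note (not part of the GCC sources): the cases above are
   stores, so a statement such as

     vec_st (v, 0, address);

   (which maps to one of the ALTIVEC_BUILTIN_STVX_* codes) produces a
   GIMPLE call with no LHS and must still be accepted by the folder.  */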
15411
15412 /* Helper function to handle the gimple folding of a vector compare
15413 operation. This sets up true/false vectors, and uses the
15414 VEC_COND_EXPR operation.
15415 CODE indicates which comparison is to be made. (EQ, GT, ...).
15416 TYPE indicates the type of the result. */
15417 static tree
15418 fold_build_vec_cmp (tree_code code, tree type,
15419 tree arg0, tree arg1)
15420 {
15421 tree cmp_type = build_same_sized_truth_vector_type (type);
15422 tree zero_vec = build_zero_cst (type);
15423 tree minus_one_vec = build_minus_one_cst (type);
15424 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15425 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15426 }
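/* Illustrative sketch (not part of the GCC sources): for V4SI operands
   the tree built above is equivalent to

     cmp = arg0 == arg1;                 (a same-sized truth vector)
     lhs = cmp ? { -1, -1, -1, -1 } : { 0, 0, 0, 0 };

   i.e. each result lane is all-ones where the comparison holds and zero
   elsewhere, matching the AltiVec/VSX vcmp* semantics.  */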
15427
15428 /* Helper function to handle the in-between steps for the
15429 vector compare built-ins. */
15430 static void
15431 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15432 {
15433 tree arg0 = gimple_call_arg (stmt, 0);
15434 tree arg1 = gimple_call_arg (stmt, 1);
15435 tree lhs = gimple_call_lhs (stmt);
15436 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15437 gimple *g = gimple_build_assign (lhs, cmp);
15438 gimple_set_location (g, gimple_location (stmt));
15439 gsi_replace (gsi, g, true);
15440 }
15441
15442 /* Helper function to handle the vector merge[hl] built-ins. The
15443 implementation difference between the h and l versions lies in the
15444 values used when building the permute vector for the high-word versus
15445 low-word merge; the variance is keyed off the use_high parameter. */
15446 static void
15447 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15448 {
15449 tree arg0 = gimple_call_arg (stmt, 0);
15450 tree arg1 = gimple_call_arg (stmt, 1);
15451 tree lhs = gimple_call_lhs (stmt);
15452 tree lhs_type = TREE_TYPE (lhs);
15453 tree lhs_type_type = TREE_TYPE (lhs_type);
15454 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15455 int midpoint = n_elts / 2;
15456 int offset = 0;
15457
15458 if (use_high == 1)
15459 offset = midpoint;
15460
15461 tree_vector_builder elts (lhs_type, VECTOR_CST_NELTS (arg0), 1);
15462
15463 for (int i = 0; i < midpoint; i++)
15464 {
15465 elts.safe_push (build_int_cst (lhs_type_type, offset + i));
15466 elts.safe_push (build_int_cst (lhs_type_type, offset + n_elts + i));
15467 }
15468
15469 tree permute = elts.build ();
15470
15471 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15472 gimple_set_location (g, gimple_location (stmt));
15473 gsi_replace (gsi, g, true);
15474 }
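/* Illustrative example (not part of the GCC sources): for V4SI inputs,
   use_high == 0 builds the selector { 0, 4, 1, 5 } (vec_mergeh) and
   use_high == 1 builds { 2, 6, 3, 7 } (vec_mergel), so the resulting
   VEC_PERM_EXPR interleaves the first or second halves of the two
   operands.  */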
15475
15476 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15477 a constant, use rs6000_fold_builtin.) */
15478
15479 bool
15480 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15481 {
15482 gimple *stmt = gsi_stmt (*gsi);
15483 tree fndecl = gimple_call_fndecl (stmt);
15484 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15485 enum rs6000_builtins fn_code
15486 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15487 tree arg0, arg1, lhs, temp;
15488 gimple *g;
15489
15490 size_t uns_fncode = (size_t) fn_code;
15491 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15492 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15493 const char *fn_name2 = (icode != CODE_FOR_nothing)
15494 ? get_insn_name ((int) icode)
15495 : "nothing";
15496
15497 if (TARGET_DEBUG_BUILTIN)
15498 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15499 fn_code, fn_name1, fn_name2);
15500
15501 if (!rs6000_fold_gimple)
15502 return false;
15503
15504 /* Prevent gimple folding for code that does not have an LHS, unless it is
15505 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15506 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15507 return false;
15508
15509 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15510 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15511 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15512 if (!func_valid_p)
15513 return false;
15514
15515 switch (fn_code)
15516 {
15517 /* Flavors of vec_add. We deliberately don't expand
15518 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15519 TImode, resulting in much poorer code generation. */
15520 case ALTIVEC_BUILTIN_VADDUBM:
15521 case ALTIVEC_BUILTIN_VADDUHM:
15522 case ALTIVEC_BUILTIN_VADDUWM:
15523 case P8V_BUILTIN_VADDUDM:
15524 case ALTIVEC_BUILTIN_VADDFP:
15525 case VSX_BUILTIN_XVADDDP:
15526 arg0 = gimple_call_arg (stmt, 0);
15527 arg1 = gimple_call_arg (stmt, 1);
15528 lhs = gimple_call_lhs (stmt);
15529 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
15530 gimple_set_location (g, gimple_location (stmt));
15531 gsi_replace (gsi, g, true);
15532 return true;
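/* Illustrative example (not part of the GCC sources): given

     vector int z = vec_add (x, y);

   the replacement above rewrites the builtin call as plain GIMPLE

     z = x + y;

   which later optimizers understand directly.  */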
15533 /* Flavors of vec_sub. We deliberately don't expand
15534 P8V_BUILTIN_VSUBUQM. */
15535 case ALTIVEC_BUILTIN_VSUBUBM:
15536 case ALTIVEC_BUILTIN_VSUBUHM:
15537 case ALTIVEC_BUILTIN_VSUBUWM:
15538 case P8V_BUILTIN_VSUBUDM:
15539 case ALTIVEC_BUILTIN_VSUBFP:
15540 case VSX_BUILTIN_XVSUBDP:
15541 arg0 = gimple_call_arg (stmt, 0);
15542 arg1 = gimple_call_arg (stmt, 1);
15543 lhs = gimple_call_lhs (stmt);
15544 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
15545 gimple_set_location (g, gimple_location (stmt));
15546 gsi_replace (gsi, g, true);
15547 return true;
15548 case VSX_BUILTIN_XVMULSP:
15549 case VSX_BUILTIN_XVMULDP:
15550 arg0 = gimple_call_arg (stmt, 0);
15551 arg1 = gimple_call_arg (stmt, 1);
15552 lhs = gimple_call_lhs (stmt);
15553 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15554 gimple_set_location (g, gimple_location (stmt));
15555 gsi_replace (gsi, g, true);
15556 return true;
15557 /* Even element flavors of vec_mul (signed). */
15558 case ALTIVEC_BUILTIN_VMULESB:
15559 case ALTIVEC_BUILTIN_VMULESH:
15560 case P8V_BUILTIN_VMULESW:
15561 /* Even element flavors of vec_mul (unsigned). */
15562 case ALTIVEC_BUILTIN_VMULEUB:
15563 case ALTIVEC_BUILTIN_VMULEUH:
15564 case P8V_BUILTIN_VMULEUW:
15565 arg0 = gimple_call_arg (stmt, 0);
15566 arg1 = gimple_call_arg (stmt, 1);
15567 lhs = gimple_call_lhs (stmt);
15568 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15569 gimple_set_location (g, gimple_location (stmt));
15570 gsi_replace (gsi, g, true);
15571 return true;
15572 /* Odd element flavors of vec_mul (signed). */
15573 case ALTIVEC_BUILTIN_VMULOSB:
15574 case ALTIVEC_BUILTIN_VMULOSH:
15575 case P8V_BUILTIN_VMULOSW:
15576 /* Odd element flavors of vec_mul (unsigned). */
15577 case ALTIVEC_BUILTIN_VMULOUB:
15578 case ALTIVEC_BUILTIN_VMULOUH:
15579 case P8V_BUILTIN_VMULOUW:
15580 arg0 = gimple_call_arg (stmt, 0);
15581 arg1 = gimple_call_arg (stmt, 1);
15582 lhs = gimple_call_lhs (stmt);
15583 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15584 gimple_set_location (g, gimple_location (stmt));
15585 gsi_replace (gsi, g, true);
15586 return true;
15587 /* Flavors of vec_div (Integer). */
15588 case VSX_BUILTIN_DIV_V2DI:
15589 case VSX_BUILTIN_UDIV_V2DI:
15590 arg0 = gimple_call_arg (stmt, 0);
15591 arg1 = gimple_call_arg (stmt, 1);
15592 lhs = gimple_call_lhs (stmt);
15593 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15594 gimple_set_location (g, gimple_location (stmt));
15595 gsi_replace (gsi, g, true);
15596 return true;
15597 /* Flavors of vec_div (Float). */
15598 case VSX_BUILTIN_XVDIVSP:
15599 case VSX_BUILTIN_XVDIVDP:
15600 arg0 = gimple_call_arg (stmt, 0);
15601 arg1 = gimple_call_arg (stmt, 1);
15602 lhs = gimple_call_lhs (stmt);
15603 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15604 gimple_set_location (g, gimple_location (stmt));
15605 gsi_replace (gsi, g, true);
15606 return true;
15607 /* Flavors of vec_and. */
15608 case ALTIVEC_BUILTIN_VAND:
15609 arg0 = gimple_call_arg (stmt, 0);
15610 arg1 = gimple_call_arg (stmt, 1);
15611 lhs = gimple_call_lhs (stmt);
15612 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15613 gimple_set_location (g, gimple_location (stmt));
15614 gsi_replace (gsi, g, true);
15615 return true;
15616 /* Flavors of vec_andc. */
15617 case ALTIVEC_BUILTIN_VANDC:
15618 arg0 = gimple_call_arg (stmt, 0);
15619 arg1 = gimple_call_arg (stmt, 1);
15620 lhs = gimple_call_lhs (stmt);
15621 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15622 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15623 gimple_set_location (g, gimple_location (stmt));
15624 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15625 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15626 gimple_set_location (g, gimple_location (stmt));
15627 gsi_replace (gsi, g, true);
15628 return true;
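/* Illustrative sketch (not part of the GCC sources): vec_andc (x, y)
   becomes the two-statement GIMPLE sequence

     tmp = ~y;
     lhs = x & tmp;

   with the BIT_NOT_EXPR statement inserted ahead of the replaced call.  */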
15629 /* Flavors of vec_nand. */
15630 case P8V_BUILTIN_VEC_NAND:
15631 case P8V_BUILTIN_NAND_V16QI:
15632 case P8V_BUILTIN_NAND_V8HI:
15633 case P8V_BUILTIN_NAND_V4SI:
15634 case P8V_BUILTIN_NAND_V4SF:
15635 case P8V_BUILTIN_NAND_V2DF:
15636 case P8V_BUILTIN_NAND_V2DI:
15637 arg0 = gimple_call_arg (stmt, 0);
15638 arg1 = gimple_call_arg (stmt, 1);
15639 lhs = gimple_call_lhs (stmt);
15640 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15641 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15642 gimple_set_location (g, gimple_location (stmt));
15643 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15644 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15645 gimple_set_location (g, gimple_location (stmt));
15646 gsi_replace (gsi, g, true);
15647 return true;
15648 /* Flavors of vec_or. */
15649 case ALTIVEC_BUILTIN_VOR:
15650 arg0 = gimple_call_arg (stmt, 0);
15651 arg1 = gimple_call_arg (stmt, 1);
15652 lhs = gimple_call_lhs (stmt);
15653 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15654 gimple_set_location (g, gimple_location (stmt));
15655 gsi_replace (gsi, g, true);
15656 return true;
15657 /* Flavors of vec_orc. */
15658 case P8V_BUILTIN_ORC_V16QI:
15659 case P8V_BUILTIN_ORC_V8HI:
15660 case P8V_BUILTIN_ORC_V4SI:
15661 case P8V_BUILTIN_ORC_V4SF:
15662 case P8V_BUILTIN_ORC_V2DF:
15663 case P8V_BUILTIN_ORC_V2DI:
15664 arg0 = gimple_call_arg (stmt, 0);
15665 arg1 = gimple_call_arg (stmt, 1);
15666 lhs = gimple_call_lhs (stmt);
15667 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15668 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15669 gimple_set_location (g, gimple_location (stmt));
15670 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15671 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15672 gimple_set_location (g, gimple_location (stmt));
15673 gsi_replace (gsi, g, true);
15674 return true;
15675 /* Flavors of vec_xor. */
15676 case ALTIVEC_BUILTIN_VXOR:
15677 arg0 = gimple_call_arg (stmt, 0);
15678 arg1 = gimple_call_arg (stmt, 1);
15679 lhs = gimple_call_lhs (stmt);
15680 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15681 gimple_set_location (g, gimple_location (stmt));
15682 gsi_replace (gsi, g, true);
15683 return true;
15684 /* Flavors of vec_nor. */
15685 case ALTIVEC_BUILTIN_VNOR:
15686 arg0 = gimple_call_arg (stmt, 0);
15687 arg1 = gimple_call_arg (stmt, 1);
15688 lhs = gimple_call_lhs (stmt);
15689 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15690 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15691 gimple_set_location (g, gimple_location (stmt));
15692 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15693 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15694 gimple_set_location (g, gimple_location (stmt));
15695 gsi_replace (gsi, g, true);
15696 return true;
15697 /* Flavors of vec_abs. */
15698 case ALTIVEC_BUILTIN_ABS_V16QI:
15699 case ALTIVEC_BUILTIN_ABS_V8HI:
15700 case ALTIVEC_BUILTIN_ABS_V4SI:
15701 case ALTIVEC_BUILTIN_ABS_V4SF:
15702 case P8V_BUILTIN_ABS_V2DI:
15703 case VSX_BUILTIN_XVABSDP:
15704 arg0 = gimple_call_arg (stmt, 0);
15705 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15706 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15707 return false;
15708 lhs = gimple_call_lhs (stmt);
15709 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15710 gimple_set_location (g, gimple_location (stmt));
15711 gsi_replace (gsi, g, true);
15712 return true;
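/* Note (illustrative, not part of the GCC sources): the guard above
   declines to fold integral vec_abs when signed overflow is undefined,
   since an ABS_EXPR of the most negative lane value (e.g. INT_MIN in a
   V4SI element) would introduce undefined behaviour that the hardware
   instruction sequence does not have.  */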
15713 /* Flavors of vec_min. */
15714 case VSX_BUILTIN_XVMINDP:
15715 case P8V_BUILTIN_VMINSD:
15716 case P8V_BUILTIN_VMINUD:
15717 case ALTIVEC_BUILTIN_VMINSB:
15718 case ALTIVEC_BUILTIN_VMINSH:
15719 case ALTIVEC_BUILTIN_VMINSW:
15720 case ALTIVEC_BUILTIN_VMINUB:
15721 case ALTIVEC_BUILTIN_VMINUH:
15722 case ALTIVEC_BUILTIN_VMINUW:
15723 case ALTIVEC_BUILTIN_VMINFP:
15724 arg0 = gimple_call_arg (stmt, 0);
15725 arg1 = gimple_call_arg (stmt, 1);
15726 lhs = gimple_call_lhs (stmt);
15727 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15728 gimple_set_location (g, gimple_location (stmt));
15729 gsi_replace (gsi, g, true);
15730 return true;
15731 /* Flavors of vec_max. */
15732 case VSX_BUILTIN_XVMAXDP:
15733 case P8V_BUILTIN_VMAXSD:
15734 case P8V_BUILTIN_VMAXUD:
15735 case ALTIVEC_BUILTIN_VMAXSB:
15736 case ALTIVEC_BUILTIN_VMAXSH:
15737 case ALTIVEC_BUILTIN_VMAXSW:
15738 case ALTIVEC_BUILTIN_VMAXUB:
15739 case ALTIVEC_BUILTIN_VMAXUH:
15740 case ALTIVEC_BUILTIN_VMAXUW:
15741 case ALTIVEC_BUILTIN_VMAXFP:
15742 arg0 = gimple_call_arg (stmt, 0);
15743 arg1 = gimple_call_arg (stmt, 1);
15744 lhs = gimple_call_lhs (stmt);
15745 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15746 gimple_set_location (g, gimple_location (stmt));
15747 gsi_replace (gsi, g, true);
15748 return true;
15749 /* Flavors of vec_eqv. */
15750 case P8V_BUILTIN_EQV_V16QI:
15751 case P8V_BUILTIN_EQV_V8HI:
15752 case P8V_BUILTIN_EQV_V4SI:
15753 case P8V_BUILTIN_EQV_V4SF:
15754 case P8V_BUILTIN_EQV_V2DF:
15755 case P8V_BUILTIN_EQV_V2DI:
15756 arg0 = gimple_call_arg (stmt, 0);
15757 arg1 = gimple_call_arg (stmt, 1);
15758 lhs = gimple_call_lhs (stmt);
15759 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15760 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15761 gimple_set_location (g, gimple_location (stmt));
15762 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15763 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15764 gimple_set_location (g, gimple_location (stmt));
15765 gsi_replace (gsi, g, true);
15766 return true;
15767 /* Flavors of vec_rotate_left. */
15768 case ALTIVEC_BUILTIN_VRLB:
15769 case ALTIVEC_BUILTIN_VRLH:
15770 case ALTIVEC_BUILTIN_VRLW:
15771 case P8V_BUILTIN_VRLD:
15772 arg0 = gimple_call_arg (stmt, 0);
15773 arg1 = gimple_call_arg (stmt, 1);
15774 lhs = gimple_call_lhs (stmt);
15775 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15776 gimple_set_location (g, gimple_location (stmt));
15777 gsi_replace (gsi, g, true);
15778 return true;
15779 /* Flavors of vector shift right algebraic.
15780 vec_sra{b,h,w} -> vsra{b,h,w}. */
15781 case ALTIVEC_BUILTIN_VSRAB:
15782 case ALTIVEC_BUILTIN_VSRAH:
15783 case ALTIVEC_BUILTIN_VSRAW:
15784 case P8V_BUILTIN_VSRAD:
15785 arg0 = gimple_call_arg (stmt, 0);
15786 arg1 = gimple_call_arg (stmt, 1);
15787 lhs = gimple_call_lhs (stmt);
15788 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15789 gimple_set_location (g, gimple_location (stmt));
15790 gsi_replace (gsi, g, true);
15791 return true;
15792 /* Flavors of vector shift left.
15793 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15794 case ALTIVEC_BUILTIN_VSLB:
15795 case ALTIVEC_BUILTIN_VSLH:
15796 case ALTIVEC_BUILTIN_VSLW:
15797 case P8V_BUILTIN_VSLD:
15798 arg0 = gimple_call_arg (stmt, 0);
15799 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15800 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15801 return false;
15802 arg1 = gimple_call_arg (stmt, 1);
15803 lhs = gimple_call_lhs (stmt);
15804 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
15805 gimple_set_location (g, gimple_location (stmt));
15806 gsi_replace (gsi, g, true);
15807 return true;
15808 /* Flavors of vector shift right. */
15809 case ALTIVEC_BUILTIN_VSRB:
15810 case ALTIVEC_BUILTIN_VSRH:
15811 case ALTIVEC_BUILTIN_VSRW:
15812 case P8V_BUILTIN_VSRD:
15813 {
15814 arg0 = gimple_call_arg (stmt, 0);
15815 arg1 = gimple_call_arg (stmt, 1);
15816 lhs = gimple_call_lhs (stmt);
15817 gimple_seq stmts = NULL;
15818 /* Convert arg0 to unsigned. */
15819 tree arg0_unsigned
15820 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15821 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15822 tree res
15823 = gimple_build (&stmts, RSHIFT_EXPR,
15824 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15825 /* Convert result back to the lhs type. */
15826 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15827 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15828 update_call_from_tree (gsi, res);
15829 return true;
15830 }
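/* Illustrative sketch (not part of the GCC sources): vec_sr is a logical
   shift, so for a signed input the sequence above is equivalent to

     u   = VIEW_CONVERT_EXPR <unsigned vector> (arg0);
     t   = u >> arg1;
     res = VIEW_CONVERT_EXPR <TREE_TYPE (lhs)> (t);

   which guarantees zero-fill rather than sign-fill of the vacated bits.  */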
15831 /* Vector loads. */
15832 case ALTIVEC_BUILTIN_LVX_V16QI:
15833 case ALTIVEC_BUILTIN_LVX_V8HI:
15834 case ALTIVEC_BUILTIN_LVX_V4SI:
15835 case ALTIVEC_BUILTIN_LVX_V4SF:
15836 case ALTIVEC_BUILTIN_LVX_V2DI:
15837 case ALTIVEC_BUILTIN_LVX_V2DF:
15838 case ALTIVEC_BUILTIN_LVX_V1TI:
15839 {
15840 arg0 = gimple_call_arg (stmt, 0); // offset
15841 arg1 = gimple_call_arg (stmt, 1); // address
15842 lhs = gimple_call_lhs (stmt);
15843 location_t loc = gimple_location (stmt);
15844 /* Since arg1 may be cast to a different type, just use ptr_type_node
15845 here instead of trying to enforce TBAA on pointer types. */
15846 tree arg1_type = ptr_type_node;
15847 tree lhs_type = TREE_TYPE (lhs);
15848 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15849 the tree using the value from arg0. The resulting type will match
15850 the type of arg1. */
15851 gimple_seq stmts = NULL;
15852 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15853 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15854 arg1_type, arg1, temp_offset);
15855 /* Mask off any lower bits from the address. */
15856 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15857 arg1_type, temp_addr,
15858 build_int_cst (arg1_type, -16));
15859 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15860 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15861 take an offset, but since we've already incorporated the offset
15862 above, here we just pass in a zero. */
15863 gimple *g
15864 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15865 build_int_cst (arg1_type, 0)));
15866 gimple_set_location (g, loc);
15867 gsi_replace (gsi, g, true);
15868 return true;
15869 }
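/* Illustrative example (not part of the GCC sources): lvx ignores the low
   four bits of the effective address, so for

     v = vec_ld (offset, ptr);

   the sequence above computes (ptr + offset) & -16 and loads the aligned
   16 bytes at that address; the BIT_AND_EXPR with -16 reproduces the
   hardware truncation to a 16-byte boundary.  */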
15870 /* Vector stores. */
15871 case ALTIVEC_BUILTIN_STVX_V16QI:
15872 case ALTIVEC_BUILTIN_STVX_V8HI:
15873 case ALTIVEC_BUILTIN_STVX_V4SI:
15874 case ALTIVEC_BUILTIN_STVX_V4SF:
15875 case ALTIVEC_BUILTIN_STVX_V2DI:
15876 case ALTIVEC_BUILTIN_STVX_V2DF:
15877 {
15878 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15879 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15880 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15881 location_t loc = gimple_location (stmt);
15882 tree arg0_type = TREE_TYPE (arg0);
15883 /* Use ptr_type_node (no TBAA) for the arg2_type.
15884 FIXME: (Richard) "A proper fix would be to transition this type as
15885 seen from the frontend to GIMPLE, for example in a similar way we
15886 do for MEM_REFs by piggy-backing that on an extra argument, a
15887 constant zero pointer of the alias pointer type to use (which would
15888 also serve as a type indicator of the store itself). I'd use a
15889 target specific internal function for this (not sure if we can have
15890 those target specific, but I guess if it's folded away then that's
15891 fine) and get away with the overload set." */
15892 tree arg2_type = ptr_type_node;
15893 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15894 the tree using the value from arg1. The resulting type will match
15895 the type of arg2. */
15896 gimple_seq stmts = NULL;
15897 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15898 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15899 arg2_type, arg2, temp_offset);
15900 /* Mask off any lower bits from the address. */
15901 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15902 arg2_type, temp_addr,
15903 build_int_cst (arg2_type, -16));
15904 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15905 /* The desired gimple result should be similar to:
15906 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15907 gimple *g
15908 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15909 build_int_cst (arg2_type, 0)), arg0);
15910 gimple_set_location (g, loc);
15911 gsi_replace (gsi, g, true);
15912 return true;
15913 }
15914
15915 /* Vector Fused multiply-add (fma). */
15916 case ALTIVEC_BUILTIN_VMADDFP:
15917 case VSX_BUILTIN_XVMADDDP:
15918 case ALTIVEC_BUILTIN_VMLADDUHM:
15919 {
15920 arg0 = gimple_call_arg (stmt, 0);
15921 arg1 = gimple_call_arg (stmt, 1);
15922 tree arg2 = gimple_call_arg (stmt, 2);
15923 lhs = gimple_call_lhs (stmt);
15924 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15925 gimple_call_set_lhs (g, lhs);
15926 gimple_call_set_nothrow (g, true);
15927 gimple_set_location (g, gimple_location (stmt));
15928 gsi_replace (gsi, g, true);
15929 return true;
15930 }
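/* Illustrative sketch (not part of the GCC sources): vec_madd (a, b, c)
   becomes a call to the internal function IFN_FMA,

     lhs = .FMA (a, b, c);

   which later expands to a fused multiply-add pattern instead of separate
   multiply and add statements.  */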
15931
15932 /* Vector compares: EQ, NE, GE, GT, LE. */
15933 case ALTIVEC_BUILTIN_VCMPEQUB:
15934 case ALTIVEC_BUILTIN_VCMPEQUH:
15935 case ALTIVEC_BUILTIN_VCMPEQUW:
15936 case P8V_BUILTIN_VCMPEQUD:
15937 fold_compare_helper (gsi, EQ_EXPR, stmt);
15938 return true;
15939
15940 case P9V_BUILTIN_CMPNEB:
15941 case P9V_BUILTIN_CMPNEH:
15942 case P9V_BUILTIN_CMPNEW:
15943 fold_compare_helper (gsi, NE_EXPR, stmt);
15944 return true;
15945
15946 case VSX_BUILTIN_CMPGE_16QI:
15947 case VSX_BUILTIN_CMPGE_U16QI:
15948 case VSX_BUILTIN_CMPGE_8HI:
15949 case VSX_BUILTIN_CMPGE_U8HI:
15950 case VSX_BUILTIN_CMPGE_4SI:
15951 case VSX_BUILTIN_CMPGE_U4SI:
15952 case VSX_BUILTIN_CMPGE_2DI:
15953 case VSX_BUILTIN_CMPGE_U2DI:
15954 fold_compare_helper (gsi, GE_EXPR, stmt);
15955 return true;
15956
15957 case ALTIVEC_BUILTIN_VCMPGTSB:
15958 case ALTIVEC_BUILTIN_VCMPGTUB:
15959 case ALTIVEC_BUILTIN_VCMPGTSH:
15960 case ALTIVEC_BUILTIN_VCMPGTUH:
15961 case ALTIVEC_BUILTIN_VCMPGTSW:
15962 case ALTIVEC_BUILTIN_VCMPGTUW:
15963 case P8V_BUILTIN_VCMPGTUD:
15964 case P8V_BUILTIN_VCMPGTSD:
15965 fold_compare_helper (gsi, GT_EXPR, stmt);
15966 return true;
15967
15968 case VSX_BUILTIN_CMPLE_16QI:
15969 case VSX_BUILTIN_CMPLE_U16QI:
15970 case VSX_BUILTIN_CMPLE_8HI:
15971 case VSX_BUILTIN_CMPLE_U8HI:
15972 case VSX_BUILTIN_CMPLE_4SI:
15973 case VSX_BUILTIN_CMPLE_U4SI:
15974 case VSX_BUILTIN_CMPLE_2DI:
15975 case VSX_BUILTIN_CMPLE_U2DI:
15976 fold_compare_helper (gsi, LE_EXPR, stmt);
15977 return true;
15978
15979 /* Flavors of vec_splat_[us]{8,16,32}. */
15980 case ALTIVEC_BUILTIN_VSPLTISB:
15981 case ALTIVEC_BUILTIN_VSPLTISH:
15982 case ALTIVEC_BUILTIN_VSPLTISW:
15983 {
15984 int size;
15985
15986 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
15987 size = 8;
15988 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
15989 size = 16;
15990 else
15991 size = 32;
15992
15993 arg0 = gimple_call_arg (stmt, 0);
15994 lhs = gimple_call_lhs (stmt);
15995
15996 /* Only fold the vec_splat_*() if the lower bits of arg0 form a
15997 5-bit signed constant in the range -16 to +15. */
15998 if (TREE_CODE (arg0) != INTEGER_CST
15999 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
16000 -16, 15))
16001 return false;
16002 gimple_seq stmts = NULL;
16003 location_t loc = gimple_location (stmt);
16004 tree splat_value = gimple_convert (&stmts, loc,
16005 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16006 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16007 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16008 g = gimple_build_assign (lhs, splat_tree);
16009 gimple_set_location (g, gimple_location (stmt));
16010 gsi_replace (gsi, g, true);
16011 return true;
16012 }
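/* Illustrative example (not part of the GCC sources): vec_splat_s32 (5)
   reaches here with arg0 == 5 and is folded to the vector constant

     lhs = { 5, 5, 5, 5 };

   whereas an argument outside the 5-bit signed range falls through to the
   normal expansion path via the "return false" above.  */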
16013
16014 /* vec_mergel (integrals). */
16015 case ALTIVEC_BUILTIN_VMRGLH:
16016 case ALTIVEC_BUILTIN_VMRGLW:
16017 case VSX_BUILTIN_XXMRGLW_4SI:
16018 case ALTIVEC_BUILTIN_VMRGLB:
16019 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16020 fold_mergehl_helper (gsi, stmt, 1);
16021 return true;
16022 /* vec_mergeh (integrals). */
16023 case ALTIVEC_BUILTIN_VMRGHH:
16024 case ALTIVEC_BUILTIN_VMRGHW:
16025 case VSX_BUILTIN_XXMRGHW_4SI:
16026 case ALTIVEC_BUILTIN_VMRGHB:
16027 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16028 fold_mergehl_helper (gsi, stmt, 0);
16029 return true;
16030 default:
16031 if (TARGET_DEBUG_BUILTIN)
16032 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16033 fn_code, fn_name1, fn_name2);
16034 break;
16035 }
16036
16037 return false;
16038 }
16039
16040 /* Expand an expression EXP that calls a built-in function,
16041 with result going to TARGET if that's convenient
16042 (and in mode MODE if that's convenient).
16043 SUBTARGET may be used as the target for computing one of EXP's operands.
16044 IGNORE is nonzero if the value is to be ignored. */
16045
16046 static rtx
16047 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16048 machine_mode mode ATTRIBUTE_UNUSED,
16049 int ignore ATTRIBUTE_UNUSED)
16050 {
16051 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16052 enum rs6000_builtins fcode
16053 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16054 size_t uns_fcode = (size_t)fcode;
16055 const struct builtin_description *d;
16056 size_t i;
16057 rtx ret;
16058 bool success;
16059 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16060 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16061 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16062
16063 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16064 floating point type, depending on whether long double is the IBM extended
16065 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16066 we only define one variant of the built-in function, and switch the code
16067 when defining it, rather than defining two built-ins and using the
16068 overload table in rs6000-c.c to switch between the two. If we don't have
16069 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16070 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16071 #ifdef HAVE_AS_POWER9
16072 if (FLOAT128_IEEE_P (TFmode))
16073 switch (icode)
16074 {
16075 default:
16076 break;
16077
16078 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16079 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16080 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16081 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16082 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16083 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16084 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16085 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16086 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16087 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16088 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16089 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16090 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16091 }
16092 #endif
16093
16094 if (TARGET_DEBUG_BUILTIN)
16095 {
16096 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16097 const char *name2 = (icode != CODE_FOR_nothing)
16098 ? get_insn_name ((int) icode)
16099 : "nothing";
16100 const char *name3;
16101
16102 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16103 {
16104 default: name3 = "unknown"; break;
16105 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16106 case RS6000_BTC_UNARY: name3 = "unary"; break;
16107 case RS6000_BTC_BINARY: name3 = "binary"; break;
16108 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16109 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16110 case RS6000_BTC_ABS: name3 = "abs"; break;
16111 case RS6000_BTC_DST: name3 = "dst"; break;
16112 }
16113
16114
16115 fprintf (stderr,
16116 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16117 (name1) ? name1 : "---", fcode,
16118 (name2) ? name2 : "---", (int) icode,
16119 name3,
16120 func_valid_p ? "" : ", not valid");
16121 }
16122
16123 if (!func_valid_p)
16124 {
16125 rs6000_invalid_builtin (fcode);
16126
16127 /* Given it is invalid, just generate a normal call. */
16128 return expand_call (exp, target, ignore);
16129 }
16130
16131 switch (fcode)
16132 {
16133 case RS6000_BUILTIN_RECIP:
16134 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16135
16136 case RS6000_BUILTIN_RECIPF:
16137 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16138
16139 case RS6000_BUILTIN_RSQRTF:
16140 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16141
16142 case RS6000_BUILTIN_RSQRT:
16143 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16144
16145 case POWER7_BUILTIN_BPERMD:
16146 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16147 ? CODE_FOR_bpermd_di
16148 : CODE_FOR_bpermd_si), exp, target);
16149
16150 case RS6000_BUILTIN_GET_TB:
16151 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16152 target);
16153
16154 case RS6000_BUILTIN_MFTB:
16155 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16156 ? CODE_FOR_rs6000_mftb_di
16157 : CODE_FOR_rs6000_mftb_si),
16158 target);
16159
16160 case RS6000_BUILTIN_MFFS:
16161 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16162
16163 case RS6000_BUILTIN_MTFSF:
16164 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16165
16166 case RS6000_BUILTIN_CPU_INIT:
16167 case RS6000_BUILTIN_CPU_IS:
16168 case RS6000_BUILTIN_CPU_SUPPORTS:
16169 return cpu_expand_builtin (fcode, exp, target);
16170
16171 case MISC_BUILTIN_SPEC_BARRIER:
16172 {
16173 emit_insn (gen_rs6000_speculation_barrier ());
16174 return NULL_RTX;
16175 }
16176
16177 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16178 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16179 {
16180 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16181 : (int) CODE_FOR_altivec_lvsl_direct);
16182 machine_mode tmode = insn_data[icode2].operand[0].mode;
16183 machine_mode mode = insn_data[icode2].operand[1].mode;
16184 tree arg;
16185 rtx op, addr, pat;
16186
16187 gcc_assert (TARGET_ALTIVEC);
16188
16189 arg = CALL_EXPR_ARG (exp, 0);
16190 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16191 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16192 addr = memory_address (mode, op);
16193 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16194 op = addr;
16195 else
16196 {
16197 /* For the load case we need to negate the address. */
16198 op = gen_reg_rtx (GET_MODE (addr));
16199 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16200 }
16201 op = gen_rtx_MEM (mode, op);
16202
16203 if (target == 0
16204 || GET_MODE (target) != tmode
16205 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16206 target = gen_reg_rtx (tmode);
16207
16208 pat = GEN_FCN (icode2) (target, op);
16209 if (!pat)
16210 return 0;
16211 emit_insn (pat);
16212
16213 return target;
16214 }
16215
16216 case ALTIVEC_BUILTIN_VCFUX:
16217 case ALTIVEC_BUILTIN_VCFSX:
16218 case ALTIVEC_BUILTIN_VCTUXS:
16219 case ALTIVEC_BUILTIN_VCTSXS:
16220 /* FIXME: There's got to be a nicer way to handle this case than
16221 constructing a new CALL_EXPR. */
16222 if (call_expr_nargs (exp) == 1)
16223 {
16224 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16225 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16226 }
16227 break;
16228
16229 /* For the pack and unpack int128 routines, fix up the builtin so it
16230 uses the correct IBM128 type. */
16231 case MISC_BUILTIN_PACK_IF:
16232 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16233 {
16234 icode = CODE_FOR_packtf;
16235 fcode = MISC_BUILTIN_PACK_TF;
16236 uns_fcode = (size_t)fcode;
16237 }
16238 break;
16239
16240 case MISC_BUILTIN_UNPACK_IF:
16241 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16242 {
16243 icode = CODE_FOR_unpacktf;
16244 fcode = MISC_BUILTIN_UNPACK_TF;
16245 uns_fcode = (size_t)fcode;
16246 }
16247 break;
16248
16249 default:
16250 break;
16251 }
16252
16253 if (TARGET_ALTIVEC)
16254 {
16255 ret = altivec_expand_builtin (exp, target, &success);
16256
16257 if (success)
16258 return ret;
16259 }
16260 if (TARGET_HTM)
16261 {
16262 ret = htm_expand_builtin (exp, target, &success);
16263
16264 if (success)
16265 return ret;
16266 }
16267
16268 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16269 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16270 gcc_assert (attr == RS6000_BTC_UNARY
16271 || attr == RS6000_BTC_BINARY
16272 || attr == RS6000_BTC_TERNARY
16273 || attr == RS6000_BTC_SPECIAL);
16274
16275 /* Handle simple unary operations. */
16276 d = bdesc_1arg;
16277 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16278 if (d->code == fcode)
16279 return rs6000_expand_unop_builtin (icode, exp, target);
16280
16281 /* Handle simple binary operations. */
16282 d = bdesc_2arg;
16283 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16284 if (d->code == fcode)
16285 return rs6000_expand_binop_builtin (icode, exp, target);
16286
16287 /* Handle simple ternary operations. */
16288 d = bdesc_3arg;
16289 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16290 if (d->code == fcode)
16291 return rs6000_expand_ternop_builtin (icode, exp, target);
16292
16293 /* Handle simple no-argument operations. */
16294 d = bdesc_0arg;
16295 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16296 if (d->code == fcode)
16297 return rs6000_expand_zeroop_builtin (icode, target);
16298
16299 gcc_unreachable ();
16300 }
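/* Illustrative note (not part of the GCC sources): built-ins without a
   dedicated case in the switch above fall through to the generated
   bdesc_* tables; e.g. a simple two-operand AltiVec builtin such as

     vector signed int m = vec_max (a, b);

   that was not already folded in GIMPLE is matched in bdesc_2arg and
   expanded through rs6000_expand_binop_builtin with the icode looked up
   earlier.  */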
16301
16302 /* Create a builtin vector type with a name, taking care not to give
16303 the canonical type a name. */
16304
16305 static tree
16306 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16307 {
16308 tree result = build_vector_type (elt_type, num_elts);
16309
16310 /* Copy so we don't give the canonical type a name. */
16311 result = build_variant_type_copy (result);
16312
16313 add_builtin_type (name, result);
16314
16315 return result;
16316 }
16317
16318 static void
16319 rs6000_init_builtins (void)
16320 {
16321 tree tdecl;
16322 tree ftype;
16323 machine_mode mode;
16324
16325 if (TARGET_DEBUG_BUILTIN)
16326 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16327 (TARGET_ALTIVEC) ? ", altivec" : "",
16328 (TARGET_VSX) ? ", vsx" : "");
16329
16330 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16331 : "__vector long long",
16332 intDI_type_node, 2);
16333 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16334 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16335 intSI_type_node, 4);
16336 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16337 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16338 intHI_type_node, 8);
16339 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16340 intQI_type_node, 16);
16341
16342 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16343 unsigned_intQI_type_node, 16);
16344 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16345 unsigned_intHI_type_node, 8);
16346 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16347 unsigned_intSI_type_node, 4);
16348 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16349 ? "__vector unsigned long"
16350 : "__vector unsigned long long",
16351 unsigned_intDI_type_node, 2);
16352
16353 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16354
16355 const_str_type_node
16356 = build_pointer_type (build_qualified_type (char_type_node,
16357 TYPE_QUAL_CONST));
16358
16359 /* We use V1TI mode as a special container to hold __int128_t items that
16360 must live in VSX registers. */
16361 if (intTI_type_node)
16362 {
16363 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16364 intTI_type_node, 1);
16365 unsigned_V1TI_type_node
16366 = rs6000_vector_type ("__vector unsigned __int128",
16367 unsigned_intTI_type_node, 1);
16368 }
16369
16370 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16371 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16372 'vector unsigned short'. */
16373
16374 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16375 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16376 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16377 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16378 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16379
16380 long_integer_type_internal_node = long_integer_type_node;
16381 long_unsigned_type_internal_node = long_unsigned_type_node;
16382 long_long_integer_type_internal_node = long_long_integer_type_node;
16383 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16384 intQI_type_internal_node = intQI_type_node;
16385 uintQI_type_internal_node = unsigned_intQI_type_node;
16386 intHI_type_internal_node = intHI_type_node;
16387 uintHI_type_internal_node = unsigned_intHI_type_node;
16388 intSI_type_internal_node = intSI_type_node;
16389 uintSI_type_internal_node = unsigned_intSI_type_node;
16390 intDI_type_internal_node = intDI_type_node;
16391 uintDI_type_internal_node = unsigned_intDI_type_node;
16392 intTI_type_internal_node = intTI_type_node;
16393 uintTI_type_internal_node = unsigned_intTI_type_node;
16394 float_type_internal_node = float_type_node;
16395 double_type_internal_node = double_type_node;
16396 long_double_type_internal_node = long_double_type_node;
16397 dfloat64_type_internal_node = dfloat64_type_node;
16398 dfloat128_type_internal_node = dfloat128_type_node;
16399 void_type_internal_node = void_type_node;
16400
16401 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16402 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16403 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16404 format that uses a pair of doubles, depending on the switches and
16405 defaults.
16406
16407 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16408 floating point, we need to make sure the type is non-zero, or else the
16409 self-test fails during bootstrap.
16410
16411 Always create __ibm128 as a separate type, even if the current long double
16412 format is IBM extended double.
16413
16414 For IEEE 128-bit floating point, always create the type __ieee128. If the
16415 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16416 __ieee128. */
16417 if (TARGET_FLOAT128_TYPE)
16418 {
16419 if (TARGET_IEEEQUAD || !TARGET_LONG_DOUBLE_128)
16420 {
16421 ibm128_float_type_node = make_node (REAL_TYPE);
16422 TYPE_PRECISION (ibm128_float_type_node) = 128;
16423 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16424 layout_type (ibm128_float_type_node);
16425 }
16426 else
16427 ibm128_float_type_node = long_double_type_node;
16428
16429 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16430 "__ibm128");
16431
16432 ieee128_float_type_node
16433 = TARGET_IEEEQUAD ? long_double_type_node : float128_type_node;
16434 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16435 "__ieee128");
16436 }
16437
16438 else
16439 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
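/* Illustrative note (not part of the GCC sources): on a configuration
   where TARGET_FLOAT128_TYPE is set, the registrations above make both

     __ibm128  pair_of_doubles;   (IBM double-double format)
     __ieee128 quad;              (IEEE 128-bit format)

   available as distinct types in user code, independent of which format
   'long double' currently uses.  */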
16440
16441 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16442 tree type node. */
16443 builtin_mode_to_type[QImode][0] = integer_type_node;
16444 builtin_mode_to_type[HImode][0] = integer_type_node;
16445 builtin_mode_to_type[SImode][0] = intSI_type_node;
16446 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16447 builtin_mode_to_type[DImode][0] = intDI_type_node;
16448 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16449 builtin_mode_to_type[TImode][0] = intTI_type_node;
16450 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16451 builtin_mode_to_type[SFmode][0] = float_type_node;
16452 builtin_mode_to_type[DFmode][0] = double_type_node;
16453 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16454 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16455 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16456 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16457 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16458 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16459 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16460 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16461 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16462 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16463 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16464 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16465 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16466 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16467 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16468 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16469 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16470
16471 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16472 TYPE_NAME (bool_char_type_node) = tdecl;
16473
16474 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16475 TYPE_NAME (bool_short_type_node) = tdecl;
16476
16477 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16478 TYPE_NAME (bool_int_type_node) = tdecl;
16479
16480 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16481 TYPE_NAME (pixel_type_node) = tdecl;
16482
16483 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16484 bool_char_type_node, 16);
16485 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16486 bool_short_type_node, 8);
16487 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16488 bool_int_type_node, 4);
16489 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16490 ? "__vector __bool long"
16491 : "__vector __bool long long",
16492 bool_long_long_type_node, 2);
16493 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16494 pixel_type_node, 8);
16495
16496 /* Create Altivec and VSX builtins on machines with at least the
16497 general purpose extensions (970 and newer) to allow the use of
16498 the target attribute. */
16499 if (TARGET_EXTRA_BUILTINS)
16500 altivec_init_builtins ();
16501 if (TARGET_HTM)
16502 htm_init_builtins ();
16503
16504 if (TARGET_EXTRA_BUILTINS)
16505 rs6000_common_init_builtins ();
16506
16507 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16508 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16509 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16510
16511 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16512 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16513 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16514
16515 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16516 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16517 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16518
16519 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16520 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16521 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16522
16523 mode = (TARGET_64BIT) ? DImode : SImode;
16524 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16525 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16526 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16527
16528 ftype = build_function_type_list (unsigned_intDI_type_node,
16529 NULL_TREE);
16530 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16531
16532 if (TARGET_64BIT)
16533 ftype = build_function_type_list (unsigned_intDI_type_node,
16534 NULL_TREE);
16535 else
16536 ftype = build_function_type_list (unsigned_intSI_type_node,
16537 NULL_TREE);
16538 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16539
16540 ftype = build_function_type_list (double_type_node, NULL_TREE);
16541 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16542
16543 ftype = build_function_type_list (void_type_node,
16544 intSI_type_node, double_type_node,
16545 NULL_TREE);
16546 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16547
16548 ftype = build_function_type_list (void_type_node, NULL_TREE);
16549 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16550 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16551 MISC_BUILTIN_SPEC_BARRIER);
16552
16553 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16554 NULL_TREE);
16555 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16556 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16557
16558 /* AIX libm provides clog as __clog. */
16559 if (TARGET_XCOFF
16560 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16561 set_user_assembler_name (tdecl, "__clog");
16562
16563 #ifdef SUBTARGET_INIT_BUILTINS
16564 SUBTARGET_INIT_BUILTINS;
16565 #endif
16566 }
16567
16568 /* Returns the rs6000 builtin decl for CODE. */
16569
16570 static tree
16571 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16572 {
16573 HOST_WIDE_INT fnmask;
16574
16575 if (code >= RS6000_BUILTIN_COUNT)
16576 return error_mark_node;
16577
16578 fnmask = rs6000_builtin_info[code].mask;
16579 if ((fnmask & rs6000_builtin_mask) != fnmask)
16580 {
16581 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16582 return error_mark_node;
16583 }
16584
16585 return rs6000_builtin_decls[code];
16586 }
16587
16588 static void
16589 altivec_init_builtins (void)
16590 {
16591 const struct builtin_description *d;
16592 size_t i;
16593 tree ftype;
16594 tree decl;
16595 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16596
16597 tree pvoid_type_node = build_pointer_type (void_type_node);
16598
16599 tree pcvoid_type_node
16600 = build_pointer_type (build_qualified_type (void_type_node,
16601 TYPE_QUAL_CONST));
16602
16603 tree int_ftype_opaque
16604 = build_function_type_list (integer_type_node,
16605 opaque_V4SI_type_node, NULL_TREE);
16606 tree opaque_ftype_opaque
16607 = build_function_type_list (integer_type_node, NULL_TREE);
16608 tree opaque_ftype_opaque_int
16609 = build_function_type_list (opaque_V4SI_type_node,
16610 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16611 tree opaque_ftype_opaque_opaque_int
16612 = build_function_type_list (opaque_V4SI_type_node,
16613 opaque_V4SI_type_node, opaque_V4SI_type_node,
16614 integer_type_node, NULL_TREE);
16615 tree opaque_ftype_opaque_opaque_opaque
16616 = build_function_type_list (opaque_V4SI_type_node,
16617 opaque_V4SI_type_node, opaque_V4SI_type_node,
16618 opaque_V4SI_type_node, NULL_TREE);
16619 tree opaque_ftype_opaque_opaque
16620 = build_function_type_list (opaque_V4SI_type_node,
16621 opaque_V4SI_type_node, opaque_V4SI_type_node,
16622 NULL_TREE);
16623 tree int_ftype_int_opaque_opaque
16624 = build_function_type_list (integer_type_node,
16625 integer_type_node, opaque_V4SI_type_node,
16626 opaque_V4SI_type_node, NULL_TREE);
16627 tree int_ftype_int_v4si_v4si
16628 = build_function_type_list (integer_type_node,
16629 integer_type_node, V4SI_type_node,
16630 V4SI_type_node, NULL_TREE);
16631 tree int_ftype_int_v2di_v2di
16632 = build_function_type_list (integer_type_node,
16633 integer_type_node, V2DI_type_node,
16634 V2DI_type_node, NULL_TREE);
16635 tree void_ftype_v4si
16636 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16637 tree v8hi_ftype_void
16638 = build_function_type_list (V8HI_type_node, NULL_TREE);
16639 tree void_ftype_void
16640 = build_function_type_list (void_type_node, NULL_TREE);
16641 tree void_ftype_int
16642 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16643
16644 tree opaque_ftype_long_pcvoid
16645 = build_function_type_list (opaque_V4SI_type_node,
16646 long_integer_type_node, pcvoid_type_node,
16647 NULL_TREE);
16648 tree v16qi_ftype_long_pcvoid
16649 = build_function_type_list (V16QI_type_node,
16650 long_integer_type_node, pcvoid_type_node,
16651 NULL_TREE);
16652 tree v8hi_ftype_long_pcvoid
16653 = build_function_type_list (V8HI_type_node,
16654 long_integer_type_node, pcvoid_type_node,
16655 NULL_TREE);
16656 tree v4si_ftype_long_pcvoid
16657 = build_function_type_list (V4SI_type_node,
16658 long_integer_type_node, pcvoid_type_node,
16659 NULL_TREE);
16660 tree v4sf_ftype_long_pcvoid
16661 = build_function_type_list (V4SF_type_node,
16662 long_integer_type_node, pcvoid_type_node,
16663 NULL_TREE);
16664 tree v2df_ftype_long_pcvoid
16665 = build_function_type_list (V2DF_type_node,
16666 long_integer_type_node, pcvoid_type_node,
16667 NULL_TREE);
16668 tree v2di_ftype_long_pcvoid
16669 = build_function_type_list (V2DI_type_node,
16670 long_integer_type_node, pcvoid_type_node,
16671 NULL_TREE);
16672 tree v1ti_ftype_long_pcvoid
16673 = build_function_type_list (V1TI_type_node,
16674 long_integer_type_node, pcvoid_type_node,
16675 NULL_TREE);
16676
16677 tree void_ftype_opaque_long_pvoid
16678 = build_function_type_list (void_type_node,
16679 opaque_V4SI_type_node, long_integer_type_node,
16680 pvoid_type_node, NULL_TREE);
16681 tree void_ftype_v4si_long_pvoid
16682 = build_function_type_list (void_type_node,
16683 V4SI_type_node, long_integer_type_node,
16684 pvoid_type_node, NULL_TREE);
16685 tree void_ftype_v16qi_long_pvoid
16686 = build_function_type_list (void_type_node,
16687 V16QI_type_node, long_integer_type_node,
16688 pvoid_type_node, NULL_TREE);
16689
16690 tree void_ftype_v16qi_pvoid_long
16691 = build_function_type_list (void_type_node,
16692 V16QI_type_node, pvoid_type_node,
16693 long_integer_type_node, NULL_TREE);
16694
16695 tree void_ftype_v8hi_long_pvoid
16696 = build_function_type_list (void_type_node,
16697 V8HI_type_node, long_integer_type_node,
16698 pvoid_type_node, NULL_TREE);
16699 tree void_ftype_v4sf_long_pvoid
16700 = build_function_type_list (void_type_node,
16701 V4SF_type_node, long_integer_type_node,
16702 pvoid_type_node, NULL_TREE);
16703 tree void_ftype_v2df_long_pvoid
16704 = build_function_type_list (void_type_node,
16705 V2DF_type_node, long_integer_type_node,
16706 pvoid_type_node, NULL_TREE);
16707 tree void_ftype_v1ti_long_pvoid
16708 = build_function_type_list (void_type_node,
16709 V1TI_type_node, long_integer_type_node,
16710 pvoid_type_node, NULL_TREE);
16711 tree void_ftype_v2di_long_pvoid
16712 = build_function_type_list (void_type_node,
16713 V2DI_type_node, long_integer_type_node,
16714 pvoid_type_node, NULL_TREE);
16715 tree int_ftype_int_v8hi_v8hi
16716 = build_function_type_list (integer_type_node,
16717 integer_type_node, V8HI_type_node,
16718 V8HI_type_node, NULL_TREE);
16719 tree int_ftype_int_v16qi_v16qi
16720 = build_function_type_list (integer_type_node,
16721 integer_type_node, V16QI_type_node,
16722 V16QI_type_node, NULL_TREE);
16723 tree int_ftype_int_v4sf_v4sf
16724 = build_function_type_list (integer_type_node,
16725 integer_type_node, V4SF_type_node,
16726 V4SF_type_node, NULL_TREE);
16727 tree int_ftype_int_v2df_v2df
16728 = build_function_type_list (integer_type_node,
16729 integer_type_node, V2DF_type_node,
16730 V2DF_type_node, NULL_TREE);
16731 tree v2di_ftype_v2di
16732 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16733 tree v4si_ftype_v4si
16734 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16735 tree v8hi_ftype_v8hi
16736 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16737 tree v16qi_ftype_v16qi
16738 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16739 tree v4sf_ftype_v4sf
16740 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16741 tree v2df_ftype_v2df
16742 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16743 tree void_ftype_pcvoid_int_int
16744 = build_function_type_list (void_type_node,
16745 pcvoid_type_node, integer_type_node,
16746 integer_type_node, NULL_TREE);
16747
16748 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16749 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16750 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16751 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16752 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16753 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16754 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16755 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16756 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16757 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16758 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16759 ALTIVEC_BUILTIN_LVXL_V2DF);
16760 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16761 ALTIVEC_BUILTIN_LVXL_V2DI);
16762 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16763 ALTIVEC_BUILTIN_LVXL_V4SF);
16764 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16765 ALTIVEC_BUILTIN_LVXL_V4SI);
16766 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16767 ALTIVEC_BUILTIN_LVXL_V8HI);
16768 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16769 ALTIVEC_BUILTIN_LVXL_V16QI);
16770 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16771 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16772 ALTIVEC_BUILTIN_LVX_V1TI);
16773 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16774 ALTIVEC_BUILTIN_LVX_V2DF);
16775 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16776 ALTIVEC_BUILTIN_LVX_V2DI);
16777 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16778 ALTIVEC_BUILTIN_LVX_V4SF);
16779 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16780 ALTIVEC_BUILTIN_LVX_V4SI);
16781 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16782 ALTIVEC_BUILTIN_LVX_V8HI);
16783 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16784 ALTIVEC_BUILTIN_LVX_V16QI);
16785 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16786 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16787 ALTIVEC_BUILTIN_STVX_V2DF);
16788 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16789 ALTIVEC_BUILTIN_STVX_V2DI);
16790 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16791 ALTIVEC_BUILTIN_STVX_V4SF);
16792 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16793 ALTIVEC_BUILTIN_STVX_V4SI);
16794 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16795 ALTIVEC_BUILTIN_STVX_V8HI);
16796 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16797 ALTIVEC_BUILTIN_STVX_V16QI);
16798 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16799 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16800 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16801 ALTIVEC_BUILTIN_STVXL_V2DF);
16802 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16803 ALTIVEC_BUILTIN_STVXL_V2DI);
16804 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16805 ALTIVEC_BUILTIN_STVXL_V4SF);
16806 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16807 ALTIVEC_BUILTIN_STVXL_V4SI);
16808 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16809 ALTIVEC_BUILTIN_STVXL_V8HI);
16810 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16811 ALTIVEC_BUILTIN_STVXL_V16QI);
16812 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16813 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16814 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16815 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16816 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16817 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16818 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16819 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16820 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16821 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16822 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16823 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16824 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16825 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16826 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16827 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16828
16829 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16830 VSX_BUILTIN_LXVD2X_V2DF);
16831 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16832 VSX_BUILTIN_LXVD2X_V2DI);
16833 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16834 VSX_BUILTIN_LXVW4X_V4SF);
16835 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16836 VSX_BUILTIN_LXVW4X_V4SI);
16837 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16838 VSX_BUILTIN_LXVW4X_V8HI);
16839 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16840 VSX_BUILTIN_LXVW4X_V16QI);
16841 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16842 VSX_BUILTIN_STXVD2X_V2DF);
16843 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16844 VSX_BUILTIN_STXVD2X_V2DI);
16845 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16846 VSX_BUILTIN_STXVW4X_V4SF);
16847 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16848 VSX_BUILTIN_STXVW4X_V4SI);
16849 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16850 VSX_BUILTIN_STXVW4X_V8HI);
16851 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16852 VSX_BUILTIN_STXVW4X_V16QI);
16853
16854 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16855 VSX_BUILTIN_LD_ELEMREV_V2DF);
16856 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16857 VSX_BUILTIN_LD_ELEMREV_V2DI);
16858 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16859 VSX_BUILTIN_LD_ELEMREV_V4SF);
16860 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16861 VSX_BUILTIN_LD_ELEMREV_V4SI);
16862 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16863 VSX_BUILTIN_LD_ELEMREV_V8HI);
16864 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16865 VSX_BUILTIN_LD_ELEMREV_V16QI);
16866 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16867 VSX_BUILTIN_ST_ELEMREV_V2DF);
16868 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16869 VSX_BUILTIN_ST_ELEMREV_V1TI);
16870 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16871 VSX_BUILTIN_ST_ELEMREV_V2DI);
16872 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16873 VSX_BUILTIN_ST_ELEMREV_V4SF);
16874 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16875 VSX_BUILTIN_ST_ELEMREV_V4SI);
16876 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16877 VSX_BUILTIN_ST_ELEMREV_V8HI);
16878 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16879 VSX_BUILTIN_ST_ELEMREV_V16QI);
16880
16881 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16882 VSX_BUILTIN_VEC_LD);
16883 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16884 VSX_BUILTIN_VEC_ST);
16885 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16886 VSX_BUILTIN_VEC_XL);
16887 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16888 VSX_BUILTIN_VEC_XL_BE);
16889 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16890 VSX_BUILTIN_VEC_XST);
16891 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16892 VSX_BUILTIN_VEC_XST_BE);
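/* Usage sketch (illustrative, not from the original source): vec_xl and
   vec_xst are the overloaded unaligned vector load/store builtins, taking
   a long byte offset and a pointer; the *_be forms use big-endian element
   order regardless of the target's endianness.  For example:

     vector signed int v = __builtin_vec_xl (0, (signed int *) p);
     __builtin_vec_xst (v, 16, (signed int *) q);
*/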
16893
16894 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16895 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16896 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16897
16898 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16899 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16900 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16901 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16902 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16903 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16904 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16905 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16906 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16907 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16908 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16909 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16910
16911 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16912 ALTIVEC_BUILTIN_VEC_ADDE);
16913 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16914 ALTIVEC_BUILTIN_VEC_ADDEC);
16915 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16916 ALTIVEC_BUILTIN_VEC_CMPNE);
16917 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16918 ALTIVEC_BUILTIN_VEC_MUL);
16919 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16920 ALTIVEC_BUILTIN_VEC_SUBE);
16921 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16922 ALTIVEC_BUILTIN_VEC_SUBEC);
16923
16924 /* Cell builtins. */
16925 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16926 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16927 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16928 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16929
16930 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16931 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16932 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16933 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16934
16935 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16936 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16937 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16938 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16939
16940 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16941 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16942 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16943 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16944
16945 if (TARGET_P9_VECTOR)
16946 {
16947 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16948 P9V_BUILTIN_STXVL);
16949 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16950 P9V_BUILTIN_XST_LEN_R);
16951 }
16952
16953 /* Add the DST variants. */
16954 d = bdesc_dst;
16955 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16956 {
16957 HOST_WIDE_INT mask = d->mask;
16958
16959 /* It is expected that these dst built-in functions may have
16960 d->icode equal to CODE_FOR_nothing. */
16961 if ((mask & builtin_mask) != mask)
16962 {
16963 if (TARGET_DEBUG_BUILTIN)
16964 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16965 d->name);
16966 continue;
16967 }
16968 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16969 }
16970
16971 /* Initialize the predicates. */
16972 d = bdesc_altivec_preds;
16973 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16974 {
16975 machine_mode mode1;
16976 tree type;
16977 HOST_WIDE_INT mask = d->mask;
16978
16979 if ((mask & builtin_mask) != mask)
16980 {
16981 if (TARGET_DEBUG_BUILTIN)
16982 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16983 d->name);
16984 continue;
16985 }
16986
16987 if (rs6000_overloaded_builtin_p (d->code))
16988 mode1 = VOIDmode;
16989 else
16990 {
16991 /* Cannot define builtin if the instruction is disabled. */
16992 gcc_assert (d->icode != CODE_FOR_nothing);
16993 mode1 = insn_data[d->icode].operand[1].mode;
16994 }
16995
16996 switch (mode1)
16997 {
16998 case E_VOIDmode:
16999 type = int_ftype_int_opaque_opaque;
17000 break;
17001 case E_V2DImode:
17002 type = int_ftype_int_v2di_v2di;
17003 break;
17004 case E_V4SImode:
17005 type = int_ftype_int_v4si_v4si;
17006 break;
17007 case E_V8HImode:
17008 type = int_ftype_int_v8hi_v8hi;
17009 break;
17010 case E_V16QImode:
17011 type = int_ftype_int_v16qi_v16qi;
17012 break;
17013 case E_V4SFmode:
17014 type = int_ftype_int_v4sf_v4sf;
17015 break;
17016 case E_V2DFmode:
17017 type = int_ftype_int_v2df_v2df;
17018 break;
17019 default:
17020 gcc_unreachable ();
17021 }
17022
17023 def_builtin (d->name, type, d->code);
17024 }
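/* An illustrative trace of the loop above: the predicate
   __builtin_altivec_vcmpequw_p has operand[1] mode V4SImode, so it is
   registered with type int_ftype_int_v4si_v4si, roughly

     int __builtin_altivec_vcmpequw_p (int, vector signed int,
                                       vector signed int);

   where the leading int selects which CR6 condition to test (the
   __CR6_* constants from altivec.h, e.g. all-true vs. any-true).  */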
17025
17026 /* Initialize the abs* operators. */
17027 d = bdesc_abs;
17028 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17029 {
17030 machine_mode mode0;
17031 tree type;
17032 HOST_WIDE_INT mask = d->mask;
17033
17034 if ((mask & builtin_mask) != mask)
17035 {
17036 if (TARGET_DEBUG_BUILTIN)
17037 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17038 d->name);
17039 continue;
17040 }
17041
17042 /* Cannot define builtin if the instruction is disabled. */
17043 gcc_assert (d->icode != CODE_FOR_nothing);
17044 mode0 = insn_data[d->icode].operand[0].mode;
17045
17046 switch (mode0)
17047 {
17048 case E_V2DImode:
17049 type = v2di_ftype_v2di;
17050 break;
17051 case E_V4SImode:
17052 type = v4si_ftype_v4si;
17053 break;
17054 case E_V8HImode:
17055 type = v8hi_ftype_v8hi;
17056 break;
17057 case E_V16QImode:
17058 type = v16qi_ftype_v16qi;
17059 break;
17060 case E_V4SFmode:
17061 type = v4sf_ftype_v4sf;
17062 break;
17063 case E_V2DFmode:
17064 type = v2df_ftype_v2df;
17065 break;
17066 default:
17067 gcc_unreachable ();
17068 }
17069
17070 def_builtin (d->name, type, d->code);
17071 }
17072
17073 /* Initialize target builtin that implements
17074 targetm.vectorize.builtin_mask_for_load. */
17075
17076 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17077 v16qi_ftype_long_pcvoid,
17078 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17079 BUILT_IN_MD, NULL, NULL_TREE);
17080 TREE_READONLY (decl) = 1;
17081 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17082 altivec_builtin_mask_for_load = decl;
17083
17084 /* Access to the vec_init patterns. */
17085 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17086 integer_type_node, integer_type_node,
17087 integer_type_node, NULL_TREE);
17088 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17089
17090 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17091 short_integer_type_node,
17092 short_integer_type_node,
17093 short_integer_type_node,
17094 short_integer_type_node,
17095 short_integer_type_node,
17096 short_integer_type_node,
17097 short_integer_type_node, NULL_TREE);
17098 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17099
17100 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17101 char_type_node, char_type_node,
17102 char_type_node, char_type_node,
17103 char_type_node, char_type_node,
17104 char_type_node, char_type_node,
17105 char_type_node, char_type_node,
17106 char_type_node, char_type_node,
17107 char_type_node, char_type_node,
17108 char_type_node, NULL_TREE);
17109 def_builtin ("__builtin_vec_init_v16qi", ftype,
17110 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17111
17112 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17113 float_type_node, float_type_node,
17114 float_type_node, NULL_TREE);
17115 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17116
17117 /* VSX builtins. */
17118 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17119 double_type_node, NULL_TREE);
17120 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17121
17122 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17123 intDI_type_node, NULL_TREE);
17124 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17125
17126 /* Access to the vec_set patterns. */
17127 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17128 intSI_type_node,
17129 integer_type_node, NULL_TREE);
17130 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17131
17132 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17133 intHI_type_node,
17134 integer_type_node, NULL_TREE);
17135 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17136
17137 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17138 intQI_type_node,
17139 integer_type_node, NULL_TREE);
17140 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17141
17142 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17143 float_type_node,
17144 integer_type_node, NULL_TREE);
17145 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17146
17147 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17148 double_type_node,
17149 integer_type_node, NULL_TREE);
17150 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17151
17152 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17153 intDI_type_node,
17154 integer_type_node, NULL_TREE);
17155 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17156
17157 /* Access to the vec_extract patterns. */
17158 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17159 integer_type_node, NULL_TREE);
17160 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17161
17162 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17163 integer_type_node, NULL_TREE);
17164 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17165
17166 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17167 integer_type_node, NULL_TREE);
17168 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17169
17170 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17171 integer_type_node, NULL_TREE);
17172 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17173
17174 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17175 integer_type_node, NULL_TREE);
17176 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17177
17178 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17179 integer_type_node, NULL_TREE);
17180 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17181
17182
17183 if (V1TI_type_node)
17184 {
17185 tree v1ti_ftype_long_pcvoid
17186 = build_function_type_list (V1TI_type_node,
17187 long_integer_type_node, pcvoid_type_node,
17188 NULL_TREE);
17189 tree void_ftype_v1ti_long_pvoid
17190 = build_function_type_list (void_type_node,
17191 V1TI_type_node, long_integer_type_node,
17192 pvoid_type_node, NULL_TREE);
17193 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17194 VSX_BUILTIN_LD_ELEMREV_V1TI);
17195 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17196 VSX_BUILTIN_LXVD2X_V1TI);
17197 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17198 VSX_BUILTIN_STXVD2X_V1TI);
17199 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17200 NULL_TREE);
17201 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17202 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17203 intTI_type_node,
17204 integer_type_node, NULL_TREE);
17205 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17206 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17207 integer_type_node, NULL_TREE);
17208 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17209 }
17210
17211 }
17212
17213 static void
17214 htm_init_builtins (void)
17215 {
17216 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17217 const struct builtin_description *d;
17218 size_t i;
17219
17220 d = bdesc_htm;
17221 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17222 {
17223 tree op[MAX_HTM_OPERANDS], type;
17224 HOST_WIDE_INT mask = d->mask;
17225 unsigned attr = rs6000_builtin_info[d->code].attr;
17226 bool void_func = (attr & RS6000_BTC_VOID);
17227 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17228 int nopnds = 0;
17229 tree gpr_type_node;
17230 tree rettype;
17231 tree argtype;
17232
17233 /* It is expected that these htm built-in functions may have
17234 d->icode equal to CODE_FOR_nothing. */
17235
17236 if (TARGET_32BIT && TARGET_POWERPC64)
17237 gpr_type_node = long_long_unsigned_type_node;
17238 else
17239 gpr_type_node = long_unsigned_type_node;
17240
17241 if (attr & RS6000_BTC_SPR)
17242 {
17243 rettype = gpr_type_node;
17244 argtype = gpr_type_node;
17245 }
17246 else if (d->code == HTM_BUILTIN_TABORTDC
17247 || d->code == HTM_BUILTIN_TABORTDCI)
17248 {
17249 rettype = unsigned_type_node;
17250 argtype = gpr_type_node;
17251 }
17252 else
17253 {
17254 rettype = unsigned_type_node;
17255 argtype = unsigned_type_node;
17256 }
17257
17258 if ((mask & builtin_mask) != mask)
17259 {
17260 if (TARGET_DEBUG_BUILTIN)
17261 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17262 continue;
17263 }
17264
17265 if (d->name == 0)
17266 {
17267 if (TARGET_DEBUG_BUILTIN)
17268 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17269 (long unsigned) i);
17270 continue;
17271 }
17272
17273 op[nopnds++] = (void_func) ? void_type_node : rettype;
17274
17275 if (attr_args == RS6000_BTC_UNARY)
17276 op[nopnds++] = argtype;
17277 else if (attr_args == RS6000_BTC_BINARY)
17278 {
17279 op[nopnds++] = argtype;
17280 op[nopnds++] = argtype;
17281 }
17282 else if (attr_args == RS6000_BTC_TERNARY)
17283 {
17284 op[nopnds++] = argtype;
17285 op[nopnds++] = argtype;
17286 op[nopnds++] = argtype;
17287 }
17288
17289 switch (nopnds)
17290 {
17291 case 1:
17292 type = build_function_type_list (op[0], NULL_TREE);
17293 break;
17294 case 2:
17295 type = build_function_type_list (op[0], op[1], NULL_TREE);
17296 break;
17297 case 3:
17298 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17299 break;
17300 case 4:
17301 type = build_function_type_list (op[0], op[1], op[2], op[3],
17302 NULL_TREE);
17303 break;
17304 default:
17305 gcc_unreachable ();
17306 }
17307
17308 def_builtin (d->name, type, d->code);
17309 }
17310 }
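/* Hand-worked example of the classification above (illustrative): tbegin
   is an RS6000_BTC_UNARY builtin whose return and argument types fall in
   the plain unsigned case, so the loop builds

     unsigned int __builtin_tbegin (unsigned int);

   while the RS6000_BTC_SPR accessors instead use gpr_type_node, i.e. a
   64-bit unsigned type whenever the GPRs are 64 bits wide.  */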
17311
17312 /* Hash function for builtin functions with up to 3 arguments and a return
17313 type. */
17314 hashval_t
17315 builtin_hasher::hash (builtin_hash_struct *bh)
17316 {
17317 unsigned ret = 0;
17318 int i;
17319
17320 for (i = 0; i < 4; i++)
17321 {
17322 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17323 ret = (ret * 2) + bh->uns_p[i];
17324 }
17325
17326 return ret;
17327 }
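/* In effect each (mode, uns_p) pair above is one digit of a mixed-radix
   number: ret is scaled by MAX_MACHINE_MODE * 2 per pair, so any two
   signatures that differ in a mode or a signedness flag hash differently
   (up to wrap-around of the unsigned accumulator).  */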
17328
17329 /* Compare builtin hash entries P1 and P2 for equivalence. */
17330 bool
17331 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17332 {
17333 return ((p1->mode[0] == p2->mode[0])
17334 && (p1->mode[1] == p2->mode[1])
17335 && (p1->mode[2] == p2->mode[2])
17336 && (p1->mode[3] == p2->mode[3])
17337 && (p1->uns_p[0] == p2->uns_p[0])
17338 && (p1->uns_p[1] == p2->uns_p[1])
17339 && (p1->uns_p[2] == p2->uns_p[2])
17340 && (p1->uns_p[3] == p2->uns_p[3]));
17341 }
17342
17343 /* Map types for builtin functions with an explicit return type and up to 3
17344 arguments. Functions with fewer than 3 arguments pass VOIDmode as the mode
17345 of the unused arguments. */
17346 static tree
17347 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17348 machine_mode mode_arg1, machine_mode mode_arg2,
17349 enum rs6000_builtins builtin, const char *name)
17350 {
17351 struct builtin_hash_struct h;
17352 struct builtin_hash_struct *h2;
17353 int num_args = 3;
17354 int i;
17355 tree ret_type = NULL_TREE;
17356 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17357
17358 /* Create builtin_hash_table. */
17359 if (builtin_hash_table == NULL)
17360 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17361
17362 h.type = NULL_TREE;
17363 h.mode[0] = mode_ret;
17364 h.mode[1] = mode_arg0;
17365 h.mode[2] = mode_arg1;
17366 h.mode[3] = mode_arg2;
17367 h.uns_p[0] = 0;
17368 h.uns_p[1] = 0;
17369 h.uns_p[2] = 0;
17370 h.uns_p[3] = 0;
17371
17372 /* If the builtin produces unsigned results or takes unsigned arguments,
17373 and its decl is handed to the vectorizer (such as the widening
17374 multiplies and permutes), make sure the arguments and return value
17375 are typed correctly. */
17376 switch (builtin)
17377 {
17378 /* unsigned 1 argument functions. */
17379 case CRYPTO_BUILTIN_VSBOX:
17380 case P8V_BUILTIN_VGBBD:
17381 case MISC_BUILTIN_CDTBCD:
17382 case MISC_BUILTIN_CBCDTD:
17383 h.uns_p[0] = 1;
17384 h.uns_p[1] = 1;
17385 break;
17386
17387 /* unsigned 2 argument functions. */
17388 case ALTIVEC_BUILTIN_VMULEUB:
17389 case ALTIVEC_BUILTIN_VMULEUH:
17390 case P8V_BUILTIN_VMULEUW:
17391 case ALTIVEC_BUILTIN_VMULOUB:
17392 case ALTIVEC_BUILTIN_VMULOUH:
17393 case P8V_BUILTIN_VMULOUW:
17394 case CRYPTO_BUILTIN_VCIPHER:
17395 case CRYPTO_BUILTIN_VCIPHERLAST:
17396 case CRYPTO_BUILTIN_VNCIPHER:
17397 case CRYPTO_BUILTIN_VNCIPHERLAST:
17398 case CRYPTO_BUILTIN_VPMSUMB:
17399 case CRYPTO_BUILTIN_VPMSUMH:
17400 case CRYPTO_BUILTIN_VPMSUMW:
17401 case CRYPTO_BUILTIN_VPMSUMD:
17402 case CRYPTO_BUILTIN_VPMSUM:
17403 case MISC_BUILTIN_ADDG6S:
17404 case MISC_BUILTIN_DIVWEU:
17405 case MISC_BUILTIN_DIVDEU:
17406 case VSX_BUILTIN_UDIV_V2DI:
17407 case ALTIVEC_BUILTIN_VMAXUB:
17408 case ALTIVEC_BUILTIN_VMINUB:
17409 case ALTIVEC_BUILTIN_VMAXUH:
17410 case ALTIVEC_BUILTIN_VMINUH:
17411 case ALTIVEC_BUILTIN_VMAXUW:
17412 case ALTIVEC_BUILTIN_VMINUW:
17413 case P8V_BUILTIN_VMAXUD:
17414 case P8V_BUILTIN_VMINUD:
17415 h.uns_p[0] = 1;
17416 h.uns_p[1] = 1;
17417 h.uns_p[2] = 1;
17418 break;
17419
17420 /* unsigned 3 argument functions. */
17421 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17422 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17423 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17424 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17425 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17426 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17427 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17428 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17429 case VSX_BUILTIN_VPERM_16QI_UNS:
17430 case VSX_BUILTIN_VPERM_8HI_UNS:
17431 case VSX_BUILTIN_VPERM_4SI_UNS:
17432 case VSX_BUILTIN_VPERM_2DI_UNS:
17433 case VSX_BUILTIN_XXSEL_16QI_UNS:
17434 case VSX_BUILTIN_XXSEL_8HI_UNS:
17435 case VSX_BUILTIN_XXSEL_4SI_UNS:
17436 case VSX_BUILTIN_XXSEL_2DI_UNS:
17437 case CRYPTO_BUILTIN_VPERMXOR:
17438 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17439 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17440 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17441 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17442 case CRYPTO_BUILTIN_VSHASIGMAW:
17443 case CRYPTO_BUILTIN_VSHASIGMAD:
17444 case CRYPTO_BUILTIN_VSHASIGMA:
17445 h.uns_p[0] = 1;
17446 h.uns_p[1] = 1;
17447 h.uns_p[2] = 1;
17448 h.uns_p[3] = 1;
17449 break;
17450
17451 /* signed permute functions with unsigned char mask. */
17452 case ALTIVEC_BUILTIN_VPERM_16QI:
17453 case ALTIVEC_BUILTIN_VPERM_8HI:
17454 case ALTIVEC_BUILTIN_VPERM_4SI:
17455 case ALTIVEC_BUILTIN_VPERM_4SF:
17456 case ALTIVEC_BUILTIN_VPERM_2DI:
17457 case ALTIVEC_BUILTIN_VPERM_2DF:
17458 case VSX_BUILTIN_VPERM_16QI:
17459 case VSX_BUILTIN_VPERM_8HI:
17460 case VSX_BUILTIN_VPERM_4SI:
17461 case VSX_BUILTIN_VPERM_4SF:
17462 case VSX_BUILTIN_VPERM_2DI:
17463 case VSX_BUILTIN_VPERM_2DF:
17464 h.uns_p[3] = 1;
17465 break;
17466
17467 /* unsigned args, signed return. */
17468 case VSX_BUILTIN_XVCVUXDSP:
17469 case VSX_BUILTIN_XVCVUXDDP_UNS:
17470 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17471 h.uns_p[1] = 1;
17472 break;
17473
17474 /* signed args, unsigned return. */
17475 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17476 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17477 case MISC_BUILTIN_UNPACK_TD:
17478 case MISC_BUILTIN_UNPACK_V1TI:
17479 h.uns_p[0] = 1;
17480 break;
17481
17482 /* unsigned arguments, bool return (compares). */
17483 case ALTIVEC_BUILTIN_VCMPEQUB:
17484 case ALTIVEC_BUILTIN_VCMPEQUH:
17485 case ALTIVEC_BUILTIN_VCMPEQUW:
17486 case P8V_BUILTIN_VCMPEQUD:
17487 case VSX_BUILTIN_CMPGE_U16QI:
17488 case VSX_BUILTIN_CMPGE_U8HI:
17489 case VSX_BUILTIN_CMPGE_U4SI:
17490 case VSX_BUILTIN_CMPGE_U2DI:
17491 case ALTIVEC_BUILTIN_VCMPGTUB:
17492 case ALTIVEC_BUILTIN_VCMPGTUH:
17493 case ALTIVEC_BUILTIN_VCMPGTUW:
17494 case P8V_BUILTIN_VCMPGTUD:
17495 h.uns_p[1] = 1;
17496 h.uns_p[2] = 1;
17497 break;
17498
17499 /* unsigned arguments for 128-bit pack instructions. */
17500 case MISC_BUILTIN_PACK_TD:
17501 case MISC_BUILTIN_PACK_V1TI:
17502 h.uns_p[1] = 1;
17503 h.uns_p[2] = 1;
17504 break;
17505
17506 /* unsigned second arguments (vector shift right). */
17507 case ALTIVEC_BUILTIN_VSRB:
17508 case ALTIVEC_BUILTIN_VSRH:
17509 case ALTIVEC_BUILTIN_VSRW:
17510 case P8V_BUILTIN_VSRD:
17511 h.uns_p[2] = 1;
17512 break;
17513
17514 default:
17515 break;
17516 }
17517
17518 /* Figure out how many args are present. */
17519 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17520 num_args--;
17521
17522 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17523 if (!ret_type && h.uns_p[0])
17524 ret_type = builtin_mode_to_type[h.mode[0]][0];
17525
17526 if (!ret_type)
17527 fatal_error (input_location,
17528 "internal error: builtin function %qs had an unexpected "
17529 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17530
17531 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17532 arg_type[i] = NULL_TREE;
17533
17534 for (i = 0; i < num_args; i++)
17535 {
17536 int m = (int) h.mode[i+1];
17537 int uns_p = h.uns_p[i+1];
17538
17539 arg_type[i] = builtin_mode_to_type[m][uns_p];
17540 if (!arg_type[i] && uns_p)
17541 arg_type[i] = builtin_mode_to_type[m][0];
17542
17543 if (!arg_type[i])
17544 fatal_error (input_location,
17545 "internal error: builtin function %qs, argument %d "
17546 "had unexpected argument type %qs", name, i,
17547 GET_MODE_NAME (m));
17548 }
17549
17550 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17551 if (*found == NULL)
17552 {
17553 h2 = ggc_alloc<builtin_hash_struct> ();
17554 *h2 = h;
17555 *found = h2;
17556
17557 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17558 arg_type[2], NULL_TREE);
17559 }
17560
17561 return (*found)->type;
17562 }
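/* A hand-traced example of the function above (illustrative): vmuleub
   multiplies even unsigned bytes into unsigned halfwords, so
   ALTIVEC_BUILTIN_VMULEUB is listed as an "unsigned 2 argument" builtin.
   A call such as

     builtin_function_type (V8HImode, V16QImode, V16QImode, VOIDmode,
                            ALTIVEC_BUILTIN_VMULEUB,
                            "__builtin_altivec_vmuleub");

   sets uns_p[0..2], picks the unsigned entries of builtin_mode_to_type,
   and memoizes the resulting "vector unsigned short (vector unsigned char,
   vector unsigned char)" FUNCTION_TYPE in builtin_hash_table, so later
   builtins with the same signature share one tree.  */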
17563
17564 static void
17565 rs6000_common_init_builtins (void)
17566 {
17567 const struct builtin_description *d;
17568 size_t i;
17569
17570 tree opaque_ftype_opaque = NULL_TREE;
17571 tree opaque_ftype_opaque_opaque = NULL_TREE;
17572 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17573 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17574
17575 /* Create Altivec and VSX builtins on machines with at least the
17576 general purpose extensions (970 and newer) to allow the use of
17577 the target attribute. */
17578
17579 if (TARGET_EXTRA_BUILTINS)
17580 builtin_mask |= RS6000_BTM_COMMON;
17581
17582 /* Add the ternary operators. */
17583 d = bdesc_3arg;
17584 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17585 {
17586 tree type;
17587 HOST_WIDE_INT mask = d->mask;
17588
17589 if ((mask & builtin_mask) != mask)
17590 {
17591 if (TARGET_DEBUG_BUILTIN)
17592 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17593 continue;
17594 }
17595
17596 if (rs6000_overloaded_builtin_p (d->code))
17597 {
17598 if (! (type = opaque_ftype_opaque_opaque_opaque))
17599 type = opaque_ftype_opaque_opaque_opaque
17600 = build_function_type_list (opaque_V4SI_type_node,
17601 opaque_V4SI_type_node,
17602 opaque_V4SI_type_node,
17603 opaque_V4SI_type_node,
17604 NULL_TREE);
17605 }
17606 else
17607 {
17608 enum insn_code icode = d->icode;
17609 if (d->name == 0)
17610 {
17611 if (TARGET_DEBUG_BUILTIN)
17612 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17613 (long unsigned)i);
17614
17615 continue;
17616 }
17617
17618 if (icode == CODE_FOR_nothing)
17619 {
17620 if (TARGET_DEBUG_BUILTIN)
17621 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17622 d->name);
17623
17624 continue;
17625 }
17626
17627 type = builtin_function_type (insn_data[icode].operand[0].mode,
17628 insn_data[icode].operand[1].mode,
17629 insn_data[icode].operand[2].mode,
17630 insn_data[icode].operand[3].mode,
17631 d->code, d->name);
17632 }
17633
17634 def_builtin (d->name, type, d->code);
17635 }
17636
17637 /* Add the binary operators. */
17638 d = bdesc_2arg;
17639 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17640 {
17641 machine_mode mode0, mode1, mode2;
17642 tree type;
17643 HOST_WIDE_INT mask = d->mask;
17644
17645 if ((mask & builtin_mask) != mask)
17646 {
17647 if (TARGET_DEBUG_BUILTIN)
17648 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17649 continue;
17650 }
17651
17652 if (rs6000_overloaded_builtin_p (d->code))
17653 {
17654 if (! (type = opaque_ftype_opaque_opaque))
17655 type = opaque_ftype_opaque_opaque
17656 = build_function_type_list (opaque_V4SI_type_node,
17657 opaque_V4SI_type_node,
17658 opaque_V4SI_type_node,
17659 NULL_TREE);
17660 }
17661 else
17662 {
17663 enum insn_code icode = d->icode;
17664 if (d->name == 0)
17665 {
17666 if (TARGET_DEBUG_BUILTIN)
17667 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17668 (long unsigned)i);
17669
17670 continue;
17671 }
17672
17673 if (icode == CODE_FOR_nothing)
17674 {
17675 if (TARGET_DEBUG_BUILTIN)
17676 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17677 d->name);
17678
17679 continue;
17680 }
17681
17682 mode0 = insn_data[icode].operand[0].mode;
17683 mode1 = insn_data[icode].operand[1].mode;
17684 mode2 = insn_data[icode].operand[2].mode;
17685
17686 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17687 d->code, d->name);
17688 }
17689
17690 def_builtin (d->name, type, d->code);
17691 }
17692
17693 /* Add the simple unary operators. */
17694 d = bdesc_1arg;
17695 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17696 {
17697 machine_mode mode0, mode1;
17698 tree type;
17699 HOST_WIDE_INT mask = d->mask;
17700
17701 if ((mask & builtin_mask) != mask)
17702 {
17703 if (TARGET_DEBUG_BUILTIN)
17704 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17705 continue;
17706 }
17707
17708 if (rs6000_overloaded_builtin_p (d->code))
17709 {
17710 if (! (type = opaque_ftype_opaque))
17711 type = opaque_ftype_opaque
17712 = build_function_type_list (opaque_V4SI_type_node,
17713 opaque_V4SI_type_node,
17714 NULL_TREE);
17715 }
17716 else
17717 {
17718 enum insn_code icode = d->icode;
17719 if (d->name == 0)
17720 {
17721 if (TARGET_DEBUG_BUILTIN)
17722 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17723 (long unsigned)i);
17724
17725 continue;
17726 }
17727
17728 if (icode == CODE_FOR_nothing)
17729 {
17730 if (TARGET_DEBUG_BUILTIN)
17731 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17732 d->name);
17733
17734 continue;
17735 }
17736
17737 mode0 = insn_data[icode].operand[0].mode;
17738 mode1 = insn_data[icode].operand[1].mode;
17739
17740 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17741 d->code, d->name);
17742 }
17743
17744 def_builtin (d->name, type, d->code);
17745 }
17746
17747 /* Add the simple no-argument operators. */
17748 d = bdesc_0arg;
17749 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17750 {
17751 machine_mode mode0;
17752 tree type;
17753 HOST_WIDE_INT mask = d->mask;
17754
17755 if ((mask & builtin_mask) != mask)
17756 {
17757 if (TARGET_DEBUG_BUILTIN)
17758 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17759 continue;
17760 }
17761 if (rs6000_overloaded_builtin_p (d->code))
17762 {
17763 if (!opaque_ftype_opaque)
17764 opaque_ftype_opaque
17765 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17766 type = opaque_ftype_opaque;
17767 }
17768 else
17769 {
17770 enum insn_code icode = d->icode;
17771 if (d->name == 0)
17772 {
17773 if (TARGET_DEBUG_BUILTIN)
17774 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17775 (long unsigned) i);
17776 continue;
17777 }
17778 if (icode == CODE_FOR_nothing)
17779 {
17780 if (TARGET_DEBUG_BUILTIN)
17781 fprintf (stderr,
17782 "rs6000_builtin, skip no-argument %s (no code)\n",
17783 d->name);
17784 continue;
17785 }
17786 mode0 = insn_data[icode].operand[0].mode;
17787 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17788 d->code, d->name);
17789 }
17790 def_builtin (d->name, type, d->code);
17791 }
17792 }
17793
17794 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17795 static void
17796 init_float128_ibm (machine_mode mode)
17797 {
17798 if (!TARGET_XL_COMPAT)
17799 {
17800 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17801 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17802 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17803 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17804
17805 if (!TARGET_HARD_FLOAT)
17806 {
17807 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17808 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17809 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17810 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17811 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17812 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17813 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17814 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17815
17816 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17817 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17818 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17819 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17820 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17821 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17822 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17823 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17824 }
17825 }
17826 else
17827 {
17828 set_optab_libfunc (add_optab, mode, "_xlqadd");
17829 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17830 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17831 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17832 }
17833
17834 /* Add various conversions for IFmode to use the traditional TFmode
17835 names. */
17836 if (mode == IFmode)
17837 {
17838 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
17839 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
17840 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
17841 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
17842 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
17843 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
17844
17845 if (TARGET_POWERPC64)
17846 {
17847 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17848 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17849 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17850 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17851 }
17852 }
17853 }
17854
17855 /* Create a decl for either complex long double multiply or complex long
17856 double divide when long double is IEEE 128-bit floating point. We can't
17857 use __multc3 and __divtc3 because those names were already taken by the
17858 original long double format (IBM extended double). The complex
17859 multiply/divide functions are encoded as builtins with a complex result and 4 scalar inputs. */
17860
17861 static void
17862 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17863 {
17864 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17865 name, NULL_TREE);
17866
17867 set_builtin_decl (fncode, fndecl, true);
17868
17869 if (TARGET_DEBUG_BUILTIN)
17870 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17873 }
17874
17875 /* Set up IEEE 128-bit floating point routines. Use different names if the
17876 arguments can be passed in a vector register. The historical PowerPC
17877 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17878 continue to use that if we aren't using vector registers to pass IEEE
17879 128-bit floating point. */
17880
17881 static void
17882 init_float128_ieee (machine_mode mode)
17883 {
17884 if (FLOAT128_VECTOR_P (mode))
17885 {
17886 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. */
17887 if (mode == TFmode && TARGET_IEEEQUAD)
17888 {
17889 built_in_function fncode_mul =
17890 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17891 - MIN_MODE_COMPLEX_FLOAT);
17892 built_in_function fncode_div =
17893 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17894 - MIN_MODE_COMPLEX_FLOAT);
17895
17896 tree fntype = build_function_type_list (complex_long_double_type_node,
17897 long_double_type_node,
17898 long_double_type_node,
17899 long_double_type_node,
17900 long_double_type_node,
17901 NULL_TREE);
17902
17903 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17904 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17905 }
17906
17907 set_optab_libfunc (add_optab, mode, "__addkf3");
17908 set_optab_libfunc (sub_optab, mode, "__subkf3");
17909 set_optab_libfunc (neg_optab, mode, "__negkf2");
17910 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17911 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17912 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17913 set_optab_libfunc (abs_optab, mode, "__abskf2");
17914 set_optab_libfunc (powi_optab, mode, "__powikf2");
17915
17916 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17917 set_optab_libfunc (ne_optab, mode, "__nekf2");
17918 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17919 set_optab_libfunc (ge_optab, mode, "__gekf2");
17920 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17921 set_optab_libfunc (le_optab, mode, "__lekf2");
17922 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17923
17924 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17925 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17926 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17927 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17928
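/* Note on the names below (my reading, not text from the original): IBM
   extended double (IFmode) and IEEE 128-bit (KFmode) are different
   encodings, neither a subset of the other, so the IF<->KF conversions
   are registered under the extend/truncate optabs while keeping the
   historical tf-based libfunc names.  */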
17929 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17930 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17931 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17932
17933 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17934 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17935 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17936
17937 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
17938 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
17939 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
17940 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
17941 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
17942 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
17943
17944 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17945 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17946 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17947 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17948
17949 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17950 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17951 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17952 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17953
17954 if (TARGET_POWERPC64)
17955 {
17956 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17957 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17958 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17959 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17960 }
17961 }
17962
17963 else
17964 {
17965 set_optab_libfunc (add_optab, mode, "_q_add");
17966 set_optab_libfunc (sub_optab, mode, "_q_sub");
17967 set_optab_libfunc (neg_optab, mode, "_q_neg");
17968 set_optab_libfunc (smul_optab, mode, "_q_mul");
17969 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17970 if (TARGET_PPC_GPOPT)
17971 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17972
17973 set_optab_libfunc (eq_optab, mode, "_q_feq");
17974 set_optab_libfunc (ne_optab, mode, "_q_fne");
17975 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17976 set_optab_libfunc (ge_optab, mode, "_q_fge");
17977 set_optab_libfunc (lt_optab, mode, "_q_flt");
17978 set_optab_libfunc (le_optab, mode, "_q_fle");
17979
17980 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17981 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17982 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17983 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17984 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17985 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17986 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17987 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17988 }
17989 }
17990
17991 static void
17992 rs6000_init_libfuncs (void)
17993 {
17994 /* __float128 support. */
17995 if (TARGET_FLOAT128_TYPE)
17996 {
17997 init_float128_ibm (IFmode);
17998 init_float128_ieee (KFmode);
17999 }
18000
18001 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18002 if (TARGET_LONG_DOUBLE_128)
18003 {
18004 if (!TARGET_IEEEQUAD)
18005 init_float128_ibm (TFmode);
18006
18007 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18008 else
18009 init_float128_ieee (TFmode);
18010 }
18011 }
18012
18013 /* Emit a potentially record-form instruction, setting DST from SRC.
18014 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18015 signed comparison of DST with zero. If DOT is 1, the generated RTL
18016 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18017 is CR0, do a single dot insn (as a PARALLEL); otherwise, do a SET and
18018 a separate COMPARE. */
18019
18020 void
18021 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18022 {
18023 if (dot == 0)
18024 {
18025 emit_move_insn (dst, src);
18026 return;
18027 }
18028
18029 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18030 {
18031 emit_move_insn (dst, src);
18032 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18033 return;
18034 }
18035
18036 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18037 if (dot == 1)
18038 {
18039 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18040 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18041 }
18042 else
18043 {
18044 rtx set = gen_rtx_SET (dst, src);
18045 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18046 }
18047 }
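/* To summarize the cases above, the emitted RTL looks roughly like:

     dot == 0:        (set DST SRC)
     CCREG not CR0:   (set DST SRC) then (set CCREG (compare DST 0))
     dot == 1:        (parallel [(set CCREG (compare SRC 0)) (clobber DST)])
     dot == 2:        (parallel [(set CCREG (compare SRC 0)) (set DST SRC)])
*/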
18048
18049 \f
18050 /* A validation routine: say whether CODE, a condition code, and MODE
18051 match. The other alternatives either don't make sense or should
18052 never be generated. */
18053
18054 void
18055 validate_condition_mode (enum rtx_code code, machine_mode mode)
18056 {
18057 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18058 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18059 && GET_MODE_CLASS (mode) == MODE_CC);
18060
18061 /* These don't make sense. */
18062 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18063 || mode != CCUNSmode);
18064
18065 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18066 || mode == CCUNSmode);
18067
18068 gcc_assert (mode == CCFPmode
18069 || (code != ORDERED && code != UNORDERED
18070 && code != UNEQ && code != LTGT
18071 && code != UNGT && code != UNLT
18072 && code != UNGE && code != UNLE));
18073
18074 /* These should never be generated except for
18075 flag_finite_math_only. */
18076 gcc_assert (mode != CCFPmode
18077 || flag_finite_math_only
18078 || (code != LE && code != GE
18079 && code != UNEQ && code != LTGT
18080 && code != UNGT && code != UNLT));
18081
18082 /* These are invalid; the information is not there. */
18083 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18084 }
18085
18086 \f
18087 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18088 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
18089 non-null, store there the bit offset (counted from the right) where
18090 the single stretch of 1 bits begins; similarly for B, the bit offset
18091 where it ends. */
18092
18093 bool
18094 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18095 {
18096 unsigned HOST_WIDE_INT val = INTVAL (mask);
18097 unsigned HOST_WIDE_INT bit;
18098 int nb, ne;
18099 int n = GET_MODE_PRECISION (mode);
18100
18101 if (mode != DImode && mode != SImode)
18102 return false;
18103
18104 if (INTVAL (mask) >= 0)
18105 {
18106 bit = val & -val;
18107 ne = exact_log2 (bit);
18108 nb = exact_log2 (val + bit);
18109 }
18110 else if (val + 1 == 0)
18111 {
18112 nb = n;
18113 ne = 0;
18114 }
18115 else if (val & 1)
18116 {
18117 val = ~val;
18118 bit = val & -val;
18119 nb = exact_log2 (bit);
18120 ne = exact_log2 (val + bit);
18121 }
18122 else
18123 {
18124 bit = val & -val;
18125 ne = exact_log2 (bit);
18126 if (val + bit == 0)
18127 nb = n;
18128 else
18129 nb = 0;
18130 }
18131
18132 nb--;
18133
18134 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18135 return false;
18136
18137 if (b)
18138 *b = nb;
18139 if (e)
18140 *e = ne;
18141
18142 return true;
18143 }
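/* Hand-worked examples of the code above:

     0x0000000000000ff0 takes the non-negative branch: bit == 0x10, so
       *e == 4 and (after the nb-- above) *b == 11; the ones run from
       bit 4 up to bit 11.

     0xff000000000000ff wraps, so the (val & 1) branch complements it to
       0x00ffffffffffff00, giving *b == 7 and *e == 56: the stretch
       begins at bit 56 and wraps around to end at bit 7, with *e > *b
       marking the wrap-around.  */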
18144
18145 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18146 or rldicr instruction, to implement an AND with it in mode MODE. */
18147
18148 bool
18149 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18150 {
18151 int nb, ne;
18152
18153 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18154 return false;
18155
18156 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18157 does not wrap. */
18158 if (mode == DImode)
18159 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18160
18161 /* For SImode, rlwinm can do everything. */
18162 if (mode == SImode)
18163 return (nb < 32 && ne < 32);
18164
18165 return false;
18166 }
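/* For instance (hand-checked against rs6000_is_valid_mask):
     DImode 0x00000000ffffffff: ne == 0            -> rldicl works.
     DImode 0xffffffff00000000: nb == 63           -> rldicr works.
     DImode 0x0000ffff00000000: nb == 47, ne == 32 -> none of the three
       apply, so this AND needs a multi-insn sequence (see
       rs6000_emit_2insn_and below).  */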
18167
18168 /* Return the instruction template for an AND with mask in mode MODE, with
18169 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18170
18171 const char *
18172 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18173 {
18174 int nb, ne;
18175
18176 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18177 gcc_unreachable ();
18178
18179 if (mode == DImode && ne == 0)
18180 {
18181 operands[3] = GEN_INT (63 - nb);
18182 if (dot)
18183 return "rldicl. %0,%1,0,%3";
18184 return "rldicl %0,%1,0,%3";
18185 }
18186
18187 if (mode == DImode && nb == 63)
18188 {
18189 operands[3] = GEN_INT (63 - ne);
18190 if (dot)
18191 return "rldicr. %0,%1,0,%3";
18192 return "rldicr %0,%1,0,%3";
18193 }
18194
18195 if (nb < 32 && ne < 32)
18196 {
18197 operands[3] = GEN_INT (31 - nb);
18198 operands[4] = GEN_INT (31 - ne);
18199 if (dot)
18200 return "rlwinm. %0,%1,0,%3,%4";
18201 return "rlwinm %0,%1,0,%3,%4";
18202 }
18203
18204 gcc_unreachable ();
18205 }
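/* Example outputs (following the arithmetic above): for DImode with
   operands[2] == 0x00000000ffffffff this returns "rldicl %0,%1,0,32"
   (63 - nb == 32), and for a mask with nb == 15, ne == 8 it returns
   "rlwinm %0,%1,0,16,23" (31 - nb == 16, 31 - ne == 23).  */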
18206
18207 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18208 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18209 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18210
18211 bool
18212 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18213 {
18214 int nb, ne;
18215
18216 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18217 return false;
18218
18219 int n = GET_MODE_PRECISION (mode);
18220 int sh = -1;
18221
18222 if (CONST_INT_P (XEXP (shift, 1)))
18223 {
18224 sh = INTVAL (XEXP (shift, 1));
18225 if (sh < 0 || sh >= n)
18226 return false;
18227 }
18228
18229 rtx_code code = GET_CODE (shift);
18230
18231 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18232 if (sh == 0)
18233 code = ROTATE;
18234
18235 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18236 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18237 code = ASHIFT;
18238 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18239 {
18240 code = LSHIFTRT;
18241 sh = n - sh;
18242 }
18243
18244 /* DImode rotates need rld*. */
18245 if (mode == DImode && code == ROTATE)
18246 return (nb == 63 || ne == 0 || ne == sh);
18247
18248 /* SImode rotates need rlw*. */
18249 if (mode == SImode && code == ROTATE)
18250 return (nb < 32 && ne < 32 && sh < 32);
18251
18252 /* Wrap-around masks are only okay for rotates. */
18253 if (ne > nb)
18254 return false;
18255
18256 /* Variable shifts are only okay for rotates. */
18257 if (sh < 0)
18258 return false;
18259
18260 /* Don't allow ASHIFT if the mask is wrong for that. */
18261 if (code == ASHIFT && ne < sh)
18262 return false;
18263
18264 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18265 if the mask is wrong for that. */
18266 if (nb < 32 && ne < 32 && sh < 32
18267 && !(code == LSHIFTRT && nb >= 32 - sh))
18268 return true;
18269
18270 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18271 if the mask is wrong for that. */
18272 if (code == LSHIFTRT)
18273 sh = 64 - sh;
18274 if (nb == 63 || ne == 0 || ne == sh)
18275 return !(code == LSHIFTRT && nb >= sh);
18276
18277 return false;
18278 }
18279
18280 /* Return the instruction template for a shift with mask in mode MODE, with
18281 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18282
18283 const char *
18284 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18285 {
18286 int nb, ne;
18287
18288 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18289 gcc_unreachable ();
18290
18291 if (mode == DImode && ne == 0)
18292 {
18293 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18294 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18295 operands[3] = GEN_INT (63 - nb);
18296 if (dot)
18297 return "rld%I2cl. %0,%1,%2,%3";
18298 return "rld%I2cl %0,%1,%2,%3";
18299 }
18300
18301 if (mode == DImode && nb == 63)
18302 {
18303 operands[3] = GEN_INT (63 - ne);
18304 if (dot)
18305 return "rld%I2cr. %0,%1,%2,%3";
18306 return "rld%I2cr %0,%1,%2,%3";
18307 }
18308
18309 if (mode == DImode
18310 && GET_CODE (operands[4]) != LSHIFTRT
18311 && CONST_INT_P (operands[2])
18312 && ne == INTVAL (operands[2]))
18313 {
18314 operands[3] = GEN_INT (63 - nb);
18315 if (dot)
18316 return "rld%I2c. %0,%1,%2,%3";
18317 return "rld%I2c %0,%1,%2,%3";
18318 }
18319
18320 if (nb < 32 && ne < 32)
18321 {
18322 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18323 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18324 operands[3] = GEN_INT (31 - nb);
18325 operands[4] = GEN_INT (31 - ne);
18326 /* This insn can also be a 64-bit rotate with a mask that really makes
18327 it just a shift right (with mask); the %h below adjusts for that
18328 situation (the shift count is >= 32 in that case). */
18329 if (dot)
18330 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18331 return "rlw%I2nm %0,%1,%h2,%3,%4";
18332 }
18333
18334 gcc_unreachable ();
18335 }
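/* A concrete case, worked by hand: DImode (x << 5) & 0xffffffffffffffe0
   has ne == 5 and nb == 63, so the second branch above fires and the
   template becomes "rldicr %0,%1,5,58" (63 - ne == 58): rotate left 5
   and keep bits 0..58 in IBM bit numbering, i.e. clear the low 5 bits.  */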
18336
18337 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18338 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18339 ASHIFT, or LSHIFTRT) in mode MODE. */
18340
18341 bool
18342 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18343 {
18344 int nb, ne;
18345
18346 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18347 return false;
18348
18349 int n = GET_MODE_PRECISION (mode);
18350
18351 int sh = INTVAL (XEXP (shift, 1));
18352 if (sh < 0 || sh >= n)
18353 return false;
18354
18355 rtx_code code = GET_CODE (shift);
18356
18357 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18358 if (sh == 0)
18359 code = ROTATE;
18360
18361 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18362 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18363 code = ASHIFT;
18364 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18365 {
18366 code = LSHIFTRT;
18367 sh = n - sh;
18368 }
18369
18370 /* DImode rotates need rldimi. */
18371 if (mode == DImode && code == ROTATE)
18372 return (ne == sh);
18373
18374 /* SImode rotates need rlwimi. */
18375 if (mode == SImode && code == ROTATE)
18376 return (nb < 32 && ne < 32 && sh < 32);
18377
18378 /* Wrap-around masks are only okay for rotates. */
18379 if (ne > nb)
18380 return false;
18381
18382 /* Don't allow ASHIFT if the mask is wrong for that. */
18383 if (code == ASHIFT && ne < sh)
18384 return false;
18385
18386 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18387 if the mask is wrong for that. */
18388 if (nb < 32 && ne < 32 && sh < 32
18389 && !(code == LSHIFTRT && nb >= 32 - sh))
18390 return true;
18391
18392 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18393 if the mask is wrong for that. */
18394 if (code == LSHIFTRT)
18395 sh = 64 - sh;
18396 if (ne == sh)
18397 return !(code == LSHIFTRT && nb >= sh);
18398
18399 return false;
18400 }
18401
18402 /* Return the instruction template for an insert with mask in mode MODE, with
18403 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18404
18405 const char *
18406 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18407 {
18408 int nb, ne;
18409
18410 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18411 gcc_unreachable ();
18412
18413 /* Prefer rldimi because rlwimi is cracked. */
18414 if (TARGET_POWERPC64
18415 && (!dot || mode == DImode)
18416 && GET_CODE (operands[4]) != LSHIFTRT
18417 && ne == INTVAL (operands[2]))
18418 {
18419 operands[3] = GEN_INT (63 - nb);
18420 if (dot)
18421 return "rldimi. %0,%1,%2,%3";
18422 return "rldimi %0,%1,%2,%3";
18423 }
18424
18425 if (nb < 32 && ne < 32)
18426 {
18427 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18428 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18429 operands[3] = GEN_INT (31 - nb);
18430 operands[4] = GEN_INT (31 - ne);
18431 if (dot)
18432 return "rlwimi. %0,%1,%2,%3,%4";
18433 return "rlwimi %0,%1,%2,%3,%4";
18434 }
18435
18436 gcc_unreachable ();
18437 }
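/* Illustrative case: inserting the low byte of %1 at bits 8..15 of %0
   uses mask 0x0000ff00 with shift 8; then nb == 15, ne == 8, ne == sh,
   and this returns "rlwimi %0,%1,8,16,23" (31 - nb == 16, 31 - ne == 23).  */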
18438
18439 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18440 using two machine instructions. */
18441
18442 bool
18443 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18444 {
18445 /* There are two kinds of AND we can handle with two insns:
18446 1) those we can do with two rl* insns;
18447 2) ori[s];xori[s].
18448
18449 We do not handle the second case yet. */
18450
18451 /* If there is just one stretch of ones, we can do it. */
18452 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18453 return true;
18454
18455 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18456 one insn, we can do the whole thing with two. */
18457 unsigned HOST_WIDE_INT val = INTVAL (c);
18458 unsigned HOST_WIDE_INT bit1 = val & -val;
18459 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18460 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18461 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18462 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18463 }
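
/* A worked example of the bit tricks above, for val = 0xff0f:
   bit1 = val & -val isolates the lowest set bit, 0x0001;
   bit2 = (val + bit1) & ~val isolates the lowest zero of the lowest
   "hole", 0x0010;
   val1 = (val + bit1) & val clears the bottom run of ones, 0xff00;
   bit3 = val1 & -val1 isolates the first one above the hole, 0x0100.
   val + bit3 - bit2 = 0xffff is then val with its lowest hole filled in,
   a single stretch of ones that one rl* insn can handle. */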
18464
18465 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18466 If EXPAND is true, split rotate-and-mask instructions we generate to
18467 their constituent parts as well (this is used during expand); if DOT
18468 is 1, make the last insn a record-form instruction clobbering the
18469 destination GPR and setting the CC reg (from operands[3]); if 2, set
18470 that GPR as well as the CC reg. */
18471
18472 void
18473 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18474 {
18475 gcc_assert (!(expand && dot));
18476
18477 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18478
18479 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18480 shift right. This generates better code than doing the masks without
18481 shifts, or shifting first right and then left. */
18482 int nb, ne;
18483 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18484 {
18485 gcc_assert (mode == DImode);
18486
18487 int shift = 63 - nb;
18488 if (expand)
18489 {
18490 rtx tmp1 = gen_reg_rtx (DImode);
18491 rtx tmp2 = gen_reg_rtx (DImode);
18492 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18493 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18494 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18495 }
18496 else
18497 {
18498 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18499 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18500 emit_move_insn (operands[0], tmp);
18501 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18502 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18503 }
18504 return;
18505 }
18506
18507 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18508 that does the rest. */
18509 unsigned HOST_WIDE_INT bit1 = val & -val;
18510 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18511 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18512 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18513
18514 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18515 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18516
18517 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18518
18519 /* Two "no-rotate"-and-mask instructions, for SImode. */
18520 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18521 {
18522 gcc_assert (mode == SImode);
18523
18524 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18525 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18526 emit_move_insn (reg, tmp);
18527 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18528 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18529 return;
18530 }
18531
18532 gcc_assert (mode == DImode);
18533
18534 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18535 insns; we have to do the first in SImode, because it wraps. */
18536 if (mask2 <= 0xffffffff
18537 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18538 {
18539 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18540 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18541 GEN_INT (mask1));
18542 rtx reg_low = gen_lowpart (SImode, reg);
18543 emit_move_insn (reg_low, tmp);
18544 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18545 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18546 return;
18547 }
18548
18549 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18550 at the top end), rotate back and clear the other hole. */
18551 int right = exact_log2 (bit3);
18552 int left = 64 - right;
18553
18554 /* Rotate the mask too. */
18555 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18556
18557 if (expand)
18558 {
18559 rtx tmp1 = gen_reg_rtx (DImode);
18560 rtx tmp2 = gen_reg_rtx (DImode);
18561 rtx tmp3 = gen_reg_rtx (DImode);
18562 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18563 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18564 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18565 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18566 }
18567 else
18568 {
18569 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18570 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18571 emit_move_insn (operands[0], tmp);
18572 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18573 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18574 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18575 }
18576 }
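
/* A sketch of the single-stretch case above: for the DImode mask
   0x00ff0000 (nb = 23, ne = 16) the shift distance is 63 - nb = 40. The
   shift left by 40 together with the AND against 0x00ff0000 << 40 (a mask
   now anchored at the top) matches one rotate-and-mask insn, and the
   logical shift right by 40 is the second insn. */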
18577 \f
18578 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18579 for lfq and stfq insns iff the registers are hard registers. */
18580
18581 int
18582 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18583 {
18584 /* We might have been passed a SUBREG. */
18585 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18586 return 0;
18587
18588 /* We might have been passed non-floating-point registers. */
18589 if (!FP_REGNO_P (REGNO (reg1))
18590 || !FP_REGNO_P (REGNO (reg2)))
18591 return 0;
18592
18593 return (REGNO (reg1) == REGNO (reg2) - 1);
18594 }
18595
18596 /* Return 1 if addr1 and addr2 are suitable for an lfq or stfq insn.
18597 addr1 and addr2 must be in consecutive memory locations
18598 (addr2 == addr1 + 8). */
18599
18600 int
18601 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18602 {
18603 rtx addr1, addr2;
18604 unsigned int reg1, reg2;
18605 int offset1, offset2;
18606
18607 /* The mems cannot be volatile. */
18608 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18609 return 0;
18610
18611 addr1 = XEXP (mem1, 0);
18612 addr2 = XEXP (mem2, 0);
18613
18614 /* Extract an offset (if used) from the first addr. */
18615 if (GET_CODE (addr1) == PLUS)
18616 {
18617 /* If not a REG, return zero. */
18618 if (GET_CODE (XEXP (addr1, 0)) != REG)
18619 return 0;
18620 else
18621 {
18622 reg1 = REGNO (XEXP (addr1, 0));
18623 /* The offset must be constant! */
18624 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18625 return 0;
18626 offset1 = INTVAL (XEXP (addr1, 1));
18627 }
18628 }
18629 else if (GET_CODE (addr1) != REG)
18630 return 0;
18631 else
18632 {
18633 reg1 = REGNO (addr1);
18634 /* This was a simple (mem (reg)) expression. Offset is 0. */
18635 offset1 = 0;
18636 }
18637
18638 /* And now for the second addr. */
18639 if (GET_CODE (addr2) == PLUS)
18640 {
18641 /* If not a REG, return zero. */
18642 if (GET_CODE (XEXP (addr2, 0)) != REG)
18643 return 0;
18644 else
18645 {
18646 reg2 = REGNO (XEXP (addr2, 0));
18647 /* The offset must be constant. */
18648 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18649 return 0;
18650 offset2 = INTVAL (XEXP (addr2, 1));
18651 }
18652 }
18653 else if (GET_CODE (addr2) != REG)
18654 return 0;
18655 else
18656 {
18657 reg2 = REGNO (addr2);
18658 /* This was a simple (mem (reg)) expression. Offset is 0. */
18659 offset2 = 0;
18660 }
18661
18662 /* Both of these must have the same base register. */
18663 if (reg1 != reg2)
18664 return 0;
18665
18666 /* The offset for the second addr must be 8 more than the first addr. */
18667 if (offset2 != offset1 + 8)
18668 return 0;
18669
18670 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18671 instructions. */
18672 return 1;
18673 }
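
/* For example, (mem:DF (plus (reg 3) (const_int 8))) followed by
   (mem:DF (plus (reg 3) (const_int 16))) passes these tests: the base
   register is the same and the second offset is exactly 8 more than the
   first. */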
18674 \f
18675 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18676 need to use DDmode, in all other cases we can use the same mode. */
18677 static machine_mode
18678 rs6000_secondary_memory_needed_mode (machine_mode mode)
18679 {
18680 if (lra_in_progress && mode == SDmode)
18681 return DDmode;
18682 return mode;
18683 }
18684
18685 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18686 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18687 only work on the traditional altivec registers, note if an altivec register
18688 was chosen. */
18689
18690 static enum rs6000_reg_type
18691 register_to_reg_type (rtx reg, bool *is_altivec)
18692 {
18693 HOST_WIDE_INT regno;
18694 enum reg_class rclass;
18695
18696 if (GET_CODE (reg) == SUBREG)
18697 reg = SUBREG_REG (reg);
18698
18699 if (!REG_P (reg))
18700 return NO_REG_TYPE;
18701
18702 regno = REGNO (reg);
18703 if (regno >= FIRST_PSEUDO_REGISTER)
18704 {
18705 if (!lra_in_progress && !reload_completed)
18706 return PSEUDO_REG_TYPE;
18707
18708 regno = true_regnum (reg);
18709 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18710 return PSEUDO_REG_TYPE;
18711 }
18712
18713 gcc_assert (regno >= 0);
18714
18715 if (is_altivec && ALTIVEC_REGNO_P (regno))
18716 *is_altivec = true;
18717
18718 rclass = rs6000_regno_regclass[regno];
18719 return reg_class_to_reg_type[(int)rclass];
18720 }
18721
18722 /* Helper function to return the cost of adding a TOC entry address. */
18723
18724 static inline int
18725 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18726 {
18727 int ret;
18728
18729 if (TARGET_CMODEL != CMODEL_SMALL)
18730 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18731
18732 else
18733 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18734
18735 return ret;
18736 }
18737
18738 /* Helper function for rs6000_secondary_reload to determine whether the memory
18739 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18740 needs reloading. Return negative if the memory is not handled by the memory
18741 helper functions (so that a different reload method should be tried), 0 if no
18742 additional instructions are needed, and positive to give the extra cost for
18743 the memory. */
18744
18745 static int
18746 rs6000_secondary_reload_memory (rtx addr,
18747 enum reg_class rclass,
18748 machine_mode mode)
18749 {
18750 int extra_cost = 0;
18751 rtx reg, and_arg, plus_arg0, plus_arg1;
18752 addr_mask_type addr_mask;
18753 const char *type = NULL;
18754 const char *fail_msg = NULL;
18755
18756 if (GPR_REG_CLASS_P (rclass))
18757 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18758
18759 else if (rclass == FLOAT_REGS)
18760 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18761
18762 else if (rclass == ALTIVEC_REGS)
18763 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18764
18765 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18766 else if (rclass == VSX_REGS)
18767 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18768 & ~RELOAD_REG_AND_M16);
18769
18770 /* If the register allocator hasn't made up its mind yet on the register
18771 class to use, settle on defaults to use. */
18772 else if (rclass == NO_REGS)
18773 {
18774 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18775 & ~RELOAD_REG_AND_M16);
18776
18777 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18778 addr_mask &= ~(RELOAD_REG_INDEXED
18779 | RELOAD_REG_PRE_INCDEC
18780 | RELOAD_REG_PRE_MODIFY);
18781 }
18782
18783 else
18784 addr_mask = 0;
18785
18786 /* If the register isn't valid in this register class, just return now. */
18787 if ((addr_mask & RELOAD_REG_VALID) == 0)
18788 {
18789 if (TARGET_DEBUG_ADDR)
18790 {
18791 fprintf (stderr,
18792 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18793 "not valid in class\n",
18794 GET_MODE_NAME (mode), reg_class_names[rclass]);
18795 debug_rtx (addr);
18796 }
18797
18798 return -1;
18799 }
18800
18801 switch (GET_CODE (addr))
18802 {
18803 /* Does the register class support auto update forms for this mode? We
18804 don't need a scratch register, since the powerpc only supports
18805 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18806 case PRE_INC:
18807 case PRE_DEC:
18808 reg = XEXP (addr, 0);
18809 if (!base_reg_operand (reg, GET_MODE (reg)))
18810 {
18811 fail_msg = "no base register #1";
18812 extra_cost = -1;
18813 }
18814
18815 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18816 {
18817 extra_cost = 1;
18818 type = "update";
18819 }
18820 break;
18821
18822 case PRE_MODIFY:
18823 reg = XEXP (addr, 0);
18824 plus_arg1 = XEXP (addr, 1);
18825 if (!base_reg_operand (reg, GET_MODE (reg))
18826 || GET_CODE (plus_arg1) != PLUS
18827 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18828 {
18829 fail_msg = "bad PRE_MODIFY";
18830 extra_cost = -1;
18831 }
18832
18833 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18834 {
18835 extra_cost = 1;
18836 type = "update";
18837 }
18838 break;
18839
18840 /* Do we need to simulate AND -16 to clear the bottom address bits used
18841 in VMX load/stores? Only allow the AND for vector sizes. */
18842 case AND:
18843 and_arg = XEXP (addr, 0);
18844 if (GET_MODE_SIZE (mode) != 16
18845 || GET_CODE (XEXP (addr, 1)) != CONST_INT
18846 || INTVAL (XEXP (addr, 1)) != -16)
18847 {
18848 fail_msg = "bad Altivec AND #1";
18849 extra_cost = -1;
18850 }
18851
18852 else if (rclass != ALTIVEC_REGS)
18853 {
18854 if (legitimate_indirect_address_p (and_arg, false))
18855 extra_cost = 1;
18856
18857 else if (legitimate_indexed_address_p (and_arg, false))
18858 extra_cost = 2;
18859
18860 else
18861 {
18862 fail_msg = "bad Altivec AND #2";
18863 extra_cost = -1;
18864 }
18865
18866 type = "and";
18867 }
18868 break;
18869
18870 /* If this is an indirect address, make sure it is a base register. */
18871 case REG:
18872 case SUBREG:
18873 if (!legitimate_indirect_address_p (addr, false))
18874 {
18875 extra_cost = 1;
18876 type = "move";
18877 }
18878 break;
18879
18880 /* If this is an indexed address, make sure the register class can handle
18881 indexed addresses for this mode. */
18882 case PLUS:
18883 plus_arg0 = XEXP (addr, 0);
18884 plus_arg1 = XEXP (addr, 1);
18885
18886 /* (plus (plus (reg) (constant)) (constant)) is generated during
18887 push_reload processing, so handle it now. */
18888 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18889 {
18890 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18891 {
18892 extra_cost = 1;
18893 type = "offset";
18894 }
18895 }
18896
18897 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18898 push_reload processing, so handle it now. */
18899 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18900 {
18901 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18902 {
18903 extra_cost = 1;
18904 type = "indexed #2";
18905 }
18906 }
18907
18908 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18909 {
18910 fail_msg = "no base register #2";
18911 extra_cost = -1;
18912 }
18913
18914 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18915 {
18916 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18917 || !legitimate_indexed_address_p (addr, false))
18918 {
18919 extra_cost = 1;
18920 type = "indexed";
18921 }
18922 }
18923
18924 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18925 && CONST_INT_P (plus_arg1))
18926 {
18927 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18928 {
18929 extra_cost = 1;
18930 type = "vector d-form offset";
18931 }
18932 }
18933
18934 /* Make sure the register class can handle offset addresses. */
18935 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18936 {
18937 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18938 {
18939 extra_cost = 1;
18940 type = "offset #2";
18941 }
18942 }
18943
18944 else
18945 {
18946 fail_msg = "bad PLUS";
18947 extra_cost = -1;
18948 }
18949
18950 break;
18951
18952 case LO_SUM:
18953 /* Quad offsets are restricted and can't handle normal addresses. */
18954 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18955 {
18956 extra_cost = -1;
18957 type = "vector d-form lo_sum";
18958 }
18959
18960 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18961 {
18962 fail_msg = "bad LO_SUM";
18963 extra_cost = -1;
18964 }
18965
18966 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18967 {
18968 extra_cost = 1;
18969 type = "lo_sum";
18970 }
18971 break;
18972
18973 /* Static addresses need to create a TOC entry. */
18974 case CONST:
18975 case SYMBOL_REF:
18976 case LABEL_REF:
18977 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18978 {
18979 extra_cost = -1;
18980 type = "vector d-form lo_sum #2";
18981 }
18982
18983 else
18984 {
18985 type = "address";
18986 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18987 }
18988 break;
18989
18990 /* TOC references look like offsetable memory. */
18991 case UNSPEC:
18992 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18993 {
18994 fail_msg = "bad UNSPEC";
18995 extra_cost = -1;
18996 }
18997
18998 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18999 {
19000 extra_cost = -1;
19001 type = "vector d-form lo_sum #3";
19002 }
19003
19004 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19005 {
19006 extra_cost = 1;
19007 type = "toc reference";
19008 }
19009 break;
19010
19011 default:
19012 {
19013 fail_msg = "bad address";
19014 extra_cost = -1;
19015 }
19016 }
19017
19018 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19019 {
19020 if (extra_cost < 0)
19021 fprintf (stderr,
19022 "rs6000_secondary_reload_memory error: mode = %s, "
19023 "class = %s, addr_mask = '%s', %s\n",
19024 GET_MODE_NAME (mode),
19025 reg_class_names[rclass],
19026 rs6000_debug_addr_mask (addr_mask, false),
19027 (fail_msg != NULL) ? fail_msg : "<bad address>");
19028
19029 else
19030 fprintf (stderr,
19031 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19032 "addr_mask = '%s', extra cost = %d, %s\n",
19033 GET_MODE_NAME (mode),
19034 reg_class_names[rclass],
19035 rs6000_debug_addr_mask (addr_mask, false),
19036 extra_cost,
19037 (type) ? type : "<none>");
19038
19039 debug_rtx (addr);
19040 }
19041
19042 return extra_cost;
19043 }
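
/* As an example of the AND case above: a V4SImode access through
   (and (reg) (const_int -16)) that must live in VSX_REGS rather than
   ALTIVEC_REGS (whose load/store insns ignore the low address bits
   anyway) gets extra_cost = 1, since one extra insn is needed to do the
   masking explicitly before an indirect or indexed access. */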
19044
19045 /* Helper function for rs6000_secondary_reload to return true if a move to a
19046 different register class is really a simple move. */
19047
19048 static bool
19049 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19050 enum rs6000_reg_type from_type,
19051 machine_mode mode)
19052 {
19053 int size = GET_MODE_SIZE (mode);
19054
19055 /* Check the various direct moves that are available. In this function, we only
19056 look at cases where we don't need any extra registers, and one or more
19057 simple move insns are issued. Originally small integers are not allowed
19058 in FPR/VSX registers. Single precision binary floating is not a simple
19059 move because we need to convert to the single precision memory layout.
19060 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19061 need special direct move handling, which we do not support yet. */
19062 if (TARGET_DIRECT_MOVE
19063 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19064 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19065 {
19066 if (TARGET_POWERPC64)
19067 {
19068 /* ISA 2.07: MTVSRD or MFVSRD. */
19069 if (size == 8)
19070 return true;
19071
19072 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19073 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19074 return true;
19075 }
19076
19077 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19078 if (TARGET_P8_VECTOR)
19079 {
19080 if (mode == SImode)
19081 return true;
19082
19083 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19084 return true;
19085 }
19086
19087 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19088 if (mode == SDmode)
19089 return true;
19090 }
19091
19092 /* Power6+: MFTGPR or MFFGPR. */
19093 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19094 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19095 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19096 return true;
19097
19098 /* Move to/from SPR. */
19099 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19100 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19101 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19102 return true;
19103
19104 return false;
19105 }
19106
19107 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19108 special direct moves that involve allocating an extra register. Return true
19109 if there is a suitable helper function, filling in SRI with its insn code
19110 and extra cost, and false if not. */
19111
19112 static bool
19113 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19114 enum rs6000_reg_type from_type,
19115 machine_mode mode,
19116 secondary_reload_info *sri,
19117 bool altivec_p)
19118 {
19119 bool ret = false;
19120 enum insn_code icode = CODE_FOR_nothing;
19121 int cost = 0;
19122 int size = GET_MODE_SIZE (mode);
19123
19124 if (TARGET_POWERPC64 && size == 16)
19125 {
19126 /* Handle moving 128-bit values from GPRs to VSX registers on
19127 ISA 2.07 (power8, power9) when running in 64-bit mode using
19128 XXPERMDI to glue the two 64-bit values back together. */
19129 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19130 {
19131 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19132 icode = reg_addr[mode].reload_vsx_gpr;
19133 }
19134
19135 /* Handle moving 128-bit values from VSX registers to GPRs on
19136 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19137 bottom 64-bit value. */
19138 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19139 {
19140 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19141 icode = reg_addr[mode].reload_gpr_vsx;
19142 }
19143 }
19144
19145 else if (TARGET_POWERPC64 && mode == SFmode)
19146 {
19147 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19148 {
19149 cost = 3; /* xscvdpspn, mfvsrd, and. */
19150 icode = reg_addr[mode].reload_gpr_vsx;
19151 }
19152
19153 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19154 {
19155 cost = 2; /* mtvsrwz, xscvspdpn. */
19156 icode = reg_addr[mode].reload_vsx_gpr;
19157 }
19158 }
19159
19160 else if (!TARGET_POWERPC64 && size == 8)
19161 {
19162 /* Handle moving 64-bit values from GPRs to floating point registers on
19163 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19164 32-bit values back together. Altivec register classes must be handled
19165 specially since a different instruction is used, and the secondary
19166 reload support requires a single instruction class in the scratch
19167 register constraint. However, right now TFmode is not allowed in
19168 Altivec registers, so the pattern will never match. */
19169 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19170 {
19171 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19172 icode = reg_addr[mode].reload_fpr_gpr;
19173 }
19174 }
19175
19176 if (icode != CODE_FOR_nothing)
19177 {
19178 ret = true;
19179 if (sri)
19180 {
19181 sri->icode = icode;
19182 sri->extra_cost = cost;
19183 }
19184 }
19185
19186 return ret;
19187 }
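
/* For example, a TImode move from a GPR pair to a VSX register on a
   64-bit ISA 2.07 target goes through reg_addr[TImode].reload_vsx_gpr:
   two mtvsrd insns move the two halves and one xxpermdi glues them
   together, which is where the cost of 3 comes from. */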
19188
19189 /* Return whether a move between two register classes can be done either
19190 directly (simple move) or via a pattern that uses a single extra temporary
19191 (using ISA 2.07's direct move in this case). */
19192
19193 static bool
19194 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19195 enum rs6000_reg_type from_type,
19196 machine_mode mode,
19197 secondary_reload_info *sri,
19198 bool altivec_p)
19199 {
19200 /* Fall back to load/store reloads if either type is not a register. */
19201 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19202 return false;
19203
19204 /* If we haven't allocated registers yet, assume the move can be done for the
19205 standard register types. */
19206 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19207 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19208 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19209 return true;
19210
19211 /* A move within the same set of registers is a simple move for non-specialized
19212 registers. */
19213 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19214 return true;
19215
19216 /* Check whether a simple move can be done directly. */
19217 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19218 {
19219 if (sri)
19220 {
19221 sri->icode = CODE_FOR_nothing;
19222 sri->extra_cost = 0;
19223 }
19224 return true;
19225 }
19226
19227 /* Now check if we can do it in a few steps. */
19228 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19229 altivec_p);
19230 }
19231
19232 /* Inform reload about cases where moving X with a mode MODE to a register in
19233 RCLASS requires an extra scratch or immediate register. Return the class
19234 needed for the immediate register.
19235
19236 For VSX and Altivec, we may need a register to convert sp+offset into
19237 reg+sp (an indexed address).
19238
19239 For misaligned 64-bit gpr loads and stores we need a register to
19240 convert an offset address to indirect. */
19241
19242 static reg_class_t
19243 rs6000_secondary_reload (bool in_p,
19244 rtx x,
19245 reg_class_t rclass_i,
19246 machine_mode mode,
19247 secondary_reload_info *sri)
19248 {
19249 enum reg_class rclass = (enum reg_class) rclass_i;
19250 reg_class_t ret = ALL_REGS;
19251 enum insn_code icode;
19252 bool default_p = false;
19253 bool done_p = false;
19254
19255 /* Allow subreg of memory before/during reload. */
19256 bool memory_p = (MEM_P (x)
19257 || (!reload_completed && GET_CODE (x) == SUBREG
19258 && MEM_P (SUBREG_REG (x))));
19259
19260 sri->icode = CODE_FOR_nothing;
19261 sri->t_icode = CODE_FOR_nothing;
19262 sri->extra_cost = 0;
19263 icode = ((in_p)
19264 ? reg_addr[mode].reload_load
19265 : reg_addr[mode].reload_store);
19266
19267 if (REG_P (x) || register_operand (x, mode))
19268 {
19269 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19270 bool altivec_p = (rclass == ALTIVEC_REGS);
19271 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19272
19273 if (!in_p)
19274 std::swap (to_type, from_type);
19275
19276 /* Can we do a direct move of some sort? */
19277 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19278 altivec_p))
19279 {
19280 icode = (enum insn_code)sri->icode;
19281 default_p = false;
19282 done_p = true;
19283 ret = NO_REGS;
19284 }
19285 }
19286
19287 /* Make sure 0.0 is not reloaded or forced into memory. */
19288 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19289 {
19290 ret = NO_REGS;
19291 default_p = false;
19292 done_p = true;
19293 }
19294
19295 /* If this is a scalar floating point value and we want to load it into the
19296 traditional Altivec registers, move it via a traditional floating
19297 point register, unless we have D-form addressing. Also make sure that
19298 non-zero constants use a FPR. */
19299 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19300 && !mode_supports_vmx_dform (mode)
19301 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19302 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19303 {
19304 ret = FLOAT_REGS;
19305 default_p = false;
19306 done_p = true;
19307 }
19308
19309 /* Handle reload of load/stores if we have reload helper functions. */
19310 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19311 {
19312 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19313 mode);
19314
19315 if (extra_cost >= 0)
19316 {
19317 done_p = true;
19318 ret = NO_REGS;
19319 if (extra_cost > 0)
19320 {
19321 sri->extra_cost = extra_cost;
19322 sri->icode = icode;
19323 }
19324 }
19325 }
19326
19327 /* Handle unaligned loads and stores of integer registers. */
19328 if (!done_p && TARGET_POWERPC64
19329 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19330 && memory_p
19331 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19332 {
19333 rtx addr = XEXP (x, 0);
19334 rtx off = address_offset (addr);
19335
19336 if (off != NULL_RTX)
19337 {
19338 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19339 unsigned HOST_WIDE_INT offset = INTVAL (off);
19340
19341 /* We need a secondary reload when our legitimate_address_p
19342 says the address is good (as otherwise the entire address
19343 will be reloaded), and the offset is not a multiple of
19344 four or we have an address wrap. Address wrap will only
19345 occur for LO_SUMs since legitimate_offset_address_p
19346 rejects addresses for 16-byte mems that will wrap. */
19347 if (GET_CODE (addr) == LO_SUM
19348 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19349 && ((offset & 3) != 0
19350 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19351 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19352 && (offset & 3) != 0))
19353 {
19354 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19355 if (in_p)
19356 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19357 : CODE_FOR_reload_di_load);
19358 else
19359 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19360 : CODE_FOR_reload_di_store);
19361 sri->extra_cost = 2;
19362 ret = NO_REGS;
19363 done_p = true;
19364 }
19365 else
19366 default_p = true;
19367 }
19368 else
19369 default_p = true;
19370 }
19371
19372 if (!done_p && !TARGET_POWERPC64
19373 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19374 && memory_p
19375 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19376 {
19377 rtx addr = XEXP (x, 0);
19378 rtx off = address_offset (addr);
19379
19380 if (off != NULL_RTX)
19381 {
19382 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19383 unsigned HOST_WIDE_INT offset = INTVAL (off);
19384
19385 /* We need a secondary reload when our legitimate_address_p
19386 says the address is good (as otherwise the entire address
19387 will be reloaded), and we have a wrap.
19388
19389 legitimate_lo_sum_address_p allows LO_SUM addresses to
19390 have any offset so test for wrap in the low 16 bits.
19391
19392 legitimate_offset_address_p checks for the range
19393 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19394 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19395 [0x7ff4,0x7fff] respectively, so test for the
19396 intersection of these ranges, [0x7ffc,0x7fff] and
19397 [0x7ff4,0x7ff7] respectively.
19398
19399 Note that the address we see here may have been
19400 manipulated by legitimize_reload_address. */
19401 if (GET_CODE (addr) == LO_SUM
19402 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19403 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19404 {
19405 if (in_p)
19406 sri->icode = CODE_FOR_reload_si_load;
19407 else
19408 sri->icode = CODE_FOR_reload_si_store;
19409 sri->extra_cost = 2;
19410 ret = NO_REGS;
19411 done_p = true;
19412 }
19413 else
19414 default_p = true;
19415 }
19416 else
19417 default_p = true;
19418 }
19419
19420 if (!done_p)
19421 default_p = true;
19422
19423 if (default_p)
19424 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19425
19426 gcc_assert (ret != ALL_REGS);
19427
19428 if (TARGET_DEBUG_ADDR)
19429 {
19430 fprintf (stderr,
19431 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19432 "mode = %s",
19433 reg_class_names[ret],
19434 in_p ? "true" : "false",
19435 reg_class_names[rclass],
19436 GET_MODE_NAME (mode));
19437
19438 if (reload_completed)
19439 fputs (", after reload", stderr);
19440
19441 if (!done_p)
19442 fputs (", done_p not set", stderr);
19443
19444 if (default_p)
19445 fputs (", default secondary reload", stderr);
19446
19447 if (sri->icode != CODE_FOR_nothing)
19448 fprintf (stderr, ", reload func = %s, extra cost = %d",
19449 insn_data[sri->icode].name, sri->extra_cost);
19450
19451 else if (sri->extra_cost > 0)
19452 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19453
19454 fputs ("\n", stderr);
19455 debug_rtx (x);
19456 }
19457
19458 return ret;
19459 }
19460
19461 /* Better tracing for rs6000_secondary_reload_inner. */
19462
19463 static void
19464 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19465 bool store_p)
19466 {
19467 rtx set, clobber;
19468
19469 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19470
19471 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19472 store_p ? "store" : "load");
19473
19474 if (store_p)
19475 set = gen_rtx_SET (mem, reg);
19476 else
19477 set = gen_rtx_SET (reg, mem);
19478
19479 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19480 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19481 }
19482
19483 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19484 ATTRIBUTE_NORETURN;
19485
19486 static void
19487 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19488 bool store_p)
19489 {
19490 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19491 gcc_unreachable ();
19492 }
19493
19494 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19495 reload helper functions. These were identified in
19496 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19497 reload, it calls the insns:
19498 reload_<RELOAD:mode>_<P:mptrsize>_store
19499 reload_<RELOAD:mode>_<P:mptrsize>_load
19500
19501 which in turn calls this function, to do whatever is necessary to create
19502 valid addresses. */
19503
19504 void
19505 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19506 {
19507 int regno = true_regnum (reg);
19508 machine_mode mode = GET_MODE (reg);
19509 addr_mask_type addr_mask;
19510 rtx addr;
19511 rtx new_addr;
19512 rtx op_reg, op0, op1;
19513 rtx and_op;
19514 rtx cc_clobber;
19515 rtvec rv;
19516
19517 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19518 || !base_reg_operand (scratch, GET_MODE (scratch)))
19519 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19520
19521 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19522 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19523
19524 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19525 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19526
19527 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19528 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19529
19530 else
19531 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19532
19533 /* Make sure the mode is valid in this register class. */
19534 if ((addr_mask & RELOAD_REG_VALID) == 0)
19535 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19536
19537 if (TARGET_DEBUG_ADDR)
19538 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19539
19540 new_addr = addr = XEXP (mem, 0);
19541 switch (GET_CODE (addr))
19542 {
19543 /* Does the register class support auto update forms for this mode? If
19544 not, do the update now. We don't need a scratch register, since the
19545 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19546 case PRE_INC:
19547 case PRE_DEC:
19548 op_reg = XEXP (addr, 0);
19549 if (!base_reg_operand (op_reg, Pmode))
19550 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19551
19552 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19553 {
19554 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
19555 new_addr = op_reg;
19556 }
19557 break;
19558
19559 case PRE_MODIFY:
19560 op0 = XEXP (addr, 0);
19561 op1 = XEXP (addr, 1);
19562 if (!base_reg_operand (op0, Pmode)
19563 || GET_CODE (op1) != PLUS
19564 || !rtx_equal_p (op0, XEXP (op1, 0)))
19565 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19566
19567 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19568 {
19569 emit_insn (gen_rtx_SET (op0, op1));
19570 new_addr = op0;
19571 }
19572 break;
19573
19574 /* Do we need to simulate AND -16 to clear the bottom address bits used
19575 in VMX load/stores? */
19576 case AND:
19577 op0 = XEXP (addr, 0);
19578 op1 = XEXP (addr, 1);
19579 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19580 {
19581 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19582 op_reg = op0;
19583
19584 else if (GET_CODE (op1) == PLUS)
19585 {
19586 emit_insn (gen_rtx_SET (scratch, op1));
19587 op_reg = scratch;
19588 }
19589
19590 else
19591 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19592
19593 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19594 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19595 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19596 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19597 new_addr = scratch;
19598 }
19599 break;
19600
19601 /* If this is an indirect address, make sure it is a base register. */
19602 case REG:
19603 case SUBREG:
19604 if (!base_reg_operand (addr, GET_MODE (addr)))
19605 {
19606 emit_insn (gen_rtx_SET (scratch, addr));
19607 new_addr = scratch;
19608 }
19609 break;
19610
19611 /* If this is an indexed address, make sure the register class can handle
19612 indexed addresses for this mode. */
19613 case PLUS:
19614 op0 = XEXP (addr, 0);
19615 op1 = XEXP (addr, 1);
19616 if (!base_reg_operand (op0, Pmode))
19617 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19618
19619 else if (int_reg_operand (op1, Pmode))
19620 {
19621 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19622 {
19623 emit_insn (gen_rtx_SET (scratch, addr));
19624 new_addr = scratch;
19625 }
19626 }
19627
19628 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19629 {
19630 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19631 || !quad_address_p (addr, mode, false))
19632 {
19633 emit_insn (gen_rtx_SET (scratch, addr));
19634 new_addr = scratch;
19635 }
19636 }
19637
19638 /* Make sure the register class can handle offset addresses. */
19639 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19640 {
19641 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19642 {
19643 emit_insn (gen_rtx_SET (scratch, addr));
19644 new_addr = scratch;
19645 }
19646 }
19647
19648 else
19649 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19650
19651 break;
19652
19653 case LO_SUM:
19654 op0 = XEXP (addr, 0);
19655 op1 = XEXP (addr, 1);
19656 if (!base_reg_operand (op0, Pmode))
19657 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19658
19659 else if (int_reg_operand (op1, Pmode))
19660 {
19661 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19662 {
19663 emit_insn (gen_rtx_SET (scratch, addr));
19664 new_addr = scratch;
19665 }
19666 }
19667
19668 /* Quad offsets are restricted and can't handle normal addresses. */
19669 else if (mode_supports_dq_form (mode))
19670 {
19671 emit_insn (gen_rtx_SET (scratch, addr));
19672 new_addr = scratch;
19673 }
19674
19675 /* Make sure the register class can handle offset addresses. */
19676 else if (legitimate_lo_sum_address_p (mode, addr, false))
19677 {
19678 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19679 {
19680 emit_insn (gen_rtx_SET (scratch, addr));
19681 new_addr = scratch;
19682 }
19683 }
19684
19685 else
19686 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19687
19688 break;
19689
19690 case SYMBOL_REF:
19691 case CONST:
19692 case LABEL_REF:
19693 rs6000_emit_move (scratch, addr, Pmode);
19694 new_addr = scratch;
19695 break;
19696
19697 default:
19698 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19699 }
19700
19701 /* Adjust the address if it changed. */
19702 if (addr != new_addr)
19703 {
19704 mem = replace_equiv_address_nv (mem, new_addr);
19705 if (TARGET_DEBUG_ADDR)
19706 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19707 }
19708
19709 /* Now create the move. */
19710 if (store_p)
19711 emit_insn (gen_rtx_SET (mem, reg));
19712 else
19713 emit_insn (gen_rtx_SET (reg, mem));
19714
19715 return;
19716 }
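
/* For instance, if the register class has no update forms, a V4SImode
   access through (pre_inc (reg 9)) is rewritten by the code above into an
   explicit r9 = r9 + 16 followed by a plain (mem (reg 9)) access. */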
19717
19718 /* Convert reloads involving 64-bit gprs and misaligned offset
19719 addressing, or multiple 32-bit gprs and offsets that are too large,
19720 to use indirect addressing. */
19721
19722 void
19723 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19724 {
19725 int regno = true_regnum (reg);
19726 enum reg_class rclass;
19727 rtx addr;
19728 rtx scratch_or_premodify = scratch;
19729
19730 if (TARGET_DEBUG_ADDR)
19731 {
19732 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19733 store_p ? "store" : "load");
19734 fprintf (stderr, "reg:\n");
19735 debug_rtx (reg);
19736 fprintf (stderr, "mem:\n");
19737 debug_rtx (mem);
19738 fprintf (stderr, "scratch:\n");
19739 debug_rtx (scratch);
19740 }
19741
19742 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19743 gcc_assert (GET_CODE (mem) == MEM);
19744 rclass = REGNO_REG_CLASS (regno);
19745 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19746 addr = XEXP (mem, 0);
19747
19748 if (GET_CODE (addr) == PRE_MODIFY)
19749 {
19750 gcc_assert (REG_P (XEXP (addr, 0))
19751 && GET_CODE (XEXP (addr, 1)) == PLUS
19752 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19753 scratch_or_premodify = XEXP (addr, 0);
19754 if (!HARD_REGISTER_P (scratch_or_premodify))
19755 /* If we have a pseudo here then reload will have arranged
19756 to have it replaced, but only in the original insn.
19757 Use the replacement here too. */
19758 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19759
19760 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19761 expressions from the original insn, without unsharing them.
19762 Any RTL that points into the original insn will of course
19763 have register replacements applied. That is why we don't
19764 need to look for replacements under the PLUS. */
19765 addr = XEXP (addr, 1);
19766 }
19767 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19768
19769 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19770
19771 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19772
19773 /* Now create the move. */
19774 if (store_p)
19775 emit_insn (gen_rtx_SET (mem, reg));
19776 else
19777 emit_insn (gen_rtx_SET (reg, mem));
19778
19779 return;
19780 }
19781
19782 /* Given an rtx X being reloaded into a reg required to be
19783 in class CLASS, return the class of reg to actually use.
19784 In general this is just CLASS; but on some machines
19785 in some cases it is preferable to use a more restrictive class.
19786
19787 On the RS/6000, we have to return NO_REGS when we want to reload a
19788 floating-point CONST_DOUBLE to force it to be copied to memory.
19789
19790 We also don't want to reload integer values into floating-point
19791 registers if we can at all help it. In fact, this can
19792 cause reload to die, if it tries to generate a reload of CTR
19793 into a FP register and discovers it doesn't have the memory location
19794 required.
19795
19796 ??? Would it be a good idea to have reload do the converse, that is
19797 try to reload floating modes into FP registers if possible?
19798 */
19799
19800 static enum reg_class
19801 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19802 {
19803 machine_mode mode = GET_MODE (x);
19804 bool is_constant = CONSTANT_P (x);
19805
19806 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19807 reload class for it. */
19808 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19809 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19810 return NO_REGS;
19811
19812 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19813 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19814 return NO_REGS;
19815
19816 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19817 the reloading of address expressions using PLUS into floating point
19818 registers. */
19819 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19820 {
19821 if (is_constant)
19822 {
19823 /* Zero is always allowed in all VSX registers. */
19824 if (x == CONST0_RTX (mode))
19825 return rclass;
19826
19827 /* If this is a vector constant that can be formed with a few Altivec
19828 instructions, we want altivec registers. */
19829 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19830 return ALTIVEC_REGS;
19831
19832 /* If this is an integer constant that can easily be loaded into
19833 vector registers, allow it. */
19834 if (CONST_INT_P (x))
19835 {
19836 HOST_WIDE_INT value = INTVAL (x);
19837
19838 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19839 2.06 can generate it in the Altivec registers with
19840 VSPLTI<x>. */
19841 if (value == -1)
19842 {
19843 if (TARGET_P8_VECTOR)
19844 return rclass;
19845 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19846 return ALTIVEC_REGS;
19847 else
19848 return NO_REGS;
19849 }
19850
19851 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19852 a sign extend in the Altivec registers. */
19853 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19854 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19855 return ALTIVEC_REGS;
19856 }
19857
19858 /* Force constant to memory. */
19859 return NO_REGS;
19860 }
19861
19862 /* D-form addressing can easily reload the value. */
19863 if (mode_supports_vmx_dform (mode)
19864 || mode_supports_dq_form (mode))
19865 return rclass;
19866
19867 /* If this is a scalar floating point value and we don't have D-form
19868 addressing, prefer the traditional floating point registers so that we
19869 can use D-form (register+offset) addressing. */
19870 if (rclass == VSX_REGS
19871 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19872 return FLOAT_REGS;
19873
19874 /* Prefer the Altivec registers if Altivec is handling the vector
19875 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19876 loads. */
19877 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19878 || mode == V1TImode)
19879 return ALTIVEC_REGS;
19880
19881 return rclass;
19882 }
19883
19884 if (is_constant || GET_CODE (x) == PLUS)
19885 {
19886 if (reg_class_subset_p (GENERAL_REGS, rclass))
19887 return GENERAL_REGS;
19888 if (reg_class_subset_p (BASE_REGS, rclass))
19889 return BASE_REGS;
19890 return NO_REGS;
19891 }
19892
19893 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
19894 return GENERAL_REGS;
19895
19896 return rclass;
19897 }
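
/* For example, a V16QImode CONST_VECTOR that easy_vector_constant
   accepts (say, a splat that VSPLTISB can generate) is steered into
   ALTIVEC_REGS, while a vector constant that cannot be synthesized
   cheaply returns NO_REGS and is forced to memory. */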
19898
19899 /* Debug version of rs6000_preferred_reload_class. */
19900 static enum reg_class
19901 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19902 {
19903 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19904
19905 fprintf (stderr,
19906 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19907 "mode = %s, x:\n",
19908 reg_class_names[ret], reg_class_names[rclass],
19909 GET_MODE_NAME (GET_MODE (x)));
19910 debug_rtx (x);
19911
19912 return ret;
19913 }
19914
19915 /* If we are copying between FP or AltiVec registers and anything else, we need
19916 a memory location. The exception is when we are targeting ppc64 and the
19917 direct fpr/gpr move instructions are available. Also, under VSX, you
19918 can copy vector registers from the FP register set to the Altivec register
19919 set and vice versa. */
19920
19921 static bool
19922 rs6000_secondary_memory_needed (machine_mode mode,
19923 reg_class_t from_class,
19924 reg_class_t to_class)
19925 {
19926 enum rs6000_reg_type from_type, to_type;
19927 bool altivec_p = ((from_class == ALTIVEC_REGS)
19928 || (to_class == ALTIVEC_REGS));
19929
19930 /* If a simple/direct move is available, we don't need secondary memory. */
19931 from_type = reg_class_to_reg_type[(int)from_class];
19932 to_type = reg_class_to_reg_type[(int)to_class];
19933
19934 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19935 (secondary_reload_info *)0, altivec_p))
19936 return false;
19937
19938 /* If we have a floating point or vector register class, we need to use
19939 memory to transfer the data. */
19940 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19941 return true;
19942
19943 return false;
19944 }
19945
19946 /* Debug version of rs6000_secondary_memory_needed. */
19947 static bool
19948 rs6000_debug_secondary_memory_needed (machine_mode mode,
19949 reg_class_t from_class,
19950 reg_class_t to_class)
19951 {
19952 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19953
19954 fprintf (stderr,
19955 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19956 "to_class = %s, mode = %s\n",
19957 ret ? "true" : "false",
19958 reg_class_names[from_class],
19959 reg_class_names[to_class],
19960 GET_MODE_NAME (mode));
19961
19962 return ret;
19963 }
19964
19965 /* Return the register class of a scratch register needed to copy IN into
19966 or out of a register in RCLASS in MODE. If it can be done directly,
19967 NO_REGS is returned. */
19968
19969 static enum reg_class
19970 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19971 rtx in)
19972 {
19973 int regno;
19974
19975 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19976 #if TARGET_MACHO
19977 && MACHOPIC_INDIRECT
19978 #endif
19979 ))
19980 {
19981 /* We cannot copy a symbolic operand directly into anything
19982 other than BASE_REGS for TARGET_ELF. So indicate that a
19983 register from BASE_REGS is needed as an intermediate
19984 register.
19985
19986 On Darwin, pic addresses require a load from memory, which
19987 needs a base register. */
19988 if (rclass != BASE_REGS
19989 && (GET_CODE (in) == SYMBOL_REF
19990 || GET_CODE (in) == HIGH
19991 || GET_CODE (in) == LABEL_REF
19992 || GET_CODE (in) == CONST))
19993 return BASE_REGS;
19994 }
19995
19996 if (GET_CODE (in) == REG)
19997 {
19998 regno = REGNO (in);
19999 if (regno >= FIRST_PSEUDO_REGISTER)
20000 {
20001 regno = true_regnum (in);
20002 if (regno >= FIRST_PSEUDO_REGISTER)
20003 regno = -1;
20004 }
20005 }
20006 else if (GET_CODE (in) == SUBREG)
20007 {
20008 regno = true_regnum (in);
20009 if (regno >= FIRST_PSEUDO_REGISTER)
20010 regno = -1;
20011 }
20012 else
20013 regno = -1;
20014
20015 /* If we have VSX register moves, prefer moving scalar values between
20016 Altivec registers and GPR by going via an FPR (and then via memory)
20017 instead of reloading the secondary memory address for Altivec moves. */
20018 if (TARGET_VSX
20019 && GET_MODE_SIZE (mode) < 16
20020 && !mode_supports_vmx_dform (mode)
20021 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20022 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20023 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20024 && (regno >= 0 && INT_REGNO_P (regno)))))
20025 return FLOAT_REGS;
20026
20027 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20028 into anything. */
20029 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20030 || (regno >= 0 && INT_REGNO_P (regno)))
20031 return NO_REGS;
20032
20033 /* Constants, memory, and VSX registers can go into VSX registers (both the
20034 traditional floating point and the altivec registers). */
20035 if (rclass == VSX_REGS
20036 && (regno == -1 || VSX_REGNO_P (regno)))
20037 return NO_REGS;
20038
20039 /* Constants, memory, and FP registers can go into FP registers. */
20040 if ((regno == -1 || FP_REGNO_P (regno))
20041 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20042 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20043
20044 /* Memory and AltiVec registers can go into AltiVec registers. */
20045 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20046 && rclass == ALTIVEC_REGS)
20047 return NO_REGS;
20048
20049 /* We can copy among the CR registers. */
20050 if ((rclass == CR_REGS || rclass == CR0_REGS)
20051 && regno >= 0 && CR_REGNO_P (regno))
20052 return NO_REGS;
20053
20054 /* Otherwise, we need GENERAL_REGS. */
20055 return GENERAL_REGS;
20056 }
20057
20058 /* Debug version of rs6000_secondary_reload_class. */
20059 static enum reg_class
20060 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20061 machine_mode mode, rtx in)
20062 {
20063 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20064 fprintf (stderr,
20065 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20066 "mode = %s, input rtx:\n",
20067 reg_class_names[ret], reg_class_names[rclass],
20068 GET_MODE_NAME (mode));
20069 debug_rtx (in);
20070
20071 return ret;
20072 }
20073
20074 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20075
20076 static bool
20077 rs6000_can_change_mode_class (machine_mode from,
20078 machine_mode to,
20079 reg_class_t rclass)
20080 {
20081 unsigned from_size = GET_MODE_SIZE (from);
20082 unsigned to_size = GET_MODE_SIZE (to);
20083
20084 if (from_size != to_size)
20085 {
20086 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20087
20088 if (reg_classes_intersect_p (xclass, rclass))
20089 {
20090 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20091 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20092 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20093 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20094
20095 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20096 single register under VSX because the scalar part of the register
20097 is in the upper 64-bits, and not the lower 64-bits. Types like
20098 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20099 IEEE floating point can't overlap, and neither can small
20100 values. */
20101
20102 if (to_float128_vector_p && from_float128_vector_p)
20103 return true;
20104
20105 else if (to_float128_vector_p || from_float128_vector_p)
20106 return false;
20107
20108 /* TDmode in floating-mode registers must always go into a register
20109 pair with the most significant word in the even-numbered register
20110 to match ISA requirements. In little-endian mode, this does not
20111 match subreg numbering, so we cannot allow subregs. */
20112 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20113 return false;
20114
20115 if (from_size < 8 || to_size < 8)
20116 return false;
20117
20118 if (from_size == 8 && (8 * to_nregs) != to_size)
20119 return false;
20120
20121 if (to_size == 8 && (8 * from_nregs) != from_size)
20122 return false;
20123
20124 return true;
20125 }
20126 else
20127 return true;
20128 }
20129
20130 /* Since the VSX register set includes traditional floating point registers
20131 and altivec registers, just check for the size being different instead of
20132 trying to check whether the modes are vector modes. Otherwise it won't
20133 allow say DF and DI to change classes. For types like TFmode and TDmode
20134 that take 2 64-bit registers, rather than a single 128-bit register, don't
20135 allow subregs of those types to other 128-bit types. */
20136 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20137 {
20138 unsigned num_regs = (from_size + 15) / 16;
20139 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20140 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20141 return false;
20142
20143 return (from_size == 8 || from_size == 16);
20144 }
20145
20146 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20147 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20148 return false;
20149
20150 return true;
20151 }
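
/* For example, in VSX_REGS a subreg between DFmode and DImode (both 8
   bytes) is allowed, but a subreg between DFmode and V2DImode is not:
   the scalar lives in the upper 64 bits of the 128-bit register, so the
   two modes do not overlap the way subregs require. */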
20152
20153 /* Debug version of rs6000_can_change_mode_class. */
20154 static bool
20155 rs6000_debug_can_change_mode_class (machine_mode from,
20156 machine_mode to,
20157 reg_class_t rclass)
20158 {
20159 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20160
20161 fprintf (stderr,
20162 "rs6000_can_change_mode_class, return %s, from = %s, "
20163 "to = %s, rclass = %s\n",
20164 ret ? "true" : "false",
20165 GET_MODE_NAME (from), GET_MODE_NAME (to),
20166 reg_class_names[rclass]);
20167
20168 return ret;
20169 }
20170 \f
20171 /* Return a string to do a move operation of 128 bits of data. */
20172
20173 const char *
20174 rs6000_output_move_128bit (rtx operands[])
20175 {
20176 rtx dest = operands[0];
20177 rtx src = operands[1];
20178 machine_mode mode = GET_MODE (dest);
20179 int dest_regno;
20180 int src_regno;
20181 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20182 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20183
20184 if (REG_P (dest))
20185 {
20186 dest_regno = REGNO (dest);
20187 dest_gpr_p = INT_REGNO_P (dest_regno);
20188 dest_fp_p = FP_REGNO_P (dest_regno);
20189 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20190 dest_vsx_p = dest_fp_p | dest_vmx_p;
20191 }
20192 else
20193 {
20194 dest_regno = -1;
20195 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20196 }
20197
20198 if (REG_P (src))
20199 {
20200 src_regno = REGNO (src);
20201 src_gpr_p = INT_REGNO_P (src_regno);
20202 src_fp_p = FP_REGNO_P (src_regno);
20203 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20204 src_vsx_p = src_fp_p | src_vmx_p;
20205 }
20206 else
20207 {
20208 src_regno = -1;
20209 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20210 }
20211
20212 /* Register moves. */
20213 if (dest_regno >= 0 && src_regno >= 0)
20214 {
20215 if (dest_gpr_p)
20216 {
20217 if (src_gpr_p)
20218 return "#";
20219
20220 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20221 return (WORDS_BIG_ENDIAN
20222 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20223 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20224
20225 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20226 return "#";
20227 }
20228
20229 else if (TARGET_VSX && dest_vsx_p)
20230 {
20231 if (src_vsx_p)
20232 return "xxlor %x0,%x1,%x1";
20233
20234 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20235 return (WORDS_BIG_ENDIAN
20236 ? "mtvsrdd %x0,%1,%L1"
20237 : "mtvsrdd %x0,%L1,%1");
20238
20239 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20240 return "#";
20241 }
20242
20243 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20244 return "vor %0,%1,%1";
20245
20246 else if (dest_fp_p && src_fp_p)
20247 return "#";
20248 }
20249
20250 /* Loads. */
20251 else if (dest_regno >= 0 && MEM_P (src))
20252 {
20253 if (dest_gpr_p)
20254 {
20255 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20256 return "lq %0,%1";
20257 else
20258 return "#";
20259 }
20260
20261 else if (TARGET_ALTIVEC && dest_vmx_p
20262 && altivec_indexed_or_indirect_operand (src, mode))
20263 return "lvx %0,%y1";
20264
20265 else if (TARGET_VSX && dest_vsx_p)
20266 {
20267 if (mode_supports_dq_form (mode)
20268 && quad_address_p (XEXP (src, 0), mode, true))
20269 return "lxv %x0,%1";
20270
20271 else if (TARGET_P9_VECTOR)
20272 return "lxvx %x0,%y1";
20273
20274 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20275 return "lxvw4x %x0,%y1";
20276
20277 else
20278 return "lxvd2x %x0,%y1";
20279 }
20280
20281 else if (TARGET_ALTIVEC && dest_vmx_p)
20282 return "lvx %0,%y1";
20283
20284 else if (dest_fp_p)
20285 return "#";
20286 }
20287
20288 /* Stores. */
20289 else if (src_regno >= 0 && MEM_P (dest))
20290 {
20291 if (src_gpr_p)
20292 {
20293 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20294 return "stq %1,%0";
20295 else
20296 return "#";
20297 }
20298
20299 else if (TARGET_ALTIVEC && src_vmx_p
20300 && altivec_indexed_or_indirect_operand (dest, mode))
20301 return "stvx %1,%y0";
20302
20303 else if (TARGET_VSX && src_vsx_p)
20304 {
20305 if (mode_supports_dq_form (mode)
20306 && quad_address_p (XEXP (dest, 0), mode, true))
20307 return "stxv %x1,%0";
20308
20309 else if (TARGET_P9_VECTOR)
20310 return "stxvx %x1,%y0";
20311
20312 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20313 return "stxvw4x %x1,%y0";
20314
20315 else
20316 return "stxvd2x %x1,%y0";
20317 }
20318
20319 else if (TARGET_ALTIVEC && src_vmx_p)
20320 return "stvx %1,%y0";
20321
20322 else if (src_fp_p)
20323 return "#";
20324 }
20325
20326 /* Constants. */
20327 else if (dest_regno >= 0
20328 && (GET_CODE (src) == CONST_INT
20329 || GET_CODE (src) == CONST_WIDE_INT
20330 || GET_CODE (src) == CONST_DOUBLE
20331 || GET_CODE (src) == CONST_VECTOR))
20332 {
20333 if (dest_gpr_p)
20334 return "#";
20335
20336 else if ((dest_vmx_p && TARGET_ALTIVEC)
20337 || (dest_vsx_p && TARGET_VSX))
20338 return output_vec_const_move (operands);
20339 }
20340
20341 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20342 }
20343
20344 /* Validate a 128-bit move. */
20345 bool
20346 rs6000_move_128bit_ok_p (rtx operands[])
20347 {
20348 machine_mode mode = GET_MODE (operands[0]);
20349 return (gpc_reg_operand (operands[0], mode)
20350 || gpc_reg_operand (operands[1], mode));
20351 }
20352
20353 /* Return true if a 128-bit move needs to be split. */
20354 bool
20355 rs6000_split_128bit_ok_p (rtx operands[])
20356 {
20357 if (!reload_completed)
20358 return false;
20359
20360 if (!gpr_or_gpr_p (operands[0], operands[1]))
20361 return false;
20362
20363 if (quad_load_store_p (operands[0], operands[1]))
20364 return false;
20365
20366 return true;
20367 }
20368
20369 \f
20370 /* Given a comparison operation, return the bit number in CCR to test. We
20371 know this is a valid comparison.
20372
20373 SCC_P is 1 if this is for an scc. That means that %D will have been
20374 used instead of %C, so the bits will be in different places.
20375
20376 Return -1 if OP isn't a valid comparison for some reason. */
20377
20378 int
20379 ccr_bit (rtx op, int scc_p)
20380 {
20381 enum rtx_code code = GET_CODE (op);
20382 machine_mode cc_mode;
20383 int cc_regnum;
20384 int base_bit;
20385 rtx reg;
20386
20387 if (!COMPARISON_P (op))
20388 return -1;
20389
20390 reg = XEXP (op, 0);
20391
20392 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20393
20394 cc_mode = GET_MODE (reg);
20395 cc_regnum = REGNO (reg);
20396 base_bit = 4 * (cc_regnum - CR0_REGNO);
20397
20398 validate_condition_mode (code, cc_mode);
20399
20400 /* When generating a sCOND operation, only positive conditions are
20401 allowed. */
20402 gcc_assert (!scc_p
20403 || code == EQ || code == GT || code == LT || code == UNORDERED
20404 || code == GTU || code == LTU);
20405
20406 switch (code)
20407 {
20408 case NE:
20409 return scc_p ? base_bit + 3 : base_bit + 2;
20410 case EQ:
20411 return base_bit + 2;
20412 case GT: case GTU: case UNLE:
20413 return base_bit + 1;
20414 case LT: case LTU: case UNGE:
20415 return base_bit;
20416 case ORDERED: case UNORDERED:
20417 return base_bit + 3;
20418
20419 case GE: case GEU:
20420 /* If scc, we will have done a cror to put the bit in the
20421 unordered position. So test that bit. For integer, this is ! LT
20422 unless this is an scc insn. */
20423 return scc_p ? base_bit + 3 : base_bit;
20424
20425 case LE: case LEU:
20426 return scc_p ? base_bit + 3 : base_bit + 1;
20427
20428 default:
20429 gcc_unreachable ();
20430 }
20431 }
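
/* A minimal standalone sketch (not part of this file's build) of the CR
   bit numbering computed above: condition-register field n holds its
   LT, GT, EQ and SO/UN bits at positions 4*n .. 4*n+3, which is where
   base_bit comes from.  The field number below is example data.  */
#if 0
#include <stdio.h>

enum cr_bit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3 };

static int
cr_field_bit (int cr_field, enum cr_bit bit)
{
  return 4 * cr_field + bit;	/* same formula as base_bit above */
}

int
main (void)
{
  printf ("%d\n", cr_field_bit (6, CR_EQ));	/* cr6's EQ bit: 26 */
  return 0;
}
#endif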
20432 \f
20433 /* Return the GOT register. */
20434
20435 rtx
20436 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20437 {
20438 /* The second flow pass currently (June 1999) can't update
20439 regs_ever_live without disturbing other parts of the compiler, so
20440 update it here to make the prolog/epilogue code happy. */
20441 if (!can_create_pseudo_p ()
20442 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20443 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20444
20445 crtl->uses_pic_offset_table = 1;
20446
20447 return pic_offset_table_rtx;
20448 }
20449 \f
20450 static rs6000_stack_t stack_info;
20451
20452 /* Function to init struct machine_function.
20453 This will be called, via a pointer variable,
20454 from push_function_context. */
20455
20456 static struct machine_function *
20457 rs6000_init_machine_status (void)
20458 {
20459 stack_info.reload_completed = 0;
20460 return ggc_cleared_alloc<machine_function> ();
20461 }
20462 \f
20463 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20464
20465 /* Write out a function code label. */
20466
20467 void
20468 rs6000_output_function_entry (FILE *file, const char *fname)
20469 {
20470 if (fname[0] != '.')
20471 {
20472 switch (DEFAULT_ABI)
20473 {
20474 default:
20475 gcc_unreachable ();
20476
20477 case ABI_AIX:
20478 if (DOT_SYMBOLS)
20479 putc ('.', file);
20480 else
20481 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20482 break;
20483
20484 case ABI_ELFv2:
20485 case ABI_V4:
20486 case ABI_DARWIN:
20487 break;
20488 }
20489 }
20490
20491 RS6000_OUTPUT_BASENAME (file, fname);
20492 }
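
/* Standalone sketch (not part of this file's build) of the decoration
   above: with AIX-style dot-symbols the code entry point of "foo" is
   labelled ".foo", while the ELFv2, V4 and Darwin ABIs use the bare
   name.  The helper below is a simplified illustration only.  */
#if 0
#include <stdio.h>

static void
output_entry_label (const char *fname, int dot_symbols)
{
  if (fname[0] != '.' && dot_symbols)
    putchar ('.');		/* AIX ABI: entry point is ".fname" */
  printf ("%s:\n", fname);
}

int
main (void)
{
  output_entry_label ("foo", 1);	/* prints ".foo:" */
  output_entry_label ("foo", 0);	/* prints "foo:" */
  return 0;
}
#endif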
20493
20494 /* Print an operand. Recognize special options, documented below. */
20495
20496 #if TARGET_ELF
20497 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20498 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20499 #else
20500 #define SMALL_DATA_RELOC "sda21"
20501 #define SMALL_DATA_REG 0
20502 #endif
20503
20504 void
20505 print_operand (FILE *file, rtx x, int code)
20506 {
20507 int i;
20508 unsigned HOST_WIDE_INT uval;
20509
20510 switch (code)
20511 {
20512 /* %a is output_address. */
20513
20514 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20515 output_operand. */
20516
20517 case 'D':
20518 /* Like 'J' but get to the GT bit only. */
20519 gcc_assert (REG_P (x));
20520
20521 /* Bit 1 is GT bit. */
20522 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20523
20524 /* Add one for shift count in rlinm for scc. */
20525 fprintf (file, "%d", i + 1);
20526 return;
20527
20528 case 'e':
20529 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20530 if (! INT_P (x))
20531 {
20532 output_operand_lossage ("invalid %%e value");
20533 return;
20534 }
20535
20536 uval = INTVAL (x);
20537 if ((uval & 0xffff) == 0 && uval != 0)
20538 putc ('s', file);
20539 return;
20540
20541 case 'E':
20542       /* X is a CR register.  Print the number of the EQ bit of the CR.  */
20543 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20544 output_operand_lossage ("invalid %%E value");
20545 else
20546 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20547 return;
20548
20549 case 'f':
20550 /* X is a CR register. Print the shift count needed to move it
20551 to the high-order four bits. */
20552 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20553 output_operand_lossage ("invalid %%f value");
20554 else
20555 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20556 return;
20557
20558 case 'F':
20559 /* Similar, but print the count for the rotate in the opposite
20560 direction. */
20561 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20562 output_operand_lossage ("invalid %%F value");
20563 else
20564 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20565 return;
20566
20567 case 'G':
20568 /* X is a constant integer. If it is negative, print "m",
20569 otherwise print "z". This is to make an aze or ame insn. */
20570 if (GET_CODE (x) != CONST_INT)
20571 output_operand_lossage ("invalid %%G value");
20572 else if (INTVAL (x) >= 0)
20573 putc ('z', file);
20574 else
20575 putc ('m', file);
20576 return;
20577
20578 case 'h':
20579 /* If constant, output low-order five bits. Otherwise, write
20580 normally. */
20581 if (INT_P (x))
20582 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20583 else
20584 print_operand (file, x, 0);
20585 return;
20586
20587 case 'H':
20588 /* If constant, output low-order six bits. Otherwise, write
20589 normally. */
20590 if (INT_P (x))
20591 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20592 else
20593 print_operand (file, x, 0);
20594 return;
20595
20596 case 'I':
20597 /* Print `i' if this is a constant, else nothing. */
20598 if (INT_P (x))
20599 putc ('i', file);
20600 return;
20601
20602 case 'j':
20603 /* Write the bit number in CCR for jump. */
20604 i = ccr_bit (x, 0);
20605 if (i == -1)
20606 output_operand_lossage ("invalid %%j code");
20607 else
20608 fprintf (file, "%d", i);
20609 return;
20610
20611 case 'J':
20612 /* Similar, but add one for shift count in rlinm for scc and pass
20613 scc flag to `ccr_bit'. */
20614 i = ccr_bit (x, 1);
20615 if (i == -1)
20616 output_operand_lossage ("invalid %%J code");
20617 else
20618 /* If we want bit 31, write a shift count of zero, not 32. */
20619 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20620 return;
20621
20622 case 'k':
20623 /* X must be a constant. Write the 1's complement of the
20624 constant. */
20625 if (! INT_P (x))
20626 output_operand_lossage ("invalid %%k value");
20627 else
20628 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20629 return;
20630
20631 case 'K':
20632 /* X must be a symbolic constant on ELF. Write an
20633 expression suitable for an 'addi' that adds in the low 16
20634 bits of the MEM. */
20635 if (GET_CODE (x) == CONST)
20636 {
20637 if (GET_CODE (XEXP (x, 0)) != PLUS
20638 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20639 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20640 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20641 output_operand_lossage ("invalid %%K value");
20642 }
20643 print_operand_address (file, x);
20644 fputs ("@l", file);
20645 return;
20646
20647 /* %l is output_asm_label. */
20648
20649 case 'L':
20650 /* Write second word of DImode or DFmode reference. Works on register
20651 or non-indexed memory only. */
20652 if (REG_P (x))
20653 fputs (reg_names[REGNO (x) + 1], file);
20654 else if (MEM_P (x))
20655 {
20656 machine_mode mode = GET_MODE (x);
20657 /* Handle possible auto-increment. Since it is pre-increment and
20658              we have already done it, we can just use an offset of one word.  */
20659 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20660 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20661 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20662 UNITS_PER_WORD));
20663 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20664 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20665 UNITS_PER_WORD));
20666 else
20667 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20668 UNITS_PER_WORD),
20669 0));
20670
20671 if (small_data_operand (x, GET_MODE (x)))
20672 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20673 reg_names[SMALL_DATA_REG]);
20674 }
20675 return;
20676
20677 case 'N': /* Unused */
20678 /* Write the number of elements in the vector times 4. */
20679 if (GET_CODE (x) != PARALLEL)
20680 output_operand_lossage ("invalid %%N value");
20681 else
20682 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20683 return;
20684
20685 case 'O': /* Unused */
20686 /* Similar, but subtract 1 first. */
20687 if (GET_CODE (x) != PARALLEL)
20688 output_operand_lossage ("invalid %%O value");
20689 else
20690 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20691 return;
20692
20693 case 'p':
20694 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20695 if (! INT_P (x)
20696 || INTVAL (x) < 0
20697 || (i = exact_log2 (INTVAL (x))) < 0)
20698 output_operand_lossage ("invalid %%p value");
20699 else
20700 fprintf (file, "%d", i);
20701 return;
20702
20703 case 'P':
20704 /* The operand must be an indirect memory reference. The result
20705 is the register name. */
20706 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20707 || REGNO (XEXP (x, 0)) >= 32)
20708 output_operand_lossage ("invalid %%P value");
20709 else
20710 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20711 return;
20712
20713 case 'q':
20714 /* This outputs the logical code corresponding to a boolean
20715 expression. The expression may have one or both operands
20716 negated (if one, only the first one). For condition register
20717 logical operations, it will also treat the negated
20718 CR codes as NOTs, but not handle NOTs of them. */
20719 {
20720 const char *const *t = 0;
20721 const char *s;
20722 enum rtx_code code = GET_CODE (x);
20723 static const char * const tbl[3][3] = {
20724 { "and", "andc", "nor" },
20725 { "or", "orc", "nand" },
20726 { "xor", "eqv", "xor" } };
20727
20728 if (code == AND)
20729 t = tbl[0];
20730 else if (code == IOR)
20731 t = tbl[1];
20732 else if (code == XOR)
20733 t = tbl[2];
20734 else
20735 output_operand_lossage ("invalid %%q value");
20736
20737 if (GET_CODE (XEXP (x, 0)) != NOT)
20738 s = t[0];
20739 else
20740 {
20741 if (GET_CODE (XEXP (x, 1)) == NOT)
20742 s = t[2];
20743 else
20744 s = t[1];
20745 }
20746
20747 fputs (s, file);
20748 }
20749 return;
20750
20751 case 'Q':
20752 if (! TARGET_MFCRF)
20753 return;
20754 fputc (',', file);
20755 /* FALLTHRU */
20756
20757 case 'R':
20758 /* X is a CR register. Print the mask for `mtcrf'. */
20759 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20760 output_operand_lossage ("invalid %%R value");
20761 else
20762 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20763 return;
20764
20765 case 's':
20766 /* Low 5 bits of 32 - value */
20767 if (! INT_P (x))
20768 output_operand_lossage ("invalid %%s value");
20769 else
20770 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20771 return;
20772
20773 case 't':
20774 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20775 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
20776
20777 /* Bit 3 is OV bit. */
20778 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20779
20780 /* If we want bit 31, write a shift count of zero, not 32. */
20781 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20782 return;
20783
20784 case 'T':
20785 /* Print the symbolic name of a branch target register. */
20786 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
20787 && REGNO (x) != CTR_REGNO))
20788 output_operand_lossage ("invalid %%T value");
20789 else if (REGNO (x) == LR_REGNO)
20790 fputs ("lr", file);
20791 else
20792 fputs ("ctr", file);
20793 return;
20794
20795 case 'u':
20796 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20797 for use in unsigned operand. */
20798 if (! INT_P (x))
20799 {
20800 output_operand_lossage ("invalid %%u value");
20801 return;
20802 }
20803
20804 uval = INTVAL (x);
20805 if ((uval & 0xffff) == 0)
20806 uval >>= 16;
20807
20808 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20809 return;
20810
20811 case 'v':
20812 /* High-order 16 bits of constant for use in signed operand. */
20813 if (! INT_P (x))
20814 output_operand_lossage ("invalid %%v value");
20815 else
20816 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20817 (INTVAL (x) >> 16) & 0xffff);
20818 return;
20819
20820 case 'U':
20821 /* Print `u' if this has an auto-increment or auto-decrement. */
20822 if (MEM_P (x)
20823 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20824 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20825 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20826 putc ('u', file);
20827 return;
20828
20829 case 'V':
20830 /* Print the trap code for this operand. */
20831 switch (GET_CODE (x))
20832 {
20833 case EQ:
20834 fputs ("eq", file); /* 4 */
20835 break;
20836 case NE:
20837 fputs ("ne", file); /* 24 */
20838 break;
20839 case LT:
20840 fputs ("lt", file); /* 16 */
20841 break;
20842 case LE:
20843 fputs ("le", file); /* 20 */
20844 break;
20845 case GT:
20846 fputs ("gt", file); /* 8 */
20847 break;
20848 case GE:
20849 fputs ("ge", file); /* 12 */
20850 break;
20851 case LTU:
20852 fputs ("llt", file); /* 2 */
20853 break;
20854 case LEU:
20855 fputs ("lle", file); /* 6 */
20856 break;
20857 case GTU:
20858 fputs ("lgt", file); /* 1 */
20859 break;
20860 case GEU:
20861 fputs ("lge", file); /* 5 */
20862 break;
20863 default:
20864 gcc_unreachable ();
20865 }
20866 break;
20867
20868 case 'w':
20869 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20870 normally. */
20871 if (INT_P (x))
20872 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20873 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20874 else
20875 print_operand (file, x, 0);
20876 return;
20877
20878 case 'x':
20879 /* X is a FPR or Altivec register used in a VSX context. */
20880 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
20881 output_operand_lossage ("invalid %%x value");
20882 else
20883 {
20884 int reg = REGNO (x);
20885 int vsx_reg = (FP_REGNO_P (reg)
20886 ? reg - 32
20887 : reg - FIRST_ALTIVEC_REGNO + 32);
20888
20889 #ifdef TARGET_REGNAMES
20890 if (TARGET_REGNAMES)
20891 fprintf (file, "%%vs%d", vsx_reg);
20892 else
20893 #endif
20894 fprintf (file, "%d", vsx_reg);
20895 }
20896 return;
20897
20898 case 'X':
20899 if (MEM_P (x)
20900 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20901 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20902 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20903 putc ('x', file);
20904 return;
20905
20906 case 'Y':
20907 /* Like 'L', for third word of TImode/PTImode */
20908 if (REG_P (x))
20909 fputs (reg_names[REGNO (x) + 2], file);
20910 else if (MEM_P (x))
20911 {
20912 machine_mode mode = GET_MODE (x);
20913 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20914 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20915 output_address (mode, plus_constant (Pmode,
20916 XEXP (XEXP (x, 0), 0), 8));
20917 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20918 output_address (mode, plus_constant (Pmode,
20919 XEXP (XEXP (x, 0), 0), 8));
20920 else
20921 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20922 if (small_data_operand (x, GET_MODE (x)))
20923 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20924 reg_names[SMALL_DATA_REG]);
20925 }
20926 return;
20927
20928 case 'z':
20929 /* X is a SYMBOL_REF. Write out the name preceded by a
20930 period and without any trailing data in brackets. Used for function
20931 names. If we are configured for System V (or the embedded ABI) on
20932 the PowerPC, do not emit the period, since those systems do not use
20933 TOCs and the like. */
20934 gcc_assert (GET_CODE (x) == SYMBOL_REF);
20935
20936 /* For macho, check to see if we need a stub. */
20937 if (TARGET_MACHO)
20938 {
20939 const char *name = XSTR (x, 0);
20940 #if TARGET_MACHO
20941 if (darwin_emit_branch_islands
20942 && MACHOPIC_INDIRECT
20943 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20944 name = machopic_indirection_name (x, /*stub_p=*/true);
20945 #endif
20946 assemble_name (file, name);
20947 }
20948 else if (!DOT_SYMBOLS)
20949 assemble_name (file, XSTR (x, 0));
20950 else
20951 rs6000_output_function_entry (file, XSTR (x, 0));
20952 return;
20953
20954 case 'Z':
20955 /* Like 'L', for last word of TImode/PTImode. */
20956 if (REG_P (x))
20957 fputs (reg_names[REGNO (x) + 3], file);
20958 else if (MEM_P (x))
20959 {
20960 machine_mode mode = GET_MODE (x);
20961 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20962 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20963 output_address (mode, plus_constant (Pmode,
20964 XEXP (XEXP (x, 0), 0), 12));
20965 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20966 output_address (mode, plus_constant (Pmode,
20967 XEXP (XEXP (x, 0), 0), 12));
20968 else
20969 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20970 if (small_data_operand (x, GET_MODE (x)))
20971 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20972 reg_names[SMALL_DATA_REG]);
20973 }
20974 return;
20975
20976 /* Print AltiVec memory operand. */
20977 case 'y':
20978 {
20979 rtx tmp;
20980
20981 gcc_assert (MEM_P (x));
20982
20983 tmp = XEXP (x, 0);
20984
20985 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20986 && GET_CODE (tmp) == AND
20987 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
20988 && INTVAL (XEXP (tmp, 1)) == -16)
20989 tmp = XEXP (tmp, 0);
20990 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
20991 && GET_CODE (tmp) == PRE_MODIFY)
20992 tmp = XEXP (tmp, 1);
20993 if (REG_P (tmp))
20994 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
20995 else
20996 {
20997 if (GET_CODE (tmp) != PLUS
20998 || !REG_P (XEXP (tmp, 0))
20999 || !REG_P (XEXP (tmp, 1)))
21000 {
21001 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21002 break;
21003 }
21004
21005 if (REGNO (XEXP (tmp, 0)) == 0)
21006 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21007 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21008 else
21009 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21010 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21011 }
21012 break;
21013 }
21014
21015 case 0:
21016 if (REG_P (x))
21017 fprintf (file, "%s", reg_names[REGNO (x)]);
21018 else if (MEM_P (x))
21019 {
21020 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21021 know the width from the mode. */
21022 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21023 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21024 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21025 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21026 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21027 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21028 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21029 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21030 else
21031 output_address (GET_MODE (x), XEXP (x, 0));
21032 }
21033 else
21034 {
21035 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21036 /* This hack along with a corresponding hack in
21037 rs6000_output_addr_const_extra arranges to output addends
21038              where the assembler expects to find them.  For example,
21039 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21040 without this hack would be output as "x@toc+4". We
21041 want "x+4@toc". */
21042 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21043 else
21044 output_addr_const (file, x);
21045 }
21046 return;
21047
21048 case '&':
21049 if (const char *name = get_some_local_dynamic_name ())
21050 assemble_name (file, name);
21051 else
21052 output_operand_lossage ("'%%&' used without any "
21053 "local dynamic TLS references");
21054 return;
21055
21056 default:
21057 output_operand_lossage ("invalid %%xn code");
21058 }
21059 }
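
/* Standalone sketch (not part of this file's build) of the arithmetic
   behind the %w and %u modifiers above: %w sign-extends the low 16
   bits of a constant, and %u prints whichever 16-bit half is
   non-zero.  The inputs in main are example data.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static int64_t
low16_signed (int64_t v)
{
  return ((v & 0xffff) ^ 0x8000) - 0x8000;	/* same identity as %w */
}

static uint64_t
nonzero_half (uint64_t v)
{
  if ((v & 0xffff) == 0)	/* low half clear: use the high half (%u) */
    v >>= 16;
  return v & 0xffff;
}

int
main (void)
{
  printf ("%lld\n", (long long) low16_signed (0xfff5));		    /* -11 */
  printf ("0x%llx\n", (unsigned long long) nonzero_half (0x50000)); /* 0x5 */
  return 0;
}
#endif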
21060 \f
21061 /* Print the address of an operand. */
21062
21063 void
21064 print_operand_address (FILE *file, rtx x)
21065 {
21066 if (REG_P (x))
21067 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21068 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21069 || GET_CODE (x) == LABEL_REF)
21070 {
21071 output_addr_const (file, x);
21072 if (small_data_operand (x, GET_MODE (x)))
21073 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21074 reg_names[SMALL_DATA_REG]);
21075 else
21076 gcc_assert (!TARGET_TOC);
21077 }
21078 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21079 && REG_P (XEXP (x, 1)))
21080 {
21081 if (REGNO (XEXP (x, 0)) == 0)
21082 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21083 reg_names[ REGNO (XEXP (x, 0)) ]);
21084 else
21085 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21086 reg_names[ REGNO (XEXP (x, 1)) ]);
21087 }
21088 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21089 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21090 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21091 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21092 #if TARGET_MACHO
21093 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21094 && CONSTANT_P (XEXP (x, 1)))
21095 {
21096 fprintf (file, "lo16(");
21097 output_addr_const (file, XEXP (x, 1));
21098 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21099 }
21100 #endif
21101 #if TARGET_ELF
21102 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21103 && CONSTANT_P (XEXP (x, 1)))
21104 {
21105 output_addr_const (file, XEXP (x, 1));
21106 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21107 }
21108 #endif
21109 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21110 {
21111 /* This hack along with a corresponding hack in
21112 rs6000_output_addr_const_extra arranges to output addends
21113          where the assembler expects to find them.  For example,
21114 (lo_sum (reg 9)
21115                  (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21116 without this hack would be output as "x@toc+8@l(9)". We
21117 want "x+8@toc@l(9)". */
21118 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21119 if (GET_CODE (x) == LO_SUM)
21120 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21121 else
21122 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21123 }
21124 else
21125 gcc_unreachable ();
21126 }
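
/* Example renderings from the cases above (register numbers are
   illustrative): a bare base register prints as "0(9)"; an indexed
   reg+reg address prints as "9,10", with r0 forced into the second
   slot because r0 as a base register means literal zero; and a
   reg+constant address prints as "8(9)".  */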
21127 \f
21128 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21129
21130 static bool
21131 rs6000_output_addr_const_extra (FILE *file, rtx x)
21132 {
21133 if (GET_CODE (x) == UNSPEC)
21134 switch (XINT (x, 1))
21135 {
21136 case UNSPEC_TOCREL:
21137 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21138 && REG_P (XVECEXP (x, 0, 1))
21139 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21140 output_addr_const (file, XVECEXP (x, 0, 0));
21141 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21142 {
21143 if (INTVAL (tocrel_offset_oac) >= 0)
21144 fprintf (file, "+");
21145 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21146 }
21147 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21148 {
21149 putc ('-', file);
21150 assemble_name (file, toc_label_name);
21151 need_toc_init = 1;
21152 }
21153 else if (TARGET_ELF)
21154 fputs ("@toc", file);
21155 return true;
21156
21157 #if TARGET_MACHO
21158 case UNSPEC_MACHOPIC_OFFSET:
21159 output_addr_const (file, XVECEXP (x, 0, 0));
21160 putc ('-', file);
21161 machopic_output_function_base_name (file);
21162 return true;
21163 #endif
21164 }
21165 return false;
21166 }
21167 \f
21168 /* Target hook for assembling integer objects. The PowerPC version has
21169 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21170 is defined. It also needs to handle DI-mode objects on 64-bit
21171 targets. */
21172
21173 static bool
21174 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21175 {
21176 #ifdef RELOCATABLE_NEEDS_FIXUP
21177 /* Special handling for SI values. */
21178 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21179 {
21180 static int recurse = 0;
21181
21182 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21183 the .fixup section. Since the TOC section is already relocated, we
21184 don't need to mark it here. We used to skip the text section, but it
21185 should never be valid for relocated addresses to be placed in the text
21186 section. */
21187 if (DEFAULT_ABI == ABI_V4
21188 && (TARGET_RELOCATABLE || flag_pic > 1)
21189 && in_section != toc_section
21190 && !recurse
21191 && !CONST_SCALAR_INT_P (x)
21192 && CONSTANT_P (x))
21193 {
21194 char buf[256];
21195
21196 recurse = 1;
21197 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21198 fixuplabelno++;
21199 ASM_OUTPUT_LABEL (asm_out_file, buf);
21200 fprintf (asm_out_file, "\t.long\t(");
21201 output_addr_const (asm_out_file, x);
21202 fprintf (asm_out_file, ")@fixup\n");
21203 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21204 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21205 fprintf (asm_out_file, "\t.long\t");
21206 assemble_name (asm_out_file, buf);
21207 fprintf (asm_out_file, "\n\t.previous\n");
21208 recurse = 0;
21209 return true;
21210 }
21211 /* Remove initial .'s to turn a -mcall-aixdesc function
21212 address into the address of the descriptor, not the function
21213 itself. */
21214 else if (GET_CODE (x) == SYMBOL_REF
21215 && XSTR (x, 0)[0] == '.'
21216 && DEFAULT_ABI == ABI_AIX)
21217 {
21218 const char *name = XSTR (x, 0);
21219 while (*name == '.')
21220 name++;
21221
21222 fprintf (asm_out_file, "\t.long\t%s\n", name);
21223 return true;
21224 }
21225 }
21226 #endif /* RELOCATABLE_NEEDS_FIXUP */
21227 return default_assemble_integer (x, size, aligned_p);
21228 }
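
/* For reference, the -mrelocatable path above emits a fixup sequence
   of this shape (label and symbol names illustrative):

	.LCP42:
		.long	(sym)@fixup
		.section	".fixup","aw"
		.align	2
		.long	.LCP42
		.previous
*/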
21229
21230 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21231 /* Emit an assembler directive to set symbol visibility for DECL to
21232 VISIBILITY_TYPE. */
21233
21234 static void
21235 rs6000_assemble_visibility (tree decl, int vis)
21236 {
21237 if (TARGET_XCOFF)
21238 return;
21239
21240 /* Functions need to have their entry point symbol visibility set as
21241 well as their descriptor symbol visibility. */
21242 if (DEFAULT_ABI == ABI_AIX
21243 && DOT_SYMBOLS
21244 && TREE_CODE (decl) == FUNCTION_DECL)
21245 {
21246 static const char * const visibility_types[] = {
21247 NULL, "protected", "hidden", "internal"
21248 };
21249
21250 const char *name, *type;
21251
21252 name = ((* targetm.strip_name_encoding)
21253 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21254 type = visibility_types[vis];
21255
21256 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21257 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21258 }
21259 else
21260 default_assemble_visibility (decl, vis);
21261 }
21262 #endif
21263 \f
21264 enum rtx_code
21265 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21266 {
21267   /* Reversal of FP compares needs care -- an ordered compare
21268 becomes an unordered compare and vice versa. */
21269 if (mode == CCFPmode
21270 && (!flag_finite_math_only
21271 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21272 || code == UNEQ || code == LTGT))
21273 return reverse_condition_maybe_unordered (code);
21274 else
21275 return reverse_condition (code);
21276 }
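
/* Standalone sketch (not part of this file's build) of why the FP case
   above must use reverse_condition_maybe_unordered: with a NaN
   operand, the negation of LT is UNGE, not GE.  */
#if 0
#include <assert.h>
#include <math.h>

int
main (void)
{
  double a = NAN, b = 1.0;
  assert (!(a < b));		/* reversed LT holds here...  */
  assert (!(a >= b));		/* ...but GE does not...  */
  assert (isunordered (a, b));	/* ...because NaN compares unordered,
				   making the reverse of LT UNGE.  */
  return 0;
}
#endif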
21277
21278 /* Generate a compare for CODE. Return a brand-new rtx that
21279 represents the result of the compare. */
21280
21281 static rtx
21282 rs6000_generate_compare (rtx cmp, machine_mode mode)
21283 {
21284 machine_mode comp_mode;
21285 rtx compare_result;
21286 enum rtx_code code = GET_CODE (cmp);
21287 rtx op0 = XEXP (cmp, 0);
21288 rtx op1 = XEXP (cmp, 1);
21289
21290 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21291 comp_mode = CCmode;
21292 else if (FLOAT_MODE_P (mode))
21293 comp_mode = CCFPmode;
21294 else if (code == GTU || code == LTU
21295 || code == GEU || code == LEU)
21296 comp_mode = CCUNSmode;
21297 else if ((code == EQ || code == NE)
21298 && unsigned_reg_p (op0)
21299 && (unsigned_reg_p (op1)
21300 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21301 /* These are unsigned values, perhaps there will be a later
21302 ordering compare that can be shared with this one. */
21303 comp_mode = CCUNSmode;
21304 else
21305 comp_mode = CCmode;
21306
21307 /* If we have an unsigned compare, make sure we don't have a signed value as
21308 an immediate. */
21309 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21310 && INTVAL (op1) < 0)
21311 {
21312 op0 = copy_rtx_if_shared (op0);
21313 op1 = force_reg (GET_MODE (op0), op1);
21314 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21315 }
21316
21317 /* First, the compare. */
21318 compare_result = gen_reg_rtx (comp_mode);
21319
21320 /* IEEE 128-bit support in VSX registers when we do not have hardware
21321 support. */
21322 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21323 {
21324 rtx libfunc = NULL_RTX;
21325 bool check_nan = false;
21326 rtx dest;
21327
21328 switch (code)
21329 {
21330 case EQ:
21331 case NE:
21332 libfunc = optab_libfunc (eq_optab, mode);
21333 break;
21334
21335 case GT:
21336 case GE:
21337 libfunc = optab_libfunc (ge_optab, mode);
21338 break;
21339
21340 case LT:
21341 case LE:
21342 libfunc = optab_libfunc (le_optab, mode);
21343 break;
21344
21345 case UNORDERED:
21346 case ORDERED:
21347 libfunc = optab_libfunc (unord_optab, mode);
21348 code = (code == UNORDERED) ? NE : EQ;
21349 break;
21350
21351 case UNGE:
21352 case UNGT:
21353 check_nan = true;
21354 libfunc = optab_libfunc (ge_optab, mode);
21355 code = (code == UNGE) ? GE : GT;
21356 break;
21357
21358 case UNLE:
21359 case UNLT:
21360 check_nan = true;
21361 libfunc = optab_libfunc (le_optab, mode);
21362 code = (code == UNLE) ? LE : LT;
21363 break;
21364
21365 case UNEQ:
21366 case LTGT:
21367 check_nan = true;
21368 libfunc = optab_libfunc (eq_optab, mode);
21369           code = (code == UNEQ) ? EQ : NE;
21370 break;
21371
21372 default:
21373 gcc_unreachable ();
21374 }
21375
21376 gcc_assert (libfunc);
21377
21378 if (!check_nan)
21379 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21380 SImode, op0, mode, op1, mode);
21381
21382 /* The library signals an exception for signalling NaNs, so we need to
21383 handle isgreater, etc. by first checking isordered. */
21384 else
21385 {
21386 rtx ne_rtx, normal_dest, unord_dest;
21387 rtx unord_func = optab_libfunc (unord_optab, mode);
21388 rtx join_label = gen_label_rtx ();
21389 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21390 rtx unord_cmp = gen_reg_rtx (comp_mode);
21391
21392
21393 /* Test for either value being a NaN. */
21394 gcc_assert (unord_func);
21395 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21396 SImode, op0, mode, op1, mode);
21397
21398           /* Set value (1) if either value is a NaN, and jump to the join
21399 label. */
21400 dest = gen_reg_rtx (SImode);
21401 emit_move_insn (dest, const1_rtx);
21402 emit_insn (gen_rtx_SET (unord_cmp,
21403 gen_rtx_COMPARE (comp_mode, unord_dest,
21404 const0_rtx)));
21405
21406 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21407 emit_jump_insn (gen_rtx_SET (pc_rtx,
21408 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21409 join_ref,
21410 pc_rtx)));
21411
21412 /* Do the normal comparison, knowing that the values are not
21413 NaNs. */
21414 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21415 SImode, op0, mode, op1, mode);
21416
21417 emit_insn (gen_cstoresi4 (dest,
21418 gen_rtx_fmt_ee (code, SImode, normal_dest,
21419 const0_rtx),
21420 normal_dest, const0_rtx));
21421
21422           /* Join NaN and non-NaN paths.  Compare dest against 0.  */
21423 emit_label (join_label);
21424 code = NE;
21425 }
21426
21427 emit_insn (gen_rtx_SET (compare_result,
21428 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21429 }
21430
21431 else
21432 {
21433 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21434 CLOBBERs to match cmptf_internal2 pattern. */
21435 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21436 && FLOAT128_IBM_P (GET_MODE (op0))
21437 && TARGET_HARD_FLOAT)
21438 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21439 gen_rtvec (10,
21440 gen_rtx_SET (compare_result,
21441 gen_rtx_COMPARE (comp_mode, op0, op1)),
21442 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21443 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21444 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21445 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21446 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21447 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21448 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21449 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21450 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21451 else if (GET_CODE (op1) == UNSPEC
21452 && XINT (op1, 1) == UNSPEC_SP_TEST)
21453 {
21454 rtx op1b = XVECEXP (op1, 0, 0);
21455 comp_mode = CCEQmode;
21456 compare_result = gen_reg_rtx (CCEQmode);
21457 if (TARGET_64BIT)
21458 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21459 else
21460 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21461 }
21462 else
21463 emit_insn (gen_rtx_SET (compare_result,
21464 gen_rtx_COMPARE (comp_mode, op0, op1)));
21465 }
21466
21467 /* Some kinds of FP comparisons need an OR operation;
21468 under flag_finite_math_only we don't bother. */
21469 if (FLOAT_MODE_P (mode)
21470 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21471 && !flag_finite_math_only
21472 && (code == LE || code == GE
21473 || code == UNEQ || code == LTGT
21474 || code == UNGT || code == UNLT))
21475 {
21476 enum rtx_code or1, or2;
21477 rtx or1_rtx, or2_rtx, compare2_rtx;
21478 rtx or_result = gen_reg_rtx (CCEQmode);
21479
21480 switch (code)
21481 {
21482 case LE: or1 = LT; or2 = EQ; break;
21483 case GE: or1 = GT; or2 = EQ; break;
21484 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21485 case LTGT: or1 = LT; or2 = GT; break;
21486 case UNGT: or1 = UNORDERED; or2 = GT; break;
21487 case UNLT: or1 = UNORDERED; or2 = LT; break;
21488 default: gcc_unreachable ();
21489 }
21490 validate_condition_mode (or1, comp_mode);
21491 validate_condition_mode (or2, comp_mode);
21492 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21493 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21494 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21495 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21496 const_true_rtx);
21497 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21498
21499 compare_result = or_result;
21500 code = EQ;
21501 }
21502
21503 validate_condition_mode (code, GET_MODE (compare_result));
21504
21505 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21506 }
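
/* Standalone sketch (not part of this file's build) of the check_nan
   path above for the soft-float IEEE 128-bit compares: the library
   raises exceptions for signalling NaNs, so UNGE and friends first
   test isunordered and only run the ordered compare on NaN-free
   inputs.  Shown here with double for brevity.  */
#if 0
#include <assert.h>
#include <math.h>

static int
soft_unge (double a, double b)
{
  if (isunordered (a, b))	/* a NaN is involved: UNGE is true */
    return 1;
  return a >= b;		/* no NaNs reach the ordered compare */
}

int
main (void)
{
  assert (soft_unge (1.0, 2.0) == 0);
  assert (soft_unge (2.0, 1.0) == 1);
  assert (soft_unge (NAN, 1.0) == 1);
  return 0;
}
#endif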
21507
21508 \f
21509 /* Return the diagnostic message string if the binary operation OP is
21510 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21511
21512 static const char*
21513 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21514 const_tree type1,
21515 const_tree type2)
21516 {
21517 machine_mode mode1 = TYPE_MODE (type1);
21518 machine_mode mode2 = TYPE_MODE (type2);
21519
21520 /* For complex modes, use the inner type. */
21521 if (COMPLEX_MODE_P (mode1))
21522 mode1 = GET_MODE_INNER (mode1);
21523
21524 if (COMPLEX_MODE_P (mode2))
21525 mode2 = GET_MODE_INNER (mode2);
21526
21527 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21528 double to intermix unless -mfloat128-convert. */
21529 if (mode1 == mode2)
21530 return NULL;
21531
21532 if (!TARGET_FLOAT128_CVT)
21533 {
21534 if ((mode1 == KFmode && mode2 == IFmode)
21535 || (mode1 == IFmode && mode2 == KFmode))
21536 return N_("__float128 and __ibm128 cannot be used in the same "
21537 "expression");
21538
21539 if (TARGET_IEEEQUAD
21540 && ((mode1 == IFmode && mode2 == TFmode)
21541 || (mode1 == TFmode && mode2 == IFmode)))
21542 return N_("__ibm128 and long double cannot be used in the same "
21543 "expression");
21544
21545 if (!TARGET_IEEEQUAD
21546 && ((mode1 == KFmode && mode2 == TFmode)
21547 || (mode1 == TFmode && mode2 == KFmode)))
21548 return N_("__float128 and long double cannot be used in the same "
21549 "expression");
21550 }
21551
21552 return NULL;
21553 }
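
/* Illustrative (hypothetical) user code that the check above rejects
   when -mfloat128-convert is not given:

	__float128 a;
	__ibm128   b;
	... a + b ...	// error: __float128 and __ibm128 cannot be
			// used in the same expression
*/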
21554
21555 \f
21556 /* Expand floating point conversion to/from __float128 and __ibm128. */
21557
21558 void
21559 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21560 {
21561 machine_mode dest_mode = GET_MODE (dest);
21562 machine_mode src_mode = GET_MODE (src);
21563 convert_optab cvt = unknown_optab;
21564 bool do_move = false;
21565 rtx libfunc = NULL_RTX;
21566 rtx dest2;
21567 typedef rtx (*rtx_2func_t) (rtx, rtx);
21568 rtx_2func_t hw_convert = (rtx_2func_t)0;
21569 size_t kf_or_tf;
21570
21571 struct hw_conv_t {
21572 rtx_2func_t from_df;
21573 rtx_2func_t from_sf;
21574 rtx_2func_t from_si_sign;
21575 rtx_2func_t from_si_uns;
21576 rtx_2func_t from_di_sign;
21577 rtx_2func_t from_di_uns;
21578 rtx_2func_t to_df;
21579 rtx_2func_t to_sf;
21580 rtx_2func_t to_si_sign;
21581 rtx_2func_t to_si_uns;
21582 rtx_2func_t to_di_sign;
21583 rtx_2func_t to_di_uns;
21584 } hw_conversions[2] = {
21585     /* conversions to/from KFmode */
21586 {
21587 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21588 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21589 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21590 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21591 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21592 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21593 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21594 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21595 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21596 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21597 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21598 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21599 },
21600
21601     /* conversions to/from TFmode */
21602 {
21603 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21604 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21605 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21606 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21607 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21608 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21609 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21610 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21611 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21612 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21613 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21614 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21615 },
21616 };
21617
21618 if (dest_mode == src_mode)
21619 gcc_unreachable ();
21620
21621 /* Eliminate memory operations. */
21622 if (MEM_P (src))
21623 src = force_reg (src_mode, src);
21624
21625 if (MEM_P (dest))
21626 {
21627 rtx tmp = gen_reg_rtx (dest_mode);
21628 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21629 rs6000_emit_move (dest, tmp, dest_mode);
21630 return;
21631 }
21632
21633 /* Convert to IEEE 128-bit floating point. */
21634 if (FLOAT128_IEEE_P (dest_mode))
21635 {
21636 if (dest_mode == KFmode)
21637 kf_or_tf = 0;
21638 else if (dest_mode == TFmode)
21639 kf_or_tf = 1;
21640 else
21641 gcc_unreachable ();
21642
21643 switch (src_mode)
21644 {
21645 case E_DFmode:
21646 cvt = sext_optab;
21647 hw_convert = hw_conversions[kf_or_tf].from_df;
21648 break;
21649
21650 case E_SFmode:
21651 cvt = sext_optab;
21652 hw_convert = hw_conversions[kf_or_tf].from_sf;
21653 break;
21654
21655 case E_KFmode:
21656 case E_IFmode:
21657 case E_TFmode:
21658 if (FLOAT128_IBM_P (src_mode))
21659 cvt = sext_optab;
21660 else
21661 do_move = true;
21662 break;
21663
21664 case E_SImode:
21665 if (unsigned_p)
21666 {
21667 cvt = ufloat_optab;
21668 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21669 }
21670 else
21671 {
21672 cvt = sfloat_optab;
21673 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21674 }
21675 break;
21676
21677 case E_DImode:
21678 if (unsigned_p)
21679 {
21680 cvt = ufloat_optab;
21681 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21682 }
21683 else
21684 {
21685 cvt = sfloat_optab;
21686 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21687 }
21688 break;
21689
21690 default:
21691 gcc_unreachable ();
21692 }
21693 }
21694
21695 /* Convert from IEEE 128-bit floating point. */
21696 else if (FLOAT128_IEEE_P (src_mode))
21697 {
21698 if (src_mode == KFmode)
21699 kf_or_tf = 0;
21700 else if (src_mode == TFmode)
21701 kf_or_tf = 1;
21702 else
21703 gcc_unreachable ();
21704
21705 switch (dest_mode)
21706 {
21707 case E_DFmode:
21708 cvt = trunc_optab;
21709 hw_convert = hw_conversions[kf_or_tf].to_df;
21710 break;
21711
21712 case E_SFmode:
21713 cvt = trunc_optab;
21714 hw_convert = hw_conversions[kf_or_tf].to_sf;
21715 break;
21716
21717 case E_KFmode:
21718 case E_IFmode:
21719 case E_TFmode:
21720 if (FLOAT128_IBM_P (dest_mode))
21721 cvt = trunc_optab;
21722 else
21723 do_move = true;
21724 break;
21725
21726 case E_SImode:
21727 if (unsigned_p)
21728 {
21729 cvt = ufix_optab;
21730 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
21731 }
21732 else
21733 {
21734 cvt = sfix_optab;
21735 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
21736 }
21737 break;
21738
21739 case E_DImode:
21740 if (unsigned_p)
21741 {
21742 cvt = ufix_optab;
21743 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
21744 }
21745 else
21746 {
21747 cvt = sfix_optab;
21748 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
21749 }
21750 break;
21751
21752 default:
21753 gcc_unreachable ();
21754 }
21755 }
21756
21757 /* Both IBM format. */
21758 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
21759 do_move = true;
21760
21761 else
21762 gcc_unreachable ();
21763
21764 /* Handle conversion between TFmode/KFmode/IFmode. */
21765 if (do_move)
21766 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
21767
21768 /* Handle conversion if we have hardware support. */
21769 else if (TARGET_FLOAT128_HW && hw_convert)
21770 emit_insn ((hw_convert) (dest, src));
21771
21772 /* Call an external function to do the conversion. */
21773 else if (cvt != unknown_optab)
21774 {
21775 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
21776 gcc_assert (libfunc != NULL_RTX);
21777
21778 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
21779 src, src_mode);
21780
21781 gcc_assert (dest2 != NULL_RTX);
21782 if (!rtx_equal_p (dest, dest2))
21783 emit_move_insn (dest, dest2);
21784 }
21785
21786 else
21787 gcc_unreachable ();
21788
21789 return;
21790 }
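
/* Standalone sketch (not part of this file's build) of the dispatch
   pattern above: one row of conversion helpers per IEEE 128-bit mode,
   indexed by a kf_or_tf-style row number, so a single code path serves
   both KFmode and TFmode.  Names are made up for illustration.  */
#if 0
#include <stdio.h>

typedef void (*cvt_fn) (void);

static void kf_from_df (void) { puts ("KFmode <- DFmode"); }
static void tf_from_df (void) { puts ("TFmode <- DFmode"); }

static const cvt_fn from_df[2] = { kf_from_df, tf_from_df };

int
main (void)
{
  int kf_or_tf = 1;		/* 0 = KFmode row, 1 = TFmode row */
  from_df[kf_or_tf] ();		/* prints "TFmode <- DFmode" */
  return 0;
}
#endif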
21791
21792 \f
21793 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
21794 can be used as that dest register. Return the dest register. */
21795
21796 rtx
21797 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
21798 {
21799 if (op2 == const0_rtx)
21800 return op1;
21801
21802 if (GET_CODE (scratch) == SCRATCH)
21803 scratch = gen_reg_rtx (mode);
21804
21805 if (logical_operand (op2, mode))
21806 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
21807 else
21808 emit_insn (gen_rtx_SET (scratch,
21809 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
21810
21811 return scratch;
21812 }
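
/* Standalone sketch (not part of this file's build) of the identity
   rs6000_emit_eqne relies on: x and y compare equal exactly when
   x ^ y (or x - y) is zero, so an EQ/NE against a non-zero operand
   reduces to a compare against zero.  */
#if 0
#include <assert.h>

int
main (void)
{
  long x = 42, y = 42, z = -7;
  assert (((x ^ y) == 0) == (x == y));	/* logical_operand case */
  assert (((x - z) == 0) == (x == z));	/* subtract case */
  return 0;
}
#endif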
21813
21814 void
21815 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
21816 {
21817 rtx condition_rtx;
21818 machine_mode op_mode;
21819 enum rtx_code cond_code;
21820 rtx result = operands[0];
21821
21822 condition_rtx = rs6000_generate_compare (operands[1], mode);
21823 cond_code = GET_CODE (condition_rtx);
21824
21825 if (cond_code == NE
21826 || cond_code == GE || cond_code == LE
21827 || cond_code == GEU || cond_code == LEU
21828 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
21829 {
21830 rtx not_result = gen_reg_rtx (CCEQmode);
21831 rtx not_op, rev_cond_rtx;
21832 machine_mode cc_mode;
21833
21834 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
21835
21836 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
21837 SImode, XEXP (condition_rtx, 0), const0_rtx);
21838 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
21839 emit_insn (gen_rtx_SET (not_result, not_op));
21840 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
21841 }
21842
21843 op_mode = GET_MODE (XEXP (operands[1], 0));
21844 if (op_mode == VOIDmode)
21845 op_mode = GET_MODE (XEXP (operands[1], 1));
21846
21847 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
21848 {
21849 PUT_MODE (condition_rtx, DImode);
21850 convert_move (result, condition_rtx, 0);
21851 }
21852 else
21853 {
21854 PUT_MODE (condition_rtx, SImode);
21855 emit_insn (gen_rtx_SET (result, condition_rtx));
21856 }
21857 }
21858
21859 /* Emit a conditional branch testing the comparison OPERANDS[0]; the branch target is the label in OPERANDS[3].  */
21860
21861 void
21862 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
21863 {
21864 rtx condition_rtx, loc_ref;
21865
21866 condition_rtx = rs6000_generate_compare (operands[0], mode);
21867 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
21868 emit_jump_insn (gen_rtx_SET (pc_rtx,
21869 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
21870 loc_ref, pc_rtx)));
21871 }
21872
21873 /* Return the string to output a conditional branch to LABEL, which is
21874 the operand template of the label, or NULL if the branch is really a
21875 conditional return.
21876
21877 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
21878 condition code register and its mode specifies what kind of
21879 comparison we made.
21880
21881 REVERSED is nonzero if we should reverse the sense of the comparison.
21882
21883 INSN is the insn. */
21884
21885 char *
21886 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
21887 {
21888 static char string[64];
21889 enum rtx_code code = GET_CODE (op);
21890 rtx cc_reg = XEXP (op, 0);
21891 machine_mode mode = GET_MODE (cc_reg);
21892 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
21893 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
21894 int really_reversed = reversed ^ need_longbranch;
21895 char *s = string;
21896 const char *ccode;
21897 const char *pred;
21898 rtx note;
21899
21900 validate_condition_mode (code, mode);
21901
21902 /* Work out which way this really branches. We could use
21903 reverse_condition_maybe_unordered here always but this
21904 makes the resulting assembler clearer. */
21905 if (really_reversed)
21906 {
21907       /* Reversal of FP compares needs care -- an ordered compare
21908 becomes an unordered compare and vice versa. */
21909 if (mode == CCFPmode)
21910 code = reverse_condition_maybe_unordered (code);
21911 else
21912 code = reverse_condition (code);
21913 }
21914
21915 switch (code)
21916 {
21917 /* Not all of these are actually distinct opcodes, but
21918 we distinguish them for clarity of the resulting assembler. */
21919 case NE: case LTGT:
21920 ccode = "ne"; break;
21921 case EQ: case UNEQ:
21922 ccode = "eq"; break;
21923 case GE: case GEU:
21924 ccode = "ge"; break;
21925 case GT: case GTU: case UNGT:
21926 ccode = "gt"; break;
21927 case LE: case LEU:
21928 ccode = "le"; break;
21929 case LT: case LTU: case UNLT:
21930 ccode = "lt"; break;
21931 case UNORDERED: ccode = "un"; break;
21932 case ORDERED: ccode = "nu"; break;
21933 case UNGE: ccode = "nl"; break;
21934 case UNLE: ccode = "ng"; break;
21935 default:
21936 gcc_unreachable ();
21937 }
21938
21939 /* Maybe we have a guess as to how likely the branch is. */
21940 pred = "";
21941 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
21942 if (note != NULL_RTX)
21943 {
21944 /* PROB is the difference from 50%. */
21945 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
21946 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
21947
21948 /* Only hint for highly probable/improbable branches on newer cpus when
21949 we have real profile data, as static prediction overrides processor
21950 dynamic prediction. For older cpus we may as well always hint, but
21951 assume not taken for branches that are very close to 50% as a
21952 mispredicted taken branch is more expensive than a
21953 mispredicted not-taken branch. */
21954 if (rs6000_always_hint
21955 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
21956 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
21957 && br_prob_note_reliable_p (note)))
21958 {
21959 if (abs (prob) > REG_BR_PROB_BASE / 20
21960 && ((prob > 0) ^ need_longbranch))
21961 pred = "+";
21962 else
21963 pred = "-";
21964 }
21965 }
21966
21967 if (label == NULL)
21968 s += sprintf (s, "b%slr%s ", ccode, pred);
21969 else
21970 s += sprintf (s, "b%s%s ", ccode, pred);
21971
21972 /* We need to escape any '%' characters in the reg_names string.
21973 Assume they'd only be the first character.... */
21974 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
21975 *s++ = '%';
21976 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
21977
21978 if (label != NULL)
21979 {
21980 /* If the branch distance was too far, we may have to use an
21981 unconditional branch to go the distance. */
21982 if (need_longbranch)
21983 s += sprintf (s, ",$+8\n\tb %s", label);
21984 else
21985 s += sprintf (s, ",%s", label);
21986 }
21987
21988 return string;
21989 }
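
/* Standalone sketch (not part of this file's build) of the hint
   selection above, simplified to drop the rs6000_always_hint and
   profile-reliability checks: PROB is the branch probability minus
   50% on the REG_BR_PROB_BASE scale, '+' predicts taken, '-' not
   taken, and a long branch flips the sense because the condition is
   reversed around the unconditional 'b'.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define REG_BR_PROB_BASE 10000

static const char *
branch_hint (int prob, int need_longbranch)
{
  if (abs (prob) <= REG_BR_PROB_BASE / 100 * 48)
    return "";			/* too close to 50%: no hint */
  if (abs (prob) > REG_BR_PROB_BASE / 20 && ((prob > 0) ^ need_longbranch))
    return "+";
  return "-";
}

int
main (void)
{
  printf ("%s\n", branch_hint (4900, 0));	/* strongly taken: "+" */
  printf ("%s\n", branch_hint (-4900, 0));	/* strongly not taken: "-" */
  printf ("%s\n", branch_hint (100, 0));	/* near 50%: no hint */
  return 0;
}
#endif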
21990
21991 /* Return insn for VSX or Altivec comparisons. */
21992
21993 static rtx
21994 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
21995 {
21996 rtx mask;
21997 machine_mode mode = GET_MODE (op0);
21998
21999 switch (code)
22000 {
22001 default:
22002 break;
22003
22004 case GE:
22005 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22006 return NULL_RTX;
22007 /* FALLTHRU */
22008
22009 case EQ:
22010 case GT:
22011 case GTU:
22012 case ORDERED:
22013 case UNORDERED:
22014 case UNEQ:
22015 case LTGT:
22016 mask = gen_reg_rtx (mode);
22017 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22018 return mask;
22019 }
22020
22021 return NULL_RTX;
22022 }
22023
22024 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22025 DMODE is expected destination mode. This is a recursive function. */
22026
22027 static rtx
22028 rs6000_emit_vector_compare (enum rtx_code rcode,
22029 rtx op0, rtx op1,
22030 machine_mode dmode)
22031 {
22032 rtx mask;
22033 bool swap_operands = false;
22034 bool try_again = false;
22035
22036 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22037 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22038
22039 /* See if the comparison works as is. */
22040 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22041 if (mask)
22042 return mask;
22043
22044 switch (rcode)
22045 {
22046 case LT:
22047 rcode = GT;
22048 swap_operands = true;
22049 try_again = true;
22050 break;
22051 case LTU:
22052 rcode = GTU;
22053 swap_operands = true;
22054 try_again = true;
22055 break;
22056 case NE:
22057 case UNLE:
22058 case UNLT:
22059 case UNGE:
22060 case UNGT:
22061 /* Invert condition and try again.
22062 e.g., A != B becomes ~(A==B). */
22063 {
22064 enum rtx_code rev_code;
22065 enum insn_code nor_code;
22066 rtx mask2;
22067
22068 rev_code = reverse_condition_maybe_unordered (rcode);
22069 if (rev_code == UNKNOWN)
22070 return NULL_RTX;
22071
22072 nor_code = optab_handler (one_cmpl_optab, dmode);
22073 if (nor_code == CODE_FOR_nothing)
22074 return NULL_RTX;
22075
22076 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22077 if (!mask2)
22078 return NULL_RTX;
22079
22080 mask = gen_reg_rtx (dmode);
22081 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22082 return mask;
22083 }
22084 break;
22085 case GE:
22086 case GEU:
22087 case LE:
22088 case LEU:
22089 /* Try GT/GTU/LT/LTU OR EQ */
22090 {
22091 rtx c_rtx, eq_rtx;
22092 enum insn_code ior_code;
22093 enum rtx_code new_code;
22094
22095 switch (rcode)
22096 {
22097 case GE:
22098 new_code = GT;
22099 break;
22100
22101 case GEU:
22102 new_code = GTU;
22103 break;
22104
22105 case LE:
22106 new_code = LT;
22107 break;
22108
22109 case LEU:
22110 new_code = LTU;
22111 break;
22112
22113 default:
22114 gcc_unreachable ();
22115 }
22116
22117 ior_code = optab_handler (ior_optab, dmode);
22118 if (ior_code == CODE_FOR_nothing)
22119 return NULL_RTX;
22120
22121 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22122 if (!c_rtx)
22123 return NULL_RTX;
22124
22125 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22126 if (!eq_rtx)
22127 return NULL_RTX;
22128
22129 mask = gen_reg_rtx (dmode);
22130 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22131 return mask;
22132 }
22133 break;
22134 default:
22135 return NULL_RTX;
22136 }
22137
22138 if (try_again)
22139 {
22140 if (swap_operands)
22141 std::swap (op0, op1);
22142
22143 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22144 if (mask)
22145 return mask;
22146 }
22147
22148 /* You only get two chances. */
22149 return NULL_RTX;
22150 }
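
/* Standalone sketch (not part of this file's build) of the fallbacks
   used above when the hardware only provides EQ/GT/GTU: LT is GT with
   the operands swapped, GE is GT OR-ed with EQ, and NE is the
   complement of EQ, all applied lane-wise to the -1/0 mask.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t lane_eq (int32_t a, int32_t b) { return a == b ? ~0u : 0; }
static uint32_t lane_gt (int32_t a, int32_t b) { return a > b ? ~0u : 0; }

int
main (void)
{
  int32_t a = 3, b = 5;
  assert ((lane_gt (a, b) | lane_eq (a, b)) == (a >= b ? ~0u : 0)); /* GE */
  assert ((uint32_t) ~lane_eq (a, b) == (a != b ? ~0u : 0));	    /* NE */
  assert (lane_gt (b, a) == (a < b ? ~0u : 0));	  /* LT via swapped GT */
  return 0;
}
#endif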
22151
22152 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22153 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22154 operands for the relation operation COND. */
22155
22156 int
22157 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22158 rtx cond, rtx cc_op0, rtx cc_op1)
22159 {
22160 machine_mode dest_mode = GET_MODE (dest);
22161 machine_mode mask_mode = GET_MODE (cc_op0);
22162 enum rtx_code rcode = GET_CODE (cond);
22163 machine_mode cc_mode = CCmode;
22164 rtx mask;
22165 rtx cond2;
22166 bool invert_move = false;
22167
22168 if (VECTOR_UNIT_NONE_P (dest_mode))
22169 return 0;
22170
22171 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22172 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22173
22174 switch (rcode)
22175 {
22176 /* Swap operands if we can; otherwise do the operation as
22177 specified and use a NOR to invert the test. */
22178 case NE:
22179 case UNLE:
22180 case UNLT:
22181 case UNGE:
22182 case UNGT:
22183 /* Invert condition and try again.
22184 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22185 invert_move = true;
22186 rcode = reverse_condition_maybe_unordered (rcode);
22187 if (rcode == UNKNOWN)
22188 return 0;
22189 break;
22190
22191 case GE:
22192 case LE:
22193 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22194 {
22195 /* Invert condition to avoid compound test. */
22196 invert_move = true;
22197 rcode = reverse_condition (rcode);
22198 }
22199 break;
22200
22201 case GTU:
22202 case GEU:
22203 case LTU:
22204 case LEU:
22205 /* Mark unsigned tests with CCUNSmode. */
22206 cc_mode = CCUNSmode;
22207
22208 /* Invert condition to avoid compound test if necessary. */
22209 if (rcode == GEU || rcode == LEU)
22210 {
22211 invert_move = true;
22212 rcode = reverse_condition (rcode);
22213 }
22214 break;
22215
22216 default:
22217 break;
22218 }
22219
22220 /* Get the vector mask for the given relational operations. */
22221 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22222
22223 if (!mask)
22224 return 0;
22225
22226 if (invert_move)
22227 std::swap (op_true, op_false);
22228
22229 /* The compare generates -1/0 for true/false in each element; exploit that when either arm is a constant vector. */
22230 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22231 && (GET_CODE (op_true) == CONST_VECTOR
22232 || GET_CODE (op_false) == CONST_VECTOR))
22233 {
22234 rtx constant_0 = CONST0_RTX (dest_mode);
22235 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22236
22237 if (op_true == constant_m1 && op_false == constant_0)
22238 {
22239 emit_move_insn (dest, mask);
22240 return 1;
22241 }
22242
22243 else if (op_true == constant_0 && op_false == constant_m1)
22244 {
22245 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22246 return 1;
22247 }
22248
22249 /* If we can't use the vector comparison directly, perhaps we can use
22250 the mask for the true or false fields, instead of loading up a
22251 constant. */
22252 if (op_true == constant_m1)
22253 op_true = mask;
22254
22255 if (op_false == constant_0)
22256 op_false = mask;
22257 }
22258
22259 if (!REG_P (op_true) && !SUBREG_P (op_true))
22260 op_true = force_reg (dest_mode, op_true);
22261
22262 if (!REG_P (op_false) && !SUBREG_P (op_false))
22263 op_false = force_reg (dest_mode, op_false);
22264
22265 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22266 CONST0_RTX (dest_mode));
22267 emit_insn (gen_rtx_SET (dest,
22268 gen_rtx_IF_THEN_ELSE (dest_mode,
22269 cond2,
22270 op_true,
22271 op_false)));
22272 return 1;
22273 }
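
/* Conceptually (a sketch), the if_then_else emitted above is matched by a
   vector select, relying on the mask elements being all-ones or all-zeros:

     dest[i] = mask[i] ? op_true[i] : op_false[i]
             = (mask[i] & op_true[i]) | (~mask[i] & op_false[i])

   which is exactly what the AltiVec vsel and VSX xxsel instructions
   compute bitwise.  */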
22274
22275 /* ISA 3.0 (power9) min/max subcase to emit an XSMAXCDP or XSMINCDP
22276 instruction for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the
22277 operands of the last comparison is nonzero/true, FALSE_COND if it is
22278 zero/false. Return 0 if the hardware has no such operation. */
22279
22280 static int
22281 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22282 {
22283 enum rtx_code code = GET_CODE (op);
22284 rtx op0 = XEXP (op, 0);
22285 rtx op1 = XEXP (op, 1);
22286 machine_mode compare_mode = GET_MODE (op0);
22287 machine_mode result_mode = GET_MODE (dest);
22288 bool max_p = false;
22289
22290 if (result_mode != compare_mode)
22291 return 0;
22292
22293 if (code == GE || code == GT)
22294 max_p = true;
22295 else if (code == LE || code == LT)
22296 max_p = false;
22297 else
22298 return 0;
22299
22300 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22301 ;
22302
22303 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22304 max_p = !max_p;
22305
22306 else
22307 return 0;
22308
22309 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22310 return 1;
22311 }
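
/* E.g. (an illustrative sketch): for DFmode operands, "x = (a >= b) ? a : b"
   is emitted as a single "xsmaxcdp x,a,b", while "x = (a >= b) ? b : a" is
   recognized as the swapped form and becomes "xsmincdp x,a,b".  */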
22312
22313 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22314 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22315 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22316 zero/false. Return 0 if the hardware has no such operation. */
22317
22318 static int
22319 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22320 {
22321 enum rtx_code code = GET_CODE (op);
22322 rtx op0 = XEXP (op, 0);
22323 rtx op1 = XEXP (op, 1);
22324 machine_mode result_mode = GET_MODE (dest);
22325 rtx compare_rtx;
22326 rtx cmove_rtx;
22327 rtx clobber_rtx;
22328
22329 if (!can_create_pseudo_p ())
22330 return 0;
22331
22332 switch (code)
22333 {
22334 case EQ:
22335 case GE:
22336 case GT:
22337 break;
22338
22339 case NE:
22340 case LT:
22341 case LE:
22342 code = swap_condition (code);
22343 std::swap (op0, op1);
22344 break;
22345
22346 default:
22347 return 0;
22348 }
22349
22350 /* Generate: [(parallel [(set (dest)
22351 (if_then_else (op (cmp1) (cmp2))
22352 (true)
22353 (false)))
22354 (clobber (scratch))])]. */
22355
22356 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22357 cmove_rtx = gen_rtx_SET (dest,
22358 gen_rtx_IF_THEN_ELSE (result_mode,
22359 compare_rtx,
22360 true_cond,
22361 false_cond));
22362
22363 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22364 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22365 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22366
22367 return 1;
22368 }
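
/* Roughly (a sketch, not verbatim output), "x = (a > b) ? c : d" with
   DFmode operands becomes a compare that produces an all-ones/all-zeros
   doubleword mask, followed by a select on that mask:

     xscmpgtdp vsT,a,b
     xxsel     x,d,c,vsT

   The V2DImode scratch clobbered above holds that intermediate mask.  */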
22369
22370 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22371 operands of the last comparison is nonzero/true, FALSE_COND if it
22372 is zero/false. Return 0 if the hardware has no such operation. */
22373
22374 int
22375 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22376 {
22377 enum rtx_code code = GET_CODE (op);
22378 rtx op0 = XEXP (op, 0);
22379 rtx op1 = XEXP (op, 1);
22380 machine_mode compare_mode = GET_MODE (op0);
22381 machine_mode result_mode = GET_MODE (dest);
22382 rtx temp;
22383 bool is_against_zero;
22384
22385 /* These modes should always match. */
22386 if (GET_MODE (op1) != compare_mode
22387 /* In the isel case however, we can use a compare immediate, so
22388 op1 may be a small constant. */
22389 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22390 return 0;
22391 if (GET_MODE (true_cond) != result_mode)
22392 return 0;
22393 if (GET_MODE (false_cond) != result_mode)
22394 return 0;
22395
22396 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22397 if (TARGET_P9_MINMAX
22398 && (compare_mode == SFmode || compare_mode == DFmode)
22399 && (result_mode == SFmode || result_mode == DFmode))
22400 {
22401 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22402 return 1;
22403
22404 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22405 return 1;
22406 }
22407
22408 /* Don't allow using floating point comparisons for integer results for
22409 now. */
22410 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22411 return 0;
22412
22413 /* First, work out if the hardware can do this at all, or
22414 if it's too slow.... */
22415 if (!FLOAT_MODE_P (compare_mode))
22416 {
22417 if (TARGET_ISEL)
22418 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22419 return 0;
22420 }
22421
22422 is_against_zero = op1 == CONST0_RTX (compare_mode);
22423
22424 /* A floating-point subtract might overflow, underflow, or produce
22425 an inexact result, thus changing the floating-point flags, so it
22426 can't be generated if we care about that. It's safe if one side
22427 of the construct is zero, since then no subtract will be
22428 generated. */
22429 if (SCALAR_FLOAT_MODE_P (compare_mode)
22430 && flag_trapping_math && ! is_against_zero)
22431 return 0;
22432
22433 /* Eliminate half of the comparisons by switching operands; this
22434 makes the remaining code simpler. */
22435 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22436 || code == LTGT || code == LT || code == UNLE)
22437 {
22438 code = reverse_condition_maybe_unordered (code);
22439 temp = true_cond;
22440 true_cond = false_cond;
22441 false_cond = temp;
22442 }
22443
22444 /* UNEQ and LTGT take four instructions for a comparison with zero,
22445 so it'll probably be faster to use a branch here too. */
22446 if (code == UNEQ && HONOR_NANS (compare_mode))
22447 return 0;
22448
22449 /* We're going to try to implement comparisons by performing
22450 a subtract, then comparing against zero. Unfortunately,
22451 Inf - Inf is NaN which is not zero, and so if we don't
22452 know that the operand is finite and the comparison
22453 would treat EQ differently from UNORDERED, we can't do it. */
22454 if (HONOR_INFINITIES (compare_mode)
22455 && code != GT && code != UNGE
22456 && (GET_CODE (op1) != CONST_DOUBLE
22457 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22458 /* Constructs of the form (a OP b ? a : b) are safe. */
22459 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22460 || (! rtx_equal_p (op0, true_cond)
22461 && ! rtx_equal_p (op1, true_cond))))
22462 return 0;
22463
22464 /* At this point we know we can use fsel. */
22465
22466 /* Reduce the comparison to a comparison against zero. */
22467 if (! is_against_zero)
22468 {
22469 temp = gen_reg_rtx (compare_mode);
22470 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22471 op0 = temp;
22472 op1 = CONST0_RTX (compare_mode);
22473 }
22474
22475 /* If we don't care about NaNs we can reduce some of the comparisons
22476 down to faster ones. */
22477 if (! HONOR_NANS (compare_mode))
22478 switch (code)
22479 {
22480 case GT:
22481 code = LE;
22482 temp = true_cond;
22483 true_cond = false_cond;
22484 false_cond = temp;
22485 break;
22486 case UNGE:
22487 code = GE;
22488 break;
22489 case UNEQ:
22490 code = EQ;
22491 break;
22492 default:
22493 break;
22494 }
22495
22496 /* Now, reduce everything down to a GE. */
22497 switch (code)
22498 {
22499 case GE:
22500 break;
22501
22502 case LE:
22503 temp = gen_reg_rtx (compare_mode);
22504 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22505 op0 = temp;
22506 break;
22507
22508 case ORDERED:
22509 temp = gen_reg_rtx (compare_mode);
22510 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22511 op0 = temp;
22512 break;
22513
22514 case EQ:
22515 temp = gen_reg_rtx (compare_mode);
22516 emit_insn (gen_rtx_SET (temp,
22517 gen_rtx_NEG (compare_mode,
22518 gen_rtx_ABS (compare_mode, op0))));
22519 op0 = temp;
22520 break;
22521
22522 case UNGE:
22523 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22524 temp = gen_reg_rtx (result_mode);
22525 emit_insn (gen_rtx_SET (temp,
22526 gen_rtx_IF_THEN_ELSE (result_mode,
22527 gen_rtx_GE (VOIDmode,
22528 op0, op1),
22529 true_cond, false_cond)));
22530 false_cond = true_cond;
22531 true_cond = temp;
22532
22533 temp = gen_reg_rtx (compare_mode);
22534 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22535 op0 = temp;
22536 break;
22537
22538 case GT:
22539 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22540 temp = gen_reg_rtx (result_mode);
22541 emit_insn (gen_rtx_SET (temp,
22542 gen_rtx_IF_THEN_ELSE (result_mode,
22543 gen_rtx_GE (VOIDmode,
22544 op0, op1),
22545 true_cond, false_cond)));
22546 true_cond = false_cond;
22547 false_cond = temp;
22548
22549 temp = gen_reg_rtx (compare_mode);
22550 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22551 op0 = temp;
22552 break;
22553
22554 default:
22555 gcc_unreachable ();
22556 }
22557
22558 emit_insn (gen_rtx_SET (dest,
22559 gen_rtx_IF_THEN_ELSE (result_mode,
22560 gen_rtx_GE (VOIDmode,
22561 op0, op1),
22562 true_cond, false_cond)));
22563 return 1;
22564 }
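
/* All of the reductions above target the fsel idiom "fsel d,x,t,f",
   i.e. d = (x >= 0.0) ? t : f.  For example (a sketch, assuming a
   subtract is safe, e.g. with trapping math disabled):

     d = (a <= b) ? t : f   =>   fsub fT,b,a      # a <= b  <=>  b-a >= 0
                                 fsel d,fT,t,f

   and the EQ case compares -fabs(a-b) >= 0, which holds only when
   a == b.  */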
22565
22566 /* Same as above, but for ints (isel). */
22567
22568 int
22569 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22570 {
22571 rtx condition_rtx, cr;
22572 machine_mode mode = GET_MODE (dest);
22573 enum rtx_code cond_code;
22574 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22575 bool signedp;
22576
22577 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22578 return 0;
22579
22580 /* We still have to do the compare, because isel doesn't do a
22581 compare; it just looks at the CRx bits set by a previous compare
22582 instruction. */
22583 condition_rtx = rs6000_generate_compare (op, mode);
22584 cond_code = GET_CODE (condition_rtx);
22585 cr = XEXP (condition_rtx, 0);
22586 signedp = GET_MODE (cr) == CCmode;
22587
22588 isel_func = (mode == SImode
22589 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22590 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22591
22592 switch (cond_code)
22593 {
22594 case LT: case GT: case LTU: case GTU: case EQ:
22595 /* isel handles these directly. */
22596 break;
22597
22598 default:
22599 /* We need to swap the sense of the comparison. */
22600 {
22601 std::swap (false_cond, true_cond);
22602 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22603 }
22604 break;
22605 }
22606
22607 false_cond = force_reg (mode, false_cond);
22608 if (true_cond != const0_rtx)
22609 true_cond = force_reg (mode, true_cond);
22610
22611 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22612
22613 return 1;
22614 }
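
/* E.g. (a sketch): for SImode, "d = (a < b) ? t : f" becomes a compare
   setting a CR field followed by an integer select on one CR bit:

     cmpw cr0,a,b
     isel d,t,f,0       # CR0.LT set => d = t, else d = f

   isel can use r0 as a literal zero for its first data operand, which is
   why TRUE_COND is only forced into a register when it isn't const0_rtx.  */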
22615
22616 void
22617 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22618 {
22619 machine_mode mode = GET_MODE (op0);
22620 enum rtx_code c;
22621 rtx target;
22622
22623 /* VSX/altivec have direct min/max insns. */
22624 if ((code == SMAX || code == SMIN)
22625 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22626 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22627 {
22628 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22629 return;
22630 }
22631
22632 if (code == SMAX || code == SMIN)
22633 c = GE;
22634 else
22635 c = GEU;
22636
22637 if (code == SMAX || code == UMAX)
22638 target = emit_conditional_move (dest, c, op0, op1, mode,
22639 op0, op1, mode, 0);
22640 else
22641 target = emit_conditional_move (dest, c, op0, op1, mode,
22642 op1, op0, mode, 0);
22643 gcc_assert (target);
22644 if (target != dest)
22645 emit_move_insn (dest, target);
22646 }
22647
22648 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22649 COND is true. Mark the jump as unlikely to be taken. */
22650
22651 static void
22652 emit_unlikely_jump (rtx cond, rtx label)
22653 {
22654 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22655 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22656 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22657 }
22658
22659 /* A subroutine of the atomic operation splitters. Emit a load-locked
22660 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22661 the zero_extend operation. */
22662
22663 static void
22664 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22665 {
22666 rtx (*fn) (rtx, rtx) = NULL;
22667
22668 switch (mode)
22669 {
22670 case E_QImode:
22671 fn = gen_load_lockedqi;
22672 break;
22673 case E_HImode:
22674 fn = gen_load_lockedhi;
22675 break;
22676 case E_SImode:
22677 if (GET_MODE (mem) == QImode)
22678 fn = gen_load_lockedqi_si;
22679 else if (GET_MODE (mem) == HImode)
22680 fn = gen_load_lockedhi_si;
22681 else
22682 fn = gen_load_lockedsi;
22683 break;
22684 case E_DImode:
22685 fn = gen_load_lockeddi;
22686 break;
22687 case E_TImode:
22688 fn = gen_load_lockedti;
22689 break;
22690 default:
22691 gcc_unreachable ();
22692 }
22693 emit_insn (fn (reg, mem));
22694 }
22695
22696 /* A subroutine of the atomic operation splitters. Emit a store-conditional
22697 instruction in MODE. */
22698
22699 static void
22700 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
22701 {
22702 rtx (*fn) (rtx, rtx, rtx) = NULL;
22703
22704 switch (mode)
22705 {
22706 case E_QImode:
22707 fn = gen_store_conditionalqi;
22708 break;
22709 case E_HImode:
22710 fn = gen_store_conditionalhi;
22711 break;
22712 case E_SImode:
22713 fn = gen_store_conditionalsi;
22714 break;
22715 case E_DImode:
22716 fn = gen_store_conditionaldi;
22717 break;
22718 case E_TImode:
22719 fn = gen_store_conditionalti;
22720 break;
22721 default:
22722 gcc_unreachable ();
22723 }
22724
22725 /* Emit sync before stwcx. to address PPC405 Erratum. */
22726 if (PPC405_ERRATUM77)
22727 emit_insn (gen_hwsync ());
22728
22729 emit_insn (fn (res, mem, val));
22730 }
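
/* Together with emit_load_locked, this brackets the usual reservation
   loop.  A sketch of the SImode shape (not verbatim output):

     .Lretry:
       lwarx   rT,0,rMEM      # load word and set reservation
       ... update rT ...
       stwcx.  rT,0,rMEM      # store iff reservation still held
       bne-    .Lretry        # CR0.EQ clear => lost it, try again

   The PPC405 erratum workaround above additionally places a sync before
   the store conditional.  */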
22731
22732 /* Expand barriers before and after a load_locked/store_cond sequence. */
22733
22734 static rtx
22735 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
22736 {
22737 rtx addr = XEXP (mem, 0);
22738
22739 if (!legitimate_indirect_address_p (addr, reload_completed)
22740 && !legitimate_indexed_address_p (addr, reload_completed))
22741 {
22742 addr = force_reg (Pmode, addr);
22743 mem = replace_equiv_address_nv (mem, addr);
22744 }
22745
22746 switch (model)
22747 {
22748 case MEMMODEL_RELAXED:
22749 case MEMMODEL_CONSUME:
22750 case MEMMODEL_ACQUIRE:
22751 break;
22752 case MEMMODEL_RELEASE:
22753 case MEMMODEL_ACQ_REL:
22754 emit_insn (gen_lwsync ());
22755 break;
22756 case MEMMODEL_SEQ_CST:
22757 emit_insn (gen_hwsync ());
22758 break;
22759 default:
22760 gcc_unreachable ();
22761 }
22762 return mem;
22763 }
22764
22765 static void
22766 rs6000_post_atomic_barrier (enum memmodel model)
22767 {
22768 switch (model)
22769 {
22770 case MEMMODEL_RELAXED:
22771 case MEMMODEL_CONSUME:
22772 case MEMMODEL_RELEASE:
22773 break;
22774 case MEMMODEL_ACQUIRE:
22775 case MEMMODEL_ACQ_REL:
22776 case MEMMODEL_SEQ_CST:
22777 emit_insn (gen_isync ());
22778 break;
22779 default:
22780 gcc_unreachable ();
22781 }
22782 }
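
/* Taken together, the pre/post barrier routines implement the usual
   PowerPC mapping of the C11 memory models onto barriers:

     relaxed/consume: no barriers
     acquire:         isync after
     release:         lwsync before
     acq_rel:         lwsync before, isync after
     seq_cst:         hwsync (sync) before, isync after  */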
22783
22784 /* A subroutine of the various atomic expanders. For sub-word operations,
22785 we must adjust things to operate on SImode. Given the original MEM,
22786 return a new aligned memory. Also build and return the quantities by
22787 which to shift and mask. */
22788
22789 static rtx
22790 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
22791 {
22792 rtx addr, align, shift, mask, mem;
22793 HOST_WIDE_INT shift_mask;
22794 machine_mode mode = GET_MODE (orig_mem);
22795
22796 /* For smaller modes, we have to implement this via SImode. */
22797 shift_mask = (mode == QImode ? 0x18 : 0x10);
22798
22799 addr = XEXP (orig_mem, 0);
22800 addr = force_reg (GET_MODE (addr), addr);
22801
22802 /* Aligned memory containing subword. Generate a new memory. We
22803 do not want any of the existing MEM_ATTR data, as we're now
22804 accessing memory outside the original object. */
22805 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
22806 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22807 mem = gen_rtx_MEM (SImode, align);
22808 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
22809 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
22810 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
22811
22812 /* Shift amount for subword relative to aligned word. */
22813 shift = gen_reg_rtx (SImode);
22814 addr = gen_lowpart (SImode, addr);
22815 rtx tmp = gen_reg_rtx (SImode);
22816 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
22817 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
22818 if (BYTES_BIG_ENDIAN)
22819 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
22820 shift, 1, OPTAB_LIB_WIDEN);
22821 *pshift = shift;
22822
22823 /* Mask for insertion. */
22824 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
22825 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
22826 *pmask = mask;
22827
22828 return mem;
22829 }
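
/* A worked example, assuming a QImode access at address 0x1003: the
   aligned word is at 0x1000 (addr & -4), and the low address bits give
   an initial shift of (3 << 3) & 0x18 = 24.  On little-endian that is
   the final shift; on big-endian it is XORed with 0x18 to give 0,
   because byte 3 is the least significant byte of a big-endian word.
   The insertion mask is then 0xFF << shift.  */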
22830
22831 /* A subroutine of the various atomic expanders. For sub-word operands,
22832 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
22833
22834 static rtx
22835 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
22836 {
22837 rtx x;
22838
22839 x = gen_reg_rtx (SImode);
22840 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
22841 gen_rtx_NOT (SImode, mask),
22842 oldval)));
22843
22844 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
22845
22846 return x;
22847 }
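
/* That is, the value returned is (OLDVAL & ~MASK) | NEWVAL, on the
   assumption (arranged by the callers above) that NEWVAL has already
   been shifted into position and has no bits set outside MASK.  */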
22848
22849 /* A subroutine of the various atomic expanders. For sub-word operands,
22850 extract WIDE to NARROW via SHIFT. */
22851
22852 static void
22853 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
22854 {
22855 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
22856 wide, 1, OPTAB_LIB_WIDEN);
22857 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
22858 }
22859
22860 /* Expand an atomic compare and swap operation. */
22861
22862 void
22863 rs6000_expand_atomic_compare_and_swap (rtx operands[])
22864 {
22865 rtx boolval, retval, mem, oldval, newval, cond;
22866 rtx label1, label2, x, mask, shift;
22867 machine_mode mode, orig_mode;
22868 enum memmodel mod_s, mod_f;
22869 bool is_weak;
22870
22871 boolval = operands[0];
22872 retval = operands[1];
22873 mem = operands[2];
22874 oldval = operands[3];
22875 newval = operands[4];
22876 is_weak = (INTVAL (operands[5]) != 0);
22877 mod_s = memmodel_base (INTVAL (operands[6]));
22878 mod_f = memmodel_base (INTVAL (operands[7]));
22879 orig_mode = mode = GET_MODE (mem);
22880
22881 mask = shift = NULL_RTX;
22882 if (mode == QImode || mode == HImode)
22883 {
22884 /* Before power8, we didn't have access to lbarx/lharx, so generate
22885 lwarx and shift/mask operations. With power8, we need to do the
22886 comparison in SImode, but the store is still done in QI/HImode. */
22887 oldval = convert_modes (SImode, mode, oldval, 1);
22888
22889 if (!TARGET_SYNC_HI_QI)
22890 {
22891 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
22892
22893 /* Shift and mask OLDVAL into position within the word. */
22894 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
22895 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22896
22897 /* Shift and mask NEWVAL into position within the word. */
22898 newval = convert_modes (SImode, mode, newval, 1);
22899 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
22900 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22901 }
22902
22903 /* Prepare to adjust the return value. */
22904 retval = gen_reg_rtx (SImode);
22905 mode = SImode;
22906 }
22907 else if (reg_overlap_mentioned_p (retval, oldval))
22908 oldval = copy_to_reg (oldval);
22909
22910 if (mode != TImode && !reg_or_short_operand (oldval, mode))
22911 oldval = copy_to_mode_reg (mode, oldval);
22912
22913 if (reg_overlap_mentioned_p (retval, newval))
22914 newval = copy_to_reg (newval);
22915
22916 mem = rs6000_pre_atomic_barrier (mem, mod_s);
22917
22918 label1 = NULL_RTX;
22919 if (!is_weak)
22920 {
22921 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22922 emit_label (XEXP (label1, 0));
22923 }
22924 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
22925
22926 emit_load_locked (mode, retval, mem);
22927
22928 x = retval;
22929 if (mask)
22930 x = expand_simple_binop (SImode, AND, retval, mask,
22931 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22932
22933 cond = gen_reg_rtx (CCmode);
22934 /* If we have TImode, synthesize a comparison. */
22935 if (mode != TImode)
22936 x = gen_rtx_COMPARE (CCmode, x, oldval);
22937 else
22938 {
22939 rtx xor1_result = gen_reg_rtx (DImode);
22940 rtx xor2_result = gen_reg_rtx (DImode);
22941 rtx or_result = gen_reg_rtx (DImode);
22942 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
22943 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
22944 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
22945 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
22946
22947 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
22948 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
22949 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
22950 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
22951 }
22952
22953 emit_insn (gen_rtx_SET (cond, x));
22954
22955 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22956 emit_unlikely_jump (x, label2);
22957
22958 x = newval;
22959 if (mask)
22960 x = rs6000_mask_atomic_subword (retval, newval, mask);
22961
22962 emit_store_conditional (orig_mode, cond, mem, x);
22963
22964 if (!is_weak)
22965 {
22966 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
22967 emit_unlikely_jump (x, label1);
22968 }
22969
22970 if (!is_mm_relaxed (mod_f))
22971 emit_label (XEXP (label2, 0));
22972
22973 rs6000_post_atomic_barrier (mod_s);
22974
22975 if (is_mm_relaxed (mod_f))
22976 emit_label (XEXP (label2, 0));
22977
22978 if (shift)
22979 rs6000_finish_atomic_subword (operands[1], retval, shift);
22980 else if (mode != GET_MODE (operands[1]))
22981 convert_move (operands[1], retval, 1);
22982
22983 /* In all cases, CR0 contains EQ on success, and NE on failure. */
22984 x = gen_rtx_EQ (SImode, cond, const0_rtx);
22985 emit_insn (gen_rtx_SET (boolval, x));
22986 }
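
/* For a strong SImode compare-and-swap, the emitted shape is roughly
   (a sketch, not verbatim output):

     .L1:
       lwarx   rRET,0,rMEM        # load and reserve
       cmpw    cr0,rRET,rOLD
       bne-    cr0,.L2            # value differs: fail
       stwcx.  rNEW,0,rMEM        # try to store
       bne-    cr0,.L1            # lost reservation: retry
     .L2:

   with the model-dependent barriers around the loop, and with CR0.EQ
   providing the boolean success result.  */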
22987
22988 /* Expand an atomic exchange operation. */
22989
22990 void
22991 rs6000_expand_atomic_exchange (rtx operands[])
22992 {
22993 rtx retval, mem, val, cond;
22994 machine_mode mode;
22995 enum memmodel model;
22996 rtx label, x, mask, shift;
22997
22998 retval = operands[0];
22999 mem = operands[1];
23000 val = operands[2];
23001 model = memmodel_base (INTVAL (operands[3]));
23002 mode = GET_MODE (mem);
23003
23004 mask = shift = NULL_RTX;
23005 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23006 {
23007 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23008
23009 /* Shift and mask VAL into position within the word. */
23010 val = convert_modes (SImode, mode, val, 1);
23011 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23012 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23013
23014 /* Prepare to adjust the return value. */
23015 retval = gen_reg_rtx (SImode);
23016 mode = SImode;
23017 }
23018
23019 mem = rs6000_pre_atomic_barrier (mem, model);
23020
23021 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23022 emit_label (XEXP (label, 0));
23023
23024 emit_load_locked (mode, retval, mem);
23025
23026 x = val;
23027 if (mask)
23028 x = rs6000_mask_atomic_subword (retval, val, mask);
23029
23030 cond = gen_reg_rtx (CCmode);
23031 emit_store_conditional (mode, cond, mem, x);
23032
23033 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23034 emit_unlikely_jump (x, label);
23035
23036 rs6000_post_atomic_barrier (model);
23037
23038 if (shift)
23039 rs6000_finish_atomic_subword (operands[0], retval, shift);
23040 }
23041
23042 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23043 to perform. MEM is the memory on which to operate. VAL is the second
23044 operand of the binary operator. BEFORE and AFTER are optional locations to
23045 return the value of MEM either before or after the operation. MODEL_RTX
23046 is a CONST_INT containing the memory model to use. */
23047
23048 void
23049 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23050 rtx orig_before, rtx orig_after, rtx model_rtx)
23051 {
23052 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23053 machine_mode mode = GET_MODE (mem);
23054 machine_mode store_mode = mode;
23055 rtx label, x, cond, mask, shift;
23056 rtx before = orig_before, after = orig_after;
23057
23058 mask = shift = NULL_RTX;
23059 /* On power8, we want to use SImode for the operation. On earlier systems,
23060 do the operation on a full word and shift/mask to get the proper byte
23061 or halfword. */
23062 if (mode == QImode || mode == HImode)
23063 {
23064 if (TARGET_SYNC_HI_QI)
23065 {
23066 val = convert_modes (SImode, mode, val, 1);
23067
23068 /* Prepare to adjust the return value. */
23069 before = gen_reg_rtx (SImode);
23070 if (after)
23071 after = gen_reg_rtx (SImode);
23072 mode = SImode;
23073 }
23074 else
23075 {
23076 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23077
23078 /* Shift and mask VAL into position within the word. */
23079 val = convert_modes (SImode, mode, val, 1);
23080 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23081 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23082
23083 switch (code)
23084 {
23085 case IOR:
23086 case XOR:
23087 /* We've already zero-extended VAL. That is sufficient to
23088 make certain that it does not affect other bits. */
23089 mask = NULL;
23090 break;
23091
23092 case AND:
23093 /* If we make certain that all of the other bits in VAL are
23094 set, that will be sufficient to not affect other bits. */
23095 x = gen_rtx_NOT (SImode, mask);
23096 x = gen_rtx_IOR (SImode, x, val);
23097 emit_insn (gen_rtx_SET (val, x));
23098 mask = NULL;
23099 break;
23100
23101 case NOT:
23102 case PLUS:
23103 case MINUS:
23104 /* These will all affect bits outside the field and need
23105 adjustment via MASK within the loop. */
23106 break;
23107
23108 default:
23109 gcc_unreachable ();
23110 }
23111
23112 /* Prepare to adjust the return value. */
23113 before = gen_reg_rtx (SImode);
23114 if (after)
23115 after = gen_reg_rtx (SImode);
23116 store_mode = mode = SImode;
23117 }
23118 }
23119
23120 mem = rs6000_pre_atomic_barrier (mem, model);
23121
23122 label = gen_label_rtx ();
23123 emit_label (label);
23124 label = gen_rtx_LABEL_REF (VOIDmode, label);
23125
23126 if (before == NULL_RTX)
23127 before = gen_reg_rtx (mode);
23128
23129 emit_load_locked (mode, before, mem);
23130
23131 if (code == NOT)
23132 {
23133 x = expand_simple_binop (mode, AND, before, val,
23134 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23135 after = expand_simple_unop (mode, NOT, x, after, 1);
23136 }
23137 else
23138 {
23139 after = expand_simple_binop (mode, code, before, val,
23140 after, 1, OPTAB_LIB_WIDEN);
23141 }
23142
23143 x = after;
23144 if (mask)
23145 {
23146 x = expand_simple_binop (SImode, AND, after, mask,
23147 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23148 x = rs6000_mask_atomic_subword (before, x, mask);
23149 }
23150 else if (store_mode != mode)
23151 x = convert_modes (store_mode, mode, x, 1);
23152
23153 cond = gen_reg_rtx (CCmode);
23154 emit_store_conditional (store_mode, cond, mem, x);
23155
23156 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23157 emit_unlikely_jump (x, label);
23158
23159 rs6000_post_atomic_barrier (model);
23160
23161 if (shift)
23162 {
23163 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23164 then do the calculations in an SImode register. */
23165 if (orig_before)
23166 rs6000_finish_atomic_subword (orig_before, before, shift);
23167 if (orig_after)
23168 rs6000_finish_atomic_subword (orig_after, after, shift);
23169 }
23170 else if (store_mode != mode)
23171 {
23172 /* QImode/HImode on machines with lbarx/lharx where we do the native
23173 operation and then do the calculations in an SImode register. */
23174 if (orig_before)
23175 convert_move (orig_before, before, 1);
23176 if (orig_after)
23177 convert_move (orig_after, after, 1);
23178 }
23179 else if (orig_after && after != orig_after)
23180 emit_move_insn (orig_after, after);
23181 }
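
/* An example of the AND shortcut above (illustrative only): for a QImode
   atomic AND whose byte sits at shift 8, MASK is 0x0000ff00.  After
   "val |= ~mask", every bit of VAL outside that byte is 1, so the
   full-word AND inside the loop leaves the neighbouring bytes unchanged
   and no masked merge through rs6000_mask_atomic_subword is needed.
   PLUS and MINUS can carry across the field boundary, so they keep the
   merge.  */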
23182
23183 /* Emit instructions to move SRC to DST. Called by splitters for
23184 multi-register moves. It will emit at most one instruction for
23185 each register that is accessed; that is, it won't emit li/lis pairs
23186 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23187 register. */
23188
23189 void
23190 rs6000_split_multireg_move (rtx dst, rtx src)
23191 {
23192 /* The register number of the first register being moved. */
23193 int reg;
23194 /* The mode that is to be moved. */
23195 machine_mode mode;
23196 /* The mode that the move is being done in, and its size. */
23197 machine_mode reg_mode;
23198 int reg_mode_size;
23199 /* The number of registers that will be moved. */
23200 int nregs;
23201
23202 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23203 mode = GET_MODE (dst);
23204 nregs = hard_regno_nregs (reg, mode);
23205 if (FP_REGNO_P (reg))
23206 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23207 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23208 else if (ALTIVEC_REGNO_P (reg))
23209 reg_mode = V16QImode;
23210 else
23211 reg_mode = word_mode;
23212 reg_mode_size = GET_MODE_SIZE (reg_mode);
23213
23214 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23215
23216 /* TDmode residing in FP registers is special, since the ISA requires that
23217 the lower-numbered word of a register pair is always the most significant
23218 word, even in little-endian mode. This does not match the usual subreg
23219 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23220 the appropriate constituent registers "by hand" in little-endian mode.
23221
23222 Note we do not need to check for destructive overlap here since TDmode
23223 can only reside in even/odd register pairs. */
23224 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23225 {
23226 rtx p_src, p_dst;
23227 int i;
23228
23229 for (i = 0; i < nregs; i++)
23230 {
23231 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23232 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23233 else
23234 p_src = simplify_gen_subreg (reg_mode, src, mode,
23235 i * reg_mode_size);
23236
23237 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23238 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23239 else
23240 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23241 i * reg_mode_size);
23242
23243 emit_insn (gen_rtx_SET (p_dst, p_src));
23244 }
23245
23246 return;
23247 }
23248
23249 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23250 {
23251 /* Move register range backwards, if we might have destructive
23252 overlap. */
23253 int i;
23254 for (i = nregs - 1; i >= 0; i--)
23255 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23256 i * reg_mode_size),
23257 simplify_gen_subreg (reg_mode, src, mode,
23258 i * reg_mode_size)));
23259 }
23260 else
23261 {
23262 int i;
23263 int j = -1;
23264 bool used_update = false;
23265 rtx restore_basereg = NULL_RTX;
23266
23267 if (MEM_P (src) && INT_REGNO_P (reg))
23268 {
23269 rtx breg;
23270
23271 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23272 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23273 {
23274 rtx delta_rtx;
23275 breg = XEXP (XEXP (src, 0), 0);
23276 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23277 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23278 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23279 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23280 src = replace_equiv_address (src, breg);
23281 }
23282 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23283 {
23284 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23285 {
23286 rtx basereg = XEXP (XEXP (src, 0), 0);
23287 if (TARGET_UPDATE)
23288 {
23289 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23290 emit_insn (gen_rtx_SET (ndst,
23291 gen_rtx_MEM (reg_mode,
23292 XEXP (src, 0))));
23293 used_update = true;
23294 }
23295 else
23296 emit_insn (gen_rtx_SET (basereg,
23297 XEXP (XEXP (src, 0), 1)));
23298 src = replace_equiv_address (src, basereg);
23299 }
23300 else
23301 {
23302 rtx basereg = gen_rtx_REG (Pmode, reg);
23303 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23304 src = replace_equiv_address (src, basereg);
23305 }
23306 }
23307
23308 breg = XEXP (src, 0);
23309 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23310 breg = XEXP (breg, 0);
23311
23312 /* If the base register we are using to address memory is
23313 also a destination reg, then change that register last. */
23314 if (REG_P (breg)
23315 && REGNO (breg) >= REGNO (dst)
23316 && REGNO (breg) < REGNO (dst) + nregs)
23317 j = REGNO (breg) - REGNO (dst);
23318 }
23319 else if (MEM_P (dst) && INT_REGNO_P (reg))
23320 {
23321 rtx breg;
23322
23323 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23324 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23325 {
23326 rtx delta_rtx;
23327 breg = XEXP (XEXP (dst, 0), 0);
23328 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23329 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23330 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23331
23332 /* We have to update the breg before doing the store.
23333 Use store with update, if available. */
23334
23335 if (TARGET_UPDATE)
23336 {
23337 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23338 emit_insn (TARGET_32BIT
23339 ? (TARGET_POWERPC64
23340 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23341 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23342 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23343 used_update = true;
23344 }
23345 else
23346 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23347 dst = replace_equiv_address (dst, breg);
23348 }
23349 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23350 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23351 {
23352 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23353 {
23354 rtx basereg = XEXP (XEXP (dst, 0), 0);
23355 if (TARGET_UPDATE)
23356 {
23357 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23358 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23359 XEXP (dst, 0)),
23360 nsrc));
23361 used_update = true;
23362 }
23363 else
23364 emit_insn (gen_rtx_SET (basereg,
23365 XEXP (XEXP (dst, 0), 1)));
23366 dst = replace_equiv_address (dst, basereg);
23367 }
23368 else
23369 {
23370 rtx basereg = XEXP (XEXP (dst, 0), 0);
23371 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23372 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23373 && REG_P (basereg)
23374 && REG_P (offsetreg)
23375 && REGNO (basereg) != REGNO (offsetreg));
23376 if (REGNO (basereg) == 0)
23377 {
23378 rtx tmp = offsetreg;
23379 offsetreg = basereg;
23380 basereg = tmp;
23381 }
23382 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23383 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23384 dst = replace_equiv_address (dst, basereg);
23385 }
23386 }
23387 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23388 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23389 }
23390
23391 for (i = 0; i < nregs; i++)
23392 {
23393 /* Calculate index to next subword. */
23394 ++j;
23395 if (j == nregs)
23396 j = 0;
23397
23398 /* If the compiler already emitted the move of the first word via
23399 store with update, there is no need to do anything. */
23400 if (j == 0 && used_update)
23401 continue;
23402
23403 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23404 j * reg_mode_size),
23405 simplify_gen_subreg (reg_mode, src, mode,
23406 j * reg_mode_size)));
23407 }
23408 if (restore_basereg != NULL_RTX)
23409 emit_insn (restore_basereg);
23410 }
23411 }
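
/* E.g. (a sketch): splitting a TImode move from r5:r6 to r6:r7 on a
   64-bit target takes the backwards path above, copying r7 = r6 before
   r6 = r5, so the overlapping source register is read before it is
   clobbered.  */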
23412
23413 \f
23414 /* This page contains routines that are used to determine what the
23415 function prologue and epilogue code will do and write them out. */
23416
23417 /* Determine whether the REG is really used. */
23418
23419 static bool
23420 save_reg_p (int reg)
23421 {
23422 /* We need to mark the PIC offset register live under the same conditions
23423 as it is set up, otherwise it won't be saved before we clobber it. */
23424
23425 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23426 {
23427 /* When calling eh_return, we must return true for all the cases
23428 where conditional_register_usage marks the PIC offset reg
23429 call used. */
23430 if (TARGET_TOC && TARGET_MINIMAL_TOC
23431 && (crtl->calls_eh_return
23432 || df_regs_ever_live_p (reg)
23433 || !constant_pool_empty_p ()))
23434 return true;
23435
23436 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23437 && flag_pic)
23438 return true;
23439 }
23440
23441 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23442 }
23443
23444 /* Return the first fixed-point register that is required to be
23445 saved. 32 if none. */
23446
23447 int
23448 first_reg_to_save (void)
23449 {
23450 int first_reg;
23451
23452 /* Find lowest numbered live register. */
23453 for (first_reg = 13; first_reg <= 31; first_reg++)
23454 if (save_reg_p (first_reg))
23455 break;
23456
23457 #if TARGET_MACHO
23458 if (flag_pic
23459 && crtl->uses_pic_offset_table
23460 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
23461 return RS6000_PIC_OFFSET_TABLE_REGNUM;
23462 #endif
23463
23464 return first_reg;
23465 }
23466
23467 /* Similar, for FP regs. */
23468
23469 int
23470 first_fp_reg_to_save (void)
23471 {
23472 int first_reg;
23473
23474 /* Find lowest numbered live register. */
23475 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23476 if (save_reg_p (first_reg))
23477 break;
23478
23479 return first_reg;
23480 }
23481
23482 /* Similar, for AltiVec regs. */
23483
23484 static int
23485 first_altivec_reg_to_save (void)
23486 {
23487 int i;
23488
23489 /* Stack frame remains as is unless we are in AltiVec ABI. */
23490 if (! TARGET_ALTIVEC_ABI)
23491 return LAST_ALTIVEC_REGNO + 1;
23492
23493 /* On Darwin, the unwind routines are compiled without
23494 TARGET_ALTIVEC, and use save_world to save/restore the
23495 altivec registers when necessary. */
23496 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23497 && ! TARGET_ALTIVEC)
23498 return FIRST_ALTIVEC_REGNO + 20;
23499
23500 /* Find lowest numbered live register. */
23501 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23502 if (save_reg_p (i))
23503 break;
23504
23505 return i;
23506 }
23507
23508 /* Return a 32-bit mask of the AltiVec registers we need to set in
23509 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
23510 the 32-bit word is 0. */
23511
23512 static unsigned int
23513 compute_vrsave_mask (void)
23514 {
23515 unsigned int i, mask = 0;
23516
23517 /* On Darwin, the unwind routines are compiled without
23518 TARGET_ALTIVEC, and use save_world to save/restore the
23519 call-saved altivec registers when necessary. */
23520 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23521 && ! TARGET_ALTIVEC)
23522 mask |= 0xFFF;
23523
23524 /* First, find out if we use _any_ altivec registers. */
23525 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23526 if (df_regs_ever_live_p (i))
23527 mask |= ALTIVEC_REG_BIT (i);
23528
23529 if (mask == 0)
23530 return mask;
23531
23532 /* Next, remove the argument registers from the set. These must
23533 be in the VRSAVE mask set by the caller, so we don't need to add
23534 them in again. More importantly, the mask we compute here is
23535 used to generate CLOBBERs in the set_vrsave insn, and we do not
23536 wish the argument registers to die. */
23537 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23538 mask &= ~ALTIVEC_REG_BIT (i);
23539
23540 /* Similarly, remove the return value from the set. */
23541 {
23542 bool yes = false;
23543 diddle_return_value (is_altivec_return_reg, &yes);
23544 if (yes)
23545 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23546 }
23547
23548 return mask;
23549 }
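
/* For example, assuming only V20 and V21 are live and neither is an
   argument or return-value register, the resulting mask has bits 20 and
   21 set counting from the MSB, i.e. 0x00000c00.  */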
23550
23551 /* For a very restricted set of circumstances, we can cut down the
23552 size of prologues/epilogues by calling our own save/restore-the-world
23553 routines. */
23554
23555 static void
23556 compute_save_world_info (rs6000_stack_t *info)
23557 {
23558 info->world_save_p = 1;
23559 info->world_save_p
23560 = (WORLD_SAVE_P (info)
23561 && DEFAULT_ABI == ABI_DARWIN
23562 && !cfun->has_nonlocal_label
23563 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23564 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23565 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23566 && info->cr_save_p);
23567
23568 /* This will not work in conjunction with sibcalls. Make sure there
23569 are none. (This check is expensive, but seldom executed.) */
23570 if (WORLD_SAVE_P (info))
23571 {
23572 rtx_insn *insn;
23573 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23574 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23575 {
23576 info->world_save_p = 0;
23577 break;
23578 }
23579 }
23580
23581 if (WORLD_SAVE_P (info))
23582 {
23583 /* Even if we're not touching VRsave, make sure there's room on the
23584 stack for it, if it looks like we're calling SAVE_WORLD, which
23585 will attempt to save it. */
23586 info->vrsave_size = 4;
23587
23588 /* If we are going to save the world, we need to save the link register too. */
23589 info->lr_save_p = 1;
23590
23591 /* "Save" the VRsave register too if we're saving the world. */
23592 if (info->vrsave_mask == 0)
23593 info->vrsave_mask = compute_vrsave_mask ();
23594
23595 /* Because the Darwin register save/restore routines only handle
23596 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23597 check. */
23598 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23599 && (info->first_altivec_reg_save
23600 >= FIRST_SAVED_ALTIVEC_REGNO));
23601 }
23602
23603 return;
23604 }
23605
23606
23607 static void
23608 is_altivec_return_reg (rtx reg, void *xyes)
23609 {
23610 bool *yes = (bool *) xyes;
23611 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23612 *yes = true;
23613 }
23614
23615 \f
23616 /* Return whether REG is a global user reg or has been specified by
23617 -ffixed-REG. We should not restore these, and so cannot use
23618 lmw or out-of-line restore functions if there are any. We also
23619 can't save them (well, emit frame notes for them), because frame
23620 unwinding during exception handling will restore saved registers. */
23621
23622 static bool
23623 fixed_reg_p (int reg)
23624 {
23625 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23626 backend sets it, overriding anything the user might have given. */
23627 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23628 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23629 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23630 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23631 return false;
23632
23633 return fixed_regs[reg];
23634 }
23635
23636 /* Determine the strategy for saving/restoring registers. */
23637
23638 enum {
23639 SAVE_MULTIPLE = 0x1,
23640 SAVE_INLINE_GPRS = 0x2,
23641 SAVE_INLINE_FPRS = 0x4,
23642 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23643 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23644 SAVE_INLINE_VRS = 0x20,
23645 REST_MULTIPLE = 0x100,
23646 REST_INLINE_GPRS = 0x200,
23647 REST_INLINE_FPRS = 0x400,
23648 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23649 REST_INLINE_VRS = 0x1000
23650 };
23651
23652 static int
23653 rs6000_savres_strategy (rs6000_stack_t *info,
23654 bool using_static_chain_p)
23655 {
23656 int strategy = 0;
23657
23658 /* Select between in-line and out-of-line save and restore of regs.
23659 First, all the obvious cases where we don't use out-of-line. */
23660 if (crtl->calls_eh_return
23661 || cfun->machine->ra_need_lr)
23662 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23663 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23664 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23665
23666 if (info->first_gp_reg_save == 32)
23667 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23668
23669 if (info->first_fp_reg_save == 64)
23670 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23671
23672 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23673 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23674
23675 /* Define cutoff for using out-of-line functions to save registers. */
23676 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
23677 {
23678 if (!optimize_size)
23679 {
23680 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23681 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23682 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23683 }
23684 else
23685 {
23686 /* Prefer out-of-line restore if it will exit. */
23687 if (info->first_fp_reg_save > 61)
23688 strategy |= SAVE_INLINE_FPRS;
23689 if (info->first_gp_reg_save > 29)
23690 {
23691 if (info->first_fp_reg_save == 64)
23692 strategy |= SAVE_INLINE_GPRS;
23693 else
23694 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23695 }
23696 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
23697 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23698 }
23699 }
23700 else if (DEFAULT_ABI == ABI_DARWIN)
23701 {
23702 if (info->first_fp_reg_save > 60)
23703 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23704 if (info->first_gp_reg_save > 29)
23705 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23706 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23707 }
23708 else
23709 {
23710 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23711 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
23712 || info->first_fp_reg_save > 61)
23713 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23714 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23715 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23716 }
23717
23718 /* Don't bother to try to save things out-of-line if r11 is occupied
23719 by the static chain. It would require too much fiddling and the
23720 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
23721 pointer on Darwin, and AIX uses r1 or r12. */
23722 if (using_static_chain_p
23723 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
23724 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
23725 | SAVE_INLINE_GPRS
23726 | SAVE_INLINE_VRS);
23727
23728 /* Don't ever restore fixed regs. That means we can't use the
23729 out-of-line register restore functions if a fixed reg is in the
23730 range of regs restored. */
23731 if (!(strategy & REST_INLINE_FPRS))
23732 for (int i = info->first_fp_reg_save; i < 64; i++)
23733 if (fixed_regs[i])
23734 {
23735 strategy |= REST_INLINE_FPRS;
23736 break;
23737 }
23738
23739 /* We can only use the out-of-line routines to restore fprs if we've
23740 saved all the registers from first_fp_reg_save in the prologue.
23741 Otherwise, we risk loading garbage. Of course, if we have saved
23742 out-of-line then we know we haven't skipped any fprs. */
23743 if ((strategy & SAVE_INLINE_FPRS)
23744 && !(strategy & REST_INLINE_FPRS))
23745 for (int i = info->first_fp_reg_save; i < 64; i++)
23746 if (!save_reg_p (i))
23747 {
23748 strategy |= REST_INLINE_FPRS;
23749 break;
23750 }
23751
23752 /* Similarly, for altivec regs. */
23753 if (!(strategy & REST_INLINE_VRS))
23754 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23755 if (fixed_regs[i])
23756 {
23757 strategy |= REST_INLINE_VRS;
23758 break;
23759 }
23760
23761 if ((strategy & SAVE_INLINE_VRS)
23762 && !(strategy & REST_INLINE_VRS))
23763 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23764 if (!save_reg_p (i))
23765 {
23766 strategy |= REST_INLINE_VRS;
23767 break;
23768 }
23769
23770 /* info->lr_save_p isn't yet set if the only reason lr needs to be
23771 saved is an out-of-line save or restore. Set up the value for
23772 the next test (excluding out-of-line gprs). */
23773 bool lr_save_p = (info->lr_save_p
23774 || !(strategy & SAVE_INLINE_FPRS)
23775 || !(strategy & SAVE_INLINE_VRS)
23776 || !(strategy & REST_INLINE_FPRS)
23777 || !(strategy & REST_INLINE_VRS));
23778
23779 if (TARGET_MULTIPLE
23780 && !TARGET_POWERPC64
23781 && info->first_gp_reg_save < 31
23782 && !(flag_shrink_wrap
23783 && flag_shrink_wrap_separate
23784 && optimize_function_for_speed_p (cfun)))
23785 {
23786 int count = 0;
23787 for (int i = info->first_gp_reg_save; i < 32; i++)
23788 if (save_reg_p (i))
23789 count++;
23790
23791 if (count <= 1)
23792 /* Don't use store multiple if only one reg needs to be
23793 saved. This can occur for example when the ABI_V4 pic reg
23794 (r30) needs to be saved to make calls, but r31 is not
23795 used. */
23796 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23797 else
23798 {
23799 /* Prefer store multiple for saves over out-of-line
23800 routines, since the store-multiple instruction will
23801 always be smaller. */
23802 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
23803
23804 /* The situation is more complicated with load multiple.
23805 We'd prefer to use the out-of-line routines for restores,
23806 since the "exit" out-of-line routines can handle the
23807 restore of LR and the frame teardown. However, it doesn't
23808 make sense to use the out-of-line routine if that is the
23809 only reason we'd need to save LR, and we can't use the
23810 "exit" out-of-line gpr restore if we have saved some
23811 fprs; in those cases it is advantageous to use load
23812 multiple when available. */
23813 if (info->first_fp_reg_save != 64 || !lr_save_p)
23814 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
23815 }
23816 }
23817
23818 /* Using the "exit" out-of-line routine does not improve code size
23819 if using it would require lr to be saved and if only saving one
23820 or two gprs. */
23821 else if (!lr_save_p && info->first_gp_reg_save > 29)
23822 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23823
23824 /* Don't ever restore fixed regs. */
23825 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23826 for (int i = info->first_gp_reg_save; i < 32; i++)
23827 if (fixed_reg_p (i))
23828 {
23829 strategy |= REST_INLINE_GPRS;
23830 strategy &= ~REST_MULTIPLE;
23831 break;
23832 }
23833
23834 /* We can only use load multiple or the out-of-line routines to
23835 restore gprs if we've saved all the registers from
23836 first_gp_reg_save. Otherwise, we risk loading garbage.
23837 Of course, if we have saved out-of-line or used stmw then we know
23838 we haven't skipped any gprs. */
23839 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
23840 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23841 for (int i = info->first_gp_reg_save; i < 32; i++)
23842 if (!save_reg_p (i))
23843 {
23844 strategy |= REST_INLINE_GPRS;
23845 strategy &= ~REST_MULTIPLE;
23846 break;
23847 }
23848
23849 if (TARGET_ELF && TARGET_64BIT)
23850 {
23851 if (!(strategy & SAVE_INLINE_FPRS))
23852 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
23853 else if (!(strategy & SAVE_INLINE_GPRS)
23854 && info->first_fp_reg_save == 64)
23855 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
23856 }
23857 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
23858 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
23859
23860 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
23861 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
23862
23863 return strategy;
23864 }
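
/* As an illustrative example: on a 32-bit target with TARGET_MULTIPLE,
   a function saving r28..r31 satisfies the count > 1 test above, so it
   gets SAVE_MULTIPLE (a single stmw covering r28-r31), and when no FPRs
   are saved and LR is not otherwise needed it also gets REST_MULTIPLE
   (a single lmw); a function saving only r31 keeps the inline stw/lwz
   strategy instead.  */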
23865
23866 /* Calculate the stack information for the current function. This is
23867 complicated by having two separate calling sequences, the AIX calling
23868 sequence and the V.4 calling sequence.
23869
23870 AIX (and Darwin/Mac OS X) stack frames look like:
23871 32-bit 64-bit
23872 SP----> +---------------------------------------+
23873 | back chain to caller | 0 0
23874 +---------------------------------------+
23875 | saved CR | 4 8 (8-11)
23876 +---------------------------------------+
23877 | saved LR | 8 16
23878 +---------------------------------------+
23879 | reserved for compilers | 12 24
23880 +---------------------------------------+
23881 | reserved for binders | 16 32
23882 +---------------------------------------+
23883 | saved TOC pointer | 20 40
23884 +---------------------------------------+
23885 | Parameter save area (+padding*) (P) | 24 48
23886 +---------------------------------------+
23887 | Alloca space (A) | 24+P etc.
23888 +---------------------------------------+
23889 | Local variable space (L) | 24+P+A
23890 +---------------------------------------+
23891 | Float/int conversion temporary (X) | 24+P+A+L
23892 +---------------------------------------+
23893 | Save area for AltiVec registers (W) | 24+P+A+L+X
23894 +---------------------------------------+
23895 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
23896 +---------------------------------------+
23897 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
23898 +---------------------------------------+
23899 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
23900 +---------------------------------------+
23901 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
23902 +---------------------------------------+
23903 old SP->| back chain to caller's caller |
23904 +---------------------------------------+
23905
23906 * If the alloca area is present, the parameter save area is
23907 padded so that the former starts 16-byte aligned.
23908
23909 The required alignment for AIX configurations is two words (i.e., 8
23910 or 16 bytes).
23911
23912 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
23913
23914 SP----> +---------------------------------------+
23915 | Back chain to caller | 0
23916 +---------------------------------------+
23917 | Save area for CR | 8
23918 +---------------------------------------+
23919 | Saved LR | 16
23920 +---------------------------------------+
23921 | Saved TOC pointer | 24
23922 +---------------------------------------+
23923 | Parameter save area (+padding*) (P) | 32
23924 +---------------------------------------+
23925 | Alloca space (A) | 32+P
23926 +---------------------------------------+
23927 | Local variable space (L) | 32+P+A
23928 +---------------------------------------+
23929 | Save area for AltiVec registers (W) | 32+P+A+L
23930 +---------------------------------------+
23931 | AltiVec alignment padding (Y) | 32+P+A+L+W
23932 +---------------------------------------+
23933 | Save area for GP registers (G) | 32+P+A+L+W+Y
23934 +---------------------------------------+
23935 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
23936 +---------------------------------------+
23937 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
23938 +---------------------------------------+
23939
23940 * If the alloca area is present, the parameter save area is
23941 	 padded so that the alloca area starts 16-byte aligned.
23942
23943 V.4 stack frames look like:
23944
23945 SP----> +---------------------------------------+
23946 | back chain to caller | 0
23947 +---------------------------------------+
23948 | caller's saved LR | 4
23949 +---------------------------------------+
23950 | Parameter save area (+padding*) (P) | 8
23951 +---------------------------------------+
23952 | Alloca space (A) | 8+P
23953 +---------------------------------------+
23954 | Varargs save area (V) | 8+P+A
23955 +---------------------------------------+
23956 | Local variable space (L) | 8+P+A+V
23957 +---------------------------------------+
23958 | Float/int conversion temporary (X) | 8+P+A+V+L
23959 +---------------------------------------+
23960 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
23961 +---------------------------------------+
23962 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
23963 +---------------------------------------+
23964 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
23965 +---------------------------------------+
23966 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
23967 +---------------------------------------+
23968 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
23969 +---------------------------------------+
23970 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
23971 +---------------------------------------+
23972 old SP->| back chain to caller's caller |
23973 +---------------------------------------+
23974
23975 * If the alloca area is present and the required alignment is
23976 16 bytes, the parameter save area is padded so that the
23977 alloca area starts 16-byte aligned.
23978
23979 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
23980 given. (But note below and in sysv4.h that we require only 8 and
23981 	 may round up the size of our stack frame anyway.  The historical
23982 	 reason is that early versions of powerpc-linux didn't properly
23983 align the stack at program startup. A happy side-effect is that
23984 -mno-eabi libraries can be used with -meabi programs.)
23985
23986 The EABI configuration defaults to the V.4 layout. However,
23987 the stack alignment requirements may differ. If -mno-eabi is not
23988 given, the required stack alignment is 8 bytes; if -mno-eabi is
23989 given, the required alignment is 16 bytes. (But see V.4 comment
23990 above.) */
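
   /* Worked example (illustrative only, not from the sources): for a
      hypothetical 64-bit AIX/ELFv2 function saving r29-r31 and f30-f31,
      the computation below gives

	fp_size	       = 8 * (64 - 62)		  = 16
	gp_size	       = 8 * (32 - 29)		  = 24
	fp_save_offset = -fp_size		  = -16
	gp_save_offset = fp_save_offset - gp_size = -40

      i.e. the FPR save area sits immediately below the frame top and the
      GPR save area immediately below it, matching the diagrams above.  */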
23991
23992 #ifndef ABI_STACK_BOUNDARY
23993 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
23994 #endif
23995
23996 static rs6000_stack_t *
23997 rs6000_stack_info (void)
23998 {
23999   /* We should never be called for thunks; we are not set up for that.  */
24000 gcc_assert (!cfun->is_thunk);
24001
24002 rs6000_stack_t *info = &stack_info;
24003 int reg_size = TARGET_32BIT ? 4 : 8;
24004 int ehrd_size;
24005 int ehcr_size;
24006 int save_align;
24007 int first_gp;
24008 HOST_WIDE_INT non_fixed_size;
24009 bool using_static_chain_p;
24010
24011 if (reload_completed && info->reload_completed)
24012 return info;
24013
24014 memset (info, 0, sizeof (*info));
24015 info->reload_completed = reload_completed;
24016
24017 /* Select which calling sequence. */
24018 info->abi = DEFAULT_ABI;
24019
24020 /* Calculate which registers need to be saved & save area size. */
24021 info->first_gp_reg_save = first_reg_to_save ();
24022 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24023 even if it currently looks like we won't. Reload may need it to
24024 get at a constant; if so, it will have already created a constant
24025 pool entry for it. */
24026 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24027 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24028 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24029 && crtl->uses_const_pool
24030 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24031 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24032 else
24033 first_gp = info->first_gp_reg_save;
24034
24035 info->gp_size = reg_size * (32 - first_gp);
24036
24037 info->first_fp_reg_save = first_fp_reg_to_save ();
24038 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24039
24040 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24041 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24042 - info->first_altivec_reg_save);
24043
24044 /* Does this function call anything? */
24045 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24046
24047 /* Determine if we need to save the condition code registers. */
24048 if (save_reg_p (CR2_REGNO)
24049 || save_reg_p (CR3_REGNO)
24050 || save_reg_p (CR4_REGNO))
24051 {
24052 info->cr_save_p = 1;
24053 if (DEFAULT_ABI == ABI_V4)
24054 info->cr_size = reg_size;
24055 }
24056
24057 /* If the current function calls __builtin_eh_return, then we need
24058 to allocate stack space for registers that will hold data for
24059 the exception handler. */
24060 if (crtl->calls_eh_return)
24061 {
24062 unsigned int i;
24063 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24064 continue;
24065
24066 ehrd_size = i * UNITS_PER_WORD;
24067 }
24068 else
24069 ehrd_size = 0;
24070
24071 /* In the ELFv2 ABI, we also need to allocate space for separate
24072 CR field save areas if the function calls __builtin_eh_return. */
24073 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24074 {
24075 /* This hard-codes that we have three call-saved CR fields. */
24076 ehcr_size = 3 * reg_size;
24077 /* We do *not* use the regular CR save mechanism. */
24078 info->cr_save_p = 0;
24079 }
24080 else
24081 ehcr_size = 0;
24082
24083 /* Determine various sizes. */
24084 info->reg_size = reg_size;
24085 info->fixed_size = RS6000_SAVE_AREA;
24086 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24087 if (cfun->calls_alloca)
24088 info->parm_size =
24089 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24090 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24091 else
24092 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24093 TARGET_ALTIVEC ? 16 : 8);
24094 if (FRAME_GROWS_DOWNWARD)
24095 info->vars_size
24096 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24097 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24098 - (info->fixed_size + info->vars_size + info->parm_size);
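
  /* For example, RS6000_ALIGN rounds up to the given boundary:
     RS6000_ALIGN (40, 16) == 48, so the adjustment above adds only
     the padding needed to reach ABI stack alignment.  */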
24099
24100 if (TARGET_ALTIVEC_ABI)
24101 info->vrsave_mask = compute_vrsave_mask ();
24102
24103 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24104 info->vrsave_size = 4;
24105
24106 compute_save_world_info (info);
24107
24108 /* Calculate the offsets. */
24109 switch (DEFAULT_ABI)
24110 {
24111 case ABI_NONE:
24112 default:
24113 gcc_unreachable ();
24114
24115 case ABI_AIX:
24116 case ABI_ELFv2:
24117 case ABI_DARWIN:
24118 info->fp_save_offset = -info->fp_size;
24119 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24120
24121 if (TARGET_ALTIVEC_ABI)
24122 {
24123 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24124
24125 /* Align stack so vector save area is on a quadword boundary.
24126 The padding goes above the vectors. */
24127 if (info->altivec_size != 0)
24128 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24129
24130 info->altivec_save_offset = info->vrsave_save_offset
24131 - info->altivec_padding_size
24132 - info->altivec_size;
24133 gcc_assert (info->altivec_size == 0
24134 || info->altivec_save_offset % 16 == 0);
24135
24136 /* Adjust for AltiVec case. */
24137 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24138 }
24139 else
24140 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24141
24142 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24143 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24144       info->lr_save_offset = 2 * reg_size;
24145 break;
24146
24147 case ABI_V4:
24148 info->fp_save_offset = -info->fp_size;
24149 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24150 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24151
24152 if (TARGET_ALTIVEC_ABI)
24153 {
24154 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24155
24156 /* Align stack so vector save area is on a quadword boundary. */
24157 if (info->altivec_size != 0)
24158 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24159
24160 info->altivec_save_offset = info->vrsave_save_offset
24161 - info->altivec_padding_size
24162 - info->altivec_size;
24163
24164 /* Adjust for AltiVec case. */
24165 info->ehrd_offset = info->altivec_save_offset;
24166 }
24167 else
24168 info->ehrd_offset = info->cr_save_offset;
24169
24170 info->ehrd_offset -= ehrd_size;
24171 info->lr_save_offset = reg_size;
24172 }
24173
24174 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24175 info->save_size = RS6000_ALIGN (info->fp_size
24176 + info->gp_size
24177 + info->altivec_size
24178 + info->altivec_padding_size
24179 + ehrd_size
24180 + ehcr_size
24181 + info->cr_size
24182 + info->vrsave_size,
24183 save_align);
24184
24185 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24186
24187 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24188 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24189
24190 /* Determine if we need to save the link register. */
24191 if (info->calls_p
24192 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24193 && crtl->profile
24194 && !TARGET_PROFILE_KERNEL)
24195 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24196 #ifdef TARGET_RELOCATABLE
24197 || (DEFAULT_ABI == ABI_V4
24198 && (TARGET_RELOCATABLE || flag_pic > 1)
24199 && !constant_pool_empty_p ())
24200 #endif
24201 || rs6000_ra_ever_killed ())
24202 info->lr_save_p = 1;
24203
24204 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24205 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24206 && call_used_regs[STATIC_CHAIN_REGNUM]);
24207 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24208
24209 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24210 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24211 || !(info->savres_strategy & SAVE_INLINE_VRS)
24212 || !(info->savres_strategy & REST_INLINE_GPRS)
24213 || !(info->savres_strategy & REST_INLINE_FPRS)
24214 || !(info->savres_strategy & REST_INLINE_VRS))
24215 info->lr_save_p = 1;
24216
24217 if (info->lr_save_p)
24218 df_set_regs_ever_live (LR_REGNO, true);
24219
24220 /* Determine if we need to allocate any stack frame:
24221
24222 For AIX we need to push the stack if a frame pointer is needed
24223 (because the stack might be dynamically adjusted), if we are
24224 debugging, if we make calls, or if the sum of fp_save, gp_save,
24225 and local variables are more than the space needed to save all
24226 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24227 + 18*8 = 288 (GPR13 reserved).
24228
24229 For V.4 we don't have the stack cushion that AIX uses, but assume
24230 that the debugger can handle stackless frames. */
24231
24232 if (info->calls_p)
24233 info->push_p = 1;
24234
24235 else if (DEFAULT_ABI == ABI_V4)
24236 info->push_p = non_fixed_size != 0;
24237
24238 else if (frame_pointer_needed)
24239 info->push_p = 1;
24240
24241 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24242 info->push_p = 1;
24243
24244 else
24245 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24246
24247 return info;
24248 }
24249
24250 static void
24251 debug_stack_info (rs6000_stack_t *info)
24252 {
24253 const char *abi_string;
24254
24255 if (! info)
24256 info = rs6000_stack_info ();
24257
24258 fprintf (stderr, "\nStack information for function %s:\n",
24259 ((current_function_decl && DECL_NAME (current_function_decl))
24260 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24261 : "<unknown>"));
24262
24263 switch (info->abi)
24264 {
24265 default: abi_string = "Unknown"; break;
24266 case ABI_NONE: abi_string = "NONE"; break;
24267 case ABI_AIX: abi_string = "AIX"; break;
24268 case ABI_ELFv2: abi_string = "ELFv2"; break;
24269 case ABI_DARWIN: abi_string = "Darwin"; break;
24270 case ABI_V4: abi_string = "V.4"; break;
24271 }
24272
24273 fprintf (stderr, "\tABI = %5s\n", abi_string);
24274
24275 if (TARGET_ALTIVEC_ABI)
24276 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24277
24278 if (info->first_gp_reg_save != 32)
24279 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24280
24281 if (info->first_fp_reg_save != 64)
24282 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24283
24284 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24285 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24286 info->first_altivec_reg_save);
24287
24288 if (info->lr_save_p)
24289 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24290
24291 if (info->cr_save_p)
24292 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24293
24294 if (info->vrsave_mask)
24295 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24296
24297 if (info->push_p)
24298 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24299
24300 if (info->calls_p)
24301 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24302
24303 if (info->gp_size)
24304 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24305
24306 if (info->fp_size)
24307 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24308
24309 if (info->altivec_size)
24310 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24311 info->altivec_save_offset);
24312
24313 if (info->vrsave_size)
24314 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24315 info->vrsave_save_offset);
24316
24317 if (info->lr_save_p)
24318 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24319
24320 if (info->cr_save_p)
24321 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24322
24323 if (info->varargs_save_offset)
24324 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24325
24326 if (info->total_size)
24327 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24328 info->total_size);
24329
24330 if (info->vars_size)
24331 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24332 info->vars_size);
24333
24334 if (info->parm_size)
24335 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24336
24337 if (info->fixed_size)
24338 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24339
24340 if (info->gp_size)
24341 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24342
24343 if (info->fp_size)
24344 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24345
24346 if (info->altivec_size)
24347 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24348
24349 if (info->vrsave_size)
24350 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24351
24352 if (info->altivec_padding_size)
24353 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24354 info->altivec_padding_size);
24355
24356 if (info->cr_size)
24357 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24358
24359 if (info->save_size)
24360 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24361
24362 if (info->reg_size != 4)
24363 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24364
24365 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24366
24367 fprintf (stderr, "\n");
24368 }
24369
24370 rtx
24371 rs6000_return_addr (int count, rtx frame)
24372 {
24373 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24374 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24375 if (count != 0
24376 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24377 {
24378 cfun->machine->ra_needs_full_frame = 1;
24379
24380 if (count == 0)
24381 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24382 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24383 frame = stack_pointer_rtx;
24384 rtx prev_frame_addr = memory_address (Pmode, frame);
24385 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24386 rtx lr_save_off = plus_constant (Pmode,
24387 prev_frame, RETURN_ADDRESS_OFFSET);
24388 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24389 return gen_rtx_MEM (Pmode, lr_save_addr);
24390 }
24391
24392 cfun->machine->ra_need_lr = 1;
24393 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24394 }
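
#if 0
/* Sketch (illustrative, not compiled): the count != 0 path above is
   roughly the following for a hypothetical frame address FRAME; it
   follows one back-chain link and then loads the saved LR from its
   slot in the caller's frame.  */
void *prev_frame = *(void **) frame;	/* back chain word at *FRAME */
void *ra = *(void **) ((char *) prev_frame + RETURN_ADDRESS_OFFSET);
#endif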
24395
24396 /* Say whether a function is a candidate for sibcall handling or not. */
24397
24398 static bool
24399 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24400 {
24401 tree fntype;
24402
24403 if (decl)
24404 fntype = TREE_TYPE (decl);
24405 else
24406 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24407
24408 /* We can't do it if the called function has more vector parameters
24409 than the current function; there's nowhere to put the VRsave code. */
24410 if (TARGET_ALTIVEC_ABI
24411 && TARGET_ALTIVEC_VRSAVE
24412 && !(decl && decl == current_function_decl))
24413 {
24414 function_args_iterator args_iter;
24415 tree type;
24416 int nvreg = 0;
24417
24418 /* Functions with vector parameters are required to have a
24419 prototype, so the argument type info must be available
24420 here. */
24421       FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24422 if (TREE_CODE (type) == VECTOR_TYPE
24423 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24424 nvreg++;
24425
24426       FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24427 if (TREE_CODE (type) == VECTOR_TYPE
24428 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24429 nvreg--;
24430
24431 if (nvreg > 0)
24432 return false;
24433 }
24434
24435 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24436      functions, because the callee may have a different TOC pointer from
24437      the caller's, and there's no way to ensure we restore the TOC when
24438 we return. With the secure-plt SYSV ABI we can't make non-local
24439 calls when -fpic/PIC because the plt call stubs use r30. */
24440 if (DEFAULT_ABI == ABI_DARWIN
24441 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24442 && decl
24443 && !DECL_EXTERNAL (decl)
24444 && !DECL_WEAK (decl)
24445 && (*targetm.binds_local_p) (decl))
24446 || (DEFAULT_ABI == ABI_V4
24447 && (!TARGET_SECURE_PLT
24448 || !flag_pic
24449 || (decl
24450 && (*targetm.binds_local_p) (decl)))))
24451 {
24452 tree attr_list = TYPE_ATTRIBUTES (fntype);
24453
24454 if (!lookup_attribute ("longcall", attr_list)
24455 || lookup_attribute ("shortcall", attr_list))
24456 return true;
24457 }
24458
24459 return false;
24460 }
24461
24462 static int
24463 rs6000_ra_ever_killed (void)
24464 {
24465 rtx_insn *top;
24466 rtx reg;
24467 rtx_insn *insn;
24468
24469 if (cfun->is_thunk)
24470 return 0;
24471
24472 if (cfun->machine->lr_save_state)
24473 return cfun->machine->lr_save_state - 1;
24474
24475 /* regs_ever_live has LR marked as used if any sibcalls are present,
24476 but this should not force saving and restoring in the
24477 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24478 clobbers LR, so that is inappropriate. */
24479
24480 /* Also, the prologue can generate a store into LR that
24481 doesn't really count, like this:
24482
24483 move LR->R0
24484 bcl to set PIC register
24485 move LR->R31
24486 move R0->LR
24487
24488 When we're called from the epilogue, we need to avoid counting
24489 this as a store. */
24490
24491 push_topmost_sequence ();
24492 top = get_insns ();
24493 pop_topmost_sequence ();
24494 reg = gen_rtx_REG (Pmode, LR_REGNO);
24495
24496 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24497 {
24498 if (INSN_P (insn))
24499 {
24500 if (CALL_P (insn))
24501 {
24502 if (!SIBLING_CALL_P (insn))
24503 return 1;
24504 }
24505 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24506 return 1;
24507 else if (set_of (reg, insn) != NULL_RTX
24508 && !prologue_epilogue_contains (insn))
24509 return 1;
24510 }
24511 }
24512 return 0;
24513 }
24514 \f
24515 /* Emit instructions needed to load the TOC register.
24516    This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
24517    there is a constant pool, or for SVR4 -fpic.  */
24518
24519 void
24520 rs6000_emit_load_toc_table (int fromprolog)
24521 {
24522 rtx dest;
24523 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24524
24525 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24526 {
24527 char buf[30];
24528 rtx lab, tmp1, tmp2, got;
24529
24530 lab = gen_label_rtx ();
24531 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24532 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24533 if (flag_pic == 2)
24534 {
24535 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24536 need_toc_init = 1;
24537 }
24538 else
24539 got = rs6000_got_sym ();
24540 tmp1 = tmp2 = dest;
24541 if (!fromprolog)
24542 {
24543 tmp1 = gen_reg_rtx (Pmode);
24544 tmp2 = gen_reg_rtx (Pmode);
24545 }
24546 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24547 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24548 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24549 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24550 }
24551 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24552 {
24553 emit_insn (gen_load_toc_v4_pic_si ());
24554 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24555 }
24556 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24557 {
24558 char buf[30];
24559 rtx temp0 = (fromprolog
24560 ? gen_rtx_REG (Pmode, 0)
24561 : gen_reg_rtx (Pmode));
24562
24563 if (fromprolog)
24564 {
24565 rtx symF, symL;
24566
24567 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24568 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24569
24570 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24571 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24572
24573 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24574 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24575 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24576 }
24577 else
24578 {
24579 rtx tocsym, lab;
24580
24581 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24582 need_toc_init = 1;
24583 lab = gen_label_rtx ();
24584 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24585 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24586 if (TARGET_LINK_STACK)
24587 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24588 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24589 }
24590 emit_insn (gen_addsi3 (dest, temp0, dest));
24591 }
24592 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24593 {
24594 /* This is for AIX code running in non-PIC ELF32. */
24595 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24596
24597 need_toc_init = 1;
24598 emit_insn (gen_elf_high (dest, realsym));
24599 emit_insn (gen_elf_low (dest, dest, realsym));
24600 }
24601 else
24602 {
24603 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24604
24605 if (TARGET_32BIT)
24606 emit_insn (gen_load_toc_aix_si (dest));
24607 else
24608 emit_insn (gen_load_toc_aix_di (dest));
24609 }
24610 }
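
/* For reference (illustrative; labels and register numbers are
   hypothetical): the 32-bit SVR4 -fPIC secure-PLT path above emits
   roughly

	bcl 20,31,.L1
   .L1:	mflr 30
	addis 30,30,.LTOC-.L1@ha
	addi 30,30,.LTOC-.L1@l

   i.e. a PC-relative pivot through LR, then materializing the GOT
   address into the PIC register.  */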
24611
24612 /* Emit instructions to restore the link register after determining where
24613 its value has been stored. */
24614
24615 void
24616 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24617 {
24618 rs6000_stack_t *info = rs6000_stack_info ();
24619 rtx operands[2];
24620
24621 operands[0] = source;
24622 operands[1] = scratch;
24623
24624 if (info->lr_save_p)
24625 {
24626 rtx frame_rtx = stack_pointer_rtx;
24627 HOST_WIDE_INT sp_offset = 0;
24628 rtx tmp;
24629
24630 if (frame_pointer_needed
24631 || cfun->calls_alloca
24632 || info->total_size > 32767)
24633 {
24634 tmp = gen_frame_mem (Pmode, frame_rtx);
24635 emit_move_insn (operands[1], tmp);
24636 frame_rtx = operands[1];
24637 }
24638 else if (info->push_p)
24639 sp_offset = info->total_size;
24640
24641 tmp = plus_constant (Pmode, frame_rtx,
24642 info->lr_save_offset + sp_offset);
24643 tmp = gen_frame_mem (Pmode, tmp);
24644 emit_move_insn (tmp, operands[0]);
24645 }
24646 else
24647 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24648
24649 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24650 state of lr_save_p so any change from here on would be a bug. In
24651 particular, stop rs6000_ra_ever_killed from considering the SET
24652 of lr we may have added just above. */
24653 cfun->machine->lr_save_state = info->lr_save_p + 1;
24654 }
24655
24656 static GTY(()) alias_set_type set = -1;
24657
24658 alias_set_type
24659 get_TOC_alias_set (void)
24660 {
24661 if (set == -1)
24662 set = new_alias_set ();
24663 return set;
24664 }
24665
24666 /* This returns nonzero if the current function uses the TOC. This is
24667 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
24668 is generated by the ABI_V4 load_toc_* patterns.
24669 Return 2 instead of 1 if the load_toc_* pattern is in the function
24670 partition that doesn't start the function. */
24671 #if TARGET_ELF
24672 static int
24673 uses_TOC (void)
24674 {
24675 rtx_insn *insn;
24676 int ret = 1;
24677
24678 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24679 {
24680 if (INSN_P (insn))
24681 {
24682 rtx pat = PATTERN (insn);
24683 int i;
24684
24685 if (GET_CODE (pat) == PARALLEL)
24686 for (i = 0; i < XVECLEN (pat, 0); i++)
24687 {
24688 rtx sub = XVECEXP (pat, 0, i);
24689 if (GET_CODE (sub) == USE)
24690 {
24691 sub = XEXP (sub, 0);
24692 if (GET_CODE (sub) == UNSPEC
24693 && XINT (sub, 1) == UNSPEC_TOC)
24694 return ret;
24695 }
24696 }
24697 }
24698 else if (crtl->has_bb_partition
24699 && NOTE_P (insn)
24700 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
24701 ret = 2;
24702 }
24703 return 0;
24704 }
24705 #endif
24706
24707 rtx
24708 create_TOC_reference (rtx symbol, rtx largetoc_reg)
24709 {
24710 rtx tocrel, tocreg, hi;
24711
24712 if (TARGET_DEBUG_ADDR)
24713 {
24714 if (GET_CODE (symbol) == SYMBOL_REF)
24715 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
24716 XSTR (symbol, 0));
24717 else
24718 {
24719 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
24720 GET_RTX_NAME (GET_CODE (symbol)));
24721 debug_rtx (symbol);
24722 }
24723 }
24724
24725 if (!can_create_pseudo_p ())
24726 df_set_regs_ever_live (TOC_REGISTER, true);
24727
24728 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
24729 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
24730 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
24731 return tocrel;
24732
24733 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
24734 if (largetoc_reg != NULL)
24735 {
24736 emit_move_insn (largetoc_reg, hi);
24737 hi = largetoc_reg;
24738 }
24739 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
24740 }
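
/* For reference (illustrative): with -mcmodel=small the UNSPEC_TOCREL
   returned above prints as a direct TOC-relative operand, roughly
   "ld 9,sym@toc(2)", while the medium/large models split it into a
   high/low pair along the lines of

	addis 9,2,sym@toc@ha
	ld 9,sym@toc@l(9)

   with the addis landing in LARGETOC_REG when one is supplied.  */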
24741
24742 /* Issue assembly directives that create a reference to the given DWARF
24743 FRAME_TABLE_LABEL from the current function section. */
24744 void
24745 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
24746 {
24747 fprintf (asm_out_file, "\t.ref %s\n",
24748 (* targetm.strip_name_encoding) (frame_table_label));
24749 }
24750 \f
24751 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
24752 and the change to the stack pointer. */
24753
24754 static void
24755 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
24756 {
24757 rtvec p;
24758 int i;
24759 rtx regs[3];
24760
24761 i = 0;
24762 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
24763 if (hard_frame_needed)
24764 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
24765 if (!(REGNO (fp) == STACK_POINTER_REGNUM
24766 || (hard_frame_needed
24767 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
24768 regs[i++] = fp;
24769
24770 p = rtvec_alloc (i);
24771 while (--i >= 0)
24772 {
24773 rtx mem = gen_frame_mem (BLKmode, regs[i]);
24774 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
24775 }
24776
24777 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
24778 }
24779
24780 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
24781 and set the appropriate attributes for the generated insn. Return the
24782 first insn which adjusts the stack pointer or the last insn before
24783 the stack adjustment loop.
24784
24785 SIZE_INT is used to create the CFI note for the allocation.
24786
24787 SIZE_RTX is an rtx containing the size of the adjustment. Note that
24788    since stacks grow toward lower addresses, its runtime value is -SIZE_INT.
24789
24790 ORIG_SP contains the backchain value that must be stored at *sp. */
24791
24792 static rtx_insn *
24793 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
24794 {
24795 rtx_insn *insn;
24796
24797 rtx size_rtx = GEN_INT (-size_int);
24798 if (size_int > 32767)
24799 {
24800 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
24801 /* Need a note here so that try_split doesn't get confused. */
24802 if (get_last_insn () == NULL_RTX)
24803 emit_note (NOTE_INSN_DELETED);
24804 insn = emit_move_insn (tmp_reg, size_rtx);
24805 try_split (PATTERN (insn), insn, 0);
24806 size_rtx = tmp_reg;
24807 }
24808
24809 if (Pmode == SImode)
24810 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
24811 stack_pointer_rtx,
24812 size_rtx,
24813 orig_sp));
24814 else
24815 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
24816 stack_pointer_rtx,
24817 size_rtx,
24818 orig_sp));
24819 rtx par = PATTERN (insn);
24820 gcc_assert (GET_CODE (par) == PARALLEL);
24821 rtx set = XVECEXP (par, 0, 0);
24822 gcc_assert (GET_CODE (set) == SET);
24823 rtx mem = SET_DEST (set);
24824 gcc_assert (MEM_P (mem));
24825 MEM_NOTRAP_P (mem) = 1;
24826 set_mem_alias_set (mem, get_frame_alias_set ());
24827
24828 RTX_FRAME_RELATED_P (insn) = 1;
24829 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24830 gen_rtx_SET (stack_pointer_rtx,
24831 gen_rtx_PLUS (Pmode,
24832 stack_pointer_rtx,
24833 GEN_INT (-size_int))));
24834
24835 /* Emit a blockage to ensure the allocation/probing insns are
24836 not optimized, combined, removed, etc. Add REG_STACK_CHECK
24837 note for similar reasons. */
24838 if (flag_stack_clash_protection)
24839 {
24840 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
24841 emit_insn (gen_blockage ());
24842 }
24843
24844 return insn;
24845 }
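
/* For reference (illustrative): the store-with-update insn emitted
   above folds the SP decrement and the back-chain store into a single
   instruction, e.g. for a 4096-byte 64-bit allocation

	stdu 1,-4096(1)

   which stores the old SP (the back chain) at the new *r1 and
   decrements r1 in one step.  */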
24846
24847 static HOST_WIDE_INT
24848 get_stack_clash_protection_probe_interval (void)
24849 {
24850 return (HOST_WIDE_INT_1U
24851 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
24852 }
24853
24854 static HOST_WIDE_INT
24855 get_stack_clash_protection_guard_size (void)
24856 {
24857 return (HOST_WIDE_INT_1U
24858 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
24859 }
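
/* For example, assuming the usual default of 12 for both
   --param stack-clash-protection-probe-interval and
   --param stack-clash-protection-guard-size (the defaults are target-
   and release-dependent), both functions return 1 << 12 == 4096 bytes,
   i.e. one 4 KiB page.  */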
24860
24861 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
24862 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
24863
24864 COPY_REG, if non-null, should contain a copy of the original
24865 stack pointer at exit from this function.
24866
24867    This is subtly different from the Ada probing in that it tries hard to
24868 prevent attacks that jump the stack guard. Thus it is never allowed to
24869 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
24870 space without a suitable probe. */
24871 static rtx_insn *
24872 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
24873 rtx copy_reg)
24874 {
24875 rtx orig_sp = copy_reg;
24876
24877 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
24878
24879 /* Round the size down to a multiple of PROBE_INTERVAL. */
24880 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
24881
24882 /* If explicitly requested,
24883      or the rounded size is not the same as the original size,
24884      or the rounded size is greater than one probe interval,
24885 then we will need a copy of the original stack pointer. */
24886 if (rounded_size != orig_size
24887 || rounded_size > probe_interval
24888 || copy_reg)
24889 {
24890 /* If the caller did not request a copy of the incoming stack
24891 pointer, then we use r0 to hold the copy. */
24892 if (!copy_reg)
24893 orig_sp = gen_rtx_REG (Pmode, 0);
24894 emit_move_insn (orig_sp, stack_pointer_rtx);
24895 }
24896
24897   /* There are three cases here.
24898
24899      One is a single probe, which is the most common and most efficiently
24900      implemented as it does not need a copy of the original stack
24901      pointer if there are no residuals.
24902
24903      Second is unrolled allocation/probes, which we use if there are just
24904      a few of them.  It needs to save the original stack pointer into a
24905      temporary for use as a source register in the allocation/probe.
24906
24907      Last is a loop.  This is the most uncommon case and least efficient.  */
24908 rtx_insn *retval = NULL;
24909 if (rounded_size == probe_interval)
24910 {
24911 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
24912
24913 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
24914 }
24915 else if (rounded_size <= 8 * probe_interval)
24916 {
24917 /* The ABI requires using the store with update insns to allocate
24918 	 space and store the backchain into the stack.
24919
24920 So we save the current stack pointer into a temporary, then
24921 emit the store-with-update insns to store the saved stack pointer
24922 into the right location in each new page. */
24923 for (int i = 0; i < rounded_size; i += probe_interval)
24924 {
24925 rtx_insn *insn
24926 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
24927
24928 /* Save the first stack adjustment in RETVAL. */
24929 if (i == 0)
24930 retval = insn;
24931 }
24932
24933 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
24934 }
24935 else
24936 {
24937 /* Compute the ending address. */
24938 rtx end_addr
24939 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
24940 rtx rs = GEN_INT (-rounded_size);
24941 rtx_insn *insn;
24942 if (add_operand (rs, Pmode))
24943 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
24944 else
24945 {
24946 emit_move_insn (end_addr, GEN_INT (-rounded_size));
24947 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
24948 stack_pointer_rtx));
24949 /* Describe the effect of INSN to the CFI engine. */
24950 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24951 gen_rtx_SET (end_addr,
24952 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24953 rs)));
24954 }
24955 RTX_FRAME_RELATED_P (insn) = 1;
24956
24957 /* Emit the loop. */
24958 if (TARGET_64BIT)
24959 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
24960 stack_pointer_rtx, orig_sp,
24961 end_addr));
24962 else
24963 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
24964 stack_pointer_rtx, orig_sp,
24965 end_addr));
24966 RTX_FRAME_RELATED_P (retval) = 1;
24967 /* Describe the effect of INSN to the CFI engine. */
24968 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
24969 gen_rtx_SET (stack_pointer_rtx, end_addr));
24970
24971 /* Emit a blockage to ensure the allocation/probing insns are
24972 not optimized, combined, removed, etc. Other cases handle this
24973 within their call to rs6000_emit_allocate_stack_1. */
24974 emit_insn (gen_blockage ());
24975
24976 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
24977 }
24978
24979 if (orig_size != rounded_size)
24980 {
24981 /* Allocate (and implicitly probe) any residual space. */
24982 HOST_WIDE_INT residual = orig_size - rounded_size;
24983
24984 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
24985
24986 /* If the residual was the only allocation, then we can return the
24987 allocating insn. */
24988 if (!retval)
24989 retval = insn;
24990 }
24991
24992 return retval;
24993 }
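
/* Worked example (illustrative), assuming a 4096-byte probe interval:

	orig_size = 4096  -> one store-with-update, no SP copy needed;
	orig_size = 16384 -> four unrolled allocate-and-probe stores;
	orig_size = 65536 -> the probe_stack_range loop;
	orig_size = 5000  -> one 4096-byte allocation plus a 904-byte
			     residual allocation.  */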
24994
24995 /* Emit the correct code for allocating stack space, as insns.
24996 If COPY_REG, make sure a copy of the old frame is left there.
24997 The generated code may use hard register 0 as a temporary. */
24998
24999 static rtx_insn *
25000 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25001 {
25002 rtx_insn *insn;
25003 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25004 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25005 rtx todec = gen_int_mode (-size, Pmode);
25006
25007 if (INTVAL (todec) != -size)
25008 {
25009 warning (0, "stack frame too large");
25010 emit_insn (gen_trap ());
25011 return 0;
25012 }
25013
25014 if (crtl->limit_stack)
25015 {
25016 if (REG_P (stack_limit_rtx)
25017 && REGNO (stack_limit_rtx) > 1
25018 && REGNO (stack_limit_rtx) <= 31)
25019 {
25020 rtx_insn *insn
25021 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25022 gcc_assert (insn);
25023 emit_insn (insn);
25024 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25025 }
25026 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25027 && TARGET_32BIT
25028 && DEFAULT_ABI == ABI_V4
25029 && !flag_pic)
25030 {
25031 rtx toload = gen_rtx_CONST (VOIDmode,
25032 gen_rtx_PLUS (Pmode,
25033 stack_limit_rtx,
25034 GEN_INT (size)));
25035
25036 emit_insn (gen_elf_high (tmp_reg, toload));
25037 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25038 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25039 const0_rtx));
25040 }
25041 else
25042 warning (0, "stack limit expression is not supported");
25043 }
25044
25045 if (flag_stack_clash_protection)
25046 {
25047 if (size < get_stack_clash_protection_guard_size ())
25048 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25049 else
25050 {
25051 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25052 copy_reg);
25053
25054 	  /* If we asked for a copy with an offset, then we still need to add
25055 	     in the offset.  */
25056 if (copy_reg && copy_off)
25057 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25058 return insn;
25059 }
25060 }
25061
25062 if (copy_reg)
25063 {
25064 if (copy_off != 0)
25065 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25066 else
25067 emit_move_insn (copy_reg, stack_reg);
25068 }
25069
25070 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25071 it now and set the alias set/attributes. The above gen_*_update
25072 calls will generate a PARALLEL with the MEM set being the first
25073 operation. */
25074 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25075 return insn;
25076 }
25077
25078 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25079
25080 #if PROBE_INTERVAL > 32768
25081 #error Cannot use indexed addressing mode for stack probing
25082 #endif
25083
25084 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25085 inclusive. These are offsets from the current stack pointer. */
25086
25087 static void
25088 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25089 {
25090 /* See if we have a constant small number of probes to generate. If so,
25091 that's the easy case. */
25092 if (first + size <= 32768)
25093 {
25094 HOST_WIDE_INT i;
25095
25096 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25097 it exceeds SIZE. If only one probe is needed, this will not
25098 generate any code. Then probe at FIRST + SIZE. */
25099 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25100 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25101 -(first + i)));
25102
25103 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25104 -(first + size)));
25105 }
25106
25107 /* Otherwise, do the same as above, but in a loop. Note that we must be
25108 extra careful with variables wrapping around because we might be at
25109 the very top (or the very bottom) of the address space and we have
25110 to be able to handle this case properly; in particular, we use an
25111 equality test for the loop condition. */
25112 else
25113 {
25114 HOST_WIDE_INT rounded_size;
25115 rtx r12 = gen_rtx_REG (Pmode, 12);
25116 rtx r0 = gen_rtx_REG (Pmode, 0);
25117
25118 /* Sanity check for the addressing mode we're going to use. */
25119 gcc_assert (first <= 32768);
25120
25121 /* Step 1: round SIZE to the previous multiple of the interval. */
25122
25123 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25124
25125
25126 /* Step 2: compute initial and final value of the loop counter. */
25127
25128 /* TEST_ADDR = SP + FIRST. */
25129 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25130 -first)));
25131
25132 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25133 if (rounded_size > 32768)
25134 {
25135 emit_move_insn (r0, GEN_INT (-rounded_size));
25136 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25137 }
25138 else
25139 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25140 -rounded_size)));
25141
25142
25143 /* Step 3: the loop
25144
25145 do
25146 {
25147 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25148 probe at TEST_ADDR
25149 }
25150 while (TEST_ADDR != LAST_ADDR)
25151
25152 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25153 until it is equal to ROUNDED_SIZE. */
25154
25155 if (TARGET_64BIT)
25156 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25157 else
25158 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25159
25160
25161 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25162 that SIZE is equal to ROUNDED_SIZE. */
25163
25164 if (size != rounded_size)
25165 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25166 }
25167 }
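
/* Worked example (illustrative): with PROBE_INTERVAL == 4096,
   FIRST == 12288 and SIZE == 8192, the small case above emits probes
   at SP-16384 (FIRST + 4096) and SP-20480 (FIRST + SIZE); a SIZE
   large enough that FIRST + SIZE > 32768 instead sets up r12/r0 and
   emits the probe loop.  */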
25168
25169 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25170 addresses, not offsets. */
25171
25172 static const char *
25173 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25174 {
25175 static int labelno = 0;
25176 char loop_lab[32];
25177 rtx xops[2];
25178
25179 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25180
25181 /* Loop. */
25182 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25183
25184 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25185 xops[0] = reg1;
25186 xops[1] = GEN_INT (-PROBE_INTERVAL);
25187 output_asm_insn ("addi %0,%0,%1", xops);
25188
25189 /* Probe at TEST_ADDR. */
25190 xops[1] = gen_rtx_REG (Pmode, 0);
25191 output_asm_insn ("stw %1,0(%0)", xops);
25192
25193 /* Test if TEST_ADDR == LAST_ADDR. */
25194 xops[1] = reg2;
25195 if (TARGET_64BIT)
25196 output_asm_insn ("cmpd 0,%0,%1", xops);
25197 else
25198 output_asm_insn ("cmpw 0,%0,%1", xops);
25199
25200 /* Branch. */
25201 fputs ("\tbne 0,", asm_out_file);
25202 assemble_name_raw (asm_out_file, loop_lab);
25203 fputc ('\n', asm_out_file);
25204
25205 return "";
25206 }
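
/* For reference (illustrative): with r12 as TEST_ADDR, r0 as LAST_ADDR
   and PROBE_INTERVAL == 4096, the routine above emits a loop of the
   form

   .LPSRL0:
	addi 12,12,-4096
	stw 0,0(12)
	cmpd 0,12,0
	bne 0,.LPSRL0

   on a 64-bit target (cmpw instead of cmpd on 32-bit).  */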
25207
25208 /* This function is called when rs6000_frame_related is processing
25209 SETs within a PARALLEL, and returns whether the REGNO save ought to
25210 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25211 for out-of-line register save functions, store multiple, and the
25212 Darwin world_save. They may contain registers that don't really
25213 need saving. */
25214
25215 static bool
25216 interesting_frame_related_regno (unsigned int regno)
25217 {
25218   /* Apparent saves of r0 are actually saving LR.  It doesn't make
25219 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25220 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25221 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25222 as frame related. */
25223 if (regno == 0)
25224 return true;
25225 /* If we see CR2 then we are here on a Darwin world save. Saves of
25226 CR2 signify the whole CR is being saved. This is a long-standing
25227 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25228 that CR needs to be saved. */
25229 if (regno == CR2_REGNO)
25230 return true;
25231 /* Omit frame info for any user-defined global regs. If frame info
25232 is supplied for them, frame unwinding will restore a user reg.
25233 Also omit frame info for any reg we don't need to save, as that
25234 bloats frame info and can cause problems with shrink wrapping.
25235 Since global regs won't be seen as needing to be saved, both of
25236 these conditions are covered by save_reg_p. */
25237 return save_reg_p (regno);
25238 }
25239
25240 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25241 addresses, not offsets.
25242
25243 REG2 contains the backchain that must be stored into *sp at each allocation.
25244
25245    This is subtly different from the Ada probing above in that it tries hard
25246 to prevent attacks that jump the stack guard. Thus, it is never allowed
25247 to allocate more than PROBE_INTERVAL bytes of stack space without a
25248 suitable probe. */
25249
25250 static const char *
25251 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25252 {
25253 static int labelno = 0;
25254 char loop_lab[32];
25255 rtx xops[3];
25256
25257 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25258
25259 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25260
25261 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25262
25263 /* This allocates and probes. */
25264 xops[0] = reg1;
25265 xops[1] = reg2;
25266 xops[2] = GEN_INT (-probe_interval);
25267 if (TARGET_64BIT)
25268 output_asm_insn ("stdu %1,%2(%0)", xops);
25269 else
25270 output_asm_insn ("stwu %1,%2(%0)", xops);
25271
25272 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25273 xops[0] = reg1;
25274 xops[1] = reg3;
25275 if (TARGET_64BIT)
25276 output_asm_insn ("cmpd 0,%0,%1", xops);
25277 else
25278 output_asm_insn ("cmpw 0,%0,%1", xops);
25279
25280 fputs ("\tbne 0,", asm_out_file);
25281 assemble_name_raw (asm_out_file, loop_lab);
25282 fputc ('\n', asm_out_file);
25283
25284 return "";
25285 }
25286
25287 /* Wrapper around the output_probe_stack_range routines. */
25288 const char *
25289 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25290 {
25291 if (flag_stack_clash_protection)
25292 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25293 else
25294 return output_probe_stack_range_1 (reg1, reg3);
25295 }
25296
25297 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25298 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25299 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25300 deduce these equivalences by itself so it wasn't necessary to hold
25301 its hand so much. Don't be tempted to always supply d2_f_d_e with
25302 the actual cfa register, ie. r31 when we are using a hard frame
25303 pointer. That fails when saving regs off r1, and sched moves the
25304 r31 setup past the reg saves. */
25305
25306 static rtx_insn *
25307 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25308 rtx reg2, rtx repl2)
25309 {
25310 rtx repl;
25311
25312 if (REGNO (reg) == STACK_POINTER_REGNUM)
25313 {
25314 gcc_checking_assert (val == 0);
25315 repl = NULL_RTX;
25316 }
25317 else
25318 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25319 GEN_INT (val));
25320
25321 rtx pat = PATTERN (insn);
25322 if (!repl && !reg2)
25323 {
25324 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25325 if (GET_CODE (pat) == PARALLEL)
25326 for (int i = 0; i < XVECLEN (pat, 0); i++)
25327 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25328 {
25329 rtx set = XVECEXP (pat, 0, i);
25330
25331 if (!REG_P (SET_SRC (set))
25332 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25333 RTX_FRAME_RELATED_P (set) = 1;
25334 }
25335 RTX_FRAME_RELATED_P (insn) = 1;
25336 return insn;
25337 }
25338
25339 /* We expect that 'pat' is either a SET or a PARALLEL containing
25340 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25341 are important so they all have to be marked RTX_FRAME_RELATED_P.
25342 Call simplify_replace_rtx on the SETs rather than the whole insn
25343 so as to leave the other stuff alone (for example USE of r12). */
25344
25345 set_used_flags (pat);
25346 if (GET_CODE (pat) == SET)
25347 {
25348 if (repl)
25349 pat = simplify_replace_rtx (pat, reg, repl);
25350 if (reg2)
25351 pat = simplify_replace_rtx (pat, reg2, repl2);
25352 }
25353 else if (GET_CODE (pat) == PARALLEL)
25354 {
25355 pat = shallow_copy_rtx (pat);
25356 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25357
25358 for (int i = 0; i < XVECLEN (pat, 0); i++)
25359 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25360 {
25361 rtx set = XVECEXP (pat, 0, i);
25362
25363 if (repl)
25364 set = simplify_replace_rtx (set, reg, repl);
25365 if (reg2)
25366 set = simplify_replace_rtx (set, reg2, repl2);
25367 XVECEXP (pat, 0, i) = set;
25368
25369 if (!REG_P (SET_SRC (set))
25370 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25371 RTX_FRAME_RELATED_P (set) = 1;
25372 }
25373 }
25374 else
25375 gcc_unreachable ();
25376
25377 RTX_FRAME_RELATED_P (insn) = 1;
25378 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25379
25380 return insn;
25381 }
25382
25383 /* Returns an insn that has a vrsave set operation with the
25384 appropriate CLOBBERs. */
25385
25386 static rtx
25387 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25388 {
25389 int nclobs, i;
25390 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25391 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25392
25393 clobs[0]
25394 = gen_rtx_SET (vrsave,
25395 gen_rtx_UNSPEC_VOLATILE (SImode,
25396 gen_rtvec (2, reg, vrsave),
25397 UNSPECV_SET_VRSAVE));
25398
25399 nclobs = 1;
25400
25401 /* We need to clobber the registers in the mask so the scheduler
25402 does not move sets to VRSAVE before sets of AltiVec registers.
25403
25404 However, if the function receives nonlocal gotos, reload will set
25405 all call saved registers live. We will end up with:
25406
25407 (set (reg 999) (mem))
25408 (parallel [ (set (reg vrsave) (unspec blah))
25409 (clobber (reg 999))])
25410
25411 The clobber will cause the store into reg 999 to be dead, and
25412 flow will attempt to delete an epilogue insn. In this case, we
25413 need an unspec use/set of the register. */
25414
25415 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25416 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25417 {
25418 	if (!epiloguep || call_used_regs[i])
25419 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25420 gen_rtx_REG (V4SImode, i));
25421 else
25422 {
25423 rtx reg = gen_rtx_REG (V4SImode, i);
25424
25425 clobs[nclobs++]
25426 = gen_rtx_SET (reg,
25427 gen_rtx_UNSPEC (V4SImode,
25428 gen_rtvec (1, reg), 27));
25429 }
25430 }
25431
25432 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25433
25434 for (i = 0; i < nclobs; ++i)
25435 XVECEXP (insn, 0, i) = clobs[i];
25436
25437 return insn;
25438 }
25439
25440 static rtx
25441 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25442 {
25443 rtx addr, mem;
25444
25445 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25446 mem = gen_frame_mem (GET_MODE (reg), addr);
25447 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25448 }
25449
25450 static rtx
25451 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25452 {
25453 return gen_frame_set (reg, frame_reg, offset, false);
25454 }
25455
25456 static rtx
25457 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25458 {
25459 return gen_frame_set (reg, frame_reg, offset, true);
25460 }
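
/* For example, gen_frame_store (r31, sp, -8) on a 64-bit target yields
   roughly
	(set (mem:DI (plus:DI (reg:DI 1) (const_int -8))) (reg:DI 31))
   (modulo MEM attributes), and gen_frame_load produces the same SET
   with the operands swapped.  */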
25461
25462 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25463 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25464
25465 static rtx_insn *
25466 emit_frame_save (rtx frame_reg, machine_mode mode,
25467 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25468 {
25469 rtx reg;
25470
25471 /* Some cases that need register indexed addressing. */
25472 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25473 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25474
25475 reg = gen_rtx_REG (mode, regno);
25476 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25477 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25478 NULL_RTX, NULL_RTX);
25479 }
25480
25481 /* Emit an offset memory reference suitable for a frame store, while
25482 converting to a valid addressing mode. */
25483
25484 static rtx
25485 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25486 {
25487 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25488 }
25489
25490 #ifndef TARGET_FIX_AND_CONTINUE
25491 #define TARGET_FIX_AND_CONTINUE 0
25492 #endif
25493
25494 /* The out-of-line routines start at GPR 13 or 14, FPR 14, and VR 20;
      we need the smallest of these.  */
25495 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25496 #define LAST_SAVRES_REGISTER 31
25497 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25498
25499 enum {
25500 SAVRES_LR = 0x1,
25501 SAVRES_SAVE = 0x2,
25502 SAVRES_REG = 0x0c,
25503 SAVRES_GPR = 0,
25504 SAVRES_FPR = 4,
25505 SAVRES_VR = 8
25506 };
25507
25508 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25509
25510 /* Temporary holding space for an out-of-line register save/restore
25511 routine name. */
25512 static char savres_routine_name[30];
25513
25514 /* Return the name for an out-of-line register save/restore routine.
25515 We are saving/restoring GPRs if GPR is true. */
25516
25517 static char *
25518 rs6000_savres_routine_name (int regno, int sel)
25519 {
25520 const char *prefix = "";
25521 const char *suffix = "";
25522
25523 /* Different targets are supposed to define
25524 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25525 routine name could be defined with:
25526
25527 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25528
25529    This is a nice idea in theory, but in reality things are
25530 complicated in several ways:
25531
25532 - ELF targets have save/restore routines for GPRs.
25533
25534 - PPC64 ELF targets have routines for save/restore of GPRs that
25535 differ in what they do with the link register, so having a set
25536 prefix doesn't work. (We only use one of the save routines at
25537 the moment, though.)
25538
25539    - PPC32 ELF targets have "exit" versions of the restore routines
25540 that restore the link register and can save some extra space.
25541 These require an extra suffix. (There are also "tail" versions
25542 of the restore routines and "GOT" versions of the save routines,
25543 but we don't generate those at present. Same problems apply,
25544 though.)
25545
25546 We deal with all this by synthesizing our own prefix/suffix and
25547 using that for the simple sprintf call shown above. */
25548 if (DEFAULT_ABI == ABI_V4)
25549 {
25550 if (TARGET_64BIT)
25551 goto aix_names;
25552
25553 if ((sel & SAVRES_REG) == SAVRES_GPR)
25554 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25555 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25556 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25557 else if ((sel & SAVRES_REG) == SAVRES_VR)
25558 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25559 else
25560 abort ();
25561
25562 if ((sel & SAVRES_LR))
25563 suffix = "_x";
25564 }
25565 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25566 {
25567 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25568 /* No out-of-line save/restore routines for GPRs on AIX. */
25569 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25570 #endif
25571
25572 aix_names:
25573 if ((sel & SAVRES_REG) == SAVRES_GPR)
25574 prefix = ((sel & SAVRES_SAVE)
25575 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25576 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25577 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25578 {
25579 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25580 if ((sel & SAVRES_LR))
25581 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25582 else
25583 #endif
25584 {
25585 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25586 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25587 }
25588 }
25589 else if ((sel & SAVRES_REG) == SAVRES_VR)
25590 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25591 else
25592 abort ();
25593 }
25594
25595 if (DEFAULT_ABI == ABI_DARWIN)
25596 {
25597 /* The Darwin approach is (slightly) different, in order to be
25598 compatible with code generated by the system toolchain. There is a
25599 single symbol for the start of save sequence, and the code here
25600 embeds an offset into that code on the basis of the first register
25601 to be saved. */
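/* For example, saving GPRs from r18 with the LR variant gives
   "*saveGPRx+20 ; save r18-r31", i.e. an offset of (18 - 13) * 4
   bytes into the common save sequence.  */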
25602 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25603 if ((sel & SAVRES_REG) == SAVRES_GPR)
25604 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25605 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25606 (regno - 13) * 4, prefix, regno);
25607 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25608 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25609 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25610 else if ((sel & SAVRES_REG) == SAVRES_VR)
25611 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25612 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25613 else
25614 abort ();
25615 }
25616 else
25617 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25618
25619 return savres_routine_name;
25620 }
25621
25622 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25623 SEL selects the routine as in rs6000_savres_routine_name. */
25624
25625 static rtx
25626 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25627 {
25628 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25629 ? info->first_gp_reg_save
25630 : (sel & SAVRES_REG) == SAVRES_FPR
25631 ? info->first_fp_reg_save - 32
25632 : (sel & SAVRES_REG) == SAVRES_VR
25633 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25634 : -1);
25635 rtx sym;
25636 int select = sel;
25637
25638 /* Don't generate bogus routine names. */
25639 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25640 && regno <= LAST_SAVRES_REGISTER
25641 && select >= 0 && select <= 12);
25642
25643 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25644
25645 if (sym == NULL)
25646 {
25647 char *name;
25648
25649 name = rs6000_savres_routine_name (regno, sel);
25650
25651 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25652 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25653 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25654 }
25655
25656 return sym;
25657 }
25658
25659 /* Emit a sequence of insns, including a stack tie if needed, for
25660 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25661 reset the stack pointer, but move the base of the frame into
25662 reg UPDT_REGNO for use by out-of-line register restore routines. */
25663
25664 static rtx
25665 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25666 unsigned updt_regno)
25667 {
25668 /* Nothing to do if the frame base is already in UPDT_REGNO with no offset. */
25669 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25670 return NULL_RTX;
25671
25672 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25673
25674 /* This blockage is needed so that sched doesn't decide to move
25675 the sp change before the register restores. */
25676 if (DEFAULT_ABI == ABI_V4)
25677 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25678 GEN_INT (frame_off)));
25679
25680 /* If we are restoring registers out-of-line, we will be using the
25681 "exit" variants of the restore routines, which will reset the
25682 stack for us. But we do need to point updt_reg into the
25683 right place for those routines. */
25684 if (frame_off != 0)
25685 return emit_insn (gen_add3_insn (updt_reg_rtx,
25686 frame_reg_rtx, GEN_INT (frame_off)));
25687 else
25688 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25691 }
25692
25693 /* Return the register number used as a pointer by out-of-line
25694 save/restore functions. */
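/* For reference, as derived from the logic below: on AIX/ELFv2, FPR
   routines and any LR variant use r1 (the stack pointer) and plain
   GPR/VR routines use r12; on Darwin, FPR routines use r1; all other
   cases, including all V.4 routines, use r11.  */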
25695
25696 static inline unsigned
25697 ptr_regno_for_savres (int sel)
25698 {
25699 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25700 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
25701 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
25702 }
25703
25704 /* Construct a parallel rtx describing the effect of a call to an
25705 out-of-line register save/restore routine, and emit the insn
25706 or jump_insn as appropriate. */
25707
25708 static rtx_insn *
25709 rs6000_emit_savres_rtx (rs6000_stack_t *info,
25710 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
25711 machine_mode reg_mode, int sel)
25712 {
25713 int i;
25714 int offset, start_reg, end_reg, n_regs, use_reg;
25715 int reg_size = GET_MODE_SIZE (reg_mode);
25716 rtx sym;
25717 rtvec p;
25718 rtx par;
25719 rtx_insn *insn;
25720
25721 offset = 0;
25722 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25723 ? info->first_gp_reg_save
25724 : (sel & SAVRES_REG) == SAVRES_FPR
25725 ? info->first_fp_reg_save
25726 : (sel & SAVRES_REG) == SAVRES_VR
25727 ? info->first_altivec_reg_save
25728 : -1);
25729 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25730 ? 32
25731 : (sel & SAVRES_REG) == SAVRES_FPR
25732 ? 64
25733 : (sel & SAVRES_REG) == SAVRES_VR
25734 ? LAST_ALTIVEC_REGNO + 1
25735 : -1);
25736 n_regs = end_reg - start_reg;
25737 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
25738 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
25739 + n_regs);
25740
25741 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25742 RTVEC_ELT (p, offset++) = ret_rtx;
25743
25744 RTVEC_ELT (p, offset++)
25745 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
25746
25747 sym = rs6000_savres_routine_sym (info, sel);
25748 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
25749
25750 use_reg = ptr_regno_for_savres (sel);
25751 if ((sel & SAVRES_REG) == SAVRES_VR)
25752 {
25753 /* Vector regs are saved/restored using [reg+reg] addressing. */
25754 RTVEC_ELT (p, offset++)
25755 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25756 RTVEC_ELT (p, offset++)
25757 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
25758 }
25759 else
25760 RTVEC_ELT (p, offset++)
25761 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25762
25763 for (i = 0; i < end_reg - start_reg; i++)
25764 RTVEC_ELT (p, i + offset)
25765 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
25766 frame_reg_rtx, save_area_offset + reg_size * i,
25767 (sel & SAVRES_SAVE) != 0);
25768
25769 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25770 RTVEC_ELT (p, i + offset)
25771 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
25772
25773 par = gen_rtx_PARALLEL (VOIDmode, p);
25774
25775 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25776 {
25777 insn = emit_jump_insn (par);
25778 JUMP_LABEL (insn) = ret_rtx;
25779 }
25780 else
25781 insn = emit_insn (par);
25782 return insn;
25783 }
25784
25785 /* Emit prologue code to store CR fields that need to be saved into REG. This
25786 function should only be called when moving the non-volatile CRs to REG; it
25787 is not a general-purpose routine to move the entire set of CRs to REG.
25788 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
25789 volatile CRs. */
25790
25791 static void
25792 rs6000_emit_prologue_move_from_cr (rtx reg)
25793 {
25794 /* Only the ELFv2 ABI allows storing only selected fields. */
25795 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
25796 {
25797 int i, cr_reg[8], count = 0;
25798
25799 /* Collect CR fields that must be saved. */
25800 for (i = 0; i < 8; i++)
25801 if (save_reg_p (CR0_REGNO + i))
25802 cr_reg[count++] = i;
25803
25804 /* If it's just a single one, use mfcrf. */
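/* The FXM operand selects CR field n via bit (7 - n), so e.g. saving
   only CR2 uses the mask 1 << (7 - 2) == 0x20.  */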
25805 if (count == 1)
25806 {
25807 rtvec p = rtvec_alloc (1);
25808 rtvec r = rtvec_alloc (2);
25809 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
25810 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
25811 RTVEC_ELT (p, 0)
25812 = gen_rtx_SET (reg,
25813 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
25814
25815 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
25816 return;
25817 }
25818
25819 /* ??? It might be better to handle count == 2 / 3 cases here
25820 as well, using logical operations to combine the values. */
25821 }
25822
25823 emit_insn (gen_prologue_movesi_from_cr (reg));
25824 }
25825
25826 /* Return whether the split-stack arg pointer (r12) is used. */
25827
25828 static bool
25829 split_stack_arg_pointer_used_p (void)
25830 {
25831 /* If the pseudo holding the arg pointer is no longer a pseudo,
25832 then the arg pointer is used. */
25833 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
25834 && (!REG_P (cfun->machine->split_stack_arg_pointer)
25835 || (REGNO (cfun->machine->split_stack_arg_pointer)
25836 < FIRST_PSEUDO_REGISTER)))
25837 return true;
25838
25839 /* Unfortunately we also need to do some code scanning, since
25840 r12 may have been substituted for the pseudo. */
25841 rtx_insn *insn;
25842 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
25843 FOR_BB_INSNS (bb, insn)
25844 if (NONDEBUG_INSN_P (insn))
25845 {
25846 /* A call destroys r12. */
25847 if (CALL_P (insn))
25848 return false;
25849
25850 df_ref use;
25851 FOR_EACH_INSN_USE (use, insn)
25852 {
25853 rtx x = DF_REF_REG (use);
25854 if (REG_P (x) && REGNO (x) == 12)
25855 return true;
25856 }
25857 df_ref def;
25858 FOR_EACH_INSN_DEF (def, insn)
25859 {
25860 rtx x = DF_REF_REG (def);
25861 if (REG_P (x) && REGNO (x) == 12)
25862 return false;
25863 }
25864 }
25865 return bitmap_bit_p (DF_LR_OUT (bb), 12);
25866 }
25867
25868 /* Return whether we need to emit an ELFv2 global entry point prologue. */
25869
25870 static bool
25871 rs6000_global_entry_point_needed_p (void)
25872 {
25873 /* Only needed for the ELFv2 ABI. */
25874 if (DEFAULT_ABI != ABI_ELFv2)
25875 return false;
25876
25877 /* With -msingle-pic-base, we assume the whole program shares the same
25878 TOC, so no global entry point prologues are needed anywhere. */
25879 if (TARGET_SINGLE_PIC_BASE)
25880 return false;
25881
25882 /* Ensure we have a global entry point for thunks. ??? We could
25883 avoid that if the target routine doesn't need a global entry point,
25884 but we do not know whether this is the case at this point. */
25885 if (cfun->is_thunk)
25886 return true;
25887
25888 /* For regular functions, rs6000_emit_prologue sets this flag if the
25889 routine ever uses the TOC pointer. */
25890 return cfun->machine->r2_setup_needed;
25891 }
25892
25893 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
25894 static sbitmap
25895 rs6000_get_separate_components (void)
25896 {
25897 rs6000_stack_t *info = rs6000_stack_info ();
25898
25899 if (WORLD_SAVE_P (info))
25900 return NULL;
25901
25902 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
25903 && !(info->savres_strategy & REST_MULTIPLE));
25904
25905 /* Component 0 is the save/restore of LR (done via GPR0).
25906 Component 2 is the save of the TOC (GPR2).
25907 Components 13..31 are the save/restore of GPR13..GPR31.
25908 Components 46..63 are the save/restore of FPR14..FPR31. */
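/* For GPRs and FPRs the component number is simply the hard register
   number, which leaves 0 and 2 free for LR and the TOC since r0 and
   r2 are never shrink-wrapped as GPRs.  */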
25909
25910 cfun->machine->n_components = 64;
25911
25912 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
25913 bitmap_clear (components);
25914
25915 int reg_size = TARGET_32BIT ? 4 : 8;
25916 int fp_reg_size = 8;
25917
25918 /* The GPRs we need saved to the frame. */
25919 if ((info->savres_strategy & SAVE_INLINE_GPRS)
25920 && (info->savres_strategy & REST_INLINE_GPRS))
25921 {
25922 int offset = info->gp_save_offset;
25923 if (info->push_p)
25924 offset += info->total_size;
25925
25926 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
25927 {
25928 if (IN_RANGE (offset, -0x8000, 0x7fff)
25929 && save_reg_p (regno))
25930 bitmap_set_bit (components, regno);
25931
25932 offset += reg_size;
25933 }
25934 }
25935
25936 /* Don't mess with the hard frame pointer. */
25937 if (frame_pointer_needed)
25938 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
25939
25940 /* Don't mess with the fixed TOC register. */
25941 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
25942 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
25943 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
25944 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
25945
25946 /* The FPRs we need saved to the frame. */
25947 if ((info->savres_strategy & SAVE_INLINE_FPRS)
25948 && (info->savres_strategy & REST_INLINE_FPRS))
25949 {
25950 int offset = info->fp_save_offset;
25951 if (info->push_p)
25952 offset += info->total_size;
25953
25954 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
25955 {
25956 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
25957 bitmap_set_bit (components, regno);
25958
25959 offset += fp_reg_size;
25960 }
25961 }
25962
25963 /* Optimize LR save and restore if we can. This is component 0. Any
25964 out-of-line register save/restore routines need LR. */
25965 if (info->lr_save_p
25966 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
25967 && (info->savres_strategy & SAVE_INLINE_GPRS)
25968 && (info->savres_strategy & REST_INLINE_GPRS)
25969 && (info->savres_strategy & SAVE_INLINE_FPRS)
25970 && (info->savres_strategy & REST_INLINE_FPRS)
25971 && (info->savres_strategy & SAVE_INLINE_VRS)
25972 && (info->savres_strategy & REST_INLINE_VRS))
25973 {
25974 int offset = info->lr_save_offset;
25975 if (info->push_p)
25976 offset += info->total_size;
25977 if (IN_RANGE (offset, -0x8000, 0x7fff))
25978 bitmap_set_bit (components, 0);
25979 }
25980
25981 /* Optimize saving the TOC. This is component 2. */
25982 if (cfun->machine->save_toc_in_prologue)
25983 bitmap_set_bit (components, 2);
25984
25985 return components;
25986 }
25987
25988 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
25989 static sbitmap
25990 rs6000_components_for_bb (basic_block bb)
25991 {
25992 rs6000_stack_t *info = rs6000_stack_info ();
25993
25994 bitmap in = DF_LIVE_IN (bb);
25995 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
25996 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
25997
25998 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
25999 bitmap_clear (components);
26000
26001 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26002
26003 /* GPRs. */
26004 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26005 if (bitmap_bit_p (in, regno)
26006 || bitmap_bit_p (gen, regno)
26007 || bitmap_bit_p (kill, regno))
26008 bitmap_set_bit (components, regno);
26009
26010 /* FPRs. */
26011 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26012 if (bitmap_bit_p (in, regno)
26013 || bitmap_bit_p (gen, regno)
26014 || bitmap_bit_p (kill, regno))
26015 bitmap_set_bit (components, regno);
26016
26017 /* The link register. */
26018 if (bitmap_bit_p (in, LR_REGNO)
26019 || bitmap_bit_p (gen, LR_REGNO)
26020 || bitmap_bit_p (kill, LR_REGNO))
26021 bitmap_set_bit (components, 0);
26022
26023 /* The TOC save. */
26024 if (bitmap_bit_p (in, TOC_REGNUM)
26025 || bitmap_bit_p (gen, TOC_REGNUM)
26026 || bitmap_bit_p (kill, TOC_REGNUM))
26027 bitmap_set_bit (components, 2);
26028
26029 return components;
26030 }
26031
26032 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26033 static void
26034 rs6000_disqualify_components (sbitmap components, edge e,
26035 sbitmap edge_components, bool /*is_prologue*/)
26036 {
26037 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26038 live where we want to place that code. */
26039 if (bitmap_bit_p (edge_components, 0)
26040 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26041 {
26042 if (dump_file)
26043 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26044 "on entry to bb %d\n", e->dest->index);
26045 bitmap_clear_bit (components, 0);
26046 }
26047 }
26048
26049 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26050 static void
26051 rs6000_emit_prologue_components (sbitmap components)
26052 {
26053 rs6000_stack_t *info = rs6000_stack_info ();
26054 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26055 ? HARD_FRAME_POINTER_REGNUM
26056 : STACK_POINTER_REGNUM);
26057
26058 machine_mode reg_mode = Pmode;
26059 int reg_size = TARGET_32BIT ? 4 : 8;
26060 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26061 int fp_reg_size = 8;
26062
26063 /* Prologue for LR. */
26064 if (bitmap_bit_p (components, 0))
26065 {
26066 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26067 rtx reg = gen_rtx_REG (reg_mode, 0);
26068 rtx_insn *insn = emit_move_insn (reg, lr);
26069 RTX_FRAME_RELATED_P (insn) = 1;
26070 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26071
26072 int offset = info->lr_save_offset;
26073 if (info->push_p)
26074 offset += info->total_size;
26075
26076 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26077 RTX_FRAME_RELATED_P (insn) = 1;
26078 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26079 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26080 }
26081
26082 /* Prologue for TOC. */
26083 if (bitmap_bit_p (components, 2))
26084 {
26085 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26086 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26087 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26088 }
26089
26090 /* Prologue for the GPRs. */
26091 int offset = info->gp_save_offset;
26092 if (info->push_p)
26093 offset += info->total_size;
26094
26095 for (int i = info->first_gp_reg_save; i < 32; i++)
26096 {
26097 if (bitmap_bit_p (components, i))
26098 {
26099 rtx reg = gen_rtx_REG (reg_mode, i);
26100 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26101 RTX_FRAME_RELATED_P (insn) = 1;
26102 rtx set = copy_rtx (single_set (insn));
26103 add_reg_note (insn, REG_CFA_OFFSET, set);
26104 }
26105
26106 offset += reg_size;
26107 }
26108
26109 /* Prologue for the FPRs. */
26110 offset = info->fp_save_offset;
26111 if (info->push_p)
26112 offset += info->total_size;
26113
26114 for (int i = info->first_fp_reg_save; i < 64; i++)
26115 {
26116 if (bitmap_bit_p (components, i))
26117 {
26118 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26119 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26120 RTX_FRAME_RELATED_P (insn) = 1;
26121 rtx set = copy_rtx (single_set (insn));
26122 add_reg_note (insn, REG_CFA_OFFSET, set);
26123 }
26124
26125 offset += fp_reg_size;
26126 }
26127 }
26128
26129 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26130 static void
26131 rs6000_emit_epilogue_components (sbitmap components)
26132 {
26133 rs6000_stack_t *info = rs6000_stack_info ();
26134 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26135 ? HARD_FRAME_POINTER_REGNUM
26136 : STACK_POINTER_REGNUM);
26137
26138 machine_mode reg_mode = Pmode;
26139 int reg_size = TARGET_32BIT ? 4 : 8;
26140
26141 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26142 int fp_reg_size = 8;
26143
26144 /* Epilogue for the FPRs. */
26145 int offset = info->fp_save_offset;
26146 if (info->push_p)
26147 offset += info->total_size;
26148
26149 for (int i = info->first_fp_reg_save; i < 64; i++)
26150 {
26151 if (bitmap_bit_p (components, i))
26152 {
26153 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26154 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26155 RTX_FRAME_RELATED_P (insn) = 1;
26156 add_reg_note (insn, REG_CFA_RESTORE, reg);
26157 }
26158
26159 offset += fp_reg_size;
26160 }
26161
26162 /* Epilogue for the GPRs. */
26163 offset = info->gp_save_offset;
26164 if (info->push_p)
26165 offset += info->total_size;
26166
26167 for (int i = info->first_gp_reg_save; i < 32; i++)
26168 {
26169 if (bitmap_bit_p (components, i))
26170 {
26171 rtx reg = gen_rtx_REG (reg_mode, i);
26172 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26173 RTX_FRAME_RELATED_P (insn) = 1;
26174 add_reg_note (insn, REG_CFA_RESTORE, reg);
26175 }
26176
26177 offset += reg_size;
26178 }
26179
26180 /* Epilogue for LR. */
26181 if (bitmap_bit_p (components, 0))
26182 {
26183 int offset = info->lr_save_offset;
26184 if (info->push_p)
26185 offset += info->total_size;
26186
26187 rtx reg = gen_rtx_REG (reg_mode, 0);
26188 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26189
26190 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26191 insn = emit_move_insn (lr, reg);
26192 RTX_FRAME_RELATED_P (insn) = 1;
26193 add_reg_note (insn, REG_CFA_RESTORE, lr);
26194 }
26195 }
26196
26197 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26198 static void
26199 rs6000_set_handled_components (sbitmap components)
26200 {
26201 rs6000_stack_t *info = rs6000_stack_info ();
26202
26203 for (int i = info->first_gp_reg_save; i < 32; i++)
26204 if (bitmap_bit_p (components, i))
26205 cfun->machine->gpr_is_wrapped_separately[i] = true;
26206
26207 for (int i = info->first_fp_reg_save; i < 64; i++)
26208 if (bitmap_bit_p (components, i))
26209 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26210
26211 if (bitmap_bit_p (components, 0))
26212 cfun->machine->lr_is_wrapped_separately = true;
26213
26214 if (bitmap_bit_p (components, 2))
26215 cfun->machine->toc_is_wrapped_separately = true;
26216 }
26217
26218 /* VRSAVE is a bit vector representing which AltiVec registers
26219 are used. The OS uses this to determine which vector
26220 registers to save on a context switch. We need to save
26221 VRSAVE on the stack frame, add whatever AltiVec registers we
26222 used in this function, and do the corresponding magic in the
26223 epilogue. */
26224 static void
26225 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26226 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26227 {
26228 /* Get VRSAVE into a GPR. */
26229 rtx reg = gen_rtx_REG (SImode, save_regno);
26230 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26231 if (TARGET_MACHO)
26232 emit_insn (gen_get_vrsave_internal (reg));
26233 else
26234 emit_insn (gen_rtx_SET (reg, vrsave));
26235
26236 /* Save VRSAVE. */
26237 int offset = info->vrsave_save_offset + frame_off;
26238 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26239
26240 /* Include the registers in the mask. */
26241 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
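/* As an example (ALTIVEC_REG_BIT puts v0 at the most significant bit,
   per rs6000.h), a function using only v20 and v31 ORs in
   0x00000800 | 0x00000001.  */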
26242
26243 emit_insn (generate_set_vrsave (reg, info, 0));
26244 }
26245
26246 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26247 called, it left the arg pointer to the old stack in r29. Otherwise, the
26248 arg pointer is the top of the current frame. */
26249 static void
26250 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26251 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26252 {
26253 cfun->machine->split_stack_argp_used = true;
26254
26255 if (sp_adjust)
26256 {
26257 rtx r12 = gen_rtx_REG (Pmode, 12);
26258 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26259 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26260 emit_insn_before (set_r12, sp_adjust);
26261 }
26262 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26263 {
26264 rtx r12 = gen_rtx_REG (Pmode, 12);
26265 if (frame_off == 0)
26266 emit_move_insn (r12, frame_reg_rtx);
26267 else
26268 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26269 }
26270
26271 if (info->push_p)
26272 {
26273 rtx r12 = gen_rtx_REG (Pmode, 12);
26274 rtx r29 = gen_rtx_REG (Pmode, 29);
26275 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26276 rtx not_more = gen_label_rtx ();
26277 rtx jump;
26278
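/* CR7 still holds the stack-limit comparison made by the split-stack
   prologue; GEU means __morestack was not called, so the copy from
   r29 below is skipped.  */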
26279 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26280 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26281 gen_rtx_LABEL_REF (VOIDmode, not_more),
26282 pc_rtx);
26283 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26284 JUMP_LABEL (jump) = not_more;
26285 LABEL_NUSES (not_more) += 1;
26286 emit_move_insn (r12, r29);
26287 emit_label (not_more);
26288 }
26289 }
26290
26291 /* Emit function prologue as insns. */
26292
26293 void
26294 rs6000_emit_prologue (void)
26295 {
26296 rs6000_stack_t *info = rs6000_stack_info ();
26297 machine_mode reg_mode = Pmode;
26298 int reg_size = TARGET_32BIT ? 4 : 8;
26299 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26300 int fp_reg_size = 8;
26301 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26302 rtx frame_reg_rtx = sp_reg_rtx;
26303 unsigned int cr_save_regno;
26304 rtx cr_save_rtx = NULL_RTX;
26305 rtx_insn *insn;
26306 int strategy;
26307 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26308 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26309 && call_used_regs[STATIC_CHAIN_REGNUM]);
26310 int using_split_stack = (flag_split_stack
26311 && (lookup_attribute ("no_split_stack",
26312 DECL_ATTRIBUTES (cfun->decl))
26313 == NULL));
26314
26315 /* Offset to top of frame for frame_reg and sp respectively. */
26316 HOST_WIDE_INT frame_off = 0;
26317 HOST_WIDE_INT sp_off = 0;
26318 /* sp_adjust is the stack adjusting instruction, tracked so that the
26319 insn setting up the split-stack arg pointer can be emitted just
26320 prior to it, when r12 is not used here for other purposes. */
26321 rtx_insn *sp_adjust = 0;
26322
26323 #if CHECKING_P
26324 /* Track and check usage of r0, r11, r12. */
26325 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26326 #define START_USE(R) do \
26327 { \
26328 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26329 reg_inuse |= 1 << (R); \
26330 } while (0)
26331 #define END_USE(R) do \
26332 { \
26333 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26334 reg_inuse &= ~(1 << (R)); \
26335 } while (0)
26336 #define NOT_INUSE(R) do \
26337 { \
26338 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26339 } while (0)
26340 #else
26341 #define START_USE(R) do {} while (0)
26342 #define END_USE(R) do {} while (0)
26343 #define NOT_INUSE(R) do {} while (0)
26344 #endif
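/* For example, the CR save below brackets its scratch register between
   START_USE (cr_save_regno) and END_USE (REGNO (cr_save_rtx)), so a
   checking build asserts if r0/r11/r12 is ever claimed twice.  */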
26345
26346 if (DEFAULT_ABI == ABI_ELFv2
26347 && !TARGET_SINGLE_PIC_BASE)
26348 {
26349 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26350
26351 /* With -mminimal-toc we may generate an extra use of r2 below. */
26352 if (TARGET_TOC && TARGET_MINIMAL_TOC
26353 && !constant_pool_empty_p ())
26354 cfun->machine->r2_setup_needed = true;
26355 }
26356
26358 if (flag_stack_usage_info)
26359 current_function_static_stack_size = info->total_size;
26360
26361 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26362 {
26363 HOST_WIDE_INT size = info->total_size;
26364
26365 if (crtl->is_leaf && !cfun->calls_alloca)
26366 {
26367 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26368 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26369 size - get_stack_check_protect ());
26370 }
26371 else if (size > 0)
26372 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26373 }
26374
26375 if (TARGET_FIX_AND_CONTINUE)
26376 {
26377 /* GDB on Darwin arranges to forward a function from the old
26378 address by modifying the first 5 instructions of the function
26379 to branch to the overriding function. This is necessary to
26380 permit function pointers that point to the old function to
26381 actually forward to the new function. */
26382 emit_insn (gen_nop ());
26383 emit_insn (gen_nop ());
26384 emit_insn (gen_nop ());
26385 emit_insn (gen_nop ());
26386 emit_insn (gen_nop ());
26387 }
26388
26389 /* Handle world saves specially here. */
26390 if (WORLD_SAVE_P (info))
26391 {
26392 int i, j, sz;
26393 rtx treg;
26394 rtvec p;
26395 rtx reg0;
26396
26397 /* save_world expects lr in r0. */
26398 reg0 = gen_rtx_REG (Pmode, 0);
26399 if (info->lr_save_p)
26400 {
26401 insn = emit_move_insn (reg0,
26402 gen_rtx_REG (Pmode, LR_REGNO));
26403 RTX_FRAME_RELATED_P (insn) = 1;
26404 }
26405
26406 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26407 assumptions about the offsets of various bits of the stack
26408 frame. */
26409 gcc_assert (info->gp_save_offset == -220
26410 && info->fp_save_offset == -144
26411 && info->lr_save_offset == 8
26412 && info->cr_save_offset == 4
26413 && info->push_p
26414 && info->lr_save_p
26415 && (!crtl->calls_eh_return
26416 || info->ehrd_offset == -432)
26417 && info->vrsave_save_offset == -224
26418 && info->altivec_save_offset == -416);
26419
26420 treg = gen_rtx_REG (SImode, 11);
26421 emit_move_insn (treg, GEN_INT (-info->total_size));
26422
26423 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26424 in R11. It also clobbers R12, so beware! */
26425
26426 /* Preserve CR2 for save_world prologues. */
26427 sz = 5;
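/* The five fixed elements are the LR clobber, the use of the
   *save_world symbol, the CR2 store, the LR store (lr_save_p was
   asserted above) and the stack pointer update.  */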
26428 sz += 32 - info->first_gp_reg_save;
26429 sz += 64 - info->first_fp_reg_save;
26430 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26431 p = rtvec_alloc (sz);
26432 j = 0;
26433 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26434 gen_rtx_REG (SImode,
26435 LR_REGNO));
26436 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26437 gen_rtx_SYMBOL_REF (Pmode,
26438 "*save_world"));
26439 /* We do floats first so that the instruction pattern matches
26440 properly. */
26441 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26442 RTVEC_ELT (p, j++)
26443 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26444 info->first_fp_reg_save + i),
26445 frame_reg_rtx,
26446 info->fp_save_offset + frame_off + 8 * i);
26447 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26448 RTVEC_ELT (p, j++)
26449 = gen_frame_store (gen_rtx_REG (V4SImode,
26450 info->first_altivec_reg_save + i),
26451 frame_reg_rtx,
26452 info->altivec_save_offset + frame_off + 16 * i);
26453 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26454 RTVEC_ELT (p, j++)
26455 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26456 frame_reg_rtx,
26457 info->gp_save_offset + frame_off + reg_size * i);
26458
26459 /* CR register traditionally saved as CR2. */
26460 RTVEC_ELT (p, j++)
26461 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26462 frame_reg_rtx, info->cr_save_offset + frame_off);
26463 /* Explain the use of R0. */
26464 if (info->lr_save_p)
26465 RTVEC_ELT (p, j++)
26466 = gen_frame_store (reg0,
26467 frame_reg_rtx, info->lr_save_offset + frame_off);
26468 /* Explain what happens to the stack pointer. */
26469 {
26470 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26471 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26472 }
26473
26474 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26475 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26476 treg, GEN_INT (-info->total_size));
26477 sp_off = frame_off = info->total_size;
26478 }
26479
26480 strategy = info->savres_strategy;
26481
26482 /* For V.4, update stack before we do any saving and set back pointer. */
26483 if (! WORLD_SAVE_P (info)
26484 && info->push_p
26485 && (DEFAULT_ABI == ABI_V4
26486 || crtl->calls_eh_return))
26487 {
26488 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26489 || !(strategy & SAVE_INLINE_GPRS)
26490 || !(strategy & SAVE_INLINE_VRS));
26491 int ptr_regno = -1;
26492 rtx ptr_reg = NULL_RTX;
26493 int ptr_off = 0;
26494
26495 if (info->total_size < 32767)
26496 frame_off = info->total_size;
26497 else if (need_r11)
26498 ptr_regno = 11;
26499 else if (info->cr_save_p
26500 || info->lr_save_p
26501 || info->first_fp_reg_save < 64
26502 || info->first_gp_reg_save < 32
26503 || info->altivec_size != 0
26504 || info->vrsave_size != 0
26505 || crtl->calls_eh_return)
26506 ptr_regno = 12;
26507 else
26508 {
26509 /* The prologue won't be saving any regs so there is no need
26510 to set up a frame register to access any frame save area.
26511 We also won't be using frame_off anywhere below, but set
26512 the correct value anyway to protect against future
26513 changes to this function. */
26514 frame_off = info->total_size;
26515 }
26516 if (ptr_regno != -1)
26517 {
26518 /* Set up the frame offset to that needed by the first
26519 out-of-line save function. */
26520 START_USE (ptr_regno);
26521 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26522 frame_reg_rtx = ptr_reg;
26523 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26524 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26525 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26526 ptr_off = info->gp_save_offset + info->gp_size;
26527 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26528 ptr_off = info->altivec_save_offset + info->altivec_size;
26529 frame_off = -ptr_off;
26530 }
26531 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26532 ptr_reg, ptr_off);
26533 if (REGNO (frame_reg_rtx) == 12)
26534 sp_adjust = 0;
26535 sp_off = info->total_size;
26536 if (frame_reg_rtx != sp_reg_rtx)
26537 rs6000_emit_stack_tie (frame_reg_rtx, false);
26538 }
26539
26540 /* If we use the link register, get it into r0. */
26541 if (!WORLD_SAVE_P (info) && info->lr_save_p
26542 && !cfun->machine->lr_is_wrapped_separately)
26543 {
26544 rtx addr, reg, mem;
26545
26546 reg = gen_rtx_REG (Pmode, 0);
26547 START_USE (0);
26548 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26549 RTX_FRAME_RELATED_P (insn) = 1;
26550
26551 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26552 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26553 {
26554 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26555 GEN_INT (info->lr_save_offset + frame_off));
26556 mem = gen_rtx_MEM (Pmode, addr);
26557 /* This should not be of rs6000_sr_alias_set, because of
26558 __builtin_return_address. */
26559
26560 insn = emit_move_insn (mem, reg);
26561 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26562 NULL_RTX, NULL_RTX);
26563 END_USE (0);
26564 }
26565 }
26566
26567 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26568 r12 will be needed by the out-of-line gpr save. */
26569 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26570 && !(strategy & (SAVE_INLINE_GPRS
26571 | SAVE_NOINLINE_GPRS_SAVES_LR))
26572 ? 11 : 12);
26573 if (!WORLD_SAVE_P (info)
26574 && info->cr_save_p
26575 && REGNO (frame_reg_rtx) != cr_save_regno
26576 && !(using_static_chain_p && cr_save_regno == 11)
26577 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26578 {
26579 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26580 START_USE (cr_save_regno);
26581 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26582 }
26583
26584 /* Do any required saving of fpr's. Save them inline when the chosen
26585 strategy says so; otherwise call an out-of-line routine. */
26586 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26587 {
26588 int offset = info->fp_save_offset + frame_off;
26589 for (int i = info->first_fp_reg_save; i < 64; i++)
26590 {
26591 if (save_reg_p (i)
26592 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26593 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26594 sp_off - frame_off);
26595
26596 offset += fp_reg_size;
26597 }
26598 }
26599 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26600 {
26601 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26602 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26603 unsigned ptr_regno = ptr_regno_for_savres (sel);
26604 rtx ptr_reg = frame_reg_rtx;
26605
26606 if (REGNO (frame_reg_rtx) == ptr_regno)
26607 gcc_checking_assert (frame_off == 0);
26608 else
26609 {
26610 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26611 NOT_INUSE (ptr_regno);
26612 emit_insn (gen_add3_insn (ptr_reg,
26613 frame_reg_rtx, GEN_INT (frame_off)));
26614 }
26615 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26616 info->fp_save_offset,
26617 info->lr_save_offset,
26618 DFmode, sel);
26619 rs6000_frame_related (insn, ptr_reg, sp_off,
26620 NULL_RTX, NULL_RTX);
26621 if (lr)
26622 END_USE (0);
26623 }
26624
26625 /* Save GPRs. This is done as a PARALLEL if we are using
26626 the store-multiple instructions. */
26627 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26628 {
26629 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26630 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26631 unsigned ptr_regno = ptr_regno_for_savres (sel);
26632 rtx ptr_reg = frame_reg_rtx;
26633 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26634 int end_save = info->gp_save_offset + info->gp_size;
26635 int ptr_off;
26636
26637 if (ptr_regno == 12)
26638 sp_adjust = 0;
26639 if (!ptr_set_up)
26640 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26641
26642 /* Need to adjust r11 (r12) if we saved any FPRs. */
26643 if (end_save + frame_off != 0)
26644 {
26645 rtx offset = GEN_INT (end_save + frame_off);
26646
26647 if (ptr_set_up)
26648 frame_off = -end_save;
26649 else
26650 NOT_INUSE (ptr_regno);
26651 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26652 }
26653 else if (!ptr_set_up)
26654 {
26655 NOT_INUSE (ptr_regno);
26656 emit_move_insn (ptr_reg, frame_reg_rtx);
26657 }
26658 ptr_off = -end_save;
26659 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26660 info->gp_save_offset + ptr_off,
26661 info->lr_save_offset + ptr_off,
26662 reg_mode, sel);
26663 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26664 NULL_RTX, NULL_RTX);
26665 if (lr)
26666 END_USE (0);
26667 }
26668 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26669 {
26670 rtvec p;
26671 int i;
26672 p = rtvec_alloc (32 - info->first_gp_reg_save);
26673 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26674 RTVEC_ELT (p, i)
26675 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26676 frame_reg_rtx,
26677 info->gp_save_offset + frame_off + reg_size * i);
26678 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26679 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26680 NULL_RTX, NULL_RTX);
26681 }
26682 else if (!WORLD_SAVE_P (info))
26683 {
26684 int offset = info->gp_save_offset + frame_off;
26685 for (int i = info->first_gp_reg_save; i < 32; i++)
26686 {
26687 if (save_reg_p (i)
26688 && !cfun->machine->gpr_is_wrapped_separately[i])
26689 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26690 sp_off - frame_off);
26691
26692 offset += reg_size;
26693 }
26694 }
26695
26696 if (crtl->calls_eh_return)
26697 {
26698 unsigned int i;
26699 rtvec p;
26700
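/* First count the EH return data registers so P can be sized.  */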
26701 for (i = 0; ; ++i)
26702 {
26703 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26704 if (regno == INVALID_REGNUM)
26705 break;
26706 }
26707
26708 p = rtvec_alloc (i);
26709
26710 for (i = 0; ; ++i)
26711 {
26712 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26713 if (regno == INVALID_REGNUM)
26714 break;
26715
26716 rtx set
26717 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26718 sp_reg_rtx,
26719 info->ehrd_offset + sp_off + reg_size * (int) i);
26720 RTVEC_ELT (p, i) = set;
26721 RTX_FRAME_RELATED_P (set) = 1;
26722 }
26723
26724 insn = emit_insn (gen_blockage ());
26725 RTX_FRAME_RELATED_P (insn) = 1;
26726 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26727 }
26728
26729 /* In AIX ABI we need to make sure r2 is really saved. */
26730 if (TARGET_AIX && crtl->calls_eh_return)
26731 {
26732 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26733 rtx join_insn, note;
26734 rtx_insn *save_insn;
26735 long toc_restore_insn;
26736
26737 tmp_reg = gen_rtx_REG (Pmode, 11);
26738 tmp_reg_si = gen_rtx_REG (SImode, 11);
26739 if (using_static_chain_p)
26740 {
26741 START_USE (0);
26742 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26743 }
26744 else
26745 START_USE (11);
26746 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26747 /* Peek at instruction to which this function returns. If it's
26748 restoring r2, then we know we've already saved r2. We can't
26749 unconditionally save r2 because the value we have will already
26750 be updated if we arrived at this function via a plt call or
26751 toc adjusting stub. */
26752 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26753 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26754 + RS6000_TOC_SAVE_SLOT);
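/* 0x80410000 and 0xE8410000 are the encodings of "lwz r2,0(r1)" and
   "ld r2,0(r1)" respectively; adding RS6000_TOC_SAVE_SLOT fills in
   the displacement of the TOC save slot.  */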
26755 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26756 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26757 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26758 validate_condition_mode (EQ, CCUNSmode);
26759 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26760 emit_insn (gen_rtx_SET (compare_result,
26761 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26762 toc_save_done = gen_label_rtx ();
26763 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26764 gen_rtx_EQ (VOIDmode, compare_result,
26765 const0_rtx),
26766 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26767 pc_rtx);
26768 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26769 JUMP_LABEL (jump) = toc_save_done;
26770 LABEL_NUSES (toc_save_done) += 1;
26771
26772 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26773 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26774 sp_off - frame_off);
26775
26776 emit_label (toc_save_done);
26777
26778 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
26779 have a CFG that has different saves along different paths.
26780 Move the note to a dummy blockage insn, which describes that
26781 R2 is unconditionally saved after the label. */
26782 /* ??? An alternate representation might be a special insn pattern
26783 containing both the branch and the store. That might give the
26784 code that minimizes the number of DW_CFA_advance opcodes more
26785 freedom in placing the annotations. */
26786 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26787 if (note)
26788 remove_note (save_insn, note);
26789 else
26790 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26791 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26792 RTX_FRAME_RELATED_P (save_insn) = 0;
26793
26794 join_insn = emit_insn (gen_blockage ());
26795 REG_NOTES (join_insn) = note;
26796 RTX_FRAME_RELATED_P (join_insn) = 1;
26797
26798 if (using_static_chain_p)
26799 {
26800 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26801 END_USE (0);
26802 }
26803 else
26804 END_USE (11);
26805 }
26806
26807 /* Save CR if we use any that must be preserved. */
26808 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26809 {
26810 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26811 GEN_INT (info->cr_save_offset + frame_off));
26812 rtx mem = gen_frame_mem (SImode, addr);
26813
26814 /* If we didn't copy cr before, do so now using r0. */
26815 if (cr_save_rtx == NULL_RTX)
26816 {
26817 START_USE (0);
26818 cr_save_rtx = gen_rtx_REG (SImode, 0);
26819 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26820 }
26821
26822 /* Saving CR requires a two-instruction sequence: one instruction
26823 to move the CR to a general-purpose register, and a second
26824 instruction that stores the GPR to memory.
26825
26826 We do not emit any DWARF CFI records for the first of these,
26827 because we cannot properly represent the fact that CR is saved in
26828 a register. One reason is that we cannot express that multiple
26829 CR fields are saved; another reason is that on 64-bit, the size
26830 of the CR register in DWARF (4 bytes) differs from the size of
26831 a general-purpose register.
26832
26833 This means if any intervening instruction were to clobber one of
26834 the call-saved CR fields, we'd have incorrect CFI. To prevent
26835 this from happening, we mark the store to memory as a use of
26836 those CR fields, which prevents any such instruction from being
26837 scheduled in between the two instructions. */
26838 rtx crsave_v[9];
26839 int n_crsave = 0;
26840 int i;
26841
26842 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
26843 for (i = 0; i < 8; i++)
26844 if (save_reg_p (CR0_REGNO + i))
26845 crsave_v[n_crsave++]
26846 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26847
26848 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
26849 gen_rtvec_v (n_crsave, crsave_v)));
26850 END_USE (REGNO (cr_save_rtx));
26851
26852 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
26853 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
26854 so we need to construct a frame expression manually. */
26855 RTX_FRAME_RELATED_P (insn) = 1;
26856
26857 /* Update address to be stack-pointer relative, like
26858 rs6000_frame_related would do. */
26859 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
26860 GEN_INT (info->cr_save_offset + sp_off));
26861 mem = gen_frame_mem (SImode, addr);
26862
26863 if (DEFAULT_ABI == ABI_ELFv2)
26864 {
26865 /* In the ELFv2 ABI we generate separate CFI records for each
26866 CR field that was actually saved. They all point to the
26867 same 32-bit stack slot. */
26868 rtx crframe[8];
26869 int n_crframe = 0;
26870
26871 for (i = 0; i < 8; i++)
26872 if (save_reg_p (CR0_REGNO + i))
26873 {
26874 crframe[n_crframe]
26875 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
26876
26877 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
26878 n_crframe++;
26879 }
26880
26881 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26882 gen_rtx_PARALLEL (VOIDmode,
26883 gen_rtvec_v (n_crframe, crframe)));
26884 }
26885 else
26886 {
26887 /* In other ABIs, by convention, we use a single CR regnum to
26888 represent the fact that all call-saved CR fields are saved.
26889 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
26890 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
26891 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
26892 }
26893 }
26894
26895 /* In the ELFv2 ABI we need to save all call-saved CR fields into
26896 *separate* slots if the routine calls __builtin_eh_return, so
26897 that they can be independently restored by the unwinder. */
26898 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
26899 {
26900 int i, cr_off = info->ehcr_offset;
26901 rtx crsave;
26902
26903 /* ??? We might get better performance by using multiple mfocrf
26904 instructions. */
26905 crsave = gen_rtx_REG (SImode, 0);
26906 emit_insn (gen_prologue_movesi_from_cr (crsave));
26907
26908 for (i = 0; i < 8; i++)
26909 if (!call_used_regs[CR0_REGNO + i])
26910 {
26911 rtvec p = rtvec_alloc (2);
26912 RTVEC_ELT (p, 0)
26913 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
26914 RTVEC_ELT (p, 1)
26915 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
26916
26917 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26918
26919 RTX_FRAME_RELATED_P (insn) = 1;
26920 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
26921 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
26922 sp_reg_rtx, cr_off + sp_off));
26923
26924 cr_off += reg_size;
26925 }
26926 }
26927
26928 /* If stack-clash protection is enabled but we allocate no stack,
26929 just note that in the dump file. */
26930 if (flag_stack_clash_protection
26931 && dump_file
26932 && !info->push_p)
26933 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
26934
26935 /* Update stack and set back pointer unless this is V.4,
26936 for which it was done previously. */
26937 if (!WORLD_SAVE_P (info) && info->push_p
26938 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
26939 {
26940 rtx ptr_reg = NULL;
26941 int ptr_off = 0;
26942
26943 /* If saving altivec regs, we need to be able to address all save
26944 locations using a 16-bit offset. */
26945 if ((strategy & SAVE_INLINE_VRS) == 0
26946 || (info->altivec_size != 0
26947 && (info->altivec_save_offset + info->altivec_size - 16
26948 + info->total_size - frame_off) > 32767)
26949 || (info->vrsave_size != 0
26950 && (info->vrsave_save_offset
26951 + info->total_size - frame_off) > 32767))
26952 {
26953 int sel = SAVRES_SAVE | SAVRES_VR;
26954 unsigned ptr_regno = ptr_regno_for_savres (sel);
26955
26956 if (using_static_chain_p
26957 && ptr_regno == STATIC_CHAIN_REGNUM)
26958 ptr_regno = 12;
26959 if (REGNO (frame_reg_rtx) != ptr_regno)
26960 START_USE (ptr_regno);
26961 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26962 frame_reg_rtx = ptr_reg;
26963 ptr_off = info->altivec_save_offset + info->altivec_size;
26964 frame_off = -ptr_off;
26965 }
26966 else if (REGNO (frame_reg_rtx) == 1)
26967 frame_off = info->total_size;
26968 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26969 ptr_reg, ptr_off);
26970 if (REGNO (frame_reg_rtx) == 12)
26971 sp_adjust = 0;
26972 sp_off = info->total_size;
26973 if (frame_reg_rtx != sp_reg_rtx)
26974 rs6000_emit_stack_tie (frame_reg_rtx, false);
26975 }
26976
26977 /* Set frame pointer, if needed. */
26978 if (frame_pointer_needed)
26979 {
26980 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
26981 sp_reg_rtx);
26982 RTX_FRAME_RELATED_P (insn) = 1;
26983 }
26984
26985 /* Save AltiVec registers if needed. Save here because the red zone does
26986 not always include AltiVec registers. */
26987 if (!WORLD_SAVE_P (info)
26988 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
26989 {
26990 int end_save = info->altivec_save_offset + info->altivec_size;
26991 int ptr_off;
26992 /* Oddly, the vector save/restore functions point r0 at the end
26993 of the save area, then use r11 or r12 to load offsets for
26994 [reg+reg] addressing. */
26995 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
26996 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
26997 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
26998
26999 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27000 NOT_INUSE (0);
27001 if (scratch_regno == 12)
27002 sp_adjust = 0;
27003 if (end_save + frame_off != 0)
27004 {
27005 rtx offset = GEN_INT (end_save + frame_off);
27006
27007 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27008 }
27009 else
27010 emit_move_insn (ptr_reg, frame_reg_rtx);
27011
27012 ptr_off = -end_save;
27013 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27014 info->altivec_save_offset + ptr_off,
27015 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27016 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27017 NULL_RTX, NULL_RTX);
27018 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27019 {
27020 /* The oddity mentioned above clobbered our frame reg. */
27021 emit_move_insn (frame_reg_rtx, ptr_reg);
27022 frame_off = ptr_off;
27023 }
27024 }
27025 else if (!WORLD_SAVE_P (info)
27026 && info->altivec_size != 0)
27027 {
27028 int i;
27029
27030 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27031 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27032 {
27033 rtx areg, savereg, mem;
27034 HOST_WIDE_INT offset;
27035
27036 offset = (info->altivec_save_offset + frame_off
27037 + 16 * (i - info->first_altivec_reg_save));
27038
27039 savereg = gen_rtx_REG (V4SImode, i);
27040
27041 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27042 {
27043 mem = gen_frame_mem (V4SImode,
27044 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27045 GEN_INT (offset)));
27046 insn = emit_insn (gen_rtx_SET (mem, savereg));
27047 areg = NULL_RTX;
27048 }
27049 else
27050 {
27051 NOT_INUSE (0);
27052 areg = gen_rtx_REG (Pmode, 0);
27053 emit_move_insn (areg, GEN_INT (offset));
27054
27055 /* AltiVec addressing mode is [reg+reg]. */
27056 mem = gen_frame_mem (V4SImode,
27057 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27058
27059 /* Rather than emitting a generic move, force use of the stvx
27060 instruction, which we always want on ISA 2.07 (power8) systems.
27061 In particular we don't want xxpermdi/stxvd2x for little
27062 endian. */
27063 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27064 }
27065
27066 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27067 areg, GEN_INT (offset));
27068 }
27069 }
27070
27071 /* Save and update VRSAVE if needed; see the comment above
27072 emit_vrsave_prologue for what VRSAVE is for. */
27077
27078 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27079 {
27080 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27081 be using r12 as frame_reg_rtx and r11 as the static chain
27082 pointer for nested functions. */
27083 int save_regno = 12;
27084 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27085 && !using_static_chain_p)
27086 save_regno = 11;
27087 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27088 {
27089 save_regno = 11;
27090 if (using_static_chain_p)
27091 save_regno = 0;
27092 }
27093 NOT_INUSE (save_regno);
27094
27095 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27096 }
27097
27098 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27099 if (!TARGET_SINGLE_PIC_BASE
27100 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27101 && !constant_pool_empty_p ())
27102 || (DEFAULT_ABI == ABI_V4
27103 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27104 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27105 {
27106 /* If emit_load_toc_table will use the link register, we need to save
27107 it. We use R12 for this purpose because emit_load_toc_table
27108 can use register 0. This allows us to use a plain 'blr' to return
27109 from the procedure more often. */
27110 int save_LR_around_toc_setup = (TARGET_ELF
27111 && DEFAULT_ABI == ABI_V4
27112 && flag_pic
27113 && ! info->lr_save_p
27114 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27115 if (save_LR_around_toc_setup)
27116 {
27117 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27118 rtx tmp = gen_rtx_REG (Pmode, 12);
27119
27120 sp_adjust = 0;
27121 insn = emit_move_insn (tmp, lr);
27122 RTX_FRAME_RELATED_P (insn) = 1;
27123
27124 rs6000_emit_load_toc_table (TRUE);
27125
27126 insn = emit_move_insn (lr, tmp);
27127 add_reg_note (insn, REG_CFA_RESTORE, lr);
27128 RTX_FRAME_RELATED_P (insn) = 1;
27129 }
27130 else
27131 rs6000_emit_load_toc_table (TRUE);
27132 }
27133
27134 #if TARGET_MACHO
27135 if (!TARGET_SINGLE_PIC_BASE
27136 && DEFAULT_ABI == ABI_DARWIN
27137 && flag_pic && crtl->uses_pic_offset_table)
27138 {
27139 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27140 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27141
27142 /* Save and restore LR locally around this call (in R0). */
27143 if (!info->lr_save_p)
27144 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27145
27146 emit_insn (gen_load_macho_picbase (src));
27147
27148 emit_move_insn (gen_rtx_REG (Pmode,
27149 RS6000_PIC_OFFSET_TABLE_REGNUM),
27150 lr);
27151
27152 if (!info->lr_save_p)
27153 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27154 }
27155 #endif
27156
27157 /* If we need to, save the TOC register after doing the stack setup.
27158 Do not emit eh frame info for this save. The unwinder wants info,
27159 conceptually attached to instructions in this function, about
27160 register values in the caller of this function. This R2 may have
27161 already been changed from the value in the caller.
27162 We don't attempt to write accurate DWARF EH frame info for R2
27163 because code emitted by gcc for a (non-pointer) function call
27164 doesn't save and restore R2. Instead, R2 is managed out-of-line
27165 by a linker generated plt call stub when the function resides in
27166 a shared library. This behavior is costly to describe in DWARF,
27167 both in terms of the size of DWARF info and the time taken in the
27168 unwinder to interpret it. R2 changes, apart from the
27169 calls_eh_return case earlier in this function, are handled by
27170 linux-unwind.h frob_update_context. */
27171 if (rs6000_save_toc_in_prologue_p ()
27172 && !cfun->machine->toc_is_wrapped_separately)
27173 {
27174 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27175 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27176 }
27177
27178 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27179 if (using_split_stack && split_stack_arg_pointer_used_p ())
27180 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27181 }
27182
27183 /* Output .extern statements for the save/restore routines we use. */
27184
27185 static void
27186 rs6000_output_savres_externs (FILE *file)
27187 {
27188 rs6000_stack_t *info = rs6000_stack_info ();
27189
27190 if (TARGET_DEBUG_STACK)
27191 debug_stack_info (info);
27192
27193 /* Write .extern for any function we will call to save and restore
27194 fp values. */
27195 if (info->first_fp_reg_save < 64
27196 && !TARGET_MACHO
27197 && !TARGET_ELF)
27198 {
27199 char *name;
27200 int regno = info->first_fp_reg_save - 32;
27201
27202 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27203 {
27204 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27205 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27206 name = rs6000_savres_routine_name (regno, sel);
27207 fprintf (file, "\t.extern %s\n", name);
27208 }
27209 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27210 {
27211 bool lr = (info->savres_strategy
27212 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27213 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27214 name = rs6000_savres_routine_name (regno, sel);
27215 fprintf (file, "\t.extern %s\n", name);
27216 }
27217 }
27218 }
27219
27220 /* Write function prologue. */
27221
27222 static void
27223 rs6000_output_function_prologue (FILE *file)
27224 {
27225 if (!cfun->is_thunk)
27226 rs6000_output_savres_externs (file);
27227
27228 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27229 immediately after the global entry point label. */
27230 if (rs6000_global_entry_point_needed_p ())
27231 {
27232 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27233
27234 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27235
27236 if (TARGET_CMODEL != CMODEL_LARGE)
27237 {
27238 	  /* In the small and medium code models, we assume the TOC is less
27239 	     than 2 GB away from the text section, so it can be computed via the
27240 following two-instruction sequence. */
27241 char buf[256];
27242
27243 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27244 fprintf (file, "0:\taddis 2,12,.TOC.-");
27245 assemble_name (file, buf);
27246 fprintf (file, "@ha\n");
27247 fprintf (file, "\taddi 2,2,.TOC.-");
27248 assemble_name (file, buf);
27249 fprintf (file, "@l\n");
27250 }
27251 else
27252 {
27253 /* In the large code model, we allow arbitrary offsets between the
27254 TOC and the text section, so we have to load the offset from
27255 memory. The data field is emitted directly before the global
27256 entry point in rs6000_elf_declare_function_name. */
27257 char buf[256];
27258
27259 #ifdef HAVE_AS_ENTRY_MARKERS
27260 /* If supported by the linker, emit a marker relocation. If the
27261 total code size of the final executable or shared library
27262 happens to fit into 2 GB after all, the linker will replace
27263 this code sequence with the sequence for the small or medium
27264 code model. */
27265 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27266 #endif
27267 fprintf (file, "\tld 2,");
27268 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27269 assemble_name (file, buf);
27270 fprintf (file, "-");
27271 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27272 assemble_name (file, buf);
27273 fprintf (file, "(12)\n");
27274 fprintf (file, "\tadd 2,2,12\n");
27275 }
27276
27277 fputs ("\t.localentry\t", file);
27278 assemble_name (file, name);
27279 fputs (",.-", file);
27280 assemble_name (file, name);
27281 fputs ("\n", file);
27282 }
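  /* As an illustrative sketch (not the verbatim output; assume a
     function named "foo" and the small or medium code model), the
     code above produces a global entry sequence along these lines:

	foo:
	.LCF0:
	0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l
		.localentry	foo,.-foo

     The ELFv2 ABI guarantees that r12 holds the global entry address
     at this point, so the two adds materialize the TOC pointer in r2
     before execution reaches the local entry point.  */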
27283
27284 /* Output -mprofile-kernel code. This needs to be done here instead of
27285 in output_function_profile since it must go after the ELFv2 ABI
27286 local entry point. */
27287 if (TARGET_PROFILE_KERNEL && crtl->profile)
27288 {
27289 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27290 gcc_assert (!TARGET_32BIT);
27291
27292 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27293
27294 /* In the ELFv2 ABI we have no compiler stack word. It must be
27295 	 the responsibility of _mcount to preserve the static chain
27296 register if required. */
27297 if (DEFAULT_ABI != ABI_ELFv2
27298 && cfun->static_chain_decl != NULL)
27299 {
27300 asm_fprintf (file, "\tstd %s,24(%s)\n",
27301 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27302 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27303 asm_fprintf (file, "\tld %s,24(%s)\n",
27304 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27305 }
27306 else
27307 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27308 }
27309
27310 rs6000_pic_labelno++;
27311 }
27312
27313 /* -mprofile-kernel code calls mcount before the function prolog,
27314 so a profiled leaf function should stay a leaf function. */
27315 static bool
27316 rs6000_keep_leaf_when_profiled ()
27317 {
27318 return TARGET_PROFILE_KERNEL;
27319 }
27320
27321 /* Non-zero if vmx regs are restored before the frame pop, zero if
27322 we restore after the pop when possible. */
27323 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27324
27325 /* Restoring cr is a two-step process: loading a reg from the frame
27326 save, then moving the reg to cr. For ABI_V4 we must let the
27327 unwinder know that the stack location is no longer valid at or
27328 before the stack deallocation, but we can't emit a cfa_restore for
27329 cr at the stack deallocation like we do for other registers.
27330 The trouble is that it is possible for the move to cr to be
27331 scheduled after the stack deallocation. So say exactly where cr
27332 is located on each of the two insns. */
27333
27334 static rtx
27335 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27336 {
27337 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27338 rtx reg = gen_rtx_REG (SImode, regno);
27339 rtx_insn *insn = emit_move_insn (reg, mem);
27340
27341 if (!exit_func && DEFAULT_ABI == ABI_V4)
27342 {
27343 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27344 rtx set = gen_rtx_SET (reg, cr);
27345
27346 add_reg_note (insn, REG_CFA_REGISTER, set);
27347 RTX_FRAME_RELATED_P (insn) = 1;
27348 }
27349 return reg;
27350 }
27351
27352 /* Reload CR from REG. */
27353
27354 static void
27355 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27356 {
27357 int count = 0;
27358 int i;
27359
27360 if (using_mfcr_multiple)
27361 {
27362 for (i = 0; i < 8; i++)
27363 if (save_reg_p (CR0_REGNO + i))
27364 count++;
27365 gcc_assert (count);
27366 }
27367
27368 if (using_mfcr_multiple && count > 1)
27369 {
27370 rtx_insn *insn;
27371 rtvec p;
27372 int ndx;
27373
27374 p = rtvec_alloc (count);
27375
27376 ndx = 0;
27377 for (i = 0; i < 8; i++)
27378 if (save_reg_p (CR0_REGNO + i))
27379 {
27380 rtvec r = rtvec_alloc (2);
27381 RTVEC_ELT (r, 0) = reg;
27382 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27383 RTVEC_ELT (p, ndx) =
27384 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27385 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27386 ndx++;
27387 }
27388 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27389 gcc_assert (ndx == count);
27390
27391 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27392 CR field separately. */
27393 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27394 {
27395 for (i = 0; i < 8; i++)
27396 if (save_reg_p (CR0_REGNO + i))
27397 add_reg_note (insn, REG_CFA_RESTORE,
27398 gen_rtx_REG (SImode, CR0_REGNO + i));
27399
27400 RTX_FRAME_RELATED_P (insn) = 1;
27401 }
27402 }
27403 else
27404 for (i = 0; i < 8; i++)
27405 if (save_reg_p (CR0_REGNO + i))
27406 {
27407 rtx insn = emit_insn (gen_movsi_to_cr_one
27408 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27409
27410 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27411 CR field separately, attached to the insn that in fact
27412 restores this particular CR field. */
27413 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27414 {
27415 add_reg_note (insn, REG_CFA_RESTORE,
27416 gen_rtx_REG (SImode, CR0_REGNO + i));
27417
27418 RTX_FRAME_RELATED_P (insn) = 1;
27419 }
27420 }
27421
27422 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27423 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27424 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27425 {
27426 rtx_insn *insn = get_last_insn ();
27427 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27428
27429 add_reg_note (insn, REG_CFA_RESTORE, cr);
27430 RTX_FRAME_RELATED_P (insn) = 1;
27431 }
27432 }
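/* Worked example for the mask computed above: if only CR2 was saved
   (i == 2), GEN_INT (1 << (7 - 2)) yields 0x20, the mtcrf field-mask
   bit that selects CR field 2 (bit 7 of the mask selects CR0, bit 0
   selects CR7).  */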
27433
27434 /* Like cr, the move to lr instruction can be scheduled after the
27435 stack deallocation, but unlike cr, its stack frame save is still
27436 valid. So we only need to emit the cfa_restore on the correct
27437 instruction. */
27438
27439 static void
27440 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27441 {
27442 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27443 rtx reg = gen_rtx_REG (Pmode, regno);
27444
27445 emit_move_insn (reg, mem);
27446 }
27447
27448 static void
27449 restore_saved_lr (int regno, bool exit_func)
27450 {
27451 rtx reg = gen_rtx_REG (Pmode, regno);
27452 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27453 rtx_insn *insn = emit_move_insn (lr, reg);
27454
27455 if (!exit_func && flag_shrink_wrap)
27456 {
27457 add_reg_note (insn, REG_CFA_RESTORE, lr);
27458 RTX_FRAME_RELATED_P (insn) = 1;
27459 }
27460 }
27461
27462 static rtx
27463 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27464 {
27465 if (DEFAULT_ABI == ABI_ELFv2)
27466 {
27467 int i;
27468 for (i = 0; i < 8; i++)
27469 if (save_reg_p (CR0_REGNO + i))
27470 {
27471 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27472 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27473 cfa_restores);
27474 }
27475 }
27476 else if (info->cr_save_p)
27477 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27478 gen_rtx_REG (SImode, CR2_REGNO),
27479 cfa_restores);
27480
27481 if (info->lr_save_p)
27482 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27483 gen_rtx_REG (Pmode, LR_REGNO),
27484 cfa_restores);
27485 return cfa_restores;
27486 }
27487
27488 /* Return true if OFFSET from the stack pointer can be clobbered by
27489    signals.  V.4 doesn't have any stack cushion; the AIX ABIs keep 220
27490    or 288 bytes below the stack pointer clear of signal clobbers.  */
27491
27492 static inline bool
27493 offset_below_red_zone_p (HOST_WIDE_INT offset)
27494 {
27495 return offset < (DEFAULT_ABI == ABI_V4
27496 ? 0
27497 : TARGET_32BIT ? -220 : -288);
27498 }
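/* For example, on a 64-bit AIX-style ABI a save slot at offset -256
   lies inside the 288-byte red zone and is safe from signal
   handlers, whereas one at -320 is not, so the latter must be
   reloaded before the stack pointer is popped.  */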
27499
27500 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27501
27502 static void
27503 emit_cfa_restores (rtx cfa_restores)
27504 {
27505 rtx_insn *insn = get_last_insn ();
27506 rtx *loc = &REG_NOTES (insn);
27507
27508 while (*loc)
27509 loc = &XEXP (*loc, 1);
27510 *loc = cfa_restores;
27511 RTX_FRAME_RELATED_P (insn) = 1;
27512 }
27513
27514 /* Emit function epilogue as insns. */
27515
27516 void
27517 rs6000_emit_epilogue (int sibcall)
27518 {
27519 rs6000_stack_t *info;
27520 int restoring_GPRs_inline;
27521 int restoring_FPRs_inline;
27522 int using_load_multiple;
27523 int using_mtcr_multiple;
27524 int use_backchain_to_restore_sp;
27525 int restore_lr;
27526 int strategy;
27527 HOST_WIDE_INT frame_off = 0;
27528 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27529 rtx frame_reg_rtx = sp_reg_rtx;
27530 rtx cfa_restores = NULL_RTX;
27531 rtx insn;
27532 rtx cr_save_reg = NULL_RTX;
27533 machine_mode reg_mode = Pmode;
27534 int reg_size = TARGET_32BIT ? 4 : 8;
27535 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27536 int fp_reg_size = 8;
27537 int i;
27538 bool exit_func;
27539 unsigned ptr_regno;
27540
27541 info = rs6000_stack_info ();
27542
27543 strategy = info->savres_strategy;
27544 using_load_multiple = strategy & REST_MULTIPLE;
27545 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27546 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27547 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27548 || rs6000_tune == PROCESSOR_PPC603
27549 || rs6000_tune == PROCESSOR_PPC750
27550 || optimize_size);
27551 /* Restore via the backchain when we have a large frame, since this
27552 is more efficient than an addis, addi pair. The second condition
27553      here will not trigger at the moment; we don't actually need a
27554 frame pointer for alloca, but the generic parts of the compiler
27555 give us one anyway. */
27556 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27557 ? info->lr_save_offset
27558 : 0) > 32767
27559 || (cfun->calls_alloca
27560 && !frame_pointer_needed));
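  /* For instance, with a 40000-byte frame the restore offset no longer
     fits in a signed 16-bit displacement (maximum 32767), so a single
     load of the backchain word at 0(r1) is cheaper than materializing
     the offset with an addis/addi pair.  */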
27561 restore_lr = (info->lr_save_p
27562 && (restoring_FPRs_inline
27563 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27564 && (restoring_GPRs_inline
27565 || info->first_fp_reg_save < 64)
27566 && !cfun->machine->lr_is_wrapped_separately);
27567 
27569 if (WORLD_SAVE_P (info))
27570 {
27571 int i, j;
27572 char rname[30];
27573 const char *alloc_rname;
27574 rtvec p;
27575
27576 /* eh_rest_world_r10 will return to the location saved in the LR
27577 stack slot (which is not likely to be our caller.)
27578 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27579 rest_world is similar, except any R10 parameter is ignored.
27580 The exception-handling stuff that was here in 2.95 is no
27581 longer necessary. */
27582
27583 p = rtvec_alloc (9
27584 + 32 - info->first_gp_reg_save
27585 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27586 + 63 + 1 - info->first_fp_reg_save);
27587
27588 strcpy (rname, ((crtl->calls_eh_return) ?
27589 "*eh_rest_world_r10" : "*rest_world"));
27590 alloc_rname = ggc_strdup (rname);
27591
27592 j = 0;
27593 RTVEC_ELT (p, j++) = ret_rtx;
27594 RTVEC_ELT (p, j++)
27595 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27596 /* The instruction pattern requires a clobber here;
27597 it is shared with the restVEC helper. */
27598 RTVEC_ELT (p, j++)
27599 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27600
27601 {
27602 /* CR register traditionally saved as CR2. */
27603 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27604 RTVEC_ELT (p, j++)
27605 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27606 if (flag_shrink_wrap)
27607 {
27608 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27609 gen_rtx_REG (Pmode, LR_REGNO),
27610 cfa_restores);
27611 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27612 }
27613 }
27614
27615 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27616 {
27617 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27618 RTVEC_ELT (p, j++)
27619 = gen_frame_load (reg,
27620 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27621 if (flag_shrink_wrap
27622 && save_reg_p (info->first_gp_reg_save + i))
27623 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27624 }
27625 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27626 {
27627 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27628 RTVEC_ELT (p, j++)
27629 = gen_frame_load (reg,
27630 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27631 if (flag_shrink_wrap
27632 && save_reg_p (info->first_altivec_reg_save + i))
27633 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27634 }
27635 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27636 {
27637 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27638 info->first_fp_reg_save + i);
27639 RTVEC_ELT (p, j++)
27640 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27641 if (flag_shrink_wrap
27642 && save_reg_p (info->first_fp_reg_save + i))
27643 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27644 }
27645 RTVEC_ELT (p, j++)
27646 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27647 RTVEC_ELT (p, j++)
27648 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27649 RTVEC_ELT (p, j++)
27650 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27651 RTVEC_ELT (p, j++)
27652 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27653 RTVEC_ELT (p, j++)
27654 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27655 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27656
27657 if (flag_shrink_wrap)
27658 {
27659 REG_NOTES (insn) = cfa_restores;
27660 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27661 RTX_FRAME_RELATED_P (insn) = 1;
27662 }
27663 return;
27664 }
27665
27666 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27667 if (info->push_p)
27668 frame_off = info->total_size;
27669
27670 /* Restore AltiVec registers if we must do so before adjusting the
27671 stack. */
27672 if (info->altivec_size != 0
27673 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27674 || (DEFAULT_ABI != ABI_V4
27675 && offset_below_red_zone_p (info->altivec_save_offset))))
27676 {
27677 int i;
27678 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27679
27680 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27681 if (use_backchain_to_restore_sp)
27682 {
27683 int frame_regno = 11;
27684
27685 if ((strategy & REST_INLINE_VRS) == 0)
27686 {
27687 /* Of r11 and r12, select the one not clobbered by an
27688 out-of-line restore function for the frame register. */
27689 frame_regno = 11 + 12 - scratch_regno;
27690 }
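	      /* I.e. the arithmetic picks whichever of the two the
		 scratch register is not: 11 + 12 - 11 == 12 and
		 11 + 12 - 12 == 11.  */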
27691 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27692 emit_move_insn (frame_reg_rtx,
27693 gen_rtx_MEM (Pmode, sp_reg_rtx));
27694 frame_off = 0;
27695 }
27696 else if (frame_pointer_needed)
27697 frame_reg_rtx = hard_frame_pointer_rtx;
27698
27699 if ((strategy & REST_INLINE_VRS) == 0)
27700 {
27701 int end_save = info->altivec_save_offset + info->altivec_size;
27702 int ptr_off;
27703 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27704 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27705
27706 if (end_save + frame_off != 0)
27707 {
27708 rtx offset = GEN_INT (end_save + frame_off);
27709
27710 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27711 }
27712 else
27713 emit_move_insn (ptr_reg, frame_reg_rtx);
27714
27715 ptr_off = -end_save;
27716 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27717 info->altivec_save_offset + ptr_off,
27718 0, V4SImode, SAVRES_VR);
27719 }
27720 else
27721 {
27722 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27723 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27724 {
27725 rtx addr, areg, mem, insn;
27726 rtx reg = gen_rtx_REG (V4SImode, i);
27727 HOST_WIDE_INT offset
27728 = (info->altivec_save_offset + frame_off
27729 + 16 * (i - info->first_altivec_reg_save));
27730
27731 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27732 {
27733 mem = gen_frame_mem (V4SImode,
27734 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27735 GEN_INT (offset)));
27736 insn = gen_rtx_SET (reg, mem);
27737 }
27738 else
27739 {
27740 areg = gen_rtx_REG (Pmode, 0);
27741 emit_move_insn (areg, GEN_INT (offset));
27742
27743 /* AltiVec addressing mode is [reg+reg]. */
27744 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27745 mem = gen_frame_mem (V4SImode, addr);
27746
27747 /* Rather than emitting a generic move, force use of the
27748 lvx instruction, which we always want. In particular we
27749 don't want lxvd2x/xxpermdi for little endian. */
27750 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27751 }
27752
27753 (void) emit_insn (insn);
27754 }
27755 }
27756
27757 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27758 if (((strategy & REST_INLINE_VRS) == 0
27759 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27760 && (flag_shrink_wrap
27761 || (offset_below_red_zone_p
27762 (info->altivec_save_offset
27763 + 16 * (i - info->first_altivec_reg_save))))
27764 && save_reg_p (i))
27765 {
27766 rtx reg = gen_rtx_REG (V4SImode, i);
27767 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27768 }
27769 }
27770
27771 /* Restore VRSAVE if we must do so before adjusting the stack. */
27772 if (info->vrsave_size != 0
27773 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27774 || (DEFAULT_ABI != ABI_V4
27775 && offset_below_red_zone_p (info->vrsave_save_offset))))
27776 {
27777 rtx reg;
27778
27779 if (frame_reg_rtx == sp_reg_rtx)
27780 {
27781 if (use_backchain_to_restore_sp)
27782 {
27783 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27784 emit_move_insn (frame_reg_rtx,
27785 gen_rtx_MEM (Pmode, sp_reg_rtx));
27786 frame_off = 0;
27787 }
27788 else if (frame_pointer_needed)
27789 frame_reg_rtx = hard_frame_pointer_rtx;
27790 }
27791
27792 reg = gen_rtx_REG (SImode, 12);
27793 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27794 info->vrsave_save_offset + frame_off));
27795
27796 emit_insn (generate_set_vrsave (reg, info, 1));
27797 }
27798
27799 insn = NULL_RTX;
27800 /* If we have a large stack frame, restore the old stack pointer
27801 using the backchain. */
27802 if (use_backchain_to_restore_sp)
27803 {
27804 if (frame_reg_rtx == sp_reg_rtx)
27805 {
27806 /* Under V.4, don't reset the stack pointer until after we're done
27807 loading the saved registers. */
27808 if (DEFAULT_ABI == ABI_V4)
27809 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27810
27811 insn = emit_move_insn (frame_reg_rtx,
27812 gen_rtx_MEM (Pmode, sp_reg_rtx));
27813 frame_off = 0;
27814 }
27815 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27816 && DEFAULT_ABI == ABI_V4)
27817 /* frame_reg_rtx has been set up by the altivec restore. */
27818 ;
27819 else
27820 {
27821 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27822 frame_reg_rtx = sp_reg_rtx;
27823 }
27824 }
27825 /* If we have a frame pointer, we can restore the old stack pointer
27826 from it. */
27827 else if (frame_pointer_needed)
27828 {
27829 frame_reg_rtx = sp_reg_rtx;
27830 if (DEFAULT_ABI == ABI_V4)
27831 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27832 /* Prevent reordering memory accesses against stack pointer restore. */
27833 else if (cfun->calls_alloca
27834 || offset_below_red_zone_p (-info->total_size))
27835 rs6000_emit_stack_tie (frame_reg_rtx, true);
27836
27837 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
27838 GEN_INT (info->total_size)));
27839 frame_off = 0;
27840 }
27841 else if (info->push_p
27842 && DEFAULT_ABI != ABI_V4
27843 && !crtl->calls_eh_return)
27844 {
27845 /* Prevent reordering memory accesses against stack pointer restore. */
27846 if (cfun->calls_alloca
27847 || offset_below_red_zone_p (-info->total_size))
27848 rs6000_emit_stack_tie (frame_reg_rtx, false);
27849 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
27850 GEN_INT (info->total_size)));
27851 frame_off = 0;
27852 }
27853 if (insn && frame_reg_rtx == sp_reg_rtx)
27854 {
27855 if (cfa_restores)
27856 {
27857 REG_NOTES (insn) = cfa_restores;
27858 cfa_restores = NULL_RTX;
27859 }
27860 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27861 RTX_FRAME_RELATED_P (insn) = 1;
27862 }
27863
27864 /* Restore AltiVec registers if we have not done so already. */
27865 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27866 && info->altivec_size != 0
27867 && (DEFAULT_ABI == ABI_V4
27868 || !offset_below_red_zone_p (info->altivec_save_offset)))
27869 {
27870 int i;
27871
27872 if ((strategy & REST_INLINE_VRS) == 0)
27873 {
27874 int end_save = info->altivec_save_offset + info->altivec_size;
27875 int ptr_off;
27876 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27877 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27878 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27879
27880 if (end_save + frame_off != 0)
27881 {
27882 rtx offset = GEN_INT (end_save + frame_off);
27883
27884 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27885 }
27886 else
27887 emit_move_insn (ptr_reg, frame_reg_rtx);
27888
27889 ptr_off = -end_save;
27890 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27891 info->altivec_save_offset + ptr_off,
27892 0, V4SImode, SAVRES_VR);
27893 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27894 {
27895 /* Frame reg was clobbered by out-of-line save. Restore it
27896 		 from ptr_reg, and if we are calling an out-of-line gpr or
27897 		 fpr restore, set up the correct pointer and offset.  */
27898 unsigned newptr_regno = 1;
27899 if (!restoring_GPRs_inline)
27900 {
27901 bool lr = info->gp_save_offset + info->gp_size == 0;
27902 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
27903 newptr_regno = ptr_regno_for_savres (sel);
27904 end_save = info->gp_save_offset + info->gp_size;
27905 }
27906 else if (!restoring_FPRs_inline)
27907 {
27908 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
27909 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27910 newptr_regno = ptr_regno_for_savres (sel);
27911 end_save = info->fp_save_offset + info->fp_size;
27912 }
27913
27914 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
27915 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
27916
27917 if (end_save + ptr_off != 0)
27918 {
27919 rtx offset = GEN_INT (end_save + ptr_off);
27920
27921 frame_off = -end_save;
27922 if (TARGET_32BIT)
27923 emit_insn (gen_addsi3_carry (frame_reg_rtx,
27924 ptr_reg, offset));
27925 else
27926 emit_insn (gen_adddi3_carry (frame_reg_rtx,
27927 ptr_reg, offset));
27928 }
27929 else
27930 {
27931 frame_off = ptr_off;
27932 emit_move_insn (frame_reg_rtx, ptr_reg);
27933 }
27934 }
27935 }
27936 else
27937 {
27938 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27939 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27940 {
27941 rtx addr, areg, mem, insn;
27942 rtx reg = gen_rtx_REG (V4SImode, i);
27943 HOST_WIDE_INT offset
27944 = (info->altivec_save_offset + frame_off
27945 + 16 * (i - info->first_altivec_reg_save));
27946
27947 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27948 {
27949 mem = gen_frame_mem (V4SImode,
27950 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27951 GEN_INT (offset)));
27952 insn = gen_rtx_SET (reg, mem);
27953 }
27954 else
27955 {
27956 areg = gen_rtx_REG (Pmode, 0);
27957 emit_move_insn (areg, GEN_INT (offset));
27958
27959 /* AltiVec addressing mode is [reg+reg]. */
27960 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27961 mem = gen_frame_mem (V4SImode, addr);
27962
27963 /* Rather than emitting a generic move, force use of the
27964 lvx instruction, which we always want. In particular we
27965 don't want lxvd2x/xxpermdi for little endian. */
27966 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27967 }
27968
27969 (void) emit_insn (insn);
27970 }
27971 }
27972
27973 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27974 if (((strategy & REST_INLINE_VRS) == 0
27975 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27976 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
27977 && save_reg_p (i))
27978 {
27979 rtx reg = gen_rtx_REG (V4SImode, i);
27980 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27981 }
27982 }
27983
27984 /* Restore VRSAVE if we have not done so already. */
27985 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27986 && info->vrsave_size != 0
27987 && (DEFAULT_ABI == ABI_V4
27988 || !offset_below_red_zone_p (info->vrsave_save_offset)))
27989 {
27990 rtx reg;
27991
27992 reg = gen_rtx_REG (SImode, 12);
27993 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27994 info->vrsave_save_offset + frame_off));
27995
27996 emit_insn (generate_set_vrsave (reg, info, 1));
27997 }
27998
27999 /* If we exit by an out-of-line restore function on ABI_V4 then that
28000 function will deallocate the stack, so we don't need to worry
28001 about the unwinder restoring cr from an invalid stack frame
28002 location. */
28003 exit_func = (!restoring_FPRs_inline
28004 || (!restoring_GPRs_inline
28005 && info->first_fp_reg_save == 64));
28006
28007 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28008 *separate* slots if the routine calls __builtin_eh_return, so
28009 that they can be independently restored by the unwinder. */
28010 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28011 {
28012 int i, cr_off = info->ehcr_offset;
28013
28014 for (i = 0; i < 8; i++)
28015 if (!call_used_regs[CR0_REGNO + i])
28016 {
28017 rtx reg = gen_rtx_REG (SImode, 0);
28018 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28019 cr_off + frame_off));
28020
28021 insn = emit_insn (gen_movsi_to_cr_one
28022 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28023
28024 if (!exit_func && flag_shrink_wrap)
28025 {
28026 add_reg_note (insn, REG_CFA_RESTORE,
28027 gen_rtx_REG (SImode, CR0_REGNO + i));
28028
28029 RTX_FRAME_RELATED_P (insn) = 1;
28030 }
28031
28032 cr_off += reg_size;
28033 }
28034 }
28035
28036 /* Get the old lr if we saved it. If we are restoring registers
28037 out-of-line, then the out-of-line routines can do this for us. */
28038 if (restore_lr && restoring_GPRs_inline)
28039 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28040
28041 /* Get the old cr if we saved it. */
28042 if (info->cr_save_p)
28043 {
28044 unsigned cr_save_regno = 12;
28045
28046 if (!restoring_GPRs_inline)
28047 {
28048 /* Ensure we don't use the register used by the out-of-line
28049 gpr register restore below. */
28050 bool lr = info->gp_save_offset + info->gp_size == 0;
28051 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28052 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28053
28054 if (gpr_ptr_regno == 12)
28055 cr_save_regno = 11;
28056 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28057 }
28058 else if (REGNO (frame_reg_rtx) == 12)
28059 cr_save_regno = 11;
28060
28061 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28062 info->cr_save_offset + frame_off,
28063 exit_func);
28064 }
28065
28066 /* Set LR here to try to overlap restores below. */
28067 if (restore_lr && restoring_GPRs_inline)
28068 restore_saved_lr (0, exit_func);
28069
28070 /* Load exception handler data registers, if needed. */
28071 if (crtl->calls_eh_return)
28072 {
28073 unsigned int i, regno;
28074
28075 if (TARGET_AIX)
28076 {
28077 rtx reg = gen_rtx_REG (reg_mode, 2);
28078 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28079 frame_off + RS6000_TOC_SAVE_SLOT));
28080 }
28081
28082 for (i = 0; ; ++i)
28083 {
28084 rtx mem;
28085
28086 regno = EH_RETURN_DATA_REGNO (i);
28087 if (regno == INVALID_REGNUM)
28088 break;
28089
28090 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28091 info->ehrd_offset + frame_off
28092 + reg_size * (int) i);
28093
28094 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28095 }
28096 }
28097
28098 /* Restore GPRs. This is done as a PARALLEL if we are using
28099 the load-multiple instructions. */
28100 if (!restoring_GPRs_inline)
28101 {
28102 /* We are jumping to an out-of-line function. */
28103 rtx ptr_reg;
28104 int end_save = info->gp_save_offset + info->gp_size;
28105 bool can_use_exit = end_save == 0;
28106 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28107 int ptr_off;
28108
28109 /* Emit stack reset code if we need it. */
28110 ptr_regno = ptr_regno_for_savres (sel);
28111 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28112 if (can_use_exit)
28113 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28114 else if (end_save + frame_off != 0)
28115 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28116 GEN_INT (end_save + frame_off)));
28117 else if (REGNO (frame_reg_rtx) != ptr_regno)
28118 emit_move_insn (ptr_reg, frame_reg_rtx);
28119 if (REGNO (frame_reg_rtx) == ptr_regno)
28120 frame_off = -end_save;
28121
28122 if (can_use_exit && info->cr_save_p)
28123 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28124
28125 ptr_off = -end_save;
28126 rs6000_emit_savres_rtx (info, ptr_reg,
28127 info->gp_save_offset + ptr_off,
28128 info->lr_save_offset + ptr_off,
28129 reg_mode, sel);
28130 }
28131 else if (using_load_multiple)
28132 {
28133 rtvec p;
28134 p = rtvec_alloc (32 - info->first_gp_reg_save);
28135 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28136 RTVEC_ELT (p, i)
28137 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28138 frame_reg_rtx,
28139 info->gp_save_offset + frame_off + reg_size * i);
28140 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28141 }
28142 else
28143 {
28144 int offset = info->gp_save_offset + frame_off;
28145 for (i = info->first_gp_reg_save; i < 32; i++)
28146 {
28147 if (save_reg_p (i)
28148 && !cfun->machine->gpr_is_wrapped_separately[i])
28149 {
28150 rtx reg = gen_rtx_REG (reg_mode, i);
28151 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28152 }
28153
28154 offset += reg_size;
28155 }
28156 }
28157
28158 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28159 {
28160 /* If the frame pointer was used then we can't delay emitting
28161 a REG_CFA_DEF_CFA note. This must happen on the insn that
28162 restores the frame pointer, r31. We may have already emitted
28163 	 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28164 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28165 be harmless if emitted. */
28166 if (frame_pointer_needed)
28167 {
28168 insn = get_last_insn ();
28169 add_reg_note (insn, REG_CFA_DEF_CFA,
28170 plus_constant (Pmode, frame_reg_rtx, frame_off));
28171 RTX_FRAME_RELATED_P (insn) = 1;
28172 }
28173
28174 /* Set up cfa_restores. We always need these when
28175 shrink-wrapping. If not shrink-wrapping then we only need
28176 the cfa_restore when the stack location is no longer valid.
28177 The cfa_restores must be emitted on or before the insn that
28178 invalidates the stack, and of course must not be emitted
28179 before the insn that actually does the restore. The latter
28180 is why it is a bad idea to emit the cfa_restores as a group
28181 on the last instruction here that actually does a restore:
28182 	 that insn may be reordered with respect to others doing
28183 restores. */
28184 if (flag_shrink_wrap
28185 && !restoring_GPRs_inline
28186 && info->first_fp_reg_save == 64)
28187 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28188
28189 for (i = info->first_gp_reg_save; i < 32; i++)
28190 if (save_reg_p (i)
28191 && !cfun->machine->gpr_is_wrapped_separately[i])
28192 {
28193 rtx reg = gen_rtx_REG (reg_mode, i);
28194 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28195 }
28196 }
28197
28198 if (!restoring_GPRs_inline
28199 && info->first_fp_reg_save == 64)
28200 {
28201 /* We are jumping to an out-of-line function. */
28202 if (cfa_restores)
28203 emit_cfa_restores (cfa_restores);
28204 return;
28205 }
28206
28207 if (restore_lr && !restoring_GPRs_inline)
28208 {
28209 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28210 restore_saved_lr (0, exit_func);
28211 }
28212
28213 /* Restore fpr's if we need to do it without calling a function. */
28214 if (restoring_FPRs_inline)
28215 {
28216 int offset = info->fp_save_offset + frame_off;
28217 for (i = info->first_fp_reg_save; i < 64; i++)
28218 {
28219 if (save_reg_p (i)
28220 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28221 {
28222 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28223 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28224 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28225 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28226 cfa_restores);
28227 }
28228
28229 offset += fp_reg_size;
28230 }
28231 }
28232
28233 /* If we saved cr, restore it here. Just those that were used. */
28234 if (info->cr_save_p)
28235 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28236
28237 /* If this is V.4, unwind the stack pointer after all of the loads
28238 have been done, or set up r11 if we are restoring fp out of line. */
28239 ptr_regno = 1;
28240 if (!restoring_FPRs_inline)
28241 {
28242 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28243 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28244 ptr_regno = ptr_regno_for_savres (sel);
28245 }
28246
28247 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28248 if (REGNO (frame_reg_rtx) == ptr_regno)
28249 frame_off = 0;
28250
28251 if (insn && restoring_FPRs_inline)
28252 {
28253 if (cfa_restores)
28254 {
28255 REG_NOTES (insn) = cfa_restores;
28256 cfa_restores = NULL_RTX;
28257 }
28258 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28259 RTX_FRAME_RELATED_P (insn) = 1;
28260 }
28261
28262 if (crtl->calls_eh_return)
28263 {
28264 rtx sa = EH_RETURN_STACKADJ_RTX;
28265 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28266 }
28267
28268 if (!sibcall && restoring_FPRs_inline)
28269 {
28270 if (cfa_restores)
28271 {
28272 /* We can't hang the cfa_restores off a simple return,
28273 since the shrink-wrap code sometimes uses an existing
28274 return. This means there might be a path from
28275 pre-prologue code to this return, and dwarf2cfi code
28276 wants the eh_frame unwinder state to be the same on
28277 all paths to any point. So we need to emit the
28278 cfa_restores before the return. For -m64 we really
28279 don't need epilogue cfa_restores at all, except for
28280 	     this irritating dwarf2cfi-with-shrink-wrap
28281 	     requirement; the stack red-zone means eh_frame info
28282 from the prologue telling the unwinder to restore
28283 from the stack is perfectly good right to the end of
28284 the function. */
28285 emit_insn (gen_blockage ());
28286 emit_cfa_restores (cfa_restores);
28287 cfa_restores = NULL_RTX;
28288 }
28289
28290 emit_jump_insn (targetm.gen_simple_return ());
28291 }
28292
28293 if (!sibcall && !restoring_FPRs_inline)
28294 {
28295 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28296 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28297 int elt = 0;
28298 RTVEC_ELT (p, elt++) = ret_rtx;
28299 if (lr)
28300 RTVEC_ELT (p, elt++)
28301 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28302
28303 /* We have to restore more than two FP registers, so branch to the
28304 restore function. It will return to our caller. */
28305 int i;
28306 int reg;
28307 rtx sym;
28308
28309 if (flag_shrink_wrap)
28310 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28311
28312 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28313 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28314 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28315 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28316
28317 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28318 {
28319 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28320
28321 RTVEC_ELT (p, elt++)
28322 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28323 if (flag_shrink_wrap
28324 && save_reg_p (info->first_fp_reg_save + i))
28325 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28326 }
28327
28328 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28329 }
28330
28331 if (cfa_restores)
28332 {
28333 if (sibcall)
28334 /* Ensure the cfa_restores are hung off an insn that won't
28335 be reordered above other restores. */
28336 emit_insn (gen_blockage ());
28337
28338 emit_cfa_restores (cfa_restores);
28339 }
28340 }
28341
28342 /* Write function epilogue. */
28343
28344 static void
28345 rs6000_output_function_epilogue (FILE *file)
28346 {
28347 #if TARGET_MACHO
28348 macho_branch_islands ();
28349
28350 {
28351 rtx_insn *insn = get_last_insn ();
28352 rtx_insn *deleted_debug_label = NULL;
28353
28354 /* Mach-O doesn't support labels at the end of objects, so if
28355 it looks like we might want one, take special action.
28356
28357 First, collect any sequence of deleted debug labels. */
28358 while (insn
28359 && NOTE_P (insn)
28360 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28361 {
28362 	/* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
28363 	   a nop; instead set their CODE_LABEL_NUMBER to -1, as
28364 	   otherwise there would be code generation differences
28365 	   between -g and -g0.  */
28366 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28367 deleted_debug_label = insn;
28368 insn = PREV_INSN (insn);
28369 }
28370
28371 /* Second, if we have:
28372 label:
28373 barrier
28374 then this needs to be detected, so skip past the barrier. */
28375
28376 if (insn && BARRIER_P (insn))
28377 insn = PREV_INSN (insn);
28378
28379 /* Up to now we've only seen notes or barriers. */
28380 if (insn)
28381 {
28382 if (LABEL_P (insn)
28383 || (NOTE_P (insn)
28384 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28385 /* Trailing label: <barrier>. */
28386 fputs ("\tnop\n", file);
28387 else
28388 {
28389 /* Lastly, see if we have a completely empty function body. */
28390 while (insn && ! INSN_P (insn))
28391 insn = PREV_INSN (insn);
28392 /* If we don't find any insns, we've got an empty function body;
28393 	       i.e. completely empty, without a return or branch.  This is
28394 taken as the case where a function body has been removed
28395 because it contains an inline __builtin_unreachable(). GCC
28396 states that reaching __builtin_unreachable() means UB so we're
28397 not obliged to do anything special; however, we want
28398 non-zero-sized function bodies. To meet this, and help the
28399 user out, let's trap the case. */
28400 if (insn == NULL)
28401 fputs ("\ttrap\n", file);
28402 }
28403 }
28404 else if (deleted_debug_label)
28405 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28406 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28407 CODE_LABEL_NUMBER (insn) = -1;
28408 }
28409 #endif
28410
28411 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28412 on its format.
28413
28414 We don't output a traceback table if -finhibit-size-directive was
28415 used. The documentation for -finhibit-size-directive reads
28416 ``don't output a @code{.size} assembler directive, or anything
28417 else that would cause trouble if the function is split in the
28418 middle, and the two halves are placed at locations far apart in
28419 memory.'' The traceback table has this property, since it
28420 includes the offset from the start of the function to the
28421 traceback table itself.
28422
28423      System V.4 PowerPC (and the embedded ABI derived from it) uses a
28424      different traceback table.  */
28425 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28426 && ! flag_inhibit_size_directive
28427 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28428 {
28429 const char *fname = NULL;
28430 const char *language_string = lang_hooks.name;
28431 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28432 int i;
28433 int optional_tbtab;
28434 rs6000_stack_t *info = rs6000_stack_info ();
28435
28436 if (rs6000_traceback == traceback_full)
28437 optional_tbtab = 1;
28438 else if (rs6000_traceback == traceback_part)
28439 optional_tbtab = 0;
28440 else
28441 optional_tbtab = !optimize_size && !TARGET_ELF;
28442
28443 if (optional_tbtab)
28444 {
28445 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28446 while (*fname == '.') /* V.4 encodes . in the name */
28447 fname++;
28448
28449 /* Need label immediately before tbtab, so we can compute
28450 its offset from the function start. */
28451 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28452 ASM_OUTPUT_LABEL (file, fname);
28453 }
28454
28455 /* The .tbtab pseudo-op can only be used for the first eight
28456 expressions, since it can't handle the possibly variable
28457 length fields that follow. However, if you omit the optional
28458 fields, the assembler outputs zeros for all optional fields
28459 	 anyway, giving each variable length field its minimum length
28460 	 (as defined in sys/debug.h).  Thus we cannot use the .tbtab
28461 pseudo-op at all. */
28462
28463 /* An all-zero word flags the start of the tbtab, for debuggers
28464 that have to find it by searching forward from the entry
28465 point or from the current pc. */
28466 fputs ("\t.long 0\n", file);
28467
28468 /* Tbtab format type. Use format type 0. */
28469 fputs ("\t.byte 0,", file);
28470
28471 /* Language type. Unfortunately, there does not seem to be any
28472 official way to discover the language being compiled, so we
28473 use language_string.
28474 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
28475 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28476 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
28477 either, so for now use 0. */
28478 if (lang_GNU_C ()
28479 || ! strcmp (language_string, "GNU GIMPLE")
28480 || ! strcmp (language_string, "GNU Go")
28481 || ! strcmp (language_string, "libgccjit"))
28482 i = 0;
28483 else if (! strcmp (language_string, "GNU F77")
28484 || lang_GNU_Fortran ())
28485 i = 1;
28486 else if (! strcmp (language_string, "GNU Pascal"))
28487 i = 2;
28488 else if (! strcmp (language_string, "GNU Ada"))
28489 i = 3;
28490 else if (lang_GNU_CXX ()
28491 || ! strcmp (language_string, "GNU Objective-C++"))
28492 i = 9;
28493 else if (! strcmp (language_string, "GNU Java"))
28494 i = 13;
28495 else if (! strcmp (language_string, "GNU Objective-C"))
28496 i = 14;
28497 else
28498 gcc_unreachable ();
28499 fprintf (file, "%d,", i);
28500
28501 /* 8 single bit fields: global linkage (not set for C extern linkage,
28502 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28503 from start of procedure stored in tbtab, internal function, function
28504 has controlled storage, function has no toc, function uses fp,
28505 function logs/aborts fp operations. */
28506 /* Assume that fp operations are used if any fp reg must be saved. */
28507 fprintf (file, "%d,",
28508 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28509
28510 /* 6 bitfields: function is interrupt handler, name present in
28511 proc table, function calls alloca, on condition directives
28512 (controls stack walks, 3 bits), saves condition reg, saves
28513 link reg. */
28514 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28515 set up as a frame pointer, even when there is no alloca call. */
28516 fprintf (file, "%d,",
28517 ((optional_tbtab << 6)
28518 | ((optional_tbtab & frame_pointer_needed) << 5)
28519 | (info->cr_save_p << 1)
28520 | (info->lr_save_p)));
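      /* For example, a full traceback table (optional_tbtab == 1) for a
	 function that needs no frame pointer but saves both CR and LR
	 emits (1 << 6) | (0 << 5) | (1 << 1) | 1 == 67 here.  */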
28521
28522 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28523 (6 bits). */
28524 fprintf (file, "%d,",
28525 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28526
28527 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28528 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28529
28530 if (optional_tbtab)
28531 {
28532 /* Compute the parameter info from the function decl argument
28533 list. */
28534 tree decl;
28535 int next_parm_info_bit = 31;
28536
28537 for (decl = DECL_ARGUMENTS (current_function_decl);
28538 decl; decl = DECL_CHAIN (decl))
28539 {
28540 rtx parameter = DECL_INCOMING_RTL (decl);
28541 machine_mode mode = GET_MODE (parameter);
28542
28543 if (GET_CODE (parameter) == REG)
28544 {
28545 if (SCALAR_FLOAT_MODE_P (mode))
28546 {
28547 int bits;
28548
28549 float_parms++;
28550
28551 switch (mode)
28552 {
28553 case E_SFmode:
28554 case E_SDmode:
28555 bits = 0x2;
28556 break;
28557
28558 case E_DFmode:
28559 case E_DDmode:
28560 case E_TFmode:
28561 case E_TDmode:
28562 case E_IFmode:
28563 case E_KFmode:
28564 bits = 0x3;
28565 break;
28566
28567 default:
28568 gcc_unreachable ();
28569 }
28570
28571 /* If only one bit will fit, don't or in this entry. */
28572 if (next_parm_info_bit > 0)
28573 parm_info |= (bits << (next_parm_info_bit - 1));
28574 next_parm_info_bit -= 2;
28575 }
28576 else
28577 {
28578 fixed_parms += ((GET_MODE_SIZE (mode)
28579 + (UNITS_PER_WORD - 1))
28580 / UNITS_PER_WORD);
28581 next_parm_info_bit -= 1;
28582 }
28583 }
28584 }
28585 }
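      /* Worked example (a sketch, not normative): for a function
	 taking (double, int) with both arguments passed in registers,
	 the double ORs 0x3 into the top two bits (0x3 << 30) and bumps
	 float_parms, while the int adds one word to fixed_parms and
	 consumes one further bit position, giving fixed_parms == 1,
	 float_parms == 1 and parm_info == 0xC0000000 viewed as an
	 unsigned value.  */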
28586
28587 /* Number of fixed point parameters. */
28588       /* This is actually the number of words of fixed point parameters; thus
28589 	 an 8-byte struct counts as 2, and the maximum value is 8.  */
28590 fprintf (file, "%d,", fixed_parms);
28591
28592 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28593 all on stack. */
28594       /* This is actually the number of fp registers that hold parameters,
28595 	 and thus the maximum value is 13.  */
28596 /* Set parameters on stack bit if parameters are not in their original
28597 registers, regardless of whether they are on the stack? Xlc
28598 seems to set the bit when not optimizing. */
28599 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28600
28601 if (optional_tbtab)
28602 {
28603 /* Optional fields follow. Some are variable length. */
28604
28605 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28606 float, 11 double float. */
28607 /* There is an entry for each parameter in a register, in the order
28608 that they occur in the parameter list. Any intervening arguments
28609 on the stack are ignored. If the list overflows a long (max
28610 possible length 34 bits) then completely leave off all elements
28611 that don't fit. */
28612 /* Only emit this long if there was at least one parameter. */
28613 if (fixed_parms || float_parms)
28614 fprintf (file, "\t.long %d\n", parm_info);
28615
28616 /* Offset from start of code to tb table. */
28617 fputs ("\t.long ", file);
28618 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28619 RS6000_OUTPUT_BASENAME (file, fname);
28620 putc ('-', file);
28621 rs6000_output_function_entry (file, fname);
28622 putc ('\n', file);
28623
28624 /* Interrupt handler mask. */
28625 /* Omit this long, since we never set the interrupt handler bit
28626 above. */
28627
28628 /* Number of CTL (controlled storage) anchors. */
28629 /* Omit this long, since the has_ctl bit is never set above. */
28630
28631 /* Displacement into stack of each CTL anchor. */
28632 /* Omit this list of longs, because there are no CTL anchors. */
28633
28634 /* Length of function name. */
28635 if (*fname == '*')
28636 ++fname;
28637 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28638
28639 /* Function name. */
28640 assemble_string (fname, strlen (fname));
28641
28642 /* Register for alloca automatic storage; this is always reg 31.
28643 Only emit this if the alloca bit was set above. */
28644 if (frame_pointer_needed)
28645 fputs ("\t.byte 31\n", file);
28646
28647 fputs ("\t.align 2\n", file);
28648 }
28649 }
28650
28651 /* Arrange to define .LCTOC1 label, if not already done. */
28652 if (need_toc_init)
28653 {
28654 need_toc_init = 0;
28655 if (!toc_initialized)
28656 {
28657 switch_to_section (toc_section);
28658 switch_to_section (current_function_section ());
28659 }
28660 }
28661 }
28662
28663 /* -fsplit-stack support. */
28664
28665 /* A SYMBOL_REF for __morestack. */
28666 static GTY(()) rtx morestack_ref;
28667
28668 static rtx
28669 gen_add3_const (rtx rt, rtx ra, long c)
28670 {
28671 if (TARGET_64BIT)
28672 return gen_adddi3 (rt, ra, GEN_INT (c));
28673 else
28674 return gen_addsi3 (rt, ra, GEN_INT (c));
28675 }
28676
28677 /* Emit -fsplit-stack prologue, which goes before the regular function
28678 prologue (at local entry point in the case of ELFv2). */
28679
28680 void
28681 rs6000_expand_split_stack_prologue (void)
28682 {
28683 rs6000_stack_t *info = rs6000_stack_info ();
28684 unsigned HOST_WIDE_INT allocate;
28685 long alloc_hi, alloc_lo;
28686 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28687 rtx_insn *insn;
28688
28689 gcc_assert (flag_split_stack && reload_completed);
28690
28691 if (!info->push_p)
28692 return;
28693
28694 if (global_regs[29])
28695 {
28696 error ("%qs uses register r29", "-fsplit-stack");
28697 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28698 "conflicts with %qD", global_regs_decl[29]);
28699 }
28700
28701 allocate = info->total_size;
28702 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28703 {
28704 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
28705 return;
28706 }
28707 if (morestack_ref == NULL_RTX)
28708 {
28709 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28710 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28711 | SYMBOL_FLAG_FUNCTION);
28712 }
28713
28714 r0 = gen_rtx_REG (Pmode, 0);
28715 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28716 r12 = gen_rtx_REG (Pmode, 12);
28717 emit_insn (gen_load_split_stack_limit (r0));
28718 /* Always emit two insns here to calculate the requested stack,
28719 so that the linker can edit them when adjusting size for calling
28720 non-split-stack code. */
28721 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28722 alloc_lo = -allocate - alloc_hi;
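  /* Worked example: for allocate == 0x12345, -allocate == -0x12345;
     rounding to the nearest multiple of 0x10000 gives
     alloc_hi == -0x10000 and alloc_lo == -0x2345, so
     alloc_hi + alloc_lo == -allocate and alloc_lo always fits in the
     signed 16-bit immediate of the second add.  */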
28723 if (alloc_hi != 0)
28724 {
28725 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28726 if (alloc_lo != 0)
28727 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28728 else
28729 emit_insn (gen_nop ());
28730 }
28731 else
28732 {
28733 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28734 emit_insn (gen_nop ());
28735 }
28736
28737 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28738 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28739 ok_label = gen_label_rtx ();
28740 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28741 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28742 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28743 pc_rtx);
28744 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28745 JUMP_LABEL (insn) = ok_label;
28746 /* Mark the jump as very likely to be taken. */
28747 add_reg_br_prob_note (insn, profile_probability::very_likely ());
28748
28749 lr = gen_rtx_REG (Pmode, LR_REGNO);
28750 insn = emit_move_insn (r0, lr);
28751 RTX_FRAME_RELATED_P (insn) = 1;
28752 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28753 RTX_FRAME_RELATED_P (insn) = 1;
28754
28755 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28756 const0_rtx, const0_rtx));
28757 call_fusage = NULL_RTX;
28758 use_reg (&call_fusage, r12);
28759 /* Say the call uses r0, even though it doesn't, to stop regrename
28760 from twiddling with the insns saving lr, trashing args for cfun.
28761 The insns restoring lr are similarly protected by making
28762 split_stack_return use r0. */
28763 use_reg (&call_fusage, r0);
28764 add_function_usage_to (insn, call_fusage);
28765 /* Indicate that this function can't jump to non-local gotos. */
28766 make_reg_eh_region_note_nothrow_nononlocal (insn);
28767 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28768 insn = emit_move_insn (lr, r0);
28769 add_reg_note (insn, REG_CFA_RESTORE, lr);
28770 RTX_FRAME_RELATED_P (insn) = 1;
28771 emit_insn (gen_split_stack_return ());
28772
28773 emit_label (ok_label);
28774 LABEL_NUSES (ok_label) = 1;
28775 }
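/* A rough sketch of the sequence emitted above, for a 64-bit function
   with a small frame (offsets are symbolic placeholders, not the
   literal output):

	ld 0,<limit>(13)	# split-stack limit from the TCB
	addi 12,1,-<frame_size>	# linker-editable allocation pair
	nop
	cmpld 7,12,0
	bge 7,.Lok		# very likely: enough stack
	mflr 0
	std 0,<lr_save>(1)
	bl __morestack
	ld 0,<lr_save>(1)
	mtlr 0
	blr			# split_stack_return
   .Lok:
*/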
28776
28777 /* Return the internal arg pointer used for function incoming
28778    arguments.  With -fsplit-stack, the arg pointer is r12, so we need
28779 to copy it to a pseudo in order for it to be preserved over calls
28780 and suchlike. We'd really like to use a pseudo here for the
28781 internal arg pointer but data-flow analysis is not prepared to
28782 accept pseudos as live at the beginning of a function. */
28783
28784 static rtx
28785 rs6000_internal_arg_pointer (void)
28786 {
28787 if (flag_split_stack
28788 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28789 == NULL))
28791     {
28792 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28793 {
28794 rtx pat;
28795
28796 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28797 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28798
28799 /* Put the pseudo initialization right after the note at the
28800 beginning of the function. */
28801 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28802 gen_rtx_REG (Pmode, 12));
28803 push_topmost_sequence ();
28804 emit_insn_after (pat, get_insns ());
28805 pop_topmost_sequence ();
28806 }
28807 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
28808 FIRST_PARM_OFFSET (current_function_decl));
28809 return copy_to_reg (ret);
28810 }
28811 return virtual_incoming_args_rtx;
28812 }
28813
28814 /* We may have to tell the dataflow pass that the split stack prologue
28815 is initializing a register. */
28816
28817 static void
28818 rs6000_live_on_entry (bitmap regs)
28819 {
28820 if (flag_split_stack)
28821 bitmap_set_bit (regs, 12);
28822 }
28823
28824 /* Emit -fsplit-stack dynamic stack allocation space check. */
28825
28826 void
28827 rs6000_split_stack_space_check (rtx size, rtx label)
28828 {
28829 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28830 rtx limit = gen_reg_rtx (Pmode);
28831 rtx requested = gen_reg_rtx (Pmode);
28832 rtx cmp = gen_reg_rtx (CCUNSmode);
28833 rtx jump;
28834
28835 emit_insn (gen_load_split_stack_limit (limit));
28836 if (CONST_INT_P (size))
28837 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
28838 else
28839 {
28840 size = force_reg (Pmode, size);
28841 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
28842 }
28843 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
28844 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28845 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
28846 gen_rtx_LABEL_REF (VOIDmode, label),
28847 pc_rtx);
28848 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28849 JUMP_LABEL (jump) = label;
28850 }
28851 \f
28852 /* A C compound statement that outputs the assembler code for a thunk
28853 function, used to implement C++ virtual function calls with
28854 multiple inheritance. The thunk acts as a wrapper around a virtual
28855 function, adjusting the implicit object parameter before handing
28856 control off to the real function.
28857
28858 First, emit code to add the integer DELTA to the location that
28859 contains the incoming first argument. Assume that this argument
28860 contains a pointer, and is the one used to pass the `this' pointer
28861 in C++. This is the incoming argument *before* the function
28862 prologue, e.g. `%o0' on a sparc. The addition must preserve the
28863 values of all other incoming arguments.
28864
28865 After the addition, emit code to jump to FUNCTION, which is a
28866 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
28867 not touch the return address. Hence returning from FUNCTION will
28868 return to whoever called the current `thunk'.
28869
28870 The effect must be as if FUNCTION had been called directly with the
28871 adjusted first argument. This macro is responsible for emitting
28872 all of the code for a thunk function; output_function_prologue()
28873 and output_function_epilogue() are not invoked.
28874
28875 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
28876 been extracted from it.) It might possibly be useful on some
28877 targets, but probably not.
28878
28879 If you do not define this macro, the target-independent code in the
28880 C++ frontend will generate a less efficient heavyweight thunk that
28881 calls FUNCTION instead of jumping to it. The generic approach does
28882 not support varargs. */
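/* Illustrative sketch, not taken from these sources: with hypothetical
   names, the generated thunk behaves like

       void thunk (char *this_ptr)
       {
         this_ptr += DELTA;                // constant adjustment
         if (VCALL_OFFSET != 0)            // virtual-base case
           this_ptr += *(long *) (*(char **) this_ptr + VCALL_OFFSET);
         goto FUNCTION;                    // tail jump, not a call
       }

   rs6000_output_mi_thunk below emits this adjust-then-jump sequence
   directly as RTL. */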
28883
28884 static void
28885 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
28886 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
28887 tree function)
28888 {
28889 rtx this_rtx, funexp;
28890 rtx_insn *insn;
28891
28892 reload_completed = 1;
28893 epilogue_completed = 1;
28894
28895 /* Mark the end of the (empty) prologue. */
28896 emit_note (NOTE_INSN_PROLOGUE_END);
28897
28898 /* Find the "this" pointer. If the function returns a structure,
28899 the structure return pointer is in r3. */
28900 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
28901 this_rtx = gen_rtx_REG (Pmode, 4);
28902 else
28903 this_rtx = gen_rtx_REG (Pmode, 3);
28904
28905 /* Apply the constant offset, if required. */
28906 if (delta)
28907 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
28908
28909 /* Apply the offset from the vtable, if required. */
28910 if (vcall_offset)
28911 {
28912 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
28913 rtx tmp = gen_rtx_REG (Pmode, 12);
28914
28915 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
28916 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
28917 {
28918 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
28919 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
28920 }
28921 else
28922 {
28923 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
28924
28925 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
28926 }
28927 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
28928 }
28929
28930 /* Generate a tail call to the target function. */
28931 if (!TREE_USED (function))
28932 {
28933 assemble_external (function);
28934 TREE_USED (function) = 1;
28935 }
28936 funexp = XEXP (DECL_RTL (function), 0);
28937 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
28938
28939 #if TARGET_MACHO
28940 if (MACHOPIC_INDIRECT)
28941 funexp = machopic_indirect_call_target (funexp);
28942 #endif
28943
28944 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
28945 generate sibcall RTL explicitly. */
28946 insn = emit_call_insn (
28947 gen_rtx_PARALLEL (VOIDmode,
28948 gen_rtvec (3,
28949 gen_rtx_CALL (VOIDmode,
28950 funexp, const0_rtx),
28951 gen_rtx_USE (VOIDmode, const0_rtx),
28952 simple_return_rtx)));
28953 SIBLING_CALL_P (insn) = 1;
28954 emit_barrier ();
28955
28956 /* Run just enough of rest_of_compilation to get the insns emitted.
28957 There's not really enough bulk here to make other passes such as
28958 instruction scheduling worth while. Note that use_thunk calls
28959 assemble_start_function and assemble_end_function. */
28960 insn = get_insns ();
28961 shorten_branches (insn);
28962 final_start_function (insn, file, 1);
28963 final (insn, file, 1);
28964 final_end_function ();
28965
28966 reload_completed = 0;
28967 epilogue_completed = 0;
28968 }
28969 \f
28970 /* A quick summary of the various types of 'constant-pool tables'
28971 under PowerPC:
28972
28973 Target Flags Name One table per
28974 AIX (none) AIX TOC object file
28975 AIX -mfull-toc AIX TOC object file
28976 AIX -mminimal-toc AIX minimal TOC translation unit
28977 SVR4/EABI (none) SVR4 SDATA object file
28978 SVR4/EABI -fpic SVR4 pic object file
28979 SVR4/EABI -fPIC SVR4 PIC translation unit
28980 SVR4/EABI -mrelocatable EABI TOC function
28981 SVR4/EABI -maix AIX TOC object file
28982 SVR4/EABI -maix -mminimal-toc
28983 AIX minimal TOC translation unit
28984
28985 Name Reg. Set by entries contains:
28986 made by addrs? fp? sum?
28987
28988 AIX TOC 2 crt0 as Y option option
28989 AIX minimal TOC 30 prolog gcc Y Y option
28990 SVR4 SDATA 13 crt0 gcc N Y N
28991 SVR4 pic 30 prolog ld Y not yet N
28992 SVR4 PIC 30 prolog gcc Y option option
28993 EABI TOC 30 prolog gcc Y option option
28994
28995 */
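/* Illustrative example (AIX-style full TOC): a reference to a global
   `x' produces one table entry,

       .tc x[TC],x          # the entry holds the address of x

   and code reaches the value through the TOC register:

       lwz 3,x[TC](2)       # r2 is the TOC pointer on AIX

   output_toc below writes the ".tc" (or ".long" / DOUBLE_INT_ASM_OP)
   directives for every constant or symbol placed in such a table. */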
28996
28997 /* Hash functions for the hash table. */
28998
28999 static unsigned
29000 rs6000_hash_constant (rtx k)
29001 {
29002 enum rtx_code code = GET_CODE (k);
29003 machine_mode mode = GET_MODE (k);
29004 unsigned result = (code << 3) ^ mode;
29005 const char *format;
29006 int flen, fidx;
29007
29008 format = GET_RTX_FORMAT (code);
29009 flen = strlen (format);
29010 fidx = 0;
29011
29012 switch (code)
29013 {
29014 case LABEL_REF:
29015 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29016
29017 case CONST_WIDE_INT:
29018 {
29019 int i;
29020 flen = CONST_WIDE_INT_NUNITS (k);
29021 for (i = 0; i < flen; i++)
29022 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29023 return result;
29024 }
29025
29026 case CONST_DOUBLE:
29027 if (mode != VOIDmode)
29028 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29029 flen = 2;
29030 break;
29031
29032 case CODE_LABEL:
29033 fidx = 3;
29034 break;
29035
29036 default:
29037 break;
29038 }
29039
29040 for (; fidx < flen; fidx++)
29041 switch (format[fidx])
29042 {
29043 case 's':
29044 {
29045 unsigned i, len;
29046 const char *str = XSTR (k, fidx);
29047 len = strlen (str);
29048 result = result * 613 + len;
29049 for (i = 0; i < len; i++)
29050 result = result * 613 + (unsigned) str[i];
29051 break;
29052 }
29053 case 'u':
29054 case 'e':
29055 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29056 break;
29057 case 'i':
29058 case 'n':
29059 result = result * 613 + (unsigned) XINT (k, fidx);
29060 break;
29061 case 'w':
29062 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29063 result = result * 613 + (unsigned) XWINT (k, fidx);
29064 else
29065 {
29066 size_t i;
29067 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29068 result = result * 613 + (unsigned) (XWINT (k, fidx)
29069 >> CHAR_BIT * i);
29070 }
29071 break;
29072 case '0':
29073 break;
29074 default:
29075 gcc_unreachable ();
29076 }
29077
29078 return result;
29079 }
29080
29081 hashval_t
29082 toc_hasher::hash (toc_hash_struct *thc)
29083 {
29084 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29085 }
29086
29087 /* Compare H1 and H2 for equivalence. */
29088
29089 bool
29090 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29091 {
29092 rtx r1 = h1->key;
29093 rtx r2 = h2->key;
29094
29095 if (h1->key_mode != h2->key_mode)
29096 return 0;
29097
29098 return rtx_equal_p (r1, r2);
29099 }
29100
29101 /* These are the names given by the C++ front-end to vtables and
29102 vtable-like objects. Ideally, this logic should not be here;
29103 instead, there should be some programmatic way of inquiring as
29104 to whether or not an object is a vtable. */
29105
29106 #define VTABLE_NAME_P(NAME) \
29107 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29108 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29109 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29110 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29111 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
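
/* For example, the Itanium-ABI mangled name "_ZTV3Foo" (the vtable for
   class Foo) and the older g++ form "_vt.Foo" both satisfy
   VTABLE_NAME_P. */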
29112
29113 #ifdef NO_DOLLAR_IN_LABEL
29114 /* Return a GGC-allocated character string translating dollar signs in
29115 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
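/* For example, "foo$bar$baz" becomes "foo_bar_baz"; a NAME containing
   no '$', or one whose first character is '$', is returned unchanged. */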
29116
29117 const char *
29118 rs6000_xcoff_strip_dollar (const char *name)
29119 {
29120 char *strip, *p;
29121 const char *q;
29122 size_t len;
29123
29124 q = (const char *) strchr (name, '$');
29125
29126 if (q == 0 || q == name)
29127 return name;
29128
29129 len = strlen (name);
29130 strip = XALLOCAVEC (char, len + 1);
29131 strcpy (strip, name);
29132 p = strip + (q - name);
29133 while (p)
29134 {
29135 *p = '_';
29136 p = strchr (p + 1, '$');
29137 }
29138
29139 return ggc_alloc_string (strip, len);
29140 }
29141 #endif
29142
29143 void
29144 rs6000_output_symbol_ref (FILE *file, rtx x)
29145 {
29146 const char *name = XSTR (x, 0);
29147
29148 /* Currently C++ toc references to vtables can be emitted before it
29149 is decided whether the vtable is public or private. If this is
29150 the case, then the linker will eventually complain that there is
29151 a reference to an unknown section. Thus, for vtables only,
29152 we emit the TOC reference to reference the identifier and not the
29153 symbol. */
29154 if (VTABLE_NAME_P (name))
29155 {
29156 RS6000_OUTPUT_BASENAME (file, name);
29157 }
29158 else
29159 assemble_name (file, name);
29160 }
29161
29162 /* Output a TOC entry. We derive the entry name from what is being
29163 written. */
29164
29165 void
29166 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29167 {
29168 char buf[256];
29169 const char *name = buf;
29170 rtx base = x;
29171 HOST_WIDE_INT offset = 0;
29172
29173 gcc_assert (!TARGET_NO_TOC);
29174
29175 /* When the linker won't eliminate them, don't output duplicate
29176 TOC entries (this happens on AIX if there is any kind of TOC,
29177 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29178 CODE_LABELs. */
29179 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29180 {
29181 struct toc_hash_struct *h;
29182
29183 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29184 time because GGC is not initialized at that point. */
29185 if (toc_hash_table == NULL)
29186 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29187
29188 h = ggc_alloc<toc_hash_struct> ();
29189 h->key = x;
29190 h->key_mode = mode;
29191 h->labelno = labelno;
29192
29193 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29194 if (*found == NULL)
29195 *found = h;
29196 else /* This is indeed a duplicate.
29197 Set this label equal to that label. */
29198 {
29199 fputs ("\t.set ", file);
29200 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29201 fprintf (file, "%d,", labelno);
29202 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29203 fprintf (file, "%d\n", ((*found)->labelno));
29204
29205 #ifdef HAVE_AS_TLS
29206 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29207 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29208 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29209 {
29210 fputs ("\t.set ", file);
29211 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29212 fprintf (file, "%d,", labelno);
29213 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29214 fprintf (file, "%d\n", ((*found)->labelno));
29215 }
29216 #endif
29217 return;
29218 }
29219 }
29220
29221 /* If we're going to put a double constant in the TOC, make sure it's
29222 aligned properly when strict alignment is on. */
29223 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29224 && STRICT_ALIGNMENT
29225 && GET_MODE_BITSIZE (mode) >= 64
29226 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29227 ASM_OUTPUT_ALIGN (file, 3);
29229
29230 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29231
29232 /* Handle FP constants specially. Note that if we have a minimal
29233 TOC, things we put here aren't actually in the TOC, so we can allow
29234 FP constants. */
29235 if (GET_CODE (x) == CONST_DOUBLE
29236 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29237 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29238 {
29239 long k[4];
29240
29241 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29242 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29243 else
29244 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29245
29246 if (TARGET_64BIT)
29247 {
29248 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29249 fputs (DOUBLE_INT_ASM_OP, file);
29250 else
29251 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29252 k[0] & 0xffffffff, k[1] & 0xffffffff,
29253 k[2] & 0xffffffff, k[3] & 0xffffffff);
29254 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29255 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29256 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29257 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29258 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29259 return;
29260 }
29261 else
29262 {
29263 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29264 fputs ("\t.long ", file);
29265 else
29266 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29267 k[0] & 0xffffffff, k[1] & 0xffffffff,
29268 k[2] & 0xffffffff, k[3] & 0xffffffff);
29269 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29270 k[0] & 0xffffffff, k[1] & 0xffffffff,
29271 k[2] & 0xffffffff, k[3] & 0xffffffff);
29272 return;
29273 }
29274 }
29275 else if (GET_CODE (x) == CONST_DOUBLE
29276 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29277 {
29278 long k[2];
29279
29280 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29281 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29282 else
29283 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29284
29285 if (TARGET_64BIT)
29286 {
29287 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29288 fputs (DOUBLE_INT_ASM_OP, file);
29289 else
29290 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29291 k[0] & 0xffffffff, k[1] & 0xffffffff);
29292 fprintf (file, "0x%lx%08lx\n",
29293 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29294 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29295 return;
29296 }
29297 else
29298 {
29299 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29300 fputs ("\t.long ", file);
29301 else
29302 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29303 k[0] & 0xffffffff, k[1] & 0xffffffff);
29304 fprintf (file, "0x%lx,0x%lx\n",
29305 k[0] & 0xffffffff, k[1] & 0xffffffff);
29306 return;
29307 }
29308 }
29309 else if (GET_CODE (x) == CONST_DOUBLE
29310 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29311 {
29312 long l;
29313
29314 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29315 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29316 else
29317 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29318
29319 if (TARGET_64BIT)
29320 {
29321 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29322 fputs (DOUBLE_INT_ASM_OP, file);
29323 else
29324 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29325 if (WORDS_BIG_ENDIAN)
29326 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29327 else
29328 fprintf (file, "0x%lx\n", l & 0xffffffff);
29329 return;
29330 }
29331 else
29332 {
29333 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29334 fputs ("\t.long ", file);
29335 else
29336 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29337 fprintf (file, "0x%lx\n", l & 0xffffffff);
29338 return;
29339 }
29340 }
29341 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29342 {
29343 unsigned HOST_WIDE_INT low;
29344 HOST_WIDE_INT high;
29345
29346 low = INTVAL (x) & 0xffffffff;
29347 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29348
29349 /* TOC entries are always Pmode-sized, so when big-endian
29350 smaller integer constants in the TOC need to be padded.
29351 (This is still a win over putting the constants in
29352 a separate constant pool, because then we'd have
29353 to have both a TOC entry _and_ the actual constant.)
29354
29355 For a 32-bit target, CONST_INT values are loaded and shifted
29356 entirely within `low' and can be stored in one TOC entry. */
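/* Example: an SImode CONST_INT 0x12345678 on a 64-bit big-endian
   target is shifted into the high half below and emitted as the
   doubleword 0x1234567800000000. */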
29357
29358 /* It would be easy to make this work, but it doesn't now. */
29359 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29360
29361 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29362 {
29363 low |= high << 32;
29364 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29365 high = (HOST_WIDE_INT) low >> 32;
29366 low &= 0xffffffff;
29367 }
29368
29369 if (TARGET_64BIT)
29370 {
29371 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29372 fputs (DOUBLE_INT_ASM_OP, file);
29373 else
29374 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29375 (long) high & 0xffffffff, (long) low & 0xffffffff);
29376 fprintf (file, "0x%lx%08lx\n",
29377 (long) high & 0xffffffff, (long) low & 0xffffffff);
29378 return;
29379 }
29380 else
29381 {
29382 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29383 {
29384 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29385 fputs ("\t.long ", file);
29386 else
29387 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29388 (long) high & 0xffffffff, (long) low & 0xffffffff);
29389 fprintf (file, "0x%lx,0x%lx\n",
29390 (long) high & 0xffffffff, (long) low & 0xffffffff);
29391 }
29392 else
29393 {
29394 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29395 fputs ("\t.long ", file);
29396 else
29397 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29398 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29399 }
29400 return;
29401 }
29402 }
29403
29404 if (GET_CODE (x) == CONST)
29405 {
29406 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29407 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29408
29409 base = XEXP (XEXP (x, 0), 0);
29410 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29411 }
29412
29413 switch (GET_CODE (base))
29414 {
29415 case SYMBOL_REF:
29416 name = XSTR (base, 0);
29417 break;
29418
29419 case LABEL_REF:
29420 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29421 CODE_LABEL_NUMBER (XEXP (base, 0)));
29422 break;
29423
29424 case CODE_LABEL:
29425 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29426 break;
29427
29428 default:
29429 gcc_unreachable ();
29430 }
29431
29432 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29433 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29434 else
29435 {
29436 fputs ("\t.tc ", file);
29437 RS6000_OUTPUT_BASENAME (file, name);
29438
29439 if (offset < 0)
29440 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29441 else if (offset)
29442 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29443
29444 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29445 after other TOC symbols, reducing overflow of small TOC access
29446 to [TC] symbols. */
29447 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29448 ? "[TE]," : "[TC],", file);
29449 }
29450
29451 /* Currently C++ toc references to vtables can be emitted before it
29452 is decided whether the vtable is public or private. If this is
29453 the case, then the linker will eventually complain that there is
29454 a TOC reference to an unknown section. Thus, for vtables only,
29455 we emit the TOC reference to reference the symbol and not the
29456 section. */
29457 if (VTABLE_NAME_P (name))
29458 {
29459 RS6000_OUTPUT_BASENAME (file, name);
29460 if (offset < 0)
29461 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29462 else if (offset > 0)
29463 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29464 }
29465 else
29466 output_addr_const (file, x);
29467
29468 #if HAVE_AS_TLS
29469 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29470 {
29471 switch (SYMBOL_REF_TLS_MODEL (base))
29472 {
29473 case 0:
29474 break;
29475 case TLS_MODEL_LOCAL_EXEC:
29476 fputs ("@le", file);
29477 break;
29478 case TLS_MODEL_INITIAL_EXEC:
29479 fputs ("@ie", file);
29480 break;
29481 /* Use global-dynamic for local-dynamic. */
29482 case TLS_MODEL_GLOBAL_DYNAMIC:
29483 case TLS_MODEL_LOCAL_DYNAMIC:
29484 putc ('\n', file);
29485 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29486 fputs ("\t.tc .", file);
29487 RS6000_OUTPUT_BASENAME (file, name);
29488 fputs ("[TC],", file);
29489 output_addr_const (file, x);
29490 fputs ("@m", file);
29491 break;
29492 default:
29493 gcc_unreachable ();
29494 }
29495 }
29496 #endif
29497
29498 putc ('\n', file);
29499 }
29500 \f
29501 /* Output an assembler pseudo-op to write an ASCII string of N characters
29502 starting at P to FILE.
29503
29504 On the RS/6000, we have to do this using the .byte operation and
29505 write out special characters outside the quoted string.
29506 Also, the assembler is broken; very long strings are truncated,
29507 so we must artificially break them up early. */
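/* For example, the six bytes 'a' 'b' '"' 'c' '\n' '\0' come out as

       .byte "ab""c"
       .byte 10, 0

   printable characters are grouped into quoted runs (with '"' doubled
   inside them), and everything else is written as decimal values. */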
29508
29509 void
29510 output_ascii (FILE *file, const char *p, int n)
29511 {
29512 char c;
29513 int i, count_string;
29514 const char *for_string = "\t.byte \"";
29515 const char *for_decimal = "\t.byte ";
29516 const char *to_close = NULL;
29517
29518 count_string = 0;
29519 for (i = 0; i < n; i++)
29520 {
29521 c = *p++;
29522 if (c >= ' ' && c < 0177)
29523 {
29524 if (for_string)
29525 fputs (for_string, file);
29526 putc (c, file);
29527
29528 /* Write two quotes to get one. */
29529 if (c == '"')
29530 {
29531 putc (c, file);
29532 ++count_string;
29533 }
29534
29535 for_string = NULL;
29536 for_decimal = "\"\n\t.byte ";
29537 to_close = "\"\n";
29538 ++count_string;
29539
29540 if (count_string >= 512)
29541 {
29542 fputs (to_close, file);
29543
29544 for_string = "\t.byte \"";
29545 for_decimal = "\t.byte ";
29546 to_close = NULL;
29547 count_string = 0;
29548 }
29549 }
29550 else
29551 {
29552 if (for_decimal)
29553 fputs (for_decimal, file);
29554 fprintf (file, "%d", c);
29555
29556 for_string = "\n\t.byte \"";
29557 for_decimal = ", ";
29558 to_close = "\n";
29559 count_string = 0;
29560 }
29561 }
29562
29563 /* Now close the string if we have written one. Then end the line. */
29564 if (to_close)
29565 fputs (to_close, file);
29566 }
29567 \f
29568 /* Generate a unique section name for FILENAME for a section type
29569 represented by SECTION_DESC. Output goes into BUF.
29570
29571 SECTION_DESC can be any string, as long as it is different for each
29572 possible section type.
29573
29574 We name the section in the same manner as xlc. The name begins with an
29575 underscore followed by the filename (after stripping any leading directory
29576 names) with the last period replaced by the string SECTION_DESC. If
29577 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29578 the name. */
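/* For example, FILENAME "src/foo.c" with a hypothetical SECTION_DESC
   "bss_" yields "_foobss_"; the extensionless FILENAME "foo" yields
   the same "_foobss_", since SECTION_DESC is then simply appended. */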
29579
29580 void
29581 rs6000_gen_section_name (char **buf, const char *filename,
29582 const char *section_desc)
29583 {
29584 const char *q, *after_last_slash, *last_period = 0;
29585 char *p;
29586 int len;
29587
29588 after_last_slash = filename;
29589 for (q = filename; *q; q++)
29590 {
29591 if (*q == '/')
29592 after_last_slash = q + 1;
29593 else if (*q == '.')
29594 last_period = q;
29595 }
29596
29597 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29598 *buf = (char *) xmalloc (len);
29599
29600 p = *buf;
29601 *p++ = '_';
29602
29603 for (q = after_last_slash; *q; q++)
29604 {
29605 if (q == last_period)
29606 {
29607 strcpy (p, section_desc);
29608 p += strlen (section_desc);
29609 break;
29610 }
29611
29612 else if (ISALNUM (*q))
29613 *p++ = *q;
29614 }
29615
29616 if (last_period == 0)
29617 strcpy (p, section_desc);
29618 else
29619 *p = '\0';
29620 }
29621 \f
29622 /* Emit profile function. */
29623
29624 void
29625 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29626 {
29627 /* Non-standard profiling for kernels, which just saves LR then calls
29628 _mcount without worrying about arg saves. The idea is to change
29629 the function prologue as little as possible as it isn't easy to
29630 account for arg save/restore code added just for _mcount. */
29631 if (TARGET_PROFILE_KERNEL)
29632 return;
29633
29634 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29635 {
29636 #ifndef NO_PROFILE_COUNTERS
29637 # define NO_PROFILE_COUNTERS 0
29638 #endif
29639 if (NO_PROFILE_COUNTERS)
29640 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29641 LCT_NORMAL, VOIDmode);
29642 else
29643 {
29644 char buf[30];
29645 const char *label_name;
29646 rtx fun;
29647
29648 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29649 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29650 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29651
29652 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29653 LCT_NORMAL, VOIDmode, fun, Pmode);
29654 }
29655 }
29656 else if (DEFAULT_ABI == ABI_DARWIN)
29657 {
29658 const char *mcount_name = RS6000_MCOUNT;
29659 int caller_addr_regno = LR_REGNO;
29660
29661 /* Be conservative and always set this, at least for now. */
29662 crtl->uses_pic_offset_table = 1;
29663
29664 #if TARGET_MACHO
29665 /* For PIC code, set up a stub and collect the caller's address
29666 from r0, which is where the prologue puts it. */
29667 if (MACHOPIC_INDIRECT
29668 && crtl->uses_pic_offset_table)
29669 caller_addr_regno = 0;
29670 #endif
29671 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29672 LCT_NORMAL, VOIDmode,
29673 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29674 }
29675 }
29676
29677 /* Write function profiler code. */
29678
29679 void
29680 output_function_profiler (FILE *file, int labelno)
29681 {
29682 char buf[100];
29683
29684 switch (DEFAULT_ABI)
29685 {
29686 default:
29687 gcc_unreachable ();
29688
29689 case ABI_V4:
29690 if (!TARGET_32BIT)
29691 {
29692 warning (0, "no profiling of 64-bit code for this ABI");
29693 return;
29694 }
29695 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29696 fprintf (file, "\tmflr %s\n", reg_names[0]);
29697 if (NO_PROFILE_COUNTERS)
29698 {
29699 asm_fprintf (file, "\tstw %s,4(%s)\n",
29700 reg_names[0], reg_names[1]);
29701 }
29702 else if (TARGET_SECURE_PLT && flag_pic)
29703 {
29704 if (TARGET_LINK_STACK)
29705 {
29706 char name[32];
29707 get_ppc476_thunk_name (name);
29708 asm_fprintf (file, "\tbl %s\n", name);
29709 }
29710 else
29711 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29712 asm_fprintf (file, "\tstw %s,4(%s)\n",
29713 reg_names[0], reg_names[1]);
29714 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29715 asm_fprintf (file, "\taddis %s,%s,",
29716 reg_names[12], reg_names[12]);
29717 assemble_name (file, buf);
29718 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29719 assemble_name (file, buf);
29720 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29721 }
29722 else if (flag_pic == 1)
29723 {
29724 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29725 asm_fprintf (file, "\tstw %s,4(%s)\n",
29726 reg_names[0], reg_names[1]);
29727 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29728 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29729 assemble_name (file, buf);
29730 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29731 }
29732 else if (flag_pic > 1)
29733 {
29734 asm_fprintf (file, "\tstw %s,4(%s)\n",
29735 reg_names[0], reg_names[1]);
29736 /* Now, we need to get the address of the label. */
29737 if (TARGET_LINK_STACK)
29738 {
29739 char name[32];
29740 get_ppc476_thunk_name (name);
29741 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29742 assemble_name (file, buf);
29743 fputs ("-.\n1:", file);
29744 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29745 asm_fprintf (file, "\taddi %s,%s,4\n",
29746 reg_names[11], reg_names[11]);
29747 }
29748 else
29749 {
29750 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29751 assemble_name (file, buf);
29752 fputs ("-.\n1:", file);
29753 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29754 }
29755 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29756 reg_names[0], reg_names[11]);
29757 asm_fprintf (file, "\tadd %s,%s,%s\n",
29758 reg_names[0], reg_names[0], reg_names[11]);
29759 }
29760 else
29761 {
29762 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29763 assemble_name (file, buf);
29764 fputs ("@ha\n", file);
29765 asm_fprintf (file, "\tstw %s,4(%s)\n",
29766 reg_names[0], reg_names[1]);
29767 asm_fprintf (file, "\tla %s,", reg_names[0]);
29768 assemble_name (file, buf);
29769 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29770 }
29771
29772 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29773 fprintf (file, "\tbl %s%s\n",
29774 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29775 break;
29776
29777 case ABI_AIX:
29778 case ABI_ELFv2:
29779 case ABI_DARWIN:
29780 /* Don't do anything, done in output_profile_hook (). */
29781 break;
29782 }
29783 }
29784
29785 \f
29786
29787 /* The following variable holds the last insn that was scheduled. */
29788
29789 static rtx_insn *last_scheduled_insn;
29790
29791 /* The following variable helps to balance the issuing of load and
29792 store instructions. */
29793
29794 static int load_store_pendulum;
29795
29796 /* The following variable helps pair divide insns during scheduling. */
29797 static int divide_cnt;
29798 /* The following variable helps pair and alternate vector and vector load
29799 insns during scheduling. */
29800 static int vec_pairing;
29801
29802
29803 /* Power4 load update and store update instructions are cracked into a
29804 load or store and an integer insn which are executed in the same cycle.
29805 Branches have their own dispatch slot which does not count against the
29806 GCC issue rate, but it changes the program flow so there are no other
29807 instructions to issue in this cycle. */
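/* For instance, a load-with-update such as "lwzu r3,4(r9)" acts like
   the load "lwz r3,4(r9)" plus the add "addi r9,r9,4" and so consumes
   two issue slots; is_cracked_insn and rs6000_variable_issue_1 below
   account for this. */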
29808
29809 static int
29810 rs6000_variable_issue_1 (rtx_insn *insn, int more)
29811 {
29812 last_scheduled_insn = insn;
29813 if (GET_CODE (PATTERN (insn)) == USE
29814 || GET_CODE (PATTERN (insn)) == CLOBBER)
29815 {
29816 cached_can_issue_more = more;
29817 return cached_can_issue_more;
29818 }
29819
29820 if (insn_terminates_group_p (insn, current_group))
29821 {
29822 cached_can_issue_more = 0;
29823 return cached_can_issue_more;
29824 }
29825
29826 /* If the insn has no reservation but we reach here anyway, don't
change the issue count. */
29827 if (recog_memoized (insn) < 0)
29828 return more;
29829
29830 if (rs6000_sched_groups)
29831 {
29832 if (is_microcoded_insn (insn))
29833 cached_can_issue_more = 0;
29834 else if (is_cracked_insn (insn))
29835 cached_can_issue_more = more > 2 ? more - 2 : 0;
29836 else
29837 cached_can_issue_more = more - 1;
29838
29839 return cached_can_issue_more;
29840 }
29841
29842 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
29843 return 0;
29844
29845 cached_can_issue_more = more - 1;
29846 return cached_can_issue_more;
29847 }
29848
29849 static int
29850 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
29851 {
29852 int r = rs6000_variable_issue_1 (insn, more);
29853 if (verbose)
29854 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
29855 return r;
29856 }
29857
29858 /* Adjust the cost of a scheduling dependency. Return the new cost of
29859 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
29860
29861 static int
29862 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
29863 unsigned int)
29864 {
29865 enum attr_type attr_type;
29866
29867 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
29868 return cost;
29869
29870 switch (dep_type)
29871 {
29872 case REG_DEP_TRUE:
29873 {
29874 /* Data dependency; DEP_INSN writes a register that INSN reads
29875 some cycles later. */
29876
29877 /* Separate a load from a narrower, dependent store. */
29878 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
29879 && GET_CODE (PATTERN (insn)) == SET
29880 && GET_CODE (PATTERN (dep_insn)) == SET
29881 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
29882 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
29883 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
29884 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
29885 return cost + 14;
29886
29887 attr_type = get_attr_type (insn);
29888
29889 switch (attr_type)
29890 {
29891 case TYPE_JMPREG:
29892 /* Tell the first scheduling pass about the latency between
29893 a mtctr and bctr (and mtlr and br/blr). The first
29894 scheduling pass will not know about this latency since
29895 the mtctr instruction, which has the latency associated
29896 to it, will be generated by reload. */
29897 return 4;
29898 case TYPE_BRANCH:
29899 /* Leave some extra cycles between a compare and its
29900 dependent branch, to inhibit expensive mispredicts. */
29901 if ((rs6000_tune == PROCESSOR_PPC603
29902 || rs6000_tune == PROCESSOR_PPC604
29903 || rs6000_tune == PROCESSOR_PPC604e
29904 || rs6000_tune == PROCESSOR_PPC620
29905 || rs6000_tune == PROCESSOR_PPC630
29906 || rs6000_tune == PROCESSOR_PPC750
29907 || rs6000_tune == PROCESSOR_PPC7400
29908 || rs6000_tune == PROCESSOR_PPC7450
29909 || rs6000_tune == PROCESSOR_PPCE5500
29910 || rs6000_tune == PROCESSOR_PPCE6500
29911 || rs6000_tune == PROCESSOR_POWER4
29912 || rs6000_tune == PROCESSOR_POWER5
29913 || rs6000_tune == PROCESSOR_POWER7
29914 || rs6000_tune == PROCESSOR_POWER8
29915 || rs6000_tune == PROCESSOR_POWER9
29916 || rs6000_tune == PROCESSOR_CELL)
29917 && recog_memoized (dep_insn)
29918 && (INSN_CODE (dep_insn) >= 0))
29919
29920 switch (get_attr_type (dep_insn))
29921 {
29922 case TYPE_CMP:
29923 case TYPE_FPCOMPARE:
29924 case TYPE_CR_LOGICAL:
29925 return cost + 2;
29926 case TYPE_EXTS:
29927 case TYPE_MUL:
29928 if (get_attr_dot (dep_insn) == DOT_YES)
29929 return cost + 2;
29930 else
29931 break;
29932 case TYPE_SHIFT:
29933 if (get_attr_dot (dep_insn) == DOT_YES
29934 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
29935 return cost + 2;
29936 else
29937 break;
29938 default:
29939 break;
29940 }
29941 break;
29942
29943 case TYPE_STORE:
29944 case TYPE_FPSTORE:
29945 if ((rs6000_tune == PROCESSOR_POWER6)
29946 && recog_memoized (dep_insn)
29947 && (INSN_CODE (dep_insn) >= 0))
29948 {
29949
29950 if (GET_CODE (PATTERN (insn)) != SET)
29951 /* If this happens, we have to extend this to schedule
29952 optimally. Return default for now. */
29953 return cost;
29954
29955 /* Adjust the cost for the case where the value written
29956 by a fixed point operation is used as the address
29957 gen value on a store. */
29958 switch (get_attr_type (dep_insn))
29959 {
29960 case TYPE_LOAD:
29961 case TYPE_CNTLZ:
29962 {
29963 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29964 return get_attr_sign_extend (dep_insn)
29965 == SIGN_EXTEND_YES ? 6 : 4;
29966 break;
29967 }
29968 case TYPE_SHIFT:
29969 {
29970 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29971 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
29972 6 : 3;
29973 break;
29974 }
29975 case TYPE_INTEGER:
29976 case TYPE_ADD:
29977 case TYPE_LOGICAL:
29978 case TYPE_EXTS:
29979 case TYPE_INSERT:
29980 {
29981 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29982 return 3;
29983 break;
29984 }
29985 case TYPE_STORE:
29986 case TYPE_FPLOAD:
29987 case TYPE_FPSTORE:
29988 {
29989 if (get_attr_update (dep_insn) == UPDATE_YES
29990 && ! rs6000_store_data_bypass_p (dep_insn, insn))
29991 return 3;
29992 break;
29993 }
29994 case TYPE_MUL:
29995 {
29996 if (! rs6000_store_data_bypass_p (dep_insn, insn))
29997 return 17;
29998 break;
29999 }
30000 case TYPE_DIV:
30001 {
30002 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30003 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30004 break;
30005 }
30006 default:
30007 break;
30008 }
30009 }
30010 break;
30011
30012 case TYPE_LOAD:
30013 if ((rs6000_tune == PROCESSOR_POWER6)
30014 && recog_memoized (dep_insn)
30015 && (INSN_CODE (dep_insn) >= 0))
30016 {
30018 /* Adjust the cost for the case where the value written
30019 by a fixed point instruction is used within the address
30020 gen portion of a subsequent load(u)(x) */
30021 switch (get_attr_type (dep_insn))
30022 {
30023 case TYPE_LOAD:
30024 case TYPE_CNTLZ:
30025 {
30026 if (set_to_load_agen (dep_insn, insn))
30027 return get_attr_sign_extend (dep_insn)
30028 == SIGN_EXTEND_YES ? 6 : 4;
30029 break;
30030 }
30031 case TYPE_SHIFT:
30032 {
30033 if (set_to_load_agen (dep_insn, insn))
30034 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30035 6 : 3;
30036 break;
30037 }
30038 case TYPE_INTEGER:
30039 case TYPE_ADD:
30040 case TYPE_LOGICAL:
30041 case TYPE_EXTS:
30042 case TYPE_INSERT:
30043 {
30044 if (set_to_load_agen (dep_insn, insn))
30045 return 3;
30046 break;
30047 }
30048 case TYPE_STORE:
30049 case TYPE_FPLOAD:
30050 case TYPE_FPSTORE:
30051 {
30052 if (get_attr_update (dep_insn) == UPDATE_YES
30053 && set_to_load_agen (dep_insn, insn))
30054 return 3;
30055 break;
30056 }
30057 case TYPE_MUL:
30058 {
30059 if (set_to_load_agen (dep_insn, insn))
30060 return 17;
30061 break;
30062 }
30063 case TYPE_DIV:
30064 {
30065 if (set_to_load_agen (dep_insn, insn))
30066 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30067 break;
30068 }
30069 default:
30070 break;
30071 }
30072 }
30073 break;
30074
30075 case TYPE_FPLOAD:
30076 if ((rs6000_tune == PROCESSOR_POWER6)
30077 && get_attr_update (insn) == UPDATE_NO
30078 && recog_memoized (dep_insn)
30079 && (INSN_CODE (dep_insn) >= 0)
30080 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30081 return 2;
30082
30083 default:
30084 break;
30085 }
30086
30087 /* Fall out to return default cost. */
30088 }
30089 break;
30090
30091 case REG_DEP_OUTPUT:
30092 /* Output dependency; DEP_INSN writes a register that INSN writes some
30093 cycles later. */
30094 if ((rs6000_tune == PROCESSOR_POWER6)
30095 && recog_memoized (dep_insn)
30096 && (INSN_CODE (dep_insn) >= 0))
30097 {
30098 attr_type = get_attr_type (insn);
30099
30100 switch (attr_type)
30101 {
30102 case TYPE_FP:
30103 case TYPE_FPSIMPLE:
30104 if (get_attr_type (dep_insn) == TYPE_FP
30105 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30106 return 1;
30107 break;
30108 case TYPE_FPLOAD:
30109 if (get_attr_update (insn) == UPDATE_NO
30110 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30111 return 2;
30112 break;
30113 default:
30114 break;
30115 }
30116 }
30117 /* Fall through, no cost for output dependency. */
30118 /* FALLTHRU */
30119
30120 case REG_DEP_ANTI:
30121 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30122 cycles later. */
30123 return 0;
30124
30125 default:
30126 gcc_unreachable ();
30127 }
30128
30129 return cost;
30130 }
30131
30132 /* Debug version of rs6000_adjust_cost. */
30133
30134 static int
30135 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30136 int cost, unsigned int dw)
30137 {
30138 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30139
30140 if (ret != cost)
30141 {
30142 const char *dep;
30143
30144 switch (dep_type)
30145 {
30146 default: dep = "unknown dependency"; break;
30147 case REG_DEP_TRUE: dep = "data dependency"; break;
30148 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30149 case REG_DEP_ANTI: dep = "anti dependency"; break;
30150 }
30151
30152 fprintf (stderr,
30153 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30154 "%s, insn:\n", ret, cost, dep);
30155
30156 debug_rtx (insn);
30157 }
30158
30159 return ret;
30160 }
30161
30162 /* Return true if INSN is microcoded.
30163 Return false otherwise. */
30164
30165 static bool
30166 is_microcoded_insn (rtx_insn *insn)
30167 {
30168 if (!insn || !NONDEBUG_INSN_P (insn)
30169 || GET_CODE (PATTERN (insn)) == USE
30170 || GET_CODE (PATTERN (insn)) == CLOBBER)
30171 return false;
30172
30173 if (rs6000_tune == PROCESSOR_CELL)
30174 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30175
30176 if (rs6000_sched_groups
30177 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30178 {
30179 enum attr_type type = get_attr_type (insn);
30180 if ((type == TYPE_LOAD
30181 && get_attr_update (insn) == UPDATE_YES
30182 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30183 || ((type == TYPE_LOAD || type == TYPE_STORE)
30184 && get_attr_update (insn) == UPDATE_YES
30185 && get_attr_indexed (insn) == INDEXED_YES)
30186 || type == TYPE_MFCR)
30187 return true;
30188 }
30189
30190 return false;
30191 }
30192
30193 /* Return true if INSN is cracked into 2 instructions by the
30194 processor (and therefore occupies 2 issue slots). */
30195
30196 static bool
30197 is_cracked_insn (rtx_insn *insn)
30198 {
30199 if (!insn || !NONDEBUG_INSN_P (insn)
30200 || GET_CODE (PATTERN (insn)) == USE
30201 || GET_CODE (PATTERN (insn)) == CLOBBER)
30202 return false;
30203
30204 if (rs6000_sched_groups
30205 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30206 {
30207 enum attr_type type = get_attr_type (insn);
30208 if ((type == TYPE_LOAD
30209 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30210 && get_attr_update (insn) == UPDATE_NO)
30211 || (type == TYPE_LOAD
30212 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30213 && get_attr_update (insn) == UPDATE_YES
30214 && get_attr_indexed (insn) == INDEXED_NO)
30215 || (type == TYPE_STORE
30216 && get_attr_update (insn) == UPDATE_YES
30217 && get_attr_indexed (insn) == INDEXED_NO)
30218 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30219 && get_attr_update (insn) == UPDATE_YES)
30220 || (type == TYPE_CR_LOGICAL
30221 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30222 || (type == TYPE_EXTS
30223 && get_attr_dot (insn) == DOT_YES)
30224 || (type == TYPE_SHIFT
30225 && get_attr_dot (insn) == DOT_YES
30226 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30227 || (type == TYPE_MUL
30228 && get_attr_dot (insn) == DOT_YES)
30229 || type == TYPE_DIV
30230 || (type == TYPE_INSERT
30231 && get_attr_size (insn) == SIZE_32))
30232 return true;
30233 }
30234
30235 return false;
30236 }
30237
30238 /* Return true if INSN can be issued only from
30239 the branch slot. */
30240
30241 static bool
30242 is_branch_slot_insn (rtx_insn *insn)
30243 {
30244 if (!insn || !NONDEBUG_INSN_P (insn)
30245 || GET_CODE (PATTERN (insn)) == USE
30246 || GET_CODE (PATTERN (insn)) == CLOBBER)
30247 return false;
30248
30249 if (rs6000_sched_groups)
30250 {
30251 enum attr_type type = get_attr_type (insn);
30252 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30253 return true;
30254 return false;
30255 }
30256
30257 return false;
30258 }
30259
30260 /* Return true if OUT_INSN sets a value that is used in the
30261 address generation computation of IN_INSN. */
30262 static bool
30263 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30264 {
30265 rtx out_set, in_set;
30266
30267 /* For performance reasons, only handle the simple case where
30268 both loads are a single_set. */
30269 out_set = single_set (out_insn);
30270 if (out_set)
30271 {
30272 in_set = single_set (in_insn);
30273 if (in_set)
30274 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30275 }
30276
30277 return false;
30278 }
30279
30280 /* Try to determine base/offset/size parts of the given MEM.
30281 Return true if successful, false if the values couldn't all
30282 be determined.
30283
30284 This function only looks for REG or REG+CONST address forms.
30285 REG+REG address form will return false. */
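/* For example, a MEM whose address is
   (plus (plus (reg r9) (const_int 32)) (const_int 8)) yields
   *BASE = r9 and *OFFSET = 40, whereas an address of the form
   (plus (reg r9) (reg r10)) makes this function return false. */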
30286
30287 static bool
30288 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30289 HOST_WIDE_INT *size)
30290 {
30291 rtx addr_rtx;
30292 if (MEM_SIZE_KNOWN_P (mem))
30293 *size = MEM_SIZE (mem);
30294 else
30295 return false;
30296
30297 addr_rtx = XEXP (mem, 0);
30298 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30299 addr_rtx = XEXP (addr_rtx, 1);
30300
30301 *offset = 0;
30302 while (GET_CODE (addr_rtx) == PLUS
30303 && CONST_INT_P (XEXP (addr_rtx, 1)))
30304 {
30305 *offset += INTVAL (XEXP (addr_rtx, 1));
30306 addr_rtx = XEXP (addr_rtx, 0);
30307 }
30308 if (!REG_P (addr_rtx))
30309 return false;
30310
30311 *base = addr_rtx;
30312 return true;
30313 }
30314
30315 /* Return true if the target storage location of MEM1 is adjacent
30316 to the target storage location of MEM2. */
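/* For example, two 8-byte accesses off the same base register at
   offsets 0 and 8 are adjacent; accesses at offsets 0 and 4 are not
   adjacent but do overlap (see mem_locations_overlap below). */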
30318
30319 static bool
30320 adjacent_mem_locations (rtx mem1, rtx mem2)
30321 {
30322 rtx reg1, reg2;
30323 HOST_WIDE_INT off1, size1, off2, size2;
30324
30325 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30326 && get_memref_parts (mem2, &reg2, &off2, &size2))
30327 return ((REGNO (reg1) == REGNO (reg2))
30328 && ((off1 + size1 == off2)
30329 || (off2 + size2 == off1)));
30330
30331 return false;
30332 }
30333
30334 /* This function returns true if it can be determined that the two MEM
30335 locations overlap by at least 1 byte based on base reg/offset/size. */
30336
30337 static bool
30338 mem_locations_overlap (rtx mem1, rtx mem2)
30339 {
30340 rtx reg1, reg2;
30341 HOST_WIDE_INT off1, size1, off2, size2;
30342
30343 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30344 && get_memref_parts (mem2, &reg2, &off2, &size2))
30345 return ((REGNO (reg1) == REGNO (reg2))
30346 && (((off1 <= off2) && (off1 + size1 > off2))
30347 || ((off2 <= off1) && (off2 + size2 > off1))));
30348
30349 return false;
30350 }
30351
30352 /* A C statement (sans semicolon) to update the integer scheduling
30353 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30354 INSN earlier, reduce the priority to execute INSN later. Do not
30355 define this macro if you do not need to adjust the scheduling
30356 priorities of insns. */
30357
30358 static int
30359 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30360 {
30361 rtx load_mem, str_mem;
30362 /* On machines (like the 750) which have asymmetric integer units,
30363 where one integer unit can do multiply and divides and the other
30364 can't, reduce the priority of multiply/divide so it is scheduled
30365 before other integer operations. */
30366
30367 #if 0
30368 if (! INSN_P (insn))
30369 return priority;
30370
30371 if (GET_CODE (PATTERN (insn)) == USE)
30372 return priority;
30373
30374 switch (rs6000_tune) {
30375 case PROCESSOR_PPC750:
30376 switch (get_attr_type (insn))
30377 {
30378 default:
30379 break;
30380
30381 case TYPE_MUL:
30382 case TYPE_DIV:
30383 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30384 priority, priority);
30385 if (priority >= 0 && priority < 0x01000000)
30386 priority >>= 3;
30387 break;
30388 }
30389 }
30390 #endif
30391
30392 if (insn_must_be_first_in_group (insn)
30393 && reload_completed
30394 && current_sched_info->sched_max_insns_priority
30395 && rs6000_sched_restricted_insns_priority)
30396 {
30398 /* Prioritize insns that can be dispatched only in the first
30399 dispatch slot. */
30400 if (rs6000_sched_restricted_insns_priority == 1)
30401 /* Attach highest priority to insn. This means that in
30402 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30403 precede 'priority' (critical path) considerations. */
30404 return current_sched_info->sched_max_insns_priority;
30405 else if (rs6000_sched_restricted_insns_priority == 2)
30406 /* Increase priority of insn by a minimal amount. This means that in
30407 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30408 considerations precede dispatch-slot restriction considerations. */
30409 return (priority + 1);
30410 }
30411
30412 if (rs6000_tune == PROCESSOR_POWER6
30413 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30414 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30415 /* Attach highest priority to insn if the scheduler has just issued two
30416 stores and this instruction is a load, or two loads and this instruction
30417 is a store. Power6 wants loads and stores scheduled alternately
30418 when possible */
30419 return current_sched_info->sched_max_insns_priority;
30420
30421 return priority;
30422 }
30423
30424 /* Return true if the instruction is nonpipelined on the Cell. */
30425 static bool
30426 is_nonpipeline_insn (rtx_insn *insn)
30427 {
30428 enum attr_type type;
30429 if (!insn || !NONDEBUG_INSN_P (insn)
30430 || GET_CODE (PATTERN (insn)) == USE
30431 || GET_CODE (PATTERN (insn)) == CLOBBER)
30432 return false;
30433
30434 type = get_attr_type (insn);
30435 if (type == TYPE_MUL
30436 || type == TYPE_DIV
30437 || type == TYPE_SDIV
30438 || type == TYPE_DDIV
30439 || type == TYPE_SSQRT
30440 || type == TYPE_DSQRT
30441 || type == TYPE_MFCR
30442 || type == TYPE_MFCRF
30443 || type == TYPE_MFJMPR)
30444 {
30445 return true;
30446 }
30447 return false;
30448 }
30449
30450
30451 /* Return how many instructions the machine can issue per cycle. */
30452
30453 static int
30454 rs6000_issue_rate (void)
30455 {
30456 /* Unless scheduling for register pressure, use issue rate of 1 for
30457 first scheduling pass to decrease degradation. */
30458 if (!reload_completed && !flag_sched_pressure)
30459 return 1;
30460
30461 switch (rs6000_tune) {
30462 case PROCESSOR_RS64A:
30463 case PROCESSOR_PPC601: /* ? */
30464 case PROCESSOR_PPC7450:
30465 return 3;
30466 case PROCESSOR_PPC440:
30467 case PROCESSOR_PPC603:
30468 case PROCESSOR_PPC750:
30469 case PROCESSOR_PPC7400:
30470 case PROCESSOR_PPC8540:
30471 case PROCESSOR_PPC8548:
30472 case PROCESSOR_CELL:
30473 case PROCESSOR_PPCE300C2:
30474 case PROCESSOR_PPCE300C3:
30475 case PROCESSOR_PPCE500MC:
30476 case PROCESSOR_PPCE500MC64:
30477 case PROCESSOR_PPCE5500:
30478 case PROCESSOR_PPCE6500:
30479 case PROCESSOR_TITAN:
30480 return 2;
30481 case PROCESSOR_PPC476:
30482 case PROCESSOR_PPC604:
30483 case PROCESSOR_PPC604e:
30484 case PROCESSOR_PPC620:
30485 case PROCESSOR_PPC630:
30486 return 4;
30487 case PROCESSOR_POWER4:
30488 case PROCESSOR_POWER5:
30489 case PROCESSOR_POWER6:
30490 case PROCESSOR_POWER7:
30491 return 5;
30492 case PROCESSOR_POWER8:
30493 return 7;
30494 case PROCESSOR_POWER9:
30495 return 6;
30496 default:
30497 return 1;
30498 }
30499 }
30500
30501 /* Return how many instructions to look ahead for better insn
30502 scheduling. */
30503
30504 static int
30505 rs6000_use_sched_lookahead (void)
30506 {
30507 switch (rs6000_tune)
30508 {
30509 case PROCESSOR_PPC8540:
30510 case PROCESSOR_PPC8548:
30511 return 4;
30512
30513 case PROCESSOR_CELL:
30514 return (reload_completed ? 8 : 0);
30515
30516 default:
30517 return 0;
30518 }
30519 }
30520
30521 /* We are choosing insn from the ready queue. Return zero if INSN can be
30522 chosen. */
30523 static int
30524 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30525 {
30526 if (ready_index == 0)
30527 return 0;
30528
30529 if (rs6000_tune != PROCESSOR_CELL)
30530 return 0;
30531
30532 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30533
30534 if (!reload_completed
30535 || is_nonpipeline_insn (insn)
30536 || is_microcoded_insn (insn))
30537 return 1;
30538
30539 return 0;
30540 }
30541
30542 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30543 and return true. */
30544
30545 static bool
30546 find_mem_ref (rtx pat, rtx *mem_ref)
30547 {
30548 const char * fmt;
30549 int i, j;
30550
30551 /* stack_tie does not produce any real memory traffic. */
30552 if (tie_operand (pat, VOIDmode))
30553 return false;
30554
30555 if (GET_CODE (pat) == MEM)
30556 {
30557 *mem_ref = pat;
30558 return true;
30559 }
30560
30561 /* Recursively process the pattern. */
30562 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30563
30564 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30565 {
30566 if (fmt[i] == 'e')
30567 {
30568 if (find_mem_ref (XEXP (pat, i), mem_ref))
30569 return true;
30570 }
30571 else if (fmt[i] == 'E')
30572 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30573 {
30574 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30575 return true;
30576 }
30577 }
30578
30579 return false;
30580 }
30581
30582 /* Determine if PAT is a PATTERN of a load insn. */
30583
30584 static bool
30585 is_load_insn1 (rtx pat, rtx *load_mem)
30586 {
30587 if (!pat)
30588 return false;
30589
30590 if (GET_CODE (pat) == SET)
30591 return find_mem_ref (SET_SRC (pat), load_mem);
30592
30593 if (GET_CODE (pat) == PARALLEL)
30594 {
30595 int i;
30596
30597 for (i = 0; i < XVECLEN (pat, 0); i++)
30598 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30599 return true;
30600 }
30601
30602 return false;
30603 }
30604
30605 /* Determine if INSN loads from memory. */
30606
30607 static bool
30608 is_load_insn (rtx insn, rtx *load_mem)
30609 {
30610 if (!insn || !INSN_P (insn))
30611 return false;
30612
30613 if (CALL_P (insn))
30614 return false;
30615
30616 return is_load_insn1 (PATTERN (insn), load_mem);
30617 }
30618
30619 /* Determine if PAT is a PATTERN of a store insn. */
30620
30621 static bool
30622 is_store_insn1 (rtx pat, rtx *str_mem)
30623 {
30624 if (!pat)
30625 return false;
30626
30627 if (GET_CODE (pat) == SET)
30628 return find_mem_ref (SET_DEST (pat), str_mem);
30629
30630 if (GET_CODE (pat) == PARALLEL)
30631 {
30632 int i;
30633
30634 for (i = 0; i < XVECLEN (pat, 0); i++)
30635 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30636 return true;
30637 }
30638
30639 return false;
30640 }
30641
30642 /* Determine if INSN stores to memory. */
30643
30644 static bool
30645 is_store_insn (rtx insn, rtx *str_mem)
30646 {
30647 if (!insn || !INSN_P (insn))
30648 return false;
30649
30650 return is_store_insn1 (PATTERN (insn), str_mem);
30651 }
30652
30653 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30654
30655 static bool
30656 is_power9_pairable_vec_type (enum attr_type type)
30657 {
30658 switch (type)
30659 {
30660 case TYPE_VECSIMPLE:
30661 case TYPE_VECCOMPLEX:
30662 case TYPE_VECDIV:
30663 case TYPE_VECCMP:
30664 case TYPE_VECPERM:
30665 case TYPE_VECFLOAT:
30666 case TYPE_VECFDIV:
30667 case TYPE_VECDOUBLE:
30668 return true;
30669 default:
30670 break;
30671 }
30672 return false;
30673 }
30674
30675 /* Returns whether the dependence between INSN and NEXT is considered
30676 costly by the given target. */
30677
30678 static bool
30679 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30680 {
30681 rtx insn;
30682 rtx next;
30683 rtx load_mem, str_mem;
30684
30685 /* If the flag is not enabled - no dependence is considered costly;
30686 allow all dependent insns in the same group.
30687 This is the most aggressive option. */
30688 if (rs6000_sched_costly_dep == no_dep_costly)
30689 return false;
30690
30691 /* If the flag is set to 1 - a dependence is always considered costly;
30692 do not allow dependent instructions in the same group.
30693 This is the most conservative option. */
30694 if (rs6000_sched_costly_dep == all_deps_costly)
30695 return true;
30696
30697 insn = DEP_PRO (dep);
30698 next = DEP_CON (dep);
30699
30700 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30701 && is_load_insn (next, &load_mem)
30702 && is_store_insn (insn, &str_mem))
30703 /* Prevent load after store in the same group. */
30704 return true;
30705
30706 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30707 && is_load_insn (next, &load_mem)
30708 && is_store_insn (insn, &str_mem)
30709 && DEP_TYPE (dep) == REG_DEP_TRUE
30710 && mem_locations_overlap(str_mem, load_mem))
30711 /* Prevent load after store in the same group if it is a true
30712 dependence. */
30713 return true;
30714
30715 /* The flag is set to X; dependences with latency >= X are considered costly,
30716 and will not be scheduled in the same group. */
30717 if (rs6000_sched_costly_dep <= max_dep_latency
30718 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30719 return true;
30720
30721 return false;
30722 }
30723
30724 /* Return the next insn after INSN that is found before TAIL is reached,
30725 skipping any "non-active" insns - insns that will not actually occupy
30726 an issue slot. Return NULL_RTX if such an insn is not found. */
30727
30728 static rtx_insn *
30729 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30730 {
30731 if (insn == NULL_RTX || insn == tail)
30732 return NULL;
30733
30734 while (1)
30735 {
30736 insn = NEXT_INSN (insn);
30737 if (insn == NULL_RTX || insn == tail)
30738 return NULL;
30739
30740 if (CALL_P (insn)
30741 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30742 || (NONJUMP_INSN_P (insn)
30743 && GET_CODE (PATTERN (insn)) != USE
30744 && GET_CODE (PATTERN (insn)) != CLOBBER
30745 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30746 break;
30747 }
30748 return insn;
30749 }
30750
30751 /* Do Power9 specific sched_reorder2 reordering of ready list. */
30752
30753 static int
30754 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30755 {
30756 int pos;
30757 int i;
30758 rtx_insn *tmp;
30759 enum attr_type type, type2;
30760
30761 type = get_attr_type (last_scheduled_insn);
30762
30763 /* Try to issue fixed point divides back-to-back in pairs so they will be
30764 routed to separate execution units and execute in parallel. */
30765 if (type == TYPE_DIV && divide_cnt == 0)
30766 {
30767 /* First divide has been scheduled. */
30768 divide_cnt = 1;
30769
30770 /* Scan the ready list looking for another divide, if found move it
30771 to the end of the list so it is chosen next. */
30772 pos = lastpos;
30773 while (pos >= 0)
30774 {
30775 if (recog_memoized (ready[pos]) >= 0
30776 && get_attr_type (ready[pos]) == TYPE_DIV)
30777 {
30778 tmp = ready[pos];
30779 for (i = pos; i < lastpos; i++)
30780 ready[i] = ready[i + 1];
30781 ready[lastpos] = tmp;
30782 break;
30783 }
30784 pos--;
30785 }
30786 }
30787 else
30788 {
30789 /* Last insn was the 2nd divide or not a divide, reset the counter. */
30790 divide_cnt = 0;
30791
30792 /* The best dispatch throughput for vector and vector load insns can be
30793 achieved by interleaving a vector and vector load such that they'll
30794 dispatch to the same superslice. If this pairing cannot be achieved
30795 then it is best to pair vector insns together and vector load insns
30796 together.
30797
30798 To aid in this pairing, vec_pairing maintains the current state with
30799 the following values:
30800
30801 0 : Initial state, no vecload/vector pairing has been started.
30802
30803 1 : A vecload or vector insn has been issued and a candidate for
30804 pairing has been found and moved to the end of the ready
30805 list. */
30806 if (type == TYPE_VECLOAD)
30807 {
30808 /* Issued a vecload. */
30809 if (vec_pairing == 0)
30810 {
30811 int vecload_pos = -1;
30812 /* We issued a single vecload, look for a vector insn to pair it
30813 with. If one isn't found, try to pair another vecload. */
30814 pos = lastpos;
30815 while (pos >= 0)
30816 {
30817 if (recog_memoized (ready[pos]) >= 0)
30818 {
30819 type2 = get_attr_type (ready[pos]);
30820 if (is_power9_pairable_vec_type (type2))
30821 {
30822 /* Found a vector insn to pair with, move it to the
30823 end of the ready list so it is scheduled next. */
30824 tmp = ready[pos];
30825 for (i = pos; i < lastpos; i++)
30826 ready[i] = ready[i + 1];
30827 ready[lastpos] = tmp;
30828 vec_pairing = 1;
30829 return cached_can_issue_more;
30830 }
30831 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
30832 /* Remember position of first vecload seen. */
30833 vecload_pos = pos;
30834 }
30835 pos--;
30836 }
30837 if (vecload_pos >= 0)
30838 {
30839 /* Didn't find a vector to pair with but did find a vecload,
30840 move it to the end of the ready list. */
30841 tmp = ready[vecload_pos];
30842 for (i = vecload_pos; i < lastpos; i++)
30843 ready[i] = ready[i + 1];
30844 ready[lastpos] = tmp;
30845 vec_pairing = 1;
30846 return cached_can_issue_more;
30847 }
30848 }
30849 }
30850 else if (is_power9_pairable_vec_type (type))
30851 {
30852 /* Issued a vector operation. */
30853 if (vec_pairing == 0)
30854 {
30855 int vec_pos = -1;
30856 /* We issued a single vector insn, look for a vecload to pair it
30857 with. If one isn't found, try to pair another vector. */
30858 pos = lastpos;
30859 while (pos >= 0)
30860 {
30861 if (recog_memoized (ready[pos]) >= 0)
30862 {
30863 type2 = get_attr_type (ready[pos]);
30864 if (type2 == TYPE_VECLOAD)
30865 {
30866 /* Found a vecload insn to pair with, move it to the
30867 end of the ready list so it is scheduled next. */
30868 tmp = ready[pos];
30869 for (i = pos; i < lastpos; i++)
30870 ready[i] = ready[i + 1];
30871 ready[lastpos] = tmp;
30872 vec_pairing = 1;
30873 return cached_can_issue_more;
30874 }
30875 else if (is_power9_pairable_vec_type (type2)
30876 && vec_pos == -1)
30877 /* Remember position of first vector insn seen. */
30878 vec_pos = pos;
30879 }
30880 pos--;
30881 }
30882 if (vec_pos >= 0)
30883 {
30884 /* Didn't find a vecload to pair with but did find a vector
30885 insn, move it to the end of the ready list. */
30886 tmp = ready[vec_pos];
30887 for (i = vec_pos; i < lastpos; i++)
30888 ready[i] = ready[i + 1];
30889 ready[lastpos] = tmp;
30890 vec_pairing = 1;
30891 return cached_can_issue_more;
30892 }
30893 }
30894 }
30895
30896 /* We've either finished a vec/vecload pair, couldn't find an insn to
30897 continue the current pair, or the last insn had nothing to do
30898 with pairing. In any case, reset the state. */
30899 vec_pairing = 0;
30900 }
30901
30902 return cached_can_issue_more;
30903 }
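/* (Illustrative sketch, not part of the original source.)  The pairing
   code above repeatedly uses one idiom: remove the chosen insn from
   position POS and append it at the end of the ready array, since the
   scheduler issues from the end.  As a standalone helper it would be:

     static void
     move_to_end_of_ready (rtx_insn **ready, int pos, int lastpos)
     {
       rtx_insn *tmp = ready[pos];
       for (int i = pos; i < lastpos; i++)
         ready[i] = ready[i + 1];
       ready[lastpos] = tmp;
     }
*/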
30904
30905 /* We are about to begin issuing insns for this clock cycle. */
30906
30907 static int
30908 rs6000_sched_reorder (FILE *dump, int sched_verbose,
30909 rtx_insn **ready,
30910 int *pn_ready,
30911 int clock_var ATTRIBUTE_UNUSED)
30912 {
30913 int n_ready = *pn_ready;
30914
30915 if (sched_verbose)
30916 fprintf (dump, "// rs6000_sched_reorder :\n");
30917
30918 /* Reorder the ready list if the next insn to issue (the last
30919 in the ready array) is a nonpipelined insn. */
30920 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
30921 {
30922 if (is_nonpipeline_insn (ready[n_ready - 1])
30923 && (recog_memoized (ready[n_ready - 2]) > 0))
30924 /* Simply swap first two insns. */
30925 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
30926 }
30927
30928 if (rs6000_tune == PROCESSOR_POWER6)
30929 load_store_pendulum = 0;
30930
30931 return rs6000_issue_rate ();
30932 }
30933
30934 /* Like rs6000_sched_reorder, but called after issuing each insn. */
30935
30936 static int
30937 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
30938 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
30939 {
30940 if (sched_verbose)
30941 fprintf (dump, "// rs6000_sched_reorder2 :\n");
30942
30943 /* For Power6, we need to handle some special cases to try to keep the
30944 store queue from overflowing and triggering expensive flushes.
30945
30946 This code monitors how load and store instructions are being issued
30947 and skews the ready list one way or the other to increase the likelihood
30948 that a desired instruction is issued at the proper time.
30949
30950 A couple of things are done. First, we maintain a "load_store_pendulum"
30951 to track the current state of load/store issue.
30952
30953 - If the pendulum is at zero, then no loads or stores have been
30954 issued in the current cycle so we do nothing.
30955
30956 - If the pendulum is 1, then a single load has been issued in this
30957 cycle and we attempt to locate another load in the ready list to
30958 issue with it.
30959
30960 - If the pendulum is -2, then two stores have already been
30961 issued in this cycle, so we increase the priority of the first load
30962 in the ready list to increase its likelihood of being chosen first
30963 in the next cycle.
30964
30965 - If the pendulum is -1, then a single store has been issued in this
30966 cycle and we attempt to locate another store in the ready list to
30967 issue with it, preferring a store to an adjacent memory location to
30968 facilitate store pairing in the store queue.
30969
30970 - If the pendulum is 2, then two loads have already been
30971 issued in this cycle, so we increase the priority of the first store
30972 in the ready list to increase its likelihood of being chosen first
30973 in the next cycle.
30974
30975 - If the pendulum < -2 or > 2, then do nothing.
30976
30977 Note: This code covers the most common scenarios. There exist
30978 non-load/store instructions which make use of the LSU and which
30979 would need to be accounted for to strictly model the behavior
30980 of the machine. Those instructions are currently left unaccounted
30981 for, to help minimize the compile-time overhead of this code.
30982 */
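/* (Illustrative sketch, not part of the original source.)  Ignoring the
   ready-list manipulation, the pendulum itself is a small state machine:

     pendulum == 0   nothing issued this cycle, do nothing
     store issued    pendulum--   (-1: look for a second store to pair)
     store issued    pendulum--   (-2: boost the first ready load)
     load issued     pendulum++   (+1: look for a second load to pair)
     load issued     pendulum++   (+2: boost the first ready store)

   The boost cases below step the pendulum a further notch to -3/+3,
   outside the active range, so at most one insn per cycle has its
   priority raised.  */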
30983 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
30984 {
30985 int pos;
30986 int i;
30987 rtx_insn *tmp;
30988 rtx load_mem, str_mem;
30989
30990 if (is_store_insn (last_scheduled_insn, &str_mem))
30991 /* Issuing a store, swing the load_store_pendulum to the left */
30992 load_store_pendulum--;
30993 else if (is_load_insn (last_scheduled_insn, &load_mem))
30994 /* Issuing a load, swing the load_store_pendulum to the right */
30995 load_store_pendulum++;
30996 else
30997 return cached_can_issue_more;
30998
30999 /* If the pendulum is balanced, or there is only one instruction on
31000 the ready list, then all is well, so return. */
31001 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31002 return cached_can_issue_more;
31003
31004 if (load_store_pendulum == 1)
31005 {
31006 /* A load has been issued in this cycle. Scan the ready list
31007 for another load to issue with it */
31008 pos = *pn_ready - 1;
31009
31010 while (pos >= 0)
31011 {
31012 if (is_load_insn (ready[pos], &load_mem))
31013 {
31014 /* Found a load. Move it to the head of the ready list,
31015 and adjust its priority so that it is more likely to
31016 stay there */
31017 tmp = ready[pos];
31018 for (i = pos; i < *pn_ready - 1; i++)
31019 ready[i] = ready[i + 1];
31020 ready[*pn_ready - 1] = tmp;
31021
31022 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31023 INSN_PRIORITY (tmp)++;
31024 break;
31025 }
31026 pos--;
31027 }
31028 }
31029 else if (load_store_pendulum == -2)
31030 {
31031 /* Two stores have been issued in this cycle. Increase the
31032 priority of the first load in the ready list to favor it for
31033 issuing in the next cycle. */
31034 pos = *pn_ready - 1;
31035
31036 while (pos >= 0)
31037 {
31038 if (is_load_insn (ready[pos], &load_mem)
31039 && !sel_sched_p ()
31040 && INSN_PRIORITY_KNOWN (ready[pos]))
31041 {
31042 INSN_PRIORITY (ready[pos])++;
31043
31044 /* Adjust the pendulum to account for the fact that a load
31045 was found and increased in priority. This is to prevent
31046 increasing the priority of multiple loads */
31047 load_store_pendulum--;
31048
31049 break;
31050 }
31051 pos--;
31052 }
31053 }
31054 else if (load_store_pendulum == -1)
31055 {
31056 /* A store has been issued in this cycle. Scan the ready list for
31057 another store to issue with it, preferring a store to an adjacent
31058 memory location */
31059 int first_store_pos = -1;
31060
31061 pos = *pn_ready - 1;
31062
31063 while (pos >= 0)
31064 {
31065 if (is_store_insn (ready[pos], &str_mem))
31066 {
31067 rtx str_mem2;
31068 /* Maintain the index of the first store found on the
31069 list */
31070 if (first_store_pos == -1)
31071 first_store_pos = pos;
31072
31073 if (is_store_insn (last_scheduled_insn, &str_mem2)
31074 && adjacent_mem_locations (str_mem, str_mem2))
31075 {
31076 /* Found an adjacent store. Move it to the head of the
31077 ready list, and adjust its priority so that it is
31078 more likely to stay there */
31079 tmp = ready[pos];
31080 for (i = pos; i < *pn_ready - 1; i++)
31081 ready[i] = ready[i + 1];
31082 ready[*pn_ready - 1] = tmp;
31083
31084 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31085 INSN_PRIORITY (tmp)++;
31086
31087 first_store_pos = -1;
31088
31089 break;
31090 }
31091 }
31092 pos--;
31093 }
31094
31095 if (first_store_pos >= 0)
31096 {
31097 /* An adjacent store wasn't found, but a non-adjacent store was,
31098 so move the non-adjacent store to the front of the ready
31099 list, and adjust its priority so that it is more likely to
31100 stay there. */
31101 tmp = ready[first_store_pos];
31102 for (i = first_store_pos; i < *pn_ready - 1; i++)
31103 ready[i] = ready[i + 1];
31104 ready[*pn_ready - 1] = tmp;
31105 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31106 INSN_PRIORITY (tmp)++;
31107 }
31108 }
31109 else if (load_store_pendulum == 2)
31110 {
31111 /* Two loads have been issued in this cycle. Increase the priority
31112 of the first store in the ready list to favor it for issuing in
31113 the next cycle. */
31114 pos = *pn_ready - 1;
31115
31116 while (pos >= 0)
31117 {
31118 if (is_store_insn (ready[pos], &str_mem)
31119 && !sel_sched_p ()
31120 && INSN_PRIORITY_KNOWN (ready[pos]))
31121 {
31122 INSN_PRIORITY (ready[pos])++;
31123
31124 /* Adjust the pendulum to account for the fact that a store
31125 was found and increased in priority. This is to prevent
31126 increasing the priority of multiple stores */
31127 load_store_pendulum++;
31128
31129 break;
31130 }
31131 pos--;
31132 }
31133 }
31134 }
31135
31136 /* Do Power9 dependent reordering if necessary. */
31137 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31138 && recog_memoized (last_scheduled_insn) >= 0)
31139 return power9_sched_reorder2 (ready, *pn_ready - 1);
31140
31141 return cached_can_issue_more;
31142 }
31143
31144 /* Return whether the presence of INSN causes a dispatch group termination
31145 of group WHICH_GROUP.
31146
31147 If WHICH_GROUP == current_group, this function will return true if INSN
31148 causes the termination of the current group (i.e., the dispatch group to
31149 which INSN belongs). This means that INSN will be the last insn in the
31150 group it belongs to.
31151
31152 If WHICH_GROUP == previous_group, this function will return true if INSN
31153 causes the termination of the previous group (i.e., the dispatch group that
31154 precedes the group to which INSN belongs). This means that INSN will be
31155 the first insn in the group it belongs to. */
31156
31157 static bool
31158 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31159 {
31160 bool first, last;
31161
31162 if (! insn)
31163 return false;
31164
31165 first = insn_must_be_first_in_group (insn);
31166 last = insn_must_be_last_in_group (insn);
31167
31168 if (first && last)
31169 return true;
31170
31171 if (which_group == current_group)
31172 return last;
31173 else if (which_group == previous_group)
31174 return first;
31175
31176 return false;
31177 }
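/* (Illustrative summary, not part of the original source.)  The cases
   above combine as follows, writing FIRST for insn_must_be_first_in_group
   and LAST for insn_must_be_last_in_group:

     FIRST  LAST   current_group   previous_group
     false  false  false           false
     false  true   true            false
     true   false  false           true
     true   true   true            true
*/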
31178
31179
31180 static bool
31181 insn_must_be_first_in_group (rtx_insn *insn)
31182 {
31183 enum attr_type type;
31184
31185 if (!insn
31186 || NOTE_P (insn)
31187 || DEBUG_INSN_P (insn)
31188 || GET_CODE (PATTERN (insn)) == USE
31189 || GET_CODE (PATTERN (insn)) == CLOBBER)
31190 return false;
31191
31192 switch (rs6000_tune)
31193 {
31194 case PROCESSOR_POWER5:
31195 if (is_cracked_insn (insn))
31196 return true;
31197 /* FALLTHRU */
31198 case PROCESSOR_POWER4:
31199 if (is_microcoded_insn (insn))
31200 return true;
31201
31202 if (!rs6000_sched_groups)
31203 return false;
31204
31205 type = get_attr_type (insn);
31206
31207 switch (type)
31208 {
31209 case TYPE_MFCR:
31210 case TYPE_MFCRF:
31211 case TYPE_MTCR:
31212 case TYPE_CR_LOGICAL:
31213 case TYPE_MTJMPR:
31214 case TYPE_MFJMPR:
31215 case TYPE_DIV:
31216 case TYPE_LOAD_L:
31217 case TYPE_STORE_C:
31218 case TYPE_ISYNC:
31219 case TYPE_SYNC:
31220 return true;
31221 default:
31222 break;
31223 }
31224 break;
31225 case PROCESSOR_POWER6:
31226 type = get_attr_type (insn);
31227
31228 switch (type)
31229 {
31230 case TYPE_EXTS:
31231 case TYPE_CNTLZ:
31232 case TYPE_TRAP:
31233 case TYPE_MUL:
31234 case TYPE_INSERT:
31235 case TYPE_FPCOMPARE:
31236 case TYPE_MFCR:
31237 case TYPE_MTCR:
31238 case TYPE_MFJMPR:
31239 case TYPE_MTJMPR:
31240 case TYPE_ISYNC:
31241 case TYPE_SYNC:
31242 case TYPE_LOAD_L:
31243 case TYPE_STORE_C:
31244 return true;
31245 case TYPE_SHIFT:
31246 if (get_attr_dot (insn) == DOT_NO
31247 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31248 return true;
31249 else
31250 break;
31251 case TYPE_DIV:
31252 if (get_attr_size (insn) == SIZE_32)
31253 return true;
31254 else
31255 break;
31256 case TYPE_LOAD:
31257 case TYPE_STORE:
31258 case TYPE_FPLOAD:
31259 case TYPE_FPSTORE:
31260 if (get_attr_update (insn) == UPDATE_YES)
31261 return true;
31262 else
31263 break;
31264 default:
31265 break;
31266 }
31267 break;
31268 case PROCESSOR_POWER7:
31269 type = get_attr_type (insn);
31270
31271 switch (type)
31272 {
31273 case TYPE_CR_LOGICAL:
31274 case TYPE_MFCR:
31275 case TYPE_MFCRF:
31276 case TYPE_MTCR:
31277 case TYPE_DIV:
31278 case TYPE_ISYNC:
31279 case TYPE_LOAD_L:
31280 case TYPE_STORE_C:
31281 case TYPE_MFJMPR:
31282 case TYPE_MTJMPR:
31283 return true;
31284 case TYPE_MUL:
31285 case TYPE_SHIFT:
31286 case TYPE_EXTS:
31287 if (get_attr_dot (insn) == DOT_YES)
31288 return true;
31289 else
31290 break;
31291 case TYPE_LOAD:
31292 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31293 || get_attr_update (insn) == UPDATE_YES)
31294 return true;
31295 else
31296 break;
31297 case TYPE_STORE:
31298 case TYPE_FPLOAD:
31299 case TYPE_FPSTORE:
31300 if (get_attr_update (insn) == UPDATE_YES)
31301 return true;
31302 else
31303 break;
31304 default:
31305 break;
31306 }
31307 break;
31308 case PROCESSOR_POWER8:
31309 type = get_attr_type (insn);
31310
31311 switch (type)
31312 {
31313 case TYPE_CR_LOGICAL:
31314 case TYPE_MFCR:
31315 case TYPE_MFCRF:
31316 case TYPE_MTCR:
31317 case TYPE_SYNC:
31318 case TYPE_ISYNC:
31319 case TYPE_LOAD_L:
31320 case TYPE_STORE_C:
31321 case TYPE_VECSTORE:
31322 case TYPE_MFJMPR:
31323 case TYPE_MTJMPR:
31324 return true;
31325 case TYPE_SHIFT:
31326 case TYPE_EXTS:
31327 case TYPE_MUL:
31328 if (get_attr_dot (insn) == DOT_YES)
31329 return true;
31330 else
31331 break;
31332 case TYPE_LOAD:
31333 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31334 || get_attr_update (insn) == UPDATE_YES)
31335 return true;
31336 else
31337 break;
31338 case TYPE_STORE:
31339 if (get_attr_update (insn) == UPDATE_YES
31340 && get_attr_indexed (insn) == INDEXED_YES)
31341 return true;
31342 else
31343 break;
31344 default:
31345 break;
31346 }
31347 break;
31348 default:
31349 break;
31350 }
31351
31352 return false;
31353 }
31354
31355 static bool
31356 insn_must_be_last_in_group (rtx_insn *insn)
31357 {
31358 enum attr_type type;
31359
31360 if (!insn
31361 || NOTE_P (insn)
31362 || DEBUG_INSN_P (insn)
31363 || GET_CODE (PATTERN (insn)) == USE
31364 || GET_CODE (PATTERN (insn)) == CLOBBER)
31365 return false;
31366
31367 switch (rs6000_tune) {
31368 case PROCESSOR_POWER4:
31369 case PROCESSOR_POWER5:
31370 if (is_microcoded_insn (insn))
31371 return true;
31372
31373 if (is_branch_slot_insn (insn))
31374 return true;
31375
31376 break;
31377 case PROCESSOR_POWER6:
31378 type = get_attr_type (insn);
31379
31380 switch (type)
31381 {
31382 case TYPE_EXTS:
31383 case TYPE_CNTLZ:
31384 case TYPE_TRAP:
31385 case TYPE_MUL:
31386 case TYPE_FPCOMPARE:
31387 case TYPE_MFCR:
31388 case TYPE_MTCR:
31389 case TYPE_MFJMPR:
31390 case TYPE_MTJMPR:
31391 case TYPE_ISYNC:
31392 case TYPE_SYNC:
31393 case TYPE_LOAD_L:
31394 case TYPE_STORE_C:
31395 return true;
31396 case TYPE_SHIFT:
31397 if (get_attr_dot (insn) == DOT_NO
31398 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31399 return true;
31400 else
31401 break;
31402 case TYPE_DIV:
31403 if (get_attr_size (insn) == SIZE_32)
31404 return true;
31405 else
31406 break;
31407 default:
31408 break;
31409 }
31410 break;
31411 case PROCESSOR_POWER7:
31412 type = get_attr_type (insn);
31413
31414 switch (type)
31415 {
31416 case TYPE_ISYNC:
31417 case TYPE_SYNC:
31418 case TYPE_LOAD_L:
31419 case TYPE_STORE_C:
31420 return true;
31421 case TYPE_LOAD:
31422 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31423 && get_attr_update (insn) == UPDATE_YES)
31424 return true;
31425 else
31426 break;
31427 case TYPE_STORE:
31428 if (get_attr_update (insn) == UPDATE_YES
31429 && get_attr_indexed (insn) == INDEXED_YES)
31430 return true;
31431 else
31432 break;
31433 default:
31434 break;
31435 }
31436 break;
31437 case PROCESSOR_POWER8:
31438 type = get_attr_type (insn);
31439
31440 switch (type)
31441 {
31442 case TYPE_MFCR:
31443 case TYPE_MTCR:
31444 case TYPE_ISYNC:
31445 case TYPE_SYNC:
31446 case TYPE_LOAD_L:
31447 case TYPE_STORE_C:
31448 return true;
31449 case TYPE_LOAD:
31450 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31451 && get_attr_update (insn) == UPDATE_YES)
31452 return true;
31453 else
31454 break;
31455 case TYPE_STORE:
31456 if (get_attr_update (insn) == UPDATE_YES
31457 && get_attr_indexed (insn) == INDEXED_YES)
31458 return true;
31459 else
31460 break;
31461 default:
31462 break;
31463 }
31464 break;
31465 default:
31466 break;
31467 }
31468
31469 return false;
31470 }
31471
31472 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31473 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31474
31475 static bool
31476 is_costly_group (rtx *group_insns, rtx next_insn)
31477 {
31478 int i;
31479 int issue_rate = rs6000_issue_rate ();
31480
31481 for (i = 0; i < issue_rate; i++)
31482 {
31483 sd_iterator_def sd_it;
31484 dep_t dep;
31485 rtx insn = group_insns[i];
31486
31487 if (!insn)
31488 continue;
31489
31490 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31491 {
31492 rtx next = DEP_CON (dep);
31493
31494 if (next == next_insn
31495 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31496 return true;
31497 }
31498 }
31499
31500 return false;
31501 }
31502
31503 /* Utility of the function redefine_groups.
31504 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31505 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31506 to keep it "far" (in a separate group) from GROUP_INSNS, following
31507 one of the following schemes, depending on the value of the flag
31508 -minsert_sched_nops = X:
31509 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31510 in order to force NEXT_INSN into a separate group.
31511 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31512 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31513 insertion (has a group just ended, how many vacant issue slots remain in the
31514 last group, and how many dispatch groups were encountered so far). */
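/* (Illustrative sketch, not part of the original source.)  Scheme (2) is
   the simple one: with -minsert-sched-nops=2, for example, the loop below
   reduces to

     for (int k = 0; k < 2; k++)
       emit_insn_before (gen_nop (), next_insn);

   plus the bookkeeping that tracks how many issue slots the nops consume.
   Scheme (1) instead keeps emitting nops until the current dispatch group
   is exhausted, so that NEXT_INSN is guaranteed to start a new group.  */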
31515
31516 static int
31517 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31518 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31519 int *group_count)
31520 {
31521 rtx nop;
31522 bool force;
31523 int issue_rate = rs6000_issue_rate ();
31524 bool end = *group_end;
31525 int i;
31526
31527 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31528 return can_issue_more;
31529
31530 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31531 return can_issue_more;
31532
31533 force = is_costly_group (group_insns, next_insn);
31534 if (!force)
31535 return can_issue_more;
31536
31537 if (sched_verbose > 6)
31538 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31539 *group_count, can_issue_more);
31540
31541 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31542 {
31543 if (*group_end)
31544 can_issue_more = 0;
31545
31546 /* Since only a branch can be issued in the last issue_slot, it is
31547 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31548 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31549 in this case the last nop will start a new group and the branch
31550 will be forced to the new group. */
31551 if (can_issue_more && !is_branch_slot_insn (next_insn))
31552 can_issue_more--;
31553
31554 /* Do we have a special group ending nop? */
31555 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31556 || rs6000_tune == PROCESSOR_POWER8)
31557 {
31558 nop = gen_group_ending_nop ();
31559 emit_insn_before (nop, next_insn);
31560 can_issue_more = 0;
31561 }
31562 else
31563 while (can_issue_more > 0)
31564 {
31565 nop = gen_nop ();
31566 emit_insn_before (nop, next_insn);
31567 can_issue_more--;
31568 }
31569
31570 *group_end = true;
31571 return 0;
31572 }
31573
31574 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31575 {
31576 int n_nops = rs6000_sched_insert_nops;
31577
31578 /* Nops can't be issued from the branch slot, so the effective
31579 issue_rate for nops is 'issue_rate - 1'. */
31580 if (can_issue_more == 0)
31581 can_issue_more = issue_rate;
31582 can_issue_more--;
31583 if (can_issue_more == 0)
31584 {
31585 can_issue_more = issue_rate - 1;
31586 (*group_count)++;
31587 end = true;
31588 for (i = 0; i < issue_rate; i++)
31589 {
31590 group_insns[i] = 0;
31591 }
31592 }
31593
31594 while (n_nops > 0)
31595 {
31596 nop = gen_nop ();
31597 emit_insn_before (nop, next_insn);
31598 if (can_issue_more == issue_rate - 1) /* new group begins */
31599 end = false;
31600 can_issue_more--;
31601 if (can_issue_more == 0)
31602 {
31603 can_issue_more = issue_rate - 1;
31604 (*group_count)++;
31605 end = true;
31606 for (i = 0; i < issue_rate; i++)
31607 {
31608 group_insns[i] = 0;
31609 }
31610 }
31611 n_nops--;
31612 }
31613
31614 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31615 can_issue_more++;
31616
31617 /* Is next_insn going to start a new group? */
31618 *group_end
31619 = (end
31620 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31621 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31622 || (can_issue_more < issue_rate &&
31623 insn_terminates_group_p (next_insn, previous_group)));
31624 if (*group_end && end)
31625 (*group_count)--;
31626
31627 if (sched_verbose > 6)
31628 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31629 *group_count, can_issue_more);
31630 return can_issue_more;
31631 }
31632
31633 return can_issue_more;
31634 }
31635
31636 /* This function tries to synch the dispatch groups that the compiler "sees"
31637 with the dispatch groups that the processor dispatcher is expected to
31638 form in practice. It tries to achieve this synchronization by forcing the
31639 estimated processor grouping on the compiler (as opposed to the function
31640 'pad_groups' which tries to force the scheduler's grouping on the processor).
31641
31642 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31643 examines the (estimated) dispatch groups that will be formed by the processor
31644 dispatcher. It marks these group boundaries to reflect the estimated
31645 processor grouping, overriding the grouping that the scheduler had marked.
31646 Depending on the value of the flag '-minsert-sched-nops' this function can
31647 force certain insns into separate groups or force a certain distance between
31648 them by inserting nops, for example, if there exists a "costly dependence"
31649 between the insns.
31650
31651 The function estimates the group boundaries that the processor will form as
31652 follows: It keeps track of how many vacant issue slots are available after
31653 each insn. A subsequent insn will start a new group if one of the following
31654 4 cases applies:
31655 - no more vacant issue slots remain in the current dispatch group.
31656 - only the last issue slot, which is the branch slot, is vacant, but the next
31657 insn is not a branch.
31658 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31659 which means that a cracked insn (which occupies two issue slots) can't be
31660 issued in this group.
31661 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31662 start a new group. */
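/* (Illustrative sketch, not part of the original source.)  The four cases
   above correspond to the boundary predicate used in the loop below:

     static bool
     starts_new_group_p (int can_issue_more, int issue_rate,
                         rtx_insn *next_insn)
     {
       return (can_issue_more == 0
               || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
               || (can_issue_more <= 2 && is_cracked_insn (next_insn))
               || (can_issue_more < issue_rate
                   && insn_terminates_group_p (next_insn, previous_group)));
     }
*/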
31663
31664 static int
31665 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31666 rtx_insn *tail)
31667 {
31668 rtx_insn *insn, *next_insn;
31669 int issue_rate;
31670 int can_issue_more;
31671 int slot, i;
31672 bool group_end;
31673 int group_count = 0;
31674 rtx *group_insns;
31675
31676 /* Initialize. */
31677 issue_rate = rs6000_issue_rate ();
31678 group_insns = XALLOCAVEC (rtx, issue_rate);
31679 for (i = 0; i < issue_rate; i++)
31680 {
31681 group_insns[i] = 0;
31682 }
31683 can_issue_more = issue_rate;
31684 slot = 0;
31685 insn = get_next_active_insn (prev_head_insn, tail);
31686 group_end = false;
31687
31688 while (insn != NULL_RTX)
31689 {
31690 slot = (issue_rate - can_issue_more);
31691 group_insns[slot] = insn;
31692 can_issue_more =
31693 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31694 if (insn_terminates_group_p (insn, current_group))
31695 can_issue_more = 0;
31696
31697 next_insn = get_next_active_insn (insn, tail);
31698 if (next_insn == NULL_RTX)
31699 return group_count + 1;
31700
31701 /* Is next_insn going to start a new group? */
31702 group_end
31703 = (can_issue_more == 0
31704 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31705 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31706 || (can_issue_more < issue_rate &&
31707 insn_terminates_group_p (next_insn, previous_group)));
31708
31709 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31710 next_insn, &group_end, can_issue_more,
31711 &group_count);
31712
31713 if (group_end)
31714 {
31715 group_count++;
31716 can_issue_more = 0;
31717 for (i = 0; i < issue_rate; i++)
31718 {
31719 group_insns[i] = 0;
31720 }
31721 }
31722
31723 if (GET_MODE (next_insn) == TImode && can_issue_more)
31724 PUT_MODE (next_insn, VOIDmode);
31725 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
31726 PUT_MODE (next_insn, TImode);
31727
31728 insn = next_insn;
31729 if (can_issue_more == 0)
31730 can_issue_more = issue_rate;
31731 } /* while */
31732
31733 return group_count;
31734 }
31735
31736 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
31737 dispatch group boundaries that the scheduler had marked. Pad with nops
31738 any dispatch groups which have vacant issue slots, in order to force the
31739 scheduler's grouping on the processor dispatcher. The function
31740 returns the number of dispatch groups found. */
31741
31742 static int
31743 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31744 rtx_insn *tail)
31745 {
31746 rtx_insn *insn, *next_insn;
31747 rtx nop;
31748 int issue_rate;
31749 int can_issue_more;
31750 int group_end;
31751 int group_count = 0;
31752
31753 /* Initialize. */
31754 issue_rate = rs6000_issue_rate ();
31755 can_issue_more = issue_rate;
31756
31757 insn = get_next_active_insn (prev_head_insn, tail);
31758 next_insn = get_next_active_insn (insn, tail);
31759
31760 while (insn != NULL_RTX)
31761 {
31762 can_issue_more =
31763 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31764
31765 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
31766
31767 if (next_insn == NULL_RTX)
31768 break;
31769
31770 if (group_end)
31771 {
31772 /* If the scheduler had marked group termination at this location
31773 (between insn and next_insn), and neither insn nor next_insn will
31774 force group termination, pad the group with nops to force group
31775 termination. */
31776 if (can_issue_more
31777 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
31778 && !insn_terminates_group_p (insn, current_group)
31779 && !insn_terminates_group_p (next_insn, previous_group))
31780 {
31781 if (!is_branch_slot_insn (next_insn))
31782 can_issue_more--;
31783
31784 while (can_issue_more)
31785 {
31786 nop = gen_nop ();
31787 emit_insn_before (nop, next_insn);
31788 can_issue_more--;
31789 }
31790 }
31791
31792 can_issue_more = issue_rate;
31793 group_count++;
31794 }
31795
31796 insn = next_insn;
31797 next_insn = get_next_active_insn (insn, tail);
31798 }
31799
31800 return group_count;
31801 }
31802
31803 /* We're beginning a new block. Initialize data structures as necessary. */
31804
31805 static void
31806 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
31807 int sched_verbose ATTRIBUTE_UNUSED,
31808 int max_ready ATTRIBUTE_UNUSED)
31809 {
31810 last_scheduled_insn = NULL;
31811 load_store_pendulum = 0;
31812 divide_cnt = 0;
31813 vec_pairing = 0;
31814 }
31815
31816 /* The following function is called at the end of scheduling BB.
31817 After reload, it inserts nops to enforce the intended insn group boundaries. */
31818
31819 static void
31820 rs6000_sched_finish (FILE *dump, int sched_verbose)
31821 {
31822 int n_groups;
31823
31824 if (sched_verbose)
31825 fprintf (dump, "=== Finishing schedule.\n");
31826
31827 if (reload_completed && rs6000_sched_groups)
31828 {
31829 /* Do not run sched_finish hook when selective scheduling enabled. */
31830 if (sel_sched_p ())
31831 return;
31832
31833 if (rs6000_sched_insert_nops == sched_finish_none)
31834 return;
31835
31836 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
31837 n_groups = pad_groups (dump, sched_verbose,
31838 current_sched_info->prev_head,
31839 current_sched_info->next_tail);
31840 else
31841 n_groups = redefine_groups (dump, sched_verbose,
31842 current_sched_info->prev_head,
31843 current_sched_info->next_tail);
31844
31845 if (sched_verbose >= 6)
31846 {
31847 fprintf (dump, "ngroups = %d\n", n_groups);
31848 print_rtl (dump, current_sched_info->prev_head);
31849 fprintf (dump, "Done finish_sched\n");
31850 }
31851 }
31852 }
31853
31854 struct rs6000_sched_context
31855 {
31856 short cached_can_issue_more;
31857 rtx_insn *last_scheduled_insn;
31858 int load_store_pendulum;
31859 int divide_cnt;
31860 int vec_pairing;
31861 };
31862
31863 typedef struct rs6000_sched_context rs6000_sched_context_def;
31864 typedef rs6000_sched_context_def *rs6000_sched_context_t;
31865
31866 /* Allocate store for new scheduling context. */
31867 static void *
31868 rs6000_alloc_sched_context (void)
31869 {
31870 return xmalloc (sizeof (rs6000_sched_context_def));
31871 }
31872
31873 /* If CLEAN_P is true, initialize _SC with clean data;
31874 otherwise initialize it from the global context. */
31875 static void
31876 rs6000_init_sched_context (void *_sc, bool clean_p)
31877 {
31878 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31879
31880 if (clean_p)
31881 {
31882 sc->cached_can_issue_more = 0;
31883 sc->last_scheduled_insn = NULL;
31884 sc->load_store_pendulum = 0;
31885 sc->divide_cnt = 0;
31886 sc->vec_pairing = 0;
31887 }
31888 else
31889 {
31890 sc->cached_can_issue_more = cached_can_issue_more;
31891 sc->last_scheduled_insn = last_scheduled_insn;
31892 sc->load_store_pendulum = load_store_pendulum;
31893 sc->divide_cnt = divide_cnt;
31894 sc->vec_pairing = vec_pairing;
31895 }
31896 }
31897
31898 /* Sets the global scheduling context to the one pointed to by _SC. */
31899 static void
31900 rs6000_set_sched_context (void *_sc)
31901 {
31902 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
31903
31904 gcc_assert (sc != NULL);
31905
31906 cached_can_issue_more = sc->cached_can_issue_more;
31907 last_scheduled_insn = sc->last_scheduled_insn;
31908 load_store_pendulum = sc->load_store_pendulum;
31909 divide_cnt = sc->divide_cnt;
31910 vec_pairing = sc->vec_pairing;
31911 }
31912
31913 /* Free _SC. */
31914 static void
31915 rs6000_free_sched_context (void *_sc)
31916 {
31917 gcc_assert (_sc != NULL);
31918
31919 free (_sc);
31920 }
31921
31922 static bool
31923 rs6000_sched_can_speculate_insn (rtx_insn *insn)
31924 {
31925 switch (get_attr_type (insn))
31926 {
31927 case TYPE_DIV:
31928 case TYPE_SDIV:
31929 case TYPE_DDIV:
31930 case TYPE_VECDIV:
31931 case TYPE_SSQRT:
31932 case TYPE_DSQRT:
31933 return false;
31934
31935 default:
31936 return true;
31937 }
31938 }
31939 \f
31940 /* Length in units of the trampoline for entering a nested function. */
31941
31942 int
31943 rs6000_trampoline_size (void)
31944 {
31945 int ret = 0;
31946
31947 switch (DEFAULT_ABI)
31948 {
31949 default:
31950 gcc_unreachable ();
31951
31952 case ABI_AIX:
31953 ret = (TARGET_32BIT) ? 12 : 24;
31954 break;
31955
31956 case ABI_ELFv2:
31957 gcc_assert (!TARGET_32BIT);
31958 ret = 32;
31959 break;
31960
31961 case ABI_DARWIN:
31962 case ABI_V4:
31963 ret = (TARGET_32BIT) ? 40 : 48;
31964 break;
31965 }
31966
31967 return ret;
31968 }
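/* (Summary, derived from the switch above.)  Trampoline sizes in bytes:

     ABI          32-bit   64-bit
     AIX            12       24
     ELFv2           -       32
     Darwin, V4     40       48
*/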
31969
31970 /* Emit RTL insns to initialize the variable parts of a trampoline.
31971 FNADDR is an RTX for the address of the function's pure code.
31972 CXT is an RTX for the static chain value for the function. */
31973
31974 static void
31975 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
31976 {
31977 int regsize = (TARGET_32BIT) ? 4 : 8;
31978 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
31979 rtx ctx_reg = force_reg (Pmode, cxt);
31980 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
31981
31982 switch (DEFAULT_ABI)
31983 {
31984 default:
31985 gcc_unreachable ();
31986
31987 /* Under AIX, just build the 3-word function descriptor. */
31988 case ABI_AIX:
31989 {
31990 rtx fnmem, fn_reg, toc_reg;
31991
31992 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
31993 error ("you cannot take the address of a nested function if you use "
31994 "the %qs option", "-mno-pointers-to-nested-functions");
31995
31996 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
31997 fn_reg = gen_reg_rtx (Pmode);
31998 toc_reg = gen_reg_rtx (Pmode);
31999
32000 /* Macro to shorten the code expansions below. */
32001 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32002
32003 m_tramp = replace_equiv_address (m_tramp, addr);
32004
32005 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32006 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32007 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32008 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32009 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32010
32011 # undef MEM_PLUS
32012 }
32013 break;
32014
32015 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32016 case ABI_ELFv2:
32017 case ABI_DARWIN:
32018 case ABI_V4:
32019 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32020 LCT_NORMAL, VOIDmode,
32021 addr, Pmode,
32022 GEN_INT (rs6000_trampoline_size ()), SImode,
32023 fnaddr, Pmode,
32024 ctx_reg, Pmode);
32025 break;
32026 }
32027 }
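/* (Illustrative layout, derived from the code above.)  The AIX-style
   trampoline built here is a 3-word function descriptor, one word being
   REGSIZE (4 or 8) bytes:

     word 0: entry address  (copied from the target function's descriptor)
     word 1: TOC pointer    (copied from the target function's descriptor)
     word 2: static chain   (CXT)
*/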
32028
32029 \f
32030 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32031 identifier as an argument, so the front end shouldn't look it up. */
32032
32033 static bool
32034 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32035 {
32036 return is_attribute_p ("altivec", attr_id);
32037 }
32038
32039 /* Handle the "altivec" attribute. The attribute may have
32040 arguments as follows:
32041
32042 __attribute__((altivec(vector__)))
32043 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32044 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32045
32046 and may appear more than once (e.g., 'vector bool char') in a
32047 given declaration. */
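/* For example (a hedged sketch of how the vector keywords map onto this
   attribute; my_v4si is a hypothetical name):

     typedef int my_v4si __attribute__ ((altivec (vector__)));

   declares a type equivalent to "vector int" (V4SImode below), and
   stacking bool__ on top of vector__ over an unsigned base type yields
   the "vector bool" types handled in the 'b' case.  */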
32048
32049 static tree
32050 rs6000_handle_altivec_attribute (tree *node,
32051 tree name ATTRIBUTE_UNUSED,
32052 tree args,
32053 int flags ATTRIBUTE_UNUSED,
32054 bool *no_add_attrs)
32055 {
32056 tree type = *node, result = NULL_TREE;
32057 machine_mode mode;
32058 int unsigned_p;
32059 char altivec_type
32060 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32061 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32062 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32063 : '?');
32064
32065 while (POINTER_TYPE_P (type)
32066 || TREE_CODE (type) == FUNCTION_TYPE
32067 || TREE_CODE (type) == METHOD_TYPE
32068 || TREE_CODE (type) == ARRAY_TYPE)
32069 type = TREE_TYPE (type);
32070
32071 mode = TYPE_MODE (type);
32072
32073 /* Check for invalid AltiVec type qualifiers. */
32074 if (type == long_double_type_node)
32075 error ("use of %<long double%> in AltiVec types is invalid");
32076 else if (type == boolean_type_node)
32077 error ("use of boolean types in AltiVec types is invalid");
32078 else if (TREE_CODE (type) == COMPLEX_TYPE)
32079 error ("use of %<complex%> in AltiVec types is invalid");
32080 else if (DECIMAL_FLOAT_MODE_P (mode))
32081 error ("use of decimal floating point types in AltiVec types is invalid");
32082 else if (!TARGET_VSX)
32083 {
32084 if (type == long_unsigned_type_node || type == long_integer_type_node)
32085 {
32086 if (TARGET_64BIT)
32087 error ("use of %<long%> in AltiVec types is invalid for "
32088 "64-bit code without %qs", "-mvsx");
32089 else if (rs6000_warn_altivec_long)
32090 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32091 "use %<int%>");
32092 }
32093 else if (type == long_long_unsigned_type_node
32094 || type == long_long_integer_type_node)
32095 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32096 "-mvsx");
32097 else if (type == double_type_node)
32098 error ("use of %<double%> in AltiVec types is invalid without %qs",
32099 "-mvsx");
32100 }
32101
32102 switch (altivec_type)
32103 {
32104 case 'v':
32105 unsigned_p = TYPE_UNSIGNED (type);
32106 switch (mode)
32107 {
32108 case E_TImode:
32109 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32110 break;
32111 case E_DImode:
32112 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32113 break;
32114 case E_SImode:
32115 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32116 break;
32117 case E_HImode:
32118 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32119 break;
32120 case E_QImode:
32121 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32122 break;
32123 case E_SFmode: result = V4SF_type_node; break;
32124 case E_DFmode: result = V2DF_type_node; break;
32125 /* If the user says 'vector int bool', we may be handed the 'bool'
32126 attribute _before_ the 'vector' attribute, and so select the
32127 proper type in the 'b' case below. */
32128 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32129 case E_V2DImode: case E_V2DFmode:
32130 result = type; break;
32131 default: break;
32132 }
32133 break;
32134 case 'b':
32135 switch (mode)
32136 {
32137 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32138 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32139 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32140 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node; break;
32141 default: break;
32142 }
32143 break;
32144 case 'p':
32145 switch (mode)
32146 {
32147 case E_V8HImode: result = pixel_V8HI_type_node; break;
32148 default: break;
32149 }
32150 default: break;
32151 }
32152
32153 /* Propagate qualifiers attached to the element type
32154 onto the vector type. */
32155 if (result && result != type && TYPE_QUALS (type))
32156 result = build_qualified_type (result, TYPE_QUALS (type));
32157
32158 *no_add_attrs = true; /* No need to hang on to the attribute. */
32159
32160 if (result)
32161 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32162
32163 return NULL_TREE;
32164 }
32165
32166 /* AltiVec defines five built-in scalar types that serve as vector
32167 elements; we must teach the compiler how to mangle them. The 128-bit
32168 floating point mangling is target-specific as well. */
32169
32170 static const char *
32171 rs6000_mangle_type (const_tree type)
32172 {
32173 type = TYPE_MAIN_VARIANT (type);
32174
32175 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32176 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32177 return NULL;
32178
32179 if (type == bool_char_type_node) return "U6__boolc";
32180 if (type == bool_short_type_node) return "U6__bools";
32181 if (type == pixel_type_node) return "u7__pixel";
32182 if (type == bool_int_type_node) return "U6__booli";
32183 if (type == bool_long_long_type_node) return "U6__boolx";
32184
32185 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32186 return "g";
32187 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32188 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32189
32190 /* For all other types, use the default mangling. */
32191 return NULL;
32192 }
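/* For instance (derived from the checks above): with IBM extended double,
   a "long double" parameter mangles as "g", while an IEEE 128-bit float
   mangles as "u9__ieee128" (or "U10__float128" when reproducing the GCC
   8.1 mangling), so the two 128-bit representations never collide in C++
   symbol names.  */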
32193
32194 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32195 struct attribute_spec.handler. */
32196
32197 static tree
32198 rs6000_handle_longcall_attribute (tree *node, tree name,
32199 tree args ATTRIBUTE_UNUSED,
32200 int flags ATTRIBUTE_UNUSED,
32201 bool *no_add_attrs)
32202 {
32203 if (TREE_CODE (*node) != FUNCTION_TYPE
32204 && TREE_CODE (*node) != FIELD_DECL
32205 && TREE_CODE (*node) != TYPE_DECL)
32206 {
32207 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32208 name);
32209 *no_add_attrs = true;
32210 }
32211
32212 return NULL_TREE;
32213 }
32214
32215 /* Set longcall attributes on all functions declared when
32216 rs6000_default_long_calls is true. */
32217 static void
32218 rs6000_set_default_type_attributes (tree type)
32219 {
32220 if (rs6000_default_long_calls
32221 && (TREE_CODE (type) == FUNCTION_TYPE
32222 || TREE_CODE (type) == METHOD_TYPE))
32223 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32224 NULL_TREE,
32225 TYPE_ATTRIBUTES (type));
32226
32227 #if TARGET_MACHO
32228 darwin_set_default_type_attributes (type);
32229 #endif
32230 }
32231
32232 /* Return a reference suitable for calling a function with the
32233 longcall attribute. */
32234
32235 rtx
32236 rs6000_longcall_ref (rtx call_ref)
32237 {
32238 const char *call_name;
32239 tree node;
32240
32241 if (GET_CODE (call_ref) != SYMBOL_REF)
32242 return call_ref;
32243
32244 /* System V adds '.' to the internal name, so skip any leading dots. */
32245 call_name = XSTR (call_ref, 0);
32246 if (*call_name == '.')
32247 {
32248 while (*call_name == '.')
32249 call_name++;
32250
32251 node = get_identifier (call_name);
32252 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32253 }
32254
32255 return force_reg (Pmode, call_ref);
32256 }
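/* For example (illustrative; far_away is a hypothetical function):

     void far_away (void) __attribute__ ((longcall));

   forces calls to far_away through a register and the count register
   (mtctr/bctrl) instead of a direct "bl", whose +/-32MB displacement
   might not reach the callee.  */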
32257 \f
32258 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32259 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32260 #endif
32261
32262 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32263 struct attribute_spec.handler. */
32264 static tree
32265 rs6000_handle_struct_attribute (tree *node, tree name,
32266 tree args ATTRIBUTE_UNUSED,
32267 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32268 {
32269 tree *type = NULL;
32270 if (DECL_P (*node))
32271 {
32272 if (TREE_CODE (*node) == TYPE_DECL)
32273 type = &TREE_TYPE (*node);
32274 }
32275 else
32276 type = node;
32277
32278 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32279 || TREE_CODE (*type) == UNION_TYPE)))
32280 {
32281 warning (OPT_Wattributes, "%qE attribute ignored", name);
32282 *no_add_attrs = true;
32283 }
32284
32285 else if ((is_attribute_p ("ms_struct", name)
32286 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32287 || ((is_attribute_p ("gcc_struct", name)
32288 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32289 {
32290 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32291 name);
32292 *no_add_attrs = true;
32293 }
32294
32295 return NULL_TREE;
32296 }
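/* For example (illustrative):

     struct __attribute__ ((ms_struct)) S { char c; int i : 8; };

   requests Microsoft-compatible record and bitfield layout for S, while
   gcc_struct requests the native layout; the handler above warns and
   drops the attribute if the two are combined on one type.  */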
32297
32298 static bool
32299 rs6000_ms_bitfield_layout_p (const_tree record_type)
32300 {
32301 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32302 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32303 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32304 }
32305 \f
32306 #ifdef USING_ELFOS_H
32307
32308 /* A get_unnamed_section callback, used for switching to toc_section. */
32309
32310 static void
32311 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32312 {
32313 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32314 && TARGET_MINIMAL_TOC)
32315 {
32316 if (!toc_initialized)
32317 {
32318 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32319 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32320 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32321 fprintf (asm_out_file, "\t.tc ");
32322 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32323 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32324 fprintf (asm_out_file, "\n");
32325
32326 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32327 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32328 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32329 fprintf (asm_out_file, " = .+32768\n");
32330 toc_initialized = 1;
32331 }
32332 else
32333 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32334 }
32335 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32336 {
32337 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32338 if (!toc_initialized)
32339 {
32340 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32341 toc_initialized = 1;
32342 }
32343 }
32344 else
32345 {
32346 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32347 if (!toc_initialized)
32348 {
32349 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32350 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32351 fprintf (asm_out_file, " = .+32768\n");
32352 toc_initialized = 1;
32353 }
32354 }
32355 }
32356
32357 /* Implement TARGET_ASM_INIT_SECTIONS. */
32358
32359 static void
32360 rs6000_elf_asm_init_sections (void)
32361 {
32362 toc_section
32363 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32364
32365 sdata2_section
32366 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32367 SDATA2_SECTION_ASM_OP);
32368 }
32369
32370 /* Implement TARGET_SELECT_RTX_SECTION. */
32371
32372 static section *
32373 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32374 unsigned HOST_WIDE_INT align)
32375 {
32376 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32377 return toc_section;
32378 else
32379 return default_elf_select_rtx_section (mode, x, align);
32380 }
32381 \f
32382 /* For a SYMBOL_REF, set generic flags and then perform some
32383 target-specific processing.
32384
32385 When the AIX ABI is requested on a non-AIX system, replace the
32386 function name with the real name (with a leading .) rather than the
32387 function descriptor name. This saves a lot of overriding code to
32388 read the prefixes. */
32389
32390 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32391 static void
32392 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32393 {
32394 default_encode_section_info (decl, rtl, first);
32395
32396 if (first
32397 && TREE_CODE (decl) == FUNCTION_DECL
32398 && !TARGET_AIX
32399 && DEFAULT_ABI == ABI_AIX)
32400 {
32401 rtx sym_ref = XEXP (rtl, 0);
32402 size_t len = strlen (XSTR (sym_ref, 0));
32403 char *str = XALLOCAVEC (char, len + 2);
32404 str[0] = '.';
32405 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32406 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32407 }
32408 }
32409
32410 static inline bool
32411 compare_section_name (const char *section, const char *templ)
32412 {
32413 int len;
32414
32415 len = strlen (templ);
32416 return (strncmp (section, templ, len) == 0
32417 && (section[len] == 0 || section[len] == '.'));
32418 }
32419
32420 bool
32421 rs6000_elf_in_small_data_p (const_tree decl)
32422 {
32423 if (rs6000_sdata == SDATA_NONE)
32424 return false;
32425
32426 /* We want to merge strings, so we never consider them small data. */
32427 if (TREE_CODE (decl) == STRING_CST)
32428 return false;
32429
32430 /* Functions are never in the small data area. */
32431 if (TREE_CODE (decl) == FUNCTION_DECL)
32432 return false;
32433
32434 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32435 {
32436 const char *section = DECL_SECTION_NAME (decl);
32437 if (compare_section_name (section, ".sdata")
32438 || compare_section_name (section, ".sdata2")
32439 || compare_section_name (section, ".gnu.linkonce.s")
32440 || compare_section_name (section, ".sbss")
32441 || compare_section_name (section, ".sbss2")
32442 || compare_section_name (section, ".gnu.linkonce.sb")
32443 || strcmp (section, ".PPC.EMB.sdata0") == 0
32444 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32445 return true;
32446 }
32447 else
32448 {
32449 /* If we are told not to put readonly data in sdata, then don't. */
32450 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32451 && !rs6000_readonly_in_sdata)
32452 return false;
32453
32454 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32455
32456 if (size > 0
32457 && size <= g_switch_value
32458 /* If it's not public, and we're not going to reference it there,
32459 there's no need to put it in the small data section. */
32460 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32461 return true;
32462 }
32463
32464 return false;
32465 }
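/* For example (illustrative): with -G 8 (g_switch_value == 8), an 8-byte
   global such as

     long long counter;

   is placed in the small data area, while larger objects, string
   constants and functions never are.  */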
32466
32467 #endif /* USING_ELFOS_H */
32468 \f
32469 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32470
32471 static bool
32472 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32473 {
32474 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32475 }
32476
32477 /* Do not place thread-local symbols refs in the object blocks. */
32478
32479 static bool
32480 rs6000_use_blocks_for_decl_p (const_tree decl)
32481 {
32482 return !DECL_THREAD_LOCAL_P (decl);
32483 }
32484 \f
32485 /* Return a REG that occurs in ADDR with coefficient 1.
32486 ADDR can be effectively incremented by incrementing REG.
32487
32488 r0 is special and we must not select it as an address
32489 register by this routine since our caller will try to
32490 increment the returned register via an "la" instruction. */
32491
32492 rtx
32493 find_addr_reg (rtx addr)
32494 {
32495 while (GET_CODE (addr) == PLUS)
32496 {
32497 if (GET_CODE (XEXP (addr, 0)) == REG
32498 && REGNO (XEXP (addr, 0)) != 0)
32499 addr = XEXP (addr, 0);
32500 else if (GET_CODE (XEXP (addr, 1)) == REG
32501 && REGNO (XEXP (addr, 1)) != 0)
32502 addr = XEXP (addr, 1);
32503 else if (CONSTANT_P (XEXP (addr, 0)))
32504 addr = XEXP (addr, 1);
32505 else if (CONSTANT_P (XEXP (addr, 1)))
32506 addr = XEXP (addr, 0);
32507 else
32508 gcc_unreachable ();
32509 }
32510 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32511 return addr;
32512 }
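/* For example (illustrative): given ADDR = (plus (plus (reg r9) (reg r3))
   (const_int 8)), the loop above first drops the constant term and then
   returns r9, the first register operand it finds that is not r0.  */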
32513
32514 void
32515 rs6000_fatal_bad_address (rtx op)
32516 {
32517 fatal_insn ("bad address", op);
32518 }
32519
32520 #if TARGET_MACHO
32521
32522 typedef struct branch_island_d {
32523 tree function_name;
32524 tree label_name;
32525 int line_number;
32526 } branch_island;
32527
32528
32529 static vec<branch_island, va_gc> *branch_islands;
32530
32531 /* Remember to generate a branch island for far calls to the given
32532 function. */
32533
32534 static void
32535 add_compiler_branch_island (tree label_name, tree function_name,
32536 int line_number)
32537 {
32538 branch_island bi = {function_name, label_name, line_number};
32539 vec_safe_push (branch_islands, bi);
32540 }
32541
32542 /* Generate far-jump branch islands for everything recorded in
32543 branch_islands. Invoked immediately after the last instruction of
32544 the epilogue has been emitted; the branch islands must be appended
32545 to, and contiguous with, the function body. Mach-O stubs are
32546 generated in machopic_output_stub(). */
32547
32548 static void
32549 macho_branch_islands (void)
32550 {
32551 char tmp_buf[512];
32552
32553 while (!vec_safe_is_empty (branch_islands))
32554 {
32555 branch_island *bi = &branch_islands->last ();
32556 const char *label = IDENTIFIER_POINTER (bi->label_name);
32557 const char *name = IDENTIFIER_POINTER (bi->function_name);
32558 char name_buf[512];
32559 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32560 if (name[0] == '*' || name[0] == '&')
32561 strcpy (name_buf, name+1);
32562 else
32563 {
32564 name_buf[0] = '_';
32565 strcpy (name_buf+1, name);
32566 }
32567 strcpy (tmp_buf, "\n");
32568 strcat (tmp_buf, label);
32569 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32570 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32571 dbxout_stabd (N_SLINE, bi->line_number);
32572 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32573 if (flag_pic)
32574 {
32575 if (TARGET_LINK_STACK)
32576 {
32577 char name[32];
32578 get_ppc476_thunk_name (name);
32579 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32580 strcat (tmp_buf, name);
32581 strcat (tmp_buf, "\n");
32582 strcat (tmp_buf, label);
32583 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32584 }
32585 else
32586 {
32587 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32588 strcat (tmp_buf, label);
32589 strcat (tmp_buf, "_pic\n");
32590 strcat (tmp_buf, label);
32591 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32592 }
32593
32594 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32595 strcat (tmp_buf, name_buf);
32596 strcat (tmp_buf, " - ");
32597 strcat (tmp_buf, label);
32598 strcat (tmp_buf, "_pic)\n");
32599
32600 strcat (tmp_buf, "\tmtlr r0\n");
32601
32602 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32603 strcat (tmp_buf, name_buf);
32604 strcat (tmp_buf, " - ");
32605 strcat (tmp_buf, label);
32606 strcat (tmp_buf, "_pic)\n");
32607
32608 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32609 }
32610 else
32611 {
32612 strcat (tmp_buf, ":\nlis r12,hi16(");
32613 strcat (tmp_buf, name_buf);
32614 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32615 strcat (tmp_buf, name_buf);
32616 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32617 }
32618 output_asm_insn (tmp_buf, 0);
32619 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32620 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32621 dbxout_stabd (N_SLINE, bi->line_number);
32622 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32623 branch_islands->pop ();
32624 }
32625 }
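/* For a function foo, the non-PIC island emitted above looks like
   (illustrative):

     L42:
         lis r12,hi16(_foo)
         ori r12,r12,lo16(_foo)
         mtctr r12
         bctr

   i.e. load the full 32-bit address into r12 and jump through the count
   register; the PIC variant instead computes the address relative to a
   point obtained with mflr.  */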
32626
32627 /* NO_PREVIOUS_DEF checks whether the function name is already in the
32628 list of branch islands. */
32629
32630 static int
32631 no_previous_def (tree function_name)
32632 {
32633 branch_island *bi;
32634 unsigned ix;
32635
32636 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32637 if (function_name == bi->function_name)
32638 return 0;
32639 return 1;
32640 }
32641
32642 /* GET_PREV_LABEL gets the label name from the previous definition of
32643 the function. */
32644
32645 static tree
32646 get_prev_label (tree function_name)
32647 {
32648 branch_island *bi;
32649 unsigned ix;
32650
32651 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32652 if (function_name == bi->function_name)
32653 return bi->label_name;
32654 return NULL_TREE;
32655 }
32656
32657 /* INSN is either a function call or a millicode call.
32658
32659 OPERANDS[DEST_OPERAND_NUMBER] is the routine we are calling;
32660 OPERANDS[COOKIE_OPERAND_NUMBER] holds the call cookie flags. */
32661
32662 char *
32663 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32664 int cookie_operand_number)
32665 {
32666 static char buf[256];
32667 if (darwin_emit_branch_islands
32668 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32669 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32670 {
32671 tree labelname;
32672 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32673
32674 if (no_previous_def (funname))
32675 {
32676 rtx label_rtx = gen_label_rtx ();
32677 char *label_buf, temp_buf[256];
32678 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32679 CODE_LABEL_NUMBER (label_rtx));
32680 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32681 labelname = get_identifier (label_buf);
32682 add_compiler_branch_island (labelname, funname, insn_line (insn));
32683 }
32684 else
32685 labelname = get_prev_label (funname);
32686
32687 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32688 instruction will reach 'foo', otherwise link as 'bl L42'".
32689 "L42" should be a 'branch island', that will do a far jump to
32690 'foo'. Branch islands are generated in
32691 macho_branch_islands(). */
32692 sprintf (buf, "jbsr %%z%d,%.246s",
32693 dest_operand_number, IDENTIFIER_POINTER (labelname));
32694 }
32695 else
32696 sprintf (buf, "bl %%z%d", dest_operand_number);
32697 return buf;
32698 }
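/* For illustration (a sketch, not verbatim compiler output): with the
   placeholder names "foo" and "L42", the jbsr form above asks the
   linker for "bl _foo" when "foo" is within branch range and for
   "bl L42" otherwise, where L42 is an island like the non-PIC one
   built by macho_branch_islands ():

	jbsr _foo,L42
	...
   L42:	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr  */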
32699
32700 /* Generate PIC and indirect symbol stubs. */
32701
32702 void
32703 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32704 {
32705 unsigned int length;
32706 char *symbol_name, *lazy_ptr_name;
32707 char *local_label_0;
32708 static int label = 0;
32709
32710 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32711 symb = (*targetm.strip_name_encoding) (symb);
32712
32713
32714 length = strlen (symb);
32715 symbol_name = XALLOCAVEC (char, length + 32);
32716 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32717
32718 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32719 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32720
32721 if (flag_pic == 2)
32722 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32723 else
32724 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32725
32726 if (flag_pic == 2)
32727 {
32728 fprintf (file, "\t.align 5\n");
32729
32730 fprintf (file, "%s:\n", stub);
32731 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32732
32733 label++;
32734 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
32735 sprintf (local_label_0, "\"L%011d$spb\"", label);
32736
32737 fprintf (file, "\tmflr r0\n");
32738 if (TARGET_LINK_STACK)
32739 {
32740 char name[32];
32741 get_ppc476_thunk_name (name);
32742 fprintf (file, "\tbl %s\n", name);
32743 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32744 }
32745 else
32746 {
32747 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
32748 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32749 }
32750 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
32751 lazy_ptr_name, local_label_0);
32752 fprintf (file, "\tmtlr r0\n");
32753 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
32754 (TARGET_64BIT ? "ldu" : "lwzu"),
32755 lazy_ptr_name, local_label_0);
32756 fprintf (file, "\tmtctr r12\n");
32757 fprintf (file, "\tbctr\n");
32758 }
32759 else
32760 {
32761 fprintf (file, "\t.align 4\n");
32762
32763 fprintf (file, "%s:\n", stub);
32764 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32765
32766 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
32767 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
32768 (TARGET_64BIT ? "ldu" : "lwzu"),
32769 lazy_ptr_name);
32770 fprintf (file, "\tmtctr r12\n");
32771 fprintf (file, "\tbctr\n");
32772 }
32773
32774 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
32775 fprintf (file, "%s:\n", lazy_ptr_name);
32776 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32777 fprintf (file, "%sdyld_stub_binding_helper\n",
32778 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
32779 }
32780
32781 /* Legitimize PIC addresses. If the address is already
32782 position-independent, we return ORIG. Newly generated
32783 position-independent addresses go into a reg. This is REG if
32784 nonzero, otherwise we allocate register(s) as necessary. */
32785
32786 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
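/* A worked check of the range test: for X in [-0x8000, 0x7fff],
   UINTVAL (X) + 0x8000 wraps into [0, 0xffff], which is < 0x10000;
   for X = 0x8000 the sum is exactly 0x10000 and the test fails, so
   this accepts precisely the signed 16-bit immediates. */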
32787
32788 rtx
32789 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
32790 rtx reg)
32791 {
32792 rtx base, offset;
32793
32794 if (reg == NULL && !reload_completed)
32795 reg = gen_reg_rtx (Pmode);
32796
32797 if (GET_CODE (orig) == CONST)
32798 {
32799 rtx reg_temp;
32800
32801 if (GET_CODE (XEXP (orig, 0)) == PLUS
32802 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
32803 return orig;
32804
32805 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
32806
32807 /* Use a different reg for the intermediate value, as
32808 it will be marked UNCHANGING. */
32809 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
32810 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
32811 Pmode, reg_temp);
32812 offset =
32813 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
32814 Pmode, reg);
32815
32816 if (GET_CODE (offset) == CONST_INT)
32817 {
32818 if (SMALL_INT (offset))
32819 return plus_constant (Pmode, base, INTVAL (offset));
32820 else if (!reload_completed)
32821 offset = force_reg (Pmode, offset);
32822 else
32823 {
32824 rtx mem = force_const_mem (Pmode, orig);
32825 return machopic_legitimize_pic_address (mem, Pmode, reg);
32826 }
32827 }
32828 return gen_rtx_PLUS (Pmode, base, offset);
32829 }
32830
32831 /* Fall back on generic machopic code. */
32832 return machopic_legitimize_pic_address (orig, mode, reg);
32833 }
32834
32835 /* Output a .machine directive for the Darwin assembler, and call
32836 the generic start_file routine. */
32837
32838 static void
32839 rs6000_darwin_file_start (void)
32840 {
32841 static const struct
32842 {
32843 const char *arg;
32844 const char *name;
32845 HOST_WIDE_INT if_set;
32846 } mapping[] = {
32847 { "ppc64", "ppc64", MASK_64BIT },
32848 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
32849 { "power4", "ppc970", 0 },
32850 { "G5", "ppc970", 0 },
32851 { "7450", "ppc7450", 0 },
32852 { "7400", "ppc7400", MASK_ALTIVEC },
32853 { "G4", "ppc7400", 0 },
32854 { "750", "ppc750", 0 },
32855 { "740", "ppc750", 0 },
32856 { "G3", "ppc750", 0 },
32857 { "604e", "ppc604e", 0 },
32858 { "604", "ppc604", 0 },
32859 { "603e", "ppc603", 0 },
32860 { "603", "ppc603", 0 },
32861 { "601", "ppc601", 0 },
32862 { NULL, "ppc", 0 } };
32863 const char *cpu_id = "";
32864 size_t i;
32865
32866 rs6000_file_start ();
32867 darwin_file_start ();
32868
32869 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
32870
32871 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
32872 cpu_id = rs6000_default_cpu;
32873
32874 if (global_options_set.x_rs6000_cpu_index)
32875 cpu_id = processor_target_table[rs6000_cpu_index].name;
32876
32877 /* Look through the mapping array. Pick the first name that either
32878 matches the argument, has a bit set in IF_SET that is also set
32879 in the target flags, or has a NULL name. */
32880
32881 i = 0;
32882 while (mapping[i].arg != NULL
32883 && strcmp (mapping[i].arg, cpu_id) != 0
32884 && (mapping[i].if_set & rs6000_isa_flags) == 0)
32885 i++;
32886
32887 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
32888 }
32889
32890 #endif /* TARGET_MACHO */
32891
32892 #if TARGET_ELF
32893 static int
32894 rs6000_elf_reloc_rw_mask (void)
32895 {
32896 if (flag_pic)
32897 return 3;
32898 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32899 return 2;
32900 else
32901 return 0;
32902 }
32903
32904 /* Record an element in the table of global constructors. SYMBOL is
32905 a SYMBOL_REF of the function to be called; PRIORITY is a number
32906 between 0 and MAX_INIT_PRIORITY.
32907
32908 This differs from default_named_section_asm_out_constructor in
32909 that we have special handling for -mrelocatable. */
32910
32911 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
32912 static void
32913 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
32914 {
32915 const char *section = ".ctors";
32916 char buf[18];
32917
32918 if (priority != DEFAULT_INIT_PRIORITY)
32919 {
32920 sprintf (buf, ".ctors.%.5u",
32921 /* Invert the numbering so the linker puts us in the proper
32922 order; constructors are run from right to left, and the
32923 linker sorts in increasing order. */
32924 MAX_INIT_PRIORITY - priority);
32925 section = buf;
32926 }
32927
32928 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32929 assemble_align (POINTER_SIZE);
32930
32931 if (DEFAULT_ABI == ABI_V4
32932 && (TARGET_RELOCATABLE || flag_pic > 1))
32933 {
32934 fputs ("\t.long (", asm_out_file);
32935 output_addr_const (asm_out_file, symbol);
32936 fputs (")@fixup\n", asm_out_file);
32937 }
32938 else
32939 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
32940 }
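/* A worked example, assuming MAX_INIT_PRIORITY is 65535: a constructor
   with priority 100 lands in ".ctors.65435" and one with priority 200
   in ".ctors.65335".  The linker's increasing sort places 65335 first,
   and because the list is run from right to left, priority 100 still
   runs before priority 200, as required. */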
32941
32942 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
32943 static void
32944 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
32945 {
32946 const char *section = ".dtors";
32947 char buf[18];
32948
32949 if (priority != DEFAULT_INIT_PRIORITY)
32950 {
32951 sprintf (buf, ".dtors.%.5u",
32952 /* Invert the numbering so the linker puts us in the proper
32953 order; constructors are run from right to left, and the
32954 linker sorts in increasing order. */
32955 MAX_INIT_PRIORITY - priority);
32956 section = buf;
32957 }
32958
32959 switch_to_section (get_section (section, SECTION_WRITE, NULL));
32960 assemble_align (POINTER_SIZE);
32961
32962 if (DEFAULT_ABI == ABI_V4
32963 && (TARGET_RELOCATABLE || flag_pic > 1))
32964 {
32965 fputs ("\t.long (", asm_out_file);
32966 output_addr_const (asm_out_file, symbol);
32967 fputs (")@fixup\n", asm_out_file);
32968 }
32969 else
32970 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
32971 }
32972
32973 void
32974 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
32975 {
32976 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
32977 {
32978 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
32979 ASM_OUTPUT_LABEL (file, name);
32980 fputs (DOUBLE_INT_ASM_OP, file);
32981 rs6000_output_function_entry (file, name);
32982 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
32983 if (DOT_SYMBOLS)
32984 {
32985 fputs ("\t.size\t", file);
32986 assemble_name (file, name);
32987 fputs (",24\n\t.type\t.", file);
32988 assemble_name (file, name);
32989 fputs (",@function\n", file);
32990 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
32991 {
32992 fputs ("\t.globl\t.", file);
32993 assemble_name (file, name);
32994 putc ('\n', file);
32995 }
32996 }
32997 else
32998 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
32999 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33000 rs6000_output_function_entry (file, name);
33001 fputs (":\n", file);
33002 return;
33003 }
33004
33005 int uses_toc;
33006 if (DEFAULT_ABI == ABI_V4
33007 && (TARGET_RELOCATABLE || flag_pic > 1)
33008 && !TARGET_SECURE_PLT
33009 && (!constant_pool_empty_p () || crtl->profile)
33010 && (uses_toc = uses_TOC ()))
33011 {
33012 char buf[256];
33013
33014 if (uses_toc == 2)
33015 switch_to_other_text_partition ();
33016 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33017
33018 fprintf (file, "\t.long ");
33019 assemble_name (file, toc_label_name);
33020 need_toc_init = 1;
33021 putc ('-', file);
33022 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33023 assemble_name (file, buf);
33024 putc ('\n', file);
33025 if (uses_toc == 2)
33026 switch_to_other_text_partition ();
33027 }
33028
33029 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33030 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33031
33032 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33033 {
33034 char buf[256];
33035
33036 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33037
33038 fprintf (file, "\t.quad .TOC.-");
33039 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33040 assemble_name (file, buf);
33041 putc ('\n', file);
33042 }
33043
33044 if (DEFAULT_ABI == ABI_AIX)
33045 {
33046 const char *desc_name, *orig_name;
33047
33048 orig_name = (*targetm.strip_name_encoding) (name);
33049 desc_name = orig_name;
33050 while (*desc_name == '.')
33051 desc_name++;
33052
33053 if (TREE_PUBLIC (decl))
33054 fprintf (file, "\t.globl %s\n", desc_name);
33055
33056 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33057 fprintf (file, "%s:\n", desc_name);
33058 fprintf (file, "\t.long %s\n", orig_name);
33059 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33060 fputs ("\t.long 0\n", file);
33061 fprintf (file, "\t.previous\n");
33062 }
33063 ASM_OUTPUT_LABEL (file, name);
33064 }
33065
33066 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33067 static void
33068 rs6000_elf_file_end (void)
33069 {
33070 #ifdef HAVE_AS_GNU_ATTRIBUTE
33071 /* ??? The value emitted depends on options active at file end.
33072 Assume anyone using #pragma or attributes that might change
33073 options knows what they are doing. */
33074 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33075 && rs6000_passes_float)
33076 {
33077 int fp;
33078
33079 if (TARGET_HARD_FLOAT)
33080 fp = 1;
33081 else
33082 fp = 2;
33083 if (rs6000_passes_long_double)
33084 {
33085 if (!TARGET_LONG_DOUBLE_128)
33086 fp |= 2 * 4;
33087 else if (TARGET_IEEEQUAD)
33088 fp |= 3 * 4;
33089 else
33090 fp |= 1 * 4;
33091 }
33092 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33093 }
33094 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33095 {
33096 if (rs6000_passes_vector)
33097 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33098 (TARGET_ALTIVEC_ABI ? 2 : 1));
33099 if (rs6000_returns_struct)
33100 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33101 aix_struct_return ? 2 : 1);
33102 }
33103 #endif
33104 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33105 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33106 file_end_indicate_exec_stack ();
33107 #endif
33108
33109 if (flag_split_stack)
33110 file_end_indicate_split_stack ();
33111
33112 if (cpu_builtin_p)
33113 {
33114 /* We have expanded a CPU builtin, so we need to emit a reference to
33115 the special symbol that LIBC uses to declare that it supports the
33116 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
33117 switch_to_section (data_section);
33118 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33119 fprintf (asm_out_file, "\t%s %s\n",
33120 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33121 }
33122 }
33123 #endif
33124
33125 #if TARGET_XCOFF
33126
33127 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33128 #define HAVE_XCOFF_DWARF_EXTRAS 0
33129 #endif
33130
33131 static enum unwind_info_type
33132 rs6000_xcoff_debug_unwind_info (void)
33133 {
33134 return UI_NONE;
33135 }
33136
33137 static void
33138 rs6000_xcoff_asm_output_anchor (rtx symbol)
33139 {
33140 char buffer[100];
33141
33142 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33143 SYMBOL_REF_BLOCK_OFFSET (symbol));
33144 fprintf (asm_out_file, "%s", SET_ASM_OP);
33145 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33146 fprintf (asm_out_file, ",");
33147 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33148 fprintf (asm_out_file, "\n");
33149 }
33150
33151 static void
33152 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33153 {
33154 fputs (GLOBAL_ASM_OP, stream);
33155 RS6000_OUTPUT_BASENAME (stream, name);
33156 putc ('\n', stream);
33157 }
33158
33159 /* A get_unnamed_section callback, used for read-only sections.
33160 DIRECTIVE points to the section name string variable. */
33161
33162 static void
33163 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33164 {
33165 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33166 *(const char *const *) directive,
33167 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33168 }
33169
33170 /* Likewise for read-write sections. */
33171
33172 static void
33173 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33174 {
33175 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33176 *(const char *const *) directive,
33177 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33178 }
33179
33180 static void
33181 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33182 {
33183 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33184 *(const char *const *) directive,
33185 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33186 }
33187
33188 /* A get_unnamed_section callback, used for switching to toc_section. */
33189
33190 static void
33191 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33192 {
33193 if (TARGET_MINIMAL_TOC)
33194 {
33195 /* toc_section is always selected at least once from
33196 rs6000_xcoff_file_start, so this is guaranteed to
33197 always be defined once and only once in each file. */
33198 if (!toc_initialized)
33199 {
33200 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33201 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33202 toc_initialized = 1;
33203 }
33204 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33205 (TARGET_32BIT ? "" : ",3"));
33206 }
33207 else
33208 fputs ("\t.toc\n", asm_out_file);
33209 }
33210
33211 /* Implement TARGET_ASM_INIT_SECTIONS. */
33212
33213 static void
33214 rs6000_xcoff_asm_init_sections (void)
33215 {
33216 read_only_data_section
33217 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33218 &xcoff_read_only_section_name);
33219
33220 private_data_section
33221 = get_unnamed_section (SECTION_WRITE,
33222 rs6000_xcoff_output_readwrite_section_asm_op,
33223 &xcoff_private_data_section_name);
33224
33225 tls_data_section
33226 = get_unnamed_section (SECTION_TLS,
33227 rs6000_xcoff_output_tls_section_asm_op,
33228 &xcoff_tls_data_section_name);
33229
33230 tls_private_data_section
33231 = get_unnamed_section (SECTION_TLS,
33232 rs6000_xcoff_output_tls_section_asm_op,
33233 &xcoff_private_data_section_name);
33234
33235 read_only_private_data_section
33236 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33237 &xcoff_private_data_section_name);
33238
33239 toc_section
33240 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33241
33242 readonly_data_section = read_only_data_section;
33243 }
33244
33245 static int
33246 rs6000_xcoff_reloc_rw_mask (void)
33247 {
33248 return 3;
33249 }
33250
33251 static void
33252 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33253 tree decl ATTRIBUTE_UNUSED)
33254 {
33255 int smclass;
33256 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33257
33258 if (flags & SECTION_EXCLUDE)
33259 smclass = 4;
33260 else if (flags & SECTION_DEBUG)
33261 {
33262 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33263 return;
33264 }
33265 else if (flags & SECTION_CODE)
33266 smclass = 0;
33267 else if (flags & SECTION_TLS)
33268 smclass = 3;
33269 else if (flags & SECTION_WRITE)
33270 smclass = 2;
33271 else
33272 smclass = 1;
33273
33274 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33275 (flags & SECTION_CODE) ? "." : "",
33276 name, suffix[smclass], flags & SECTION_ENTSIZE);
33277 }
33278
33279 #define IN_NAMED_SECTION(DECL) \
33280 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33281 && DECL_SECTION_NAME (DECL) != NULL)
33282
33283 static section *
33284 rs6000_xcoff_select_section (tree decl, int reloc,
33285 unsigned HOST_WIDE_INT align)
33286 {
33287 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33288 a named section. */
33289 if (align > BIGGEST_ALIGNMENT)
33290 {
33291 resolve_unique_section (decl, reloc, true);
33292 if (IN_NAMED_SECTION (decl))
33293 return get_named_section (decl, NULL, reloc);
33294 }
33295
33296 if (decl_readonly_section (decl, reloc))
33297 {
33298 if (TREE_PUBLIC (decl))
33299 return read_only_data_section;
33300 else
33301 return read_only_private_data_section;
33302 }
33303 else
33304 {
33305 #if HAVE_AS_TLS
33306 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33307 {
33308 if (TREE_PUBLIC (decl))
33309 return tls_data_section;
33310 else if (bss_initializer_p (decl))
33311 {
33312 /* Convert to COMMON to emit in BSS. */
33313 DECL_COMMON (decl) = 1;
33314 return tls_comm_section;
33315 }
33316 else
33317 return tls_private_data_section;
33318 }
33319 else
33320 #endif
33321 if (TREE_PUBLIC (decl))
33322 return data_section;
33323 else
33324 return private_data_section;
33325 }
33326 }
33327
33328 static void
33329 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33330 {
33331 const char *name;
33332
33333 /* Use select_section for private data and uninitialized data with
33334 alignment <= BIGGEST_ALIGNMENT. */
33335 if (!TREE_PUBLIC (decl)
33336 || DECL_COMMON (decl)
33337 || (DECL_INITIAL (decl) == NULL_TREE
33338 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33339 || DECL_INITIAL (decl) == error_mark_node
33340 || (flag_zero_initialized_in_bss
33341 && initializer_zerop (DECL_INITIAL (decl))))
33342 return;
33343
33344 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33345 name = (*targetm.strip_name_encoding) (name);
33346 set_decl_section_name (decl, name);
33347 }
33348
33349 /* Select section for constant in constant pool.
33350
33351 On RS/6000, all constants are in the private read-only data area.
33352 However, if this is being placed in the TOC it must be output as a
33353 toc entry. */
33354
33355 static section *
33356 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33357 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33358 {
33359 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33360 return toc_section;
33361 else
33362 return read_only_private_data_section;
33363 }
33364
33365 /* Remove any trailing [DS] or the like from the symbol name. */
33366
33367 static const char *
33368 rs6000_xcoff_strip_name_encoding (const char *name)
33369 {
33370 size_t len;
33371 if (*name == '*')
33372 name++;
33373 len = strlen (name);
33374 if (name[len - 1] == ']')
33375 return ggc_alloc_string (name, len - 4);
33376 else
33377 return name;
33378 }
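/* For example, "foo[DS]" comes back as "foo": the mapping-class
   suffixes are all four characters, and a leading '*' is skipped
   before the length check. */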
33379
33380 /* Section attributes. AIX is always PIC. */
33381
33382 static unsigned int
33383 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33384 {
33385 unsigned int align;
33386 unsigned int flags = default_section_type_flags (decl, name, reloc);
33387
33388 /* Align to at least UNIT size. */
33389 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33390 align = MIN_UNITS_PER_WORD;
33391 else
33392 /* Increase alignment of large objects if not already stricter. */
33393 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33394 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33395 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33396
33397 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33398 }
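/* A worked example: a word-aligned data object bigger than
   MIN_UNITS_PER_WORD gets align = UNITS_PER_FP_WORD (8 bytes on these
   targets), and exact_log2 (8) == 3 is what ends up encoded in the
   SECTION_ENTSIZE bits of the returned flags. */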
33399
33400 /* Output at beginning of assembler file.
33401
33402 Initialize the section names for the RS/6000 at this point.
33403
33404 Specify filename, including full path, to assembler.
33405
33406 We want to go into the TOC section so at least one .toc will be emitted.
33407 Also, in order to output proper .bs/.es pairs, we need at least one static
33408 [RW] section emitted.
33409
33410 Finally, declare mcount when profiling to make the assembler happy. */
33411
33412 static void
33413 rs6000_xcoff_file_start (void)
33414 {
33415 rs6000_gen_section_name (&xcoff_bss_section_name,
33416 main_input_filename, ".bss_");
33417 rs6000_gen_section_name (&xcoff_private_data_section_name,
33418 main_input_filename, ".rw_");
33419 rs6000_gen_section_name (&xcoff_read_only_section_name,
33420 main_input_filename, ".ro_");
33421 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33422 main_input_filename, ".tls_");
33423 rs6000_gen_section_name (&xcoff_tbss_section_name,
33424 main_input_filename, ".tbss_[UL]");
33425
33426 fputs ("\t.file\t", asm_out_file);
33427 output_quoted_string (asm_out_file, main_input_filename);
33428 fputc ('\n', asm_out_file);
33429 if (write_symbols != NO_DEBUG)
33430 switch_to_section (private_data_section);
33431 switch_to_section (toc_section);
33432 switch_to_section (text_section);
33433 if (profile_flag)
33434 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33435 rs6000_file_start ();
33436 }
33437
33438 /* Output at end of assembler file.
33439 On the RS/6000, referencing data should automatically pull in text. */
33440
33441 static void
33442 rs6000_xcoff_file_end (void)
33443 {
33444 switch_to_section (text_section);
33445 fputs ("_section_.text:\n", asm_out_file);
33446 switch_to_section (data_section);
33447 fputs (TARGET_32BIT
33448 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33449 asm_out_file);
33450 }
33451
33452 struct declare_alias_data
33453 {
33454 FILE *file;
33455 bool function_descriptor;
33456 };
33457
33458 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33459
33460 static bool
33461 rs6000_declare_alias (struct symtab_node *n, void *d)
33462 {
33463 struct declare_alias_data *data = (struct declare_alias_data *)d;
33464 /* The main symbol is output specially, because the varasm machinery does
33465 part of the job for us - we do not need to emit .globl/.lglobl and such. */
33466 if (!n->alias || n->weakref)
33467 return false;
33468
33469 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33470 return false;
33471
33472 /* Prevent assemble_alias from trying to use .set pseudo operation
33473 that does not behave as expected by the middle-end. */
33474 TREE_ASM_WRITTEN (n->decl) = true;
33475
33476 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33477 char *buffer = (char *) alloca (strlen (name) + 2);
33478 char *p;
33479 int dollar_inside = 0;
33480
33481 strcpy (buffer, name);
33482 p = strchr (buffer, '$');
33483 while (p) {
33484 *p = '_';
33485 dollar_inside++;
33486 p = strchr (p + 1, '$');
33487 }
33488 if (TREE_PUBLIC (n->decl))
33489 {
33490 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33491 {
33492 if (dollar_inside) {
33493 if (data->function_descriptor)
33494 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33495 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33496 }
33497 if (data->function_descriptor)
33498 {
33499 fputs ("\t.globl .", data->file);
33500 RS6000_OUTPUT_BASENAME (data->file, buffer);
33501 putc ('\n', data->file);
33502 }
33503 fputs ("\t.globl ", data->file);
33504 RS6000_OUTPUT_BASENAME (data->file, buffer);
33505 putc ('\n', data->file);
33506 }
33507 #ifdef ASM_WEAKEN_DECL
33508 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33509 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33510 #endif
33511 }
33512 else
33513 {
33514 if (dollar_inside)
33515 {
33516 if (data->function_descriptor)
33517 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33518 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33519 }
33520 if (data->function_descriptor)
33521 {
33522 fputs ("\t.lglobl .", data->file);
33523 RS6000_OUTPUT_BASENAME (data->file, buffer);
33524 putc ('\n', data->file);
33525 }
33526 fputs ("\t.lglobl ", data->file);
33527 RS6000_OUTPUT_BASENAME (data->file, buffer);
33528 putc ('\n', data->file);
33529 }
33530 if (data->function_descriptor)
33531 fputs (".", data->file);
33532 RS6000_OUTPUT_BASENAME (data->file, buffer);
33533 fputs (":\n", data->file);
33534 return false;
33535 }
33536
33537
33538 #ifdef HAVE_GAS_HIDDEN
33539 /* Helper function to calculate the visibility of a DECL
33540 and return it as a const string. */
33541
33542 static const char *
33543 rs6000_xcoff_visibility (tree decl)
33544 {
33545 static const char * const visibility_types[] = {
33546 "", ",protected", ",hidden", ",internal"
33547 };
33548
33549 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33550 return visibility_types[vis];
33551 }
33552 #endif
33553
33554
33555 /* This macro produces the initial definition of a function name.
33556 On the RS/6000, we need to place an extra '.' in the function name and
33557 output the function descriptor.
33558 Dollar signs are converted to underscores.
33559
33560 The csect for the function will have already been created when
33561 text_section was selected. We do have to go back to that csect, however.
33562
33563 The third and fourth parameters to the .function pseudo-op (16 and 044)
33564 are placeholders which no longer have any use.
33565
33566 Because AIX assembler's .set command has unexpected semantics, we output
33567 all aliases as alternative labels in front of the definition. */
33568
33569 void
33570 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33571 {
33572 char *buffer = (char *) alloca (strlen (name) + 1);
33573 char *p;
33574 int dollar_inside = 0;
33575 struct declare_alias_data data = {file, false};
33576
33577 strcpy (buffer, name);
33578 p = strchr (buffer, '$');
33579 while (p) {
33580 *p = '_';
33581 dollar_inside++;
33582 p = strchr (p + 1, '$');
33583 }
33584 if (TREE_PUBLIC (decl))
33585 {
33586 if (!RS6000_WEAK || !DECL_WEAK (decl))
33587 {
33588 if (dollar_inside) {
33589 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33590 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33591 }
33592 fputs ("\t.globl .", file);
33593 RS6000_OUTPUT_BASENAME (file, buffer);
33594 #ifdef HAVE_GAS_HIDDEN
33595 fputs (rs6000_xcoff_visibility (decl), file);
33596 #endif
33597 putc ('\n', file);
33598 }
33599 }
33600 else
33601 {
33602 if (dollar_inside) {
33603 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33604 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33605 }
33606 fputs ("\t.lglobl .", file);
33607 RS6000_OUTPUT_BASENAME (file, buffer);
33608 putc ('\n', file);
33609 }
33610 fputs ("\t.csect ", file);
33611 RS6000_OUTPUT_BASENAME (file, buffer);
33612 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33613 RS6000_OUTPUT_BASENAME (file, buffer);
33614 fputs (":\n", file);
33615 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33616 &data, true);
33617 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33618 RS6000_OUTPUT_BASENAME (file, buffer);
33619 fputs (", TOC[tc0], 0\n", file);
33620 in_section = NULL;
33621 switch_to_section (function_section (decl));
33622 putc ('.', file);
33623 RS6000_OUTPUT_BASENAME (file, buffer);
33624 fputs (":\n", file);
33625 data.function_descriptor = true;
33626 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33627 &data, true);
33628 if (!DECL_IGNORED_P (decl))
33629 {
33630 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33631 xcoffout_declare_function (file, decl, buffer);
33632 else if (write_symbols == DWARF2_DEBUG)
33633 {
33634 name = (*targetm.strip_name_encoding) (name);
33635 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33636 }
33637 }
33638 return;
33639 }
33640
33641
33642 /* Output assembly language to globalize a symbol from a DECL,
33643 possibly with visibility. */
33644
33645 void
33646 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33647 {
33648 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33649 fputs (GLOBAL_ASM_OP, stream);
33650 RS6000_OUTPUT_BASENAME (stream, name);
33651 #ifdef HAVE_GAS_HIDDEN
33652 fputs (rs6000_xcoff_visibility (decl), stream);
33653 #endif
33654 putc ('\n', stream);
33655 }
33656
33657 /* Output assembly language to define a symbol as COMMON from a DECL,
33658 possibly with visibility. */
33659
33660 void
33661 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33662 tree decl ATTRIBUTE_UNUSED,
33663 const char *name,
33664 unsigned HOST_WIDE_INT size,
33665 unsigned HOST_WIDE_INT align)
33666 {
33667 unsigned HOST_WIDE_INT align2 = 2;
33668
33669 if (align > 32)
33670 align2 = floor_log2 (align / BITS_PER_UNIT);
33671 else if (size > 4)
33672 align2 = 3;
33673
33674 fputs (COMMON_ASM_OP, stream);
33675 RS6000_OUTPUT_BASENAME (stream, name);
33676
33677 fprintf (stream,
33678 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33679 size, align2);
33680
33681 #ifdef HAVE_GAS_HIDDEN
33682 if (decl != NULL)
33683 fputs (rs6000_xcoff_visibility (decl), stream);
33684 #endif
33685 putc ('\n', stream);
33686 }
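/* A worked example: ALIGN is in bits and the third .comm operand is a
   log2 byte alignment, so a 16-byte-aligned object (ALIGN == 128) gets
   align2 = floor_log2 (128 / 8) = 4, while small objects default to
   align2 of 2 (4 bytes) or 3 (8 bytes). */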
33687
33688 /* This macro produces the initial definition of an object (variable) name.
33689 Because AIX assembler's .set command has unexpected semantics, we output
33690 all aliases as alternative labels in front of the definition. */
33691
33692 void
33693 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33694 {
33695 struct declare_alias_data data = {file, false};
33696 RS6000_OUTPUT_BASENAME (file, name);
33697 fputs (":\n", file);
33698 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33699 &data, true);
33700 }
33701
33702 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33703
33704 void
33705 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33706 {
33707 fputs (integer_asm_op (size, FALSE), file);
33708 assemble_name (file, label);
33709 fputs ("-$", file);
33710 }
33711
33712 /* Output a symbol offset relative to the dbase for the current object.
33713 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33714 signed offsets.
33715
33716 __gcc_unwind_dbase is embedded in all executables/libraries through
33717 libgcc/config/rs6000/crtdbase.S. */
33718
33719 void
33720 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33721 {
33722 fputs (integer_asm_op (size, FALSE), file);
33723 assemble_name (file, label);
33724 fputs("-__gcc_unwind_dbase", file);
33725 }
33726
33727 #ifdef HAVE_AS_TLS
33728 static void
33729 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33730 {
33731 rtx symbol;
33732 int flags;
33733 const char *symname;
33734
33735 default_encode_section_info (decl, rtl, first);
33736
33737 /* Careful not to prod global register variables. */
33738 if (!MEM_P (rtl))
33739 return;
33740 symbol = XEXP (rtl, 0);
33741 if (GET_CODE (symbol) != SYMBOL_REF)
33742 return;
33743
33744 flags = SYMBOL_REF_FLAGS (symbol);
33745
33746 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33747 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33748
33749 SYMBOL_REF_FLAGS (symbol) = flags;
33750
33751 /* Append mapping class to extern decls. */
33752 symname = XSTR (symbol, 0);
33753 if (decl /* sync condition with assemble_external () */
33754 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
33755 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
33756 || TREE_CODE (decl) == FUNCTION_DECL)
33757 && symname[strlen (symname) - 1] != ']')
33758 {
33759 char *newname = (char *) alloca (strlen (symname) + 5);
33760 strcpy (newname, symname);
33761 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
33762 ? "[DS]" : "[UA]"));
33763 XSTR (symbol, 0) = ggc_strdup (newname);
33764 }
33765 }
33766 #endif /* HAVE_AS_TLS */
33767 #endif /* TARGET_XCOFF */
33768
33769 void
33770 rs6000_asm_weaken_decl (FILE *stream, tree decl,
33771 const char *name, const char *val)
33772 {
33773 fputs ("\t.weak\t", stream);
33774 RS6000_OUTPUT_BASENAME (stream, name);
33775 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33776 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33777 {
33778 if (TARGET_XCOFF)
33779 fputs ("[DS]", stream);
33780 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33781 if (TARGET_XCOFF)
33782 fputs (rs6000_xcoff_visibility (decl), stream);
33783 #endif
33784 fputs ("\n\t.weak\t.", stream);
33785 RS6000_OUTPUT_BASENAME (stream, name);
33786 }
33787 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33788 if (TARGET_XCOFF)
33789 fputs (rs6000_xcoff_visibility (decl), stream);
33790 #endif
33791 fputc ('\n', stream);
33792 if (val)
33793 {
33794 #ifdef ASM_OUTPUT_DEF
33795 ASM_OUTPUT_DEF (stream, name, val);
33796 #endif
33797 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33798 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33799 {
33800 fputs ("\t.set\t.", stream);
33801 RS6000_OUTPUT_BASENAME (stream, name);
33802 fputs (",.", stream);
33803 RS6000_OUTPUT_BASENAME (stream, val);
33804 fputc ('\n', stream);
33805 }
33806 }
33807 }
33808
33809
33810 /* Return true if INSN should not be copied. */
33811
33812 static bool
33813 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33814 {
33815 return recog_memoized (insn) >= 0
33816 && get_attr_cannot_copy (insn);
33817 }
33818
33819 /* Compute a (partial) cost for rtx X. Return true if the complete
33820 cost has been computed, and false if subexpressions should be
33821 scanned. In either case, *TOTAL contains the cost result. */
33822
33823 static bool
33824 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33825 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33826 {
33827 int code = GET_CODE (x);
33828
33829 switch (code)
33830 {
33831 /* On the RS/6000, if it is valid in the insn, it is free. */
33832 case CONST_INT:
33833 if (((outer_code == SET
33834 || outer_code == PLUS
33835 || outer_code == MINUS)
33836 && (satisfies_constraint_I (x)
33837 || satisfies_constraint_L (x)))
33838 || (outer_code == AND
33839 && (satisfies_constraint_K (x)
33840 || (mode == SImode
33841 ? satisfies_constraint_L (x)
33842 : satisfies_constraint_J (x))))
33843 || ((outer_code == IOR || outer_code == XOR)
33844 && (satisfies_constraint_K (x)
33845 || (mode == SImode
33846 ? satisfies_constraint_L (x)
33847 : satisfies_constraint_J (x))))
33848 || outer_code == ASHIFT
33849 || outer_code == ASHIFTRT
33850 || outer_code == LSHIFTRT
33851 || outer_code == ROTATE
33852 || outer_code == ROTATERT
33853 || outer_code == ZERO_EXTRACT
33854 || (outer_code == MULT
33855 && satisfies_constraint_I (x))
33856 || ((outer_code == DIV || outer_code == UDIV
33857 || outer_code == MOD || outer_code == UMOD)
33858 && exact_log2 (INTVAL (x)) >= 0)
33859 || (outer_code == COMPARE
33860 && (satisfies_constraint_I (x)
33861 || satisfies_constraint_K (x)))
33862 || ((outer_code == EQ || outer_code == NE)
33863 && (satisfies_constraint_I (x)
33864 || satisfies_constraint_K (x)
33865 || (mode == SImode
33866 ? satisfies_constraint_L (x)
33867 : satisfies_constraint_J (x))))
33868 || (outer_code == GTU
33869 && satisfies_constraint_I (x))
33870 || (outer_code == LTU
33871 && satisfies_constraint_P (x)))
33872 {
33873 *total = 0;
33874 return true;
33875 }
33876 else if ((outer_code == PLUS
33877 && reg_or_add_cint_operand (x, VOIDmode))
33878 || (outer_code == MINUS
33879 && reg_or_sub_cint_operand (x, VOIDmode))
33880 || ((outer_code == SET
33881 || outer_code == IOR
33882 || outer_code == XOR)
33883 && (INTVAL (x)
33884 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
33885 {
33886 *total = COSTS_N_INSNS (1);
33887 return true;
33888 }
33889 /* FALLTHRU */
33890
33891 case CONST_DOUBLE:
33892 case CONST_WIDE_INT:
33893 case CONST:
33894 case HIGH:
33895 case SYMBOL_REF:
33896 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
33897 return true;
33898
33899 case MEM:
33900 /* When optimizing for size, MEM should be slightly more expensive
33901 than generating an address, e.g., (plus (reg) (const)).
33902 L1 cache latency is about two instructions. */
33903 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
33904 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
33905 *total += COSTS_N_INSNS (100);
33906 return true;
33907
33908 case LABEL_REF:
33909 *total = 0;
33910 return true;
33911
33912 case PLUS:
33913 case MINUS:
33914 if (FLOAT_MODE_P (mode))
33915 *total = rs6000_cost->fp;
33916 else
33917 *total = COSTS_N_INSNS (1);
33918 return false;
33919
33920 case MULT:
33921 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33922 && satisfies_constraint_I (XEXP (x, 1)))
33923 {
33924 if (INTVAL (XEXP (x, 1)) >= -256
33925 && INTVAL (XEXP (x, 1)) <= 255)
33926 *total = rs6000_cost->mulsi_const9;
33927 else
33928 *total = rs6000_cost->mulsi_const;
33929 }
33930 else if (mode == SFmode)
33931 *total = rs6000_cost->fp;
33932 else if (FLOAT_MODE_P (mode))
33933 *total = rs6000_cost->dmul;
33934 else if (mode == DImode)
33935 *total = rs6000_cost->muldi;
33936 else
33937 *total = rs6000_cost->mulsi;
33938 return false;
33939
33940 case FMA:
33941 if (mode == SFmode)
33942 *total = rs6000_cost->fp;
33943 else
33944 *total = rs6000_cost->dmul;
33945 break;
33946
33947 case DIV:
33948 case MOD:
33949 if (FLOAT_MODE_P (mode))
33950 {
33951 *total = mode == DFmode ? rs6000_cost->ddiv
33952 : rs6000_cost->sdiv;
33953 return false;
33954 }
33955 /* FALLTHRU */
33956
33957 case UDIV:
33958 case UMOD:
33959 if (GET_CODE (XEXP (x, 1)) == CONST_INT
33960 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
33961 {
33962 if (code == DIV || code == MOD)
33963 /* Shift, addze */
33964 *total = COSTS_N_INSNS (2);
33965 else
33966 /* Shift */
33967 *total = COSTS_N_INSNS (1);
33968 }
33969 else
33970 {
33971 if (GET_MODE (XEXP (x, 1)) == DImode)
33972 *total = rs6000_cost->divdi;
33973 else
33974 *total = rs6000_cost->divsi;
33975 }
33976 /* Add in shift and subtract for MOD unless we have a mod instruction. */
33977 if (!TARGET_MODULO && (code == MOD || code == UMOD))
33978 *total += COSTS_N_INSNS (2);
33979 return false;
33980
33981 case CTZ:
33982 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
33983 return false;
33984
33985 case FFS:
33986 *total = COSTS_N_INSNS (4);
33987 return false;
33988
33989 case POPCOUNT:
33990 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
33991 return false;
33992
33993 case PARITY:
33994 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
33995 return false;
33996
33997 case NOT:
33998 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
33999 *total = 0;
34000 else
34001 *total = COSTS_N_INSNS (1);
34002 return false;
34003
34004 case AND:
34005 if (CONST_INT_P (XEXP (x, 1)))
34006 {
34007 rtx left = XEXP (x, 0);
34008 rtx_code left_code = GET_CODE (left);
34009
34010 /* rotate-and-mask: 1 insn. */
34011 if ((left_code == ROTATE
34012 || left_code == ASHIFT
34013 || left_code == LSHIFTRT)
34014 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34015 {
34016 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34017 if (!CONST_INT_P (XEXP (left, 1)))
34018 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34019 *total += COSTS_N_INSNS (1);
34020 return true;
34021 }
34022
34023 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34024 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34025 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34026 || (val & 0xffff) == val
34027 || (val & 0xffff0000) == val
34028 || ((val & 0xffff) == 0 && mode == SImode))
34029 {
34030 *total = rtx_cost (left, mode, AND, 0, speed);
34031 *total += COSTS_N_INSNS (1);
34032 return true;
34033 }
34034
34035 /* 2 insns. */
34036 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34037 {
34038 *total = rtx_cost (left, mode, AND, 0, speed);
34039 *total += COSTS_N_INSNS (2);
34040 return true;
34041 }
34042 }
34043
34044 *total = COSTS_N_INSNS (1);
34045 return false;
34046
34047 case IOR:
34048 /* FIXME */
34049 *total = COSTS_N_INSNS (1);
34050 return true;
34051
34052 case CLZ:
34053 case XOR:
34054 case ZERO_EXTRACT:
34055 *total = COSTS_N_INSNS (1);
34056 return false;
34057
34058 case ASHIFT:
34059 /* The EXTSWSLI instruction combines a sign extend and a shift; don't
34060 count the sign extend and the shift separately within the insn. */
34061 if (TARGET_EXTSWSLI && mode == DImode
34062 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34063 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34064 {
34065 *total = 0;
34066 return false;
34067 }
34068 /* fall through */
34069
34070 case ASHIFTRT:
34071 case LSHIFTRT:
34072 case ROTATE:
34073 case ROTATERT:
34074 /* Handle mul_highpart. */
34075 if (outer_code == TRUNCATE
34076 && GET_CODE (XEXP (x, 0)) == MULT)
34077 {
34078 if (mode == DImode)
34079 *total = rs6000_cost->muldi;
34080 else
34081 *total = rs6000_cost->mulsi;
34082 return true;
34083 }
34084 else if (outer_code == AND)
34085 *total = 0;
34086 else
34087 *total = COSTS_N_INSNS (1);
34088 return false;
34089
34090 case SIGN_EXTEND:
34091 case ZERO_EXTEND:
34092 if (GET_CODE (XEXP (x, 0)) == MEM)
34093 *total = 0;
34094 else
34095 *total = COSTS_N_INSNS (1);
34096 return false;
34097
34098 case COMPARE:
34099 case NEG:
34100 case ABS:
34101 if (!FLOAT_MODE_P (mode))
34102 {
34103 *total = COSTS_N_INSNS (1);
34104 return false;
34105 }
34106 /* FALLTHRU */
34107
34108 case FLOAT:
34109 case UNSIGNED_FLOAT:
34110 case FIX:
34111 case UNSIGNED_FIX:
34112 case FLOAT_TRUNCATE:
34113 *total = rs6000_cost->fp;
34114 return false;
34115
34116 case FLOAT_EXTEND:
34117 if (mode == DFmode)
34118 *total = rs6000_cost->sfdf_convert;
34119 else
34120 *total = rs6000_cost->fp;
34121 return false;
34122
34123 case UNSPEC:
34124 switch (XINT (x, 1))
34125 {
34126 case UNSPEC_FRSP:
34127 *total = rs6000_cost->fp;
34128 return true;
34129
34130 default:
34131 break;
34132 }
34133 break;
34134
34135 case CALL:
34136 case IF_THEN_ELSE:
34137 if (!speed)
34138 {
34139 *total = COSTS_N_INSNS (1);
34140 return true;
34141 }
34142 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34143 {
34144 *total = rs6000_cost->fp;
34145 return false;
34146 }
34147 break;
34148
34149 case NE:
34150 case EQ:
34151 case GTU:
34152 case LTU:
34153 /* Carry bit requires mode == Pmode.
34154 NEG or PLUS already counted so only add one. */
34155 if (mode == Pmode
34156 && (outer_code == NEG || outer_code == PLUS))
34157 {
34158 *total = COSTS_N_INSNS (1);
34159 return true;
34160 }
34161 /* FALLTHRU */
34162
34163 case GT:
34164 case LT:
34165 case UNORDERED:
34166 if (outer_code == SET)
34167 {
34168 if (XEXP (x, 1) == const0_rtx)
34169 {
34170 *total = COSTS_N_INSNS (2);
34171 return true;
34172 }
34173 else
34174 {
34175 *total = COSTS_N_INSNS (3);
34176 return false;
34177 }
34178 }
34179 /* CC COMPARE. */
34180 if (outer_code == COMPARE)
34181 {
34182 *total = 0;
34183 return true;
34184 }
34185 break;
34186
34187 default:
34188 break;
34189 }
34190
34191 return false;
34192 }
34193
34194 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34195
34196 static bool
34197 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34198 int opno, int *total, bool speed)
34199 {
34200 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34201
34202 fprintf (stderr,
34203 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34204 "opno = %d, total = %d, speed = %s, x:\n",
34205 ret ? "complete" : "scan inner",
34206 GET_MODE_NAME (mode),
34207 GET_RTX_NAME (outer_code),
34208 opno,
34209 *total,
34210 speed ? "true" : "false");
34211
34212 debug_rtx (x);
34213
34214 return ret;
34215 }
34216
34217 static int
34218 rs6000_insn_cost (rtx_insn *insn, bool speed)
34219 {
34220 if (recog_memoized (insn) < 0)
34221 return 0;
34222
34223 if (!speed)
34224 return get_attr_length (insn);
34225
34226 int cost = get_attr_cost (insn);
34227 if (cost > 0)
34228 return cost;
34229
34230 int n = get_attr_length (insn) / 4;
34231 enum attr_type type = get_attr_type (insn);
34232
34233 switch (type)
34234 {
34235 case TYPE_LOAD:
34236 case TYPE_FPLOAD:
34237 case TYPE_VECLOAD:
34238 cost = COSTS_N_INSNS (n + 1);
34239 break;
34240
34241 case TYPE_MUL:
34242 switch (get_attr_size (insn))
34243 {
34244 case SIZE_8:
34245 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34246 break;
34247 case SIZE_16:
34248 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34249 break;
34250 case SIZE_32:
34251 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34252 break;
34253 case SIZE_64:
34254 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34255 break;
34256 default:
34257 gcc_unreachable ();
34258 }
34259 break;
34260 case TYPE_DIV:
34261 switch (get_attr_size (insn))
34262 {
34263 case SIZE_32:
34264 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34265 break;
34266 case SIZE_64:
34267 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34268 break;
34269 default:
34270 gcc_unreachable ();
34271 }
34272 break;
34273
34274 case TYPE_FP:
34275 cost = n * rs6000_cost->fp;
34276 break;
34277 case TYPE_DMUL:
34278 cost = n * rs6000_cost->dmul;
34279 break;
34280 case TYPE_SDIV:
34281 cost = n * rs6000_cost->sdiv;
34282 break;
34283 case TYPE_DDIV:
34284 cost = n * rs6000_cost->ddiv;
34285 break;
34286
34287 case TYPE_SYNC:
34288 case TYPE_LOAD_L:
34289 case TYPE_MFCR:
34290 case TYPE_MFCRF:
34291 cost = COSTS_N_INSNS (n + 2);
34292 break;
34293
34294 default:
34295 cost = COSTS_N_INSNS (n);
34296 }
34297
34298 return cost;
34299 }
34300
34301 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34302
34303 static int
34304 rs6000_debug_address_cost (rtx x, machine_mode mode,
34305 addr_space_t as, bool speed)
34306 {
34307 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34308
34309 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34310 ret, speed ? "true" : "false");
34311 debug_rtx (x);
34312
34313 return ret;
34314 }
34315
34316
34317 /* A C expression returning the cost of moving data from a register of class
34318 CLASS1 to one of CLASS2. */
34319
34320 static int
34321 rs6000_register_move_cost (machine_mode mode,
34322 reg_class_t from, reg_class_t to)
34323 {
34324 int ret;
34325
34326 if (TARGET_DEBUG_COST)
34327 dbg_cost_ctrl++;
34328
34329 /* Moves from/to GENERAL_REGS. */
34330 if (reg_classes_intersect_p (to, GENERAL_REGS)
34331 || reg_classes_intersect_p (from, GENERAL_REGS))
34332 {
34333 reg_class_t rclass = from;
34334
34335 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34336 rclass = to;
34337
34338 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34339 ret = (rs6000_memory_move_cost (mode, rclass, false)
34340 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34341
34342 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34343 shift. */
34344 else if (rclass == CR_REGS)
34345 ret = 4;
34346
34347 /* For those processors that have slow LR/CTR moves, make them more
34348 expensive than memory in order to bias spills to memory. */
34349 else if ((rs6000_tune == PROCESSOR_POWER6
34350 || rs6000_tune == PROCESSOR_POWER7
34351 || rs6000_tune == PROCESSOR_POWER8
34352 || rs6000_tune == PROCESSOR_POWER9)
34353 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34354 ret = 6 * hard_regno_nregs (0, mode);
34355
34356 else
34357 /* A move will cost one instruction per GPR moved. */
34358 ret = 2 * hard_regno_nregs (0, mode);
34359 }
34360
34361 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34362 else if (VECTOR_MEM_VSX_P (mode)
34363 && reg_classes_intersect_p (to, VSX_REGS)
34364 && reg_classes_intersect_p (from, VSX_REGS))
34365 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34366
34367 /* Moving between two similar registers is just one instruction. */
34368 else if (reg_classes_intersect_p (to, from))
34369 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34370
34371 /* Everything else has to go through GENERAL_REGS. */
34372 else
34373 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34374 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34375
34376 if (TARGET_DEBUG_COST)
34377 {
34378 if (dbg_cost_ctrl == 1)
34379 fprintf (stderr,
34380 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34381 ret, GET_MODE_NAME (mode), reg_class_names[from],
34382 reg_class_names[to]);
34383 dbg_cost_ctrl--;
34384 }
34385
34386 return ret;
34387 }
34388
34389 /* A C expression returning the cost of moving data of MODE from a register
34390 to or from memory. */
34391
34392 static int
34393 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34394 bool in ATTRIBUTE_UNUSED)
34395 {
34396 int ret;
34397
34398 if (TARGET_DEBUG_COST)
34399 dbg_cost_ctrl++;
34400
34401 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34402 ret = 4 * hard_regno_nregs (0, mode);
34403 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34404 || reg_classes_intersect_p (rclass, VSX_REGS)))
34405 ret = 4 * hard_regno_nregs (32, mode);
34406 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34407 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34408 else
34409 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34410
34411 if (TARGET_DEBUG_COST)
34412 {
34413 if (dbg_cost_ctrl == 1)
34414 fprintf (stderr,
34415 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34416 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34417 dbg_cost_ctrl--;
34418 }
34419
34420 return ret;
34421 }
34422
34423 /* Return the decl of a target-specific builtin that implements the
34424 reciprocal of the function FNDECL, or NULL_TREE if not available. */
34425
34426 static tree
34427 rs6000_builtin_reciprocal (tree fndecl)
34428 {
34429 switch (DECL_FUNCTION_CODE (fndecl))
34430 {
34431 case VSX_BUILTIN_XVSQRTDP:
34432 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34433 return NULL_TREE;
34434
34435 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34436
34437 case VSX_BUILTIN_XVSQRTSP:
34438 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34439 return NULL_TREE;
34440
34441 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34442
34443 default:
34444 return NULL_TREE;
34445 }
34446 }
34447
34448 /* Load up a constant. If the mode is a vector mode, splat the value across
34449 all of the vector elements. */
34450
34451 static rtx
34452 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34453 {
34454 rtx reg;
34455
34456 if (mode == SFmode || mode == DFmode)
34457 {
34458 rtx d = const_double_from_real_value (dconst, mode);
34459 reg = force_reg (mode, d);
34460 }
34461 else if (mode == V4SFmode)
34462 {
34463 rtx d = const_double_from_real_value (dconst, SFmode);
34464 rtvec v = gen_rtvec (4, d, d, d, d);
34465 reg = gen_reg_rtx (mode);
34466 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34467 }
34468 else if (mode == V2DFmode)
34469 {
34470 rtx d = const_double_from_real_value (dconst, DFmode);
34471 rtvec v = gen_rtvec (2, d, d);
34472 reg = gen_reg_rtx (mode);
34473 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34474 }
34475 else
34476 gcc_unreachable ();
34477
34478 return reg;
34479 }
34480
34481 /* Generate an FMA instruction. */
34482
34483 static void
34484 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34485 {
34486 machine_mode mode = GET_MODE (target);
34487 rtx dst;
34488
34489 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34490 gcc_assert (dst != NULL);
34491
34492 if (dst != target)
34493 emit_move_insn (target, dst);
34494 }
34495
34496 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34497
34498 static void
34499 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34500 {
34501 machine_mode mode = GET_MODE (dst);
34502 rtx r;
34503
34504 /* This is a tad more complicated, since the fnma_optab is for
34505 a different expression: fma(-m1, m2, a), which is the same
34506 thing except in the case of signed zeros.
34507
34508 Fortunately we know that if FMA is supported that FNMSUB is
34509 also supported in the ISA. Just expand it directly. */
34510
34511 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34512
34513 r = gen_rtx_NEG (mode, a);
34514 r = gen_rtx_FMA (mode, m1, m2, r);
34515 r = gen_rtx_NEG (mode, r);
34516 emit_insn (gen_rtx_SET (dst, r));
34517 }
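/* A concrete instance of the signed-zero difference: with
   m1 = m2 = a = +0.0, fma (-m1, m2, a) = (-0.0) + (+0.0) = +0.0,
   whereas -fma (m1, m2, -a) = -((+0.0) + (-0.0)) = -0.0 under
   round-to-nearest. */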
34518
34519 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34520 add a reg_note saying that this was a division. Support both scalar and
34521 vector divide. Assumes no trapping math and finite arguments. */
34522
34523 void
34524 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34525 {
34526 machine_mode mode = GET_MODE (dst);
34527 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34528 int i;
34529
34530 /* Low precision estimates guarantee 5 bits of accuracy. High
34531 precision estimates guarantee 14 bits of accuracy. SFmode
34532 requires 23 bits of accuracy. DFmode requires 52 bits of
34533 accuracy. Each pass at least doubles the accuracy, leading
34534 to the following. */
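/* Checking the arithmetic: from 5-bit estimates the three passes give
   5 -> 10 -> 20 -> 40 bits, covering SFmode's 23, and the extra DFmode
   pass gives 80 >= 52; from 14-bit estimates one pass gives 28 >= 23
   and two passes give 56 >= 52. */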
34535 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34536 if (mode == DFmode || mode == V2DFmode)
34537 passes++;
34538
34539 enum insn_code code = optab_handler (smul_optab, mode);
34540 insn_gen_fn gen_mul = GEN_FCN (code);
34541
34542 gcc_assert (code != CODE_FOR_nothing);
34543
34544 one = rs6000_load_constant_and_splat (mode, dconst1);
34545
34546 /* x0 = 1./d estimate */
34547 x0 = gen_reg_rtx (mode);
34548 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34549 UNSPEC_FRES)));
34550
34551 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34552 if (passes > 1) {
34553
34554 /* e0 = 1. - d * x0 */
34555 e0 = gen_reg_rtx (mode);
34556 rs6000_emit_nmsub (e0, d, x0, one);
34557
34558 /* x1 = x0 + e0 * x0 */
34559 x1 = gen_reg_rtx (mode);
34560 rs6000_emit_madd (x1, e0, x0, x0);
34561
34562 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34563 ++i, xprev = xnext, eprev = enext) {
34564
34565 /* enext = eprev * eprev */
34566 enext = gen_reg_rtx (mode);
34567 emit_insn (gen_mul (enext, eprev, eprev));
34568
34569 /* xnext = xprev + enext * xprev */
34570 xnext = gen_reg_rtx (mode);
34571 rs6000_emit_madd (xnext, enext, xprev, xprev);
34572 }
34573
34574 } else
34575 xprev = x0;
34576
34577 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34578
34579 /* u = n * xprev */
34580 u = gen_reg_rtx (mode);
34581 emit_insn (gen_mul (u, n, xprev));
34582
34583 /* v = n - (d * u) */
34584 v = gen_reg_rtx (mode);
34585 rs6000_emit_nmsub (v, d, u, n);
34586
34587 /* dst = (v * xprev) + u */
34588 rs6000_emit_madd (dst, v, xprev, u);
34589
34590 if (note_p)
34591 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34592 }
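
/* An editor's sketch (not part of GCC): the scalar recurrence the RTL
   above implements, modeled in plain C so the algebra can be checked.
   Starting from a reciprocal estimate x0 ~= 1/d, as the fres/fre
   estimate instructions produce, each pass computes e = 1 - d*x and
   x = x + e*x, at least doubling the number of correct bits; the last
   pass folds in the numerator instead.

      static double
      swdiv_model (double n, double d, double x0, int passes)
      {
        double x = x0;
        for (int i = 0; i < passes - 1; i++)
          {
            double e = 1.0 - d * x;     // residual of the reciprocal
            x = x + e * x;              // refine the estimate
          }
        double u = n * x;               // u = n * xprev
        double v = n - d * u;           // v = n - d * u
        return v * x + u;               // dst = v * xprev + u
      }
*/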
34593
34594 /* Goldschmidt's Algorithm for single/double-precision floating point
34595 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34596
34597 void
34598 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34599 {
34600 machine_mode mode = GET_MODE (src);
34601 rtx e = gen_reg_rtx (mode);
34602 rtx g = gen_reg_rtx (mode);
34603 rtx h = gen_reg_rtx (mode);
34604
34605 /* Low precision estimates guarantee 5 bits of accuracy. High
34606 precision estimates guarantee 14 bits of accuracy. SFmode
34607 requires 23 bits of accuracy. DFmode requires 52 bits of
34608 accuracy. Each pass at least doubles the accuracy, leading
34609 to the following. */
34610 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34611 if (mode == DFmode || mode == V2DFmode)
34612 passes++;
34613
34614 int i;
34615 rtx mhalf;
34616 enum insn_code code = optab_handler (smul_optab, mode);
34617 insn_gen_fn gen_mul = GEN_FCN (code);
34618
34619 gcc_assert (code != CODE_FOR_nothing);
34620
34621 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34622
34623 /* e = rsqrt estimate */
34624 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34625 UNSPEC_RSQRT)));
34626
34627 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34628 if (!recip)
34629 {
34630 rtx zero = force_reg (mode, CONST0_RTX (mode));
34631
34632 if (mode == SFmode)
34633 {
34634 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34635 e, zero, mode, 0);
34636 if (target != e)
34637 emit_move_insn (e, target);
34638 }
34639 else
34640 {
34641 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34642 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34643 }
34644 }
34645
34646 /* g = sqrt estimate. */
34647 emit_insn (gen_mul (g, e, src));
34648 /* h = 1/(2*sqrt) estimate. */
34649 emit_insn (gen_mul (h, e, mhalf));
34650
34651 if (recip)
34652 {
34653 if (passes == 1)
34654 {
34655 rtx t = gen_reg_rtx (mode);
34656 rs6000_emit_nmsub (t, g, h, mhalf);
34657 	  /* Apply the correction directly to the rsqrt estimate.  */
34658 rs6000_emit_madd (dst, e, t, e);
34659 }
34660 else
34661 {
34662 for (i = 0; i < passes; i++)
34663 {
34664 rtx t1 = gen_reg_rtx (mode);
34665 rtx g1 = gen_reg_rtx (mode);
34666 rtx h1 = gen_reg_rtx (mode);
34667
34668 rs6000_emit_nmsub (t1, g, h, mhalf);
34669 rs6000_emit_madd (g1, g, t1, g);
34670 rs6000_emit_madd (h1, h, t1, h);
34671
34672 g = g1;
34673 h = h1;
34674 }
34675 	  /* Multiply h by 2 to produce the rsqrt result.  */
34676 emit_insn (gen_add3_insn (dst, h, h));
34677 }
34678 }
34679 else
34680 {
34681 rtx t = gen_reg_rtx (mode);
34682 rs6000_emit_nmsub (t, g, h, mhalf);
34683 rs6000_emit_madd (dst, g, t, g);
34684 }
34685
34686 return;
34687 }
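
/* An editor's sketch (not part of GCC) of the Goldschmidt iteration
   emitted above, in plain C.  From e ~= 1/sqrt(src) we form
   g = e*src ~= sqrt(src) and h = e*0.5 ~= 1/(2*sqrt(src)); each pass
   computes the residual t = 0.5 - g*h, which goes to zero as g*h
   converges to 0.5, and refines g += g*t, h += h*t.  One iteration of
   this loop is exactly the single correction the sqrt and passes == 1
   paths above apply.

      static double
      swsqrt_model (double src, double e, int passes, int recip)
      {
        double g = e * src;             // sqrt estimate
        double h = e * 0.5;             // 1/(2*sqrt) estimate
        for (int i = 0; i < passes; i++)
          {
            double t = 0.5 - g * h;     // residual
            g = g + g * t;
            h = h + h * t;
          }
        return recip ? 2.0 * h : g;     // rsqrt == 2*h, sqrt == g
      }
*/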
34688
34689 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34690 (Power7) targets. DST is the target, and SRC is the argument operand. */
34691
34692 void
34693 rs6000_emit_popcount (rtx dst, rtx src)
34694 {
34695 machine_mode mode = GET_MODE (dst);
34696 rtx tmp1, tmp2;
34697
34698 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34699 if (TARGET_POPCNTD)
34700 {
34701 if (mode == SImode)
34702 emit_insn (gen_popcntdsi2 (dst, src));
34703 else
34704 emit_insn (gen_popcntddi2 (dst, src));
34705 return;
34706 }
34707
34708 tmp1 = gen_reg_rtx (mode);
34709
34710 if (mode == SImode)
34711 {
34712 emit_insn (gen_popcntbsi2 (tmp1, src));
34713 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34714 NULL_RTX, 0);
34715 tmp2 = force_reg (SImode, tmp2);
34716 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34717 }
34718 else
34719 {
34720 emit_insn (gen_popcntbdi2 (tmp1, src));
34721 tmp2 = expand_mult (DImode, tmp1,
34722 GEN_INT ((HOST_WIDE_INT)
34723 0x01010101 << 32 | 0x01010101),
34724 NULL_RTX, 0);
34725 tmp2 = force_reg (DImode, tmp2);
34726 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
34727 }
34728 }
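
/* An editor's sketch (not part of GCC) of the SImode popcntb fallback
   above.  popcntb leaves the population count of each byte in the
   corresponding result byte; the multiply by 0x01010101 accumulates
   all of the byte counts into the most significant byte, which the
   final right shift extracts.  The DImode path is the same with a
   64-bit constant and a shift by 56.

      #include <stdint.h>

      static uint32_t
      popcount_model (uint32_t src)
      {
        uint32_t bytes = 0;
        for (int i = 0; i < 32; i += 8)         // model of popcntb
          {
            uint32_t b = (src >> i) & 0xff, n = 0;
            while (b) { n += b & 1; b >>= 1; }
            bytes |= n << i;
          }
        return (bytes * 0x01010101u) >> 24;     // sum bytes, extract
      }
*/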
34729
34730
34731 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34732 target, and SRC is the argument operand. */
34733
34734 void
34735 rs6000_emit_parity (rtx dst, rtx src)
34736 {
34737 machine_mode mode = GET_MODE (dst);
34738 rtx tmp;
34739
34740 tmp = gen_reg_rtx (mode);
34741
34742 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34743 if (TARGET_CMPB)
34744 {
34745 if (mode == SImode)
34746 {
34747 emit_insn (gen_popcntbsi2 (tmp, src));
34748 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34749 }
34750 else
34751 {
34752 emit_insn (gen_popcntbdi2 (tmp, src));
34753 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34754 }
34755 return;
34756 }
34757
34758 if (mode == SImode)
34759 {
34760 /* Is mult+shift >= shift+xor+shift+xor? */
34761 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34762 {
34763 rtx tmp1, tmp2, tmp3, tmp4;
34764
34765 tmp1 = gen_reg_rtx (SImode);
34766 emit_insn (gen_popcntbsi2 (tmp1, src));
34767
34768 tmp2 = gen_reg_rtx (SImode);
34769 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34770 tmp3 = gen_reg_rtx (SImode);
34771 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34772
34773 tmp4 = gen_reg_rtx (SImode);
34774 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34775 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34776 }
34777 else
34778 rs6000_emit_popcount (tmp, src);
34779 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34780 }
34781 else
34782 {
34783 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34784 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34785 {
34786 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34787
34788 tmp1 = gen_reg_rtx (DImode);
34789 emit_insn (gen_popcntbdi2 (tmp1, src));
34790
34791 tmp2 = gen_reg_rtx (DImode);
34792 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34793 tmp3 = gen_reg_rtx (DImode);
34794 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34795
34796 tmp4 = gen_reg_rtx (DImode);
34797 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34798 tmp5 = gen_reg_rtx (DImode);
34799 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34800
34801 tmp6 = gen_reg_rtx (DImode);
34802 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34803 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34804 }
34805 else
34806 rs6000_emit_popcount (tmp, src);
34807 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
34808 }
34809 }
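
/* An editor's sketch (not part of GCC) of the SImode shift+xor
   fallback above.  Parity is invariant under xor-folding: xoring the
   upper half of the per-byte counts onto the lower half preserves the
   low bit of their sum, so after folding down to one byte, bit 0 is
   the parity of the whole word.

      #include <stdint.h>

      static uint32_t
      parity_model (uint32_t counts)    // per-byte counts from popcntb
      {
        uint32_t t = counts;
        t ^= t >> 16;                   // fold halfwords
        t ^= t >> 8;                    // fold bytes
        return t & 1;                   // parity of the original word
      }
*/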
34810
34811 /* Expand an Altivec constant permutation for little endian mode.
34812 OP0 and OP1 are the input vectors and TARGET is the output vector.
34813 SEL specifies the constant permutation vector.
34814
34815 There are two issues: First, the two input operands must be
34816 swapped so that together they form a double-wide array in LE
34817 order. Second, the vperm instruction has surprising behavior
34818 in LE mode: it interprets the elements of the source vectors
34819 in BE mode ("left to right") and interprets the elements of
34820 the destination vector in LE mode ("right to left"). To
34821 correct for this, we must subtract each element of the permute
34822 control vector from 31.
34823
34824 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34825 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34826 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34827 serve as the permute control vector. Then, in BE mode,
34828
34829 vperm 9,10,11,12
34830
34831 places the desired result in vr9. However, in LE mode the
34832 vector contents will be
34833
34834 vr10 = 00000003 00000002 00000001 00000000
34835 vr11 = 00000007 00000006 00000005 00000004
34836
34837 The result of the vperm using the same permute control vector is
34838
34839 vr9 = 05000000 07000000 01000000 03000000
34840
34841 That is, the leftmost 4 bytes of vr10 are interpreted as the
34842 source for the rightmost 4 bytes of vr9, and so on.
34843
34844 If we change the permute control vector to
34845
34846       vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
34847
34848 and issue
34849
34850 vperm 9,11,10,12
34851
34852 we get the desired
34853
34854 vr9 = 00000006 00000004 00000002 00000000. */
34855
34856 static void
34857 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
34858 const vec_perm_indices &sel)
34859 {
34860 unsigned int i;
34861 rtx perm[16];
34862 rtx constv, unspec;
34863
34864 /* Unpack and adjust the constant selector. */
34865 for (i = 0; i < 16; ++i)
34866 {
34867 unsigned int elt = 31 - (sel[i] & 31);
34868 perm[i] = GEN_INT (elt);
34869 }
34870
34871 /* Expand to a permute, swapping the inputs and using the
34872 adjusted selector. */
34873 if (!REG_P (op0))
34874 op0 = force_reg (V16QImode, op0);
34875 if (!REG_P (op1))
34876 op1 = force_reg (V16QImode, op1);
34877
34878 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
34879 constv = force_reg (V16QImode, constv);
34880 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
34881 UNSPEC_VPERM);
34882 if (!REG_P (target))
34883 {
34884 rtx tmp = gen_reg_rtx (V16QImode);
34885 emit_move_insn (tmp, unspec);
34886 unspec = tmp;
34887 }
34888
34889 emit_move_insn (target, unspec);
34890 }
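
/* An editor's sketch (not part of GCC) of the adjustment performed
   above: each element of the BE permute control vector is replaced by
   31 minus its value, and the inputs are handed to vperm in swapped
   order.  The worked example in the comment before this function
   follows directly: {0,1,2,3,8,9,...} becomes {31,30,29,28,23,22,...}.

      static void
      adjust_selector_le (const unsigned char be_sel[16],
                          unsigned char le_sel[16])
      {
        for (int i = 0; i < 16; i++)
          le_sel[i] = 31 - (be_sel[i] & 31);   // mirror within 0..31
      }
*/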
34891
34892 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
34893 permute control vector. But here it's not a constant, so we must
34894 generate a vector NAND or NOR to do the adjustment. */
34895
34896 void
34897 altivec_expand_vec_perm_le (rtx operands[4])
34898 {
34899 rtx notx, iorx, unspec;
34900 rtx target = operands[0];
34901 rtx op0 = operands[1];
34902 rtx op1 = operands[2];
34903 rtx sel = operands[3];
34904 rtx tmp = target;
34905 rtx norreg = gen_reg_rtx (V16QImode);
34906 machine_mode mode = GET_MODE (target);
34907
34908 /* Get everything in regs so the pattern matches. */
34909 if (!REG_P (op0))
34910 op0 = force_reg (mode, op0);
34911 if (!REG_P (op1))
34912 op1 = force_reg (mode, op1);
34913 if (!REG_P (sel))
34914 sel = force_reg (V16QImode, sel);
34915 if (!REG_P (target))
34916 tmp = gen_reg_rtx (mode);
34917
34918 if (TARGET_P9_VECTOR)
34919 {
34920 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
34921 UNSPEC_VPERMR);
34922 }
34923 else
34924 {
34925 /* Invert the selector with a VNAND if available, else a VNOR.
34926 The VNAND is preferred for future fusion opportunities. */
34927 notx = gen_rtx_NOT (V16QImode, sel);
34928 iorx = (TARGET_P8_VECTOR
34929 ? gen_rtx_IOR (V16QImode, notx, notx)
34930 : gen_rtx_AND (V16QImode, notx, notx));
34931 emit_insn (gen_rtx_SET (norreg, iorx));
34932
34933 /* Permute with operands reversed and adjusted selector. */
34934 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
34935 UNSPEC_VPERM);
34936 }
34937
34938 /* Copy into target, possibly by way of a register. */
34939 if (!REG_P (target))
34940 {
34941 emit_move_insn (tmp, unspec);
34942 unspec = tmp;
34943 }
34944
34945 emit_move_insn (target, unspec);
34946 }
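
/* An editor's note: a plain bitwise complement suffices here because
   vperm only inspects the low five bits of each selector byte, and
   within five bits 31 - x == ~x.  A quick check of the identity:

      #include <assert.h>

      static void
      check_complement_identity (void)
      {
        for (unsigned int x = 0; x < 32; x++)
          assert (31 - x == (~x & 31));   // 31 - x == ~x (mod 32)
      }
*/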
34947
34948 /* Expand an Altivec constant permutation. Return true if we match
34949 an efficient implementation; false to fall back to VPERM.
34950
34951 OP0 and OP1 are the input vectors and TARGET is the output vector.
34952 SEL specifies the constant permutation vector. */
34953
34954 static bool
34955 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
34956 const vec_perm_indices &sel)
34957 {
34958 struct altivec_perm_insn {
34959 HOST_WIDE_INT mask;
34960 enum insn_code impl;
34961 unsigned char perm[16];
34962 };
34963 static const struct altivec_perm_insn patterns[] = {
34964 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
34965 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
34966 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
34967 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
34968 { OPTION_MASK_ALTIVEC,
34969 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
34970 : CODE_FOR_altivec_vmrglb_direct),
34971 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
34972 { OPTION_MASK_ALTIVEC,
34973 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
34974 : CODE_FOR_altivec_vmrglh_direct),
34975 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
34976 { OPTION_MASK_ALTIVEC,
34977 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
34978 : CODE_FOR_altivec_vmrglw_direct),
34979 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
34980 { OPTION_MASK_ALTIVEC,
34981 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
34982 : CODE_FOR_altivec_vmrghb_direct),
34983 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
34984 { OPTION_MASK_ALTIVEC,
34985 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
34986 : CODE_FOR_altivec_vmrghh_direct),
34987 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
34988 { OPTION_MASK_ALTIVEC,
34989 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
34990 : CODE_FOR_altivec_vmrghw_direct),
34991 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
34992 { OPTION_MASK_P8_VECTOR,
34993 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
34994 : CODE_FOR_p8_vmrgow_v4sf_direct),
34995 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
34996 { OPTION_MASK_P8_VECTOR,
34997 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
34998 : CODE_FOR_p8_vmrgew_v4sf_direct),
34999 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35000 };
35001
35002 unsigned int i, j, elt, which;
35003 unsigned char perm[16];
35004 rtx x;
35005 bool one_vec;
35006
35007 /* Unpack the constant selector. */
35008 for (i = which = 0; i < 16; ++i)
35009 {
35010 elt = sel[i] & 31;
35011 which |= (elt < 16 ? 1 : 2);
35012 perm[i] = elt;
35013 }
35014
35015 /* Simplify the constant selector based on operands. */
35016 switch (which)
35017 {
35018 default:
35019 gcc_unreachable ();
35020
35021 case 3:
35022 one_vec = false;
35023 if (!rtx_equal_p (op0, op1))
35024 break;
35025 /* FALLTHRU */
35026
35027 case 2:
35028 for (i = 0; i < 16; ++i)
35029 perm[i] &= 15;
35030 op0 = op1;
35031 one_vec = true;
35032 break;
35033
35034 case 1:
35035 op1 = op0;
35036 one_vec = true;
35037 break;
35038 }
35039
35040 /* Look for splat patterns. */
35041 if (one_vec)
35042 {
35043 elt = perm[0];
35044
35045 for (i = 0; i < 16; ++i)
35046 if (perm[i] != elt)
35047 break;
35048 if (i == 16)
35049 {
35050 if (!BYTES_BIG_ENDIAN)
35051 elt = 15 - elt;
35052 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35053 return true;
35054 }
35055
35056 if (elt % 2 == 0)
35057 {
35058 for (i = 0; i < 16; i += 2)
35059 if (perm[i] != elt || perm[i + 1] != elt + 1)
35060 break;
35061 if (i == 16)
35062 {
35063 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35064 x = gen_reg_rtx (V8HImode);
35065 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35066 GEN_INT (field)));
35067 emit_move_insn (target, gen_lowpart (V16QImode, x));
35068 return true;
35069 }
35070 }
35071
35072 if (elt % 4 == 0)
35073 {
35074 for (i = 0; i < 16; i += 4)
35075 if (perm[i] != elt
35076 || perm[i + 1] != elt + 1
35077 || perm[i + 2] != elt + 2
35078 || perm[i + 3] != elt + 3)
35079 break;
35080 if (i == 16)
35081 {
35082 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35083 x = gen_reg_rtx (V4SImode);
35084 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35085 GEN_INT (field)));
35086 emit_move_insn (target, gen_lowpart (V16QImode, x));
35087 return true;
35088 }
35089 }
35090 }
35091
35092 /* Look for merge and pack patterns. */
35093 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35094 {
35095 bool swapped;
35096
35097 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35098 continue;
35099
35100 elt = patterns[j].perm[0];
35101 if (perm[0] == elt)
35102 swapped = false;
35103 else if (perm[0] == elt + 16)
35104 swapped = true;
35105 else
35106 continue;
35107 for (i = 1; i < 16; ++i)
35108 {
35109 elt = patterns[j].perm[i];
35110 if (swapped)
35111 elt = (elt >= 16 ? elt - 16 : elt + 16);
35112 else if (one_vec && elt >= 16)
35113 elt -= 16;
35114 if (perm[i] != elt)
35115 break;
35116 }
35117 if (i == 16)
35118 {
35119 enum insn_code icode = patterns[j].impl;
35120 machine_mode omode = insn_data[icode].operand[0].mode;
35121 machine_mode imode = insn_data[icode].operand[1].mode;
35122
35123 /* For little-endian, don't use vpkuwum and vpkuhum if the
35124 underlying vector type is not V4SI and V8HI, respectively.
35125 For example, using vpkuwum with a V8HI picks up the even
35126 halfwords (BE numbering) when the even halfwords (LE
35127 numbering) are what we need. */
35128 if (!BYTES_BIG_ENDIAN
35129 && icode == CODE_FOR_altivec_vpkuwum_direct
35130 && ((GET_CODE (op0) == REG
35131 && GET_MODE (op0) != V4SImode)
35132 || (GET_CODE (op0) == SUBREG
35133 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35134 continue;
35135 if (!BYTES_BIG_ENDIAN
35136 && icode == CODE_FOR_altivec_vpkuhum_direct
35137 && ((GET_CODE (op0) == REG
35138 && GET_MODE (op0) != V8HImode)
35139 || (GET_CODE (op0) == SUBREG
35140 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35141 continue;
35142
35143 /* For little-endian, the two input operands must be swapped
35144 (or swapped back) to ensure proper right-to-left numbering
35145 from 0 to 2N-1. */
35146 if (swapped ^ !BYTES_BIG_ENDIAN)
35147 std::swap (op0, op1);
35148 if (imode != V16QImode)
35149 {
35150 op0 = gen_lowpart (imode, op0);
35151 op1 = gen_lowpart (imode, op1);
35152 }
35153 if (omode == V16QImode)
35154 x = target;
35155 else
35156 x = gen_reg_rtx (omode);
35157 emit_insn (GEN_FCN (icode) (x, op0, op1));
35158 if (omode != V16QImode)
35159 emit_move_insn (target, gen_lowpart (V16QImode, x));
35160 return true;
35161 }
35162 }
35163
35164 if (!BYTES_BIG_ENDIAN)
35165 {
35166 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35167 return true;
35168 }
35169
35170 return false;
35171 }
35172
35173 /* Expand a VSX Permute Doubleword constant permutation.
35174 Return true if we match an efficient implementation. */
35175
35176 static bool
35177 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35178 unsigned char perm0, unsigned char perm1)
35179 {
35180 rtx x;
35181
35182 /* If both selectors come from the same operand, fold to single op. */
35183 if ((perm0 & 2) == (perm1 & 2))
35184 {
35185 if (perm0 & 2)
35186 op0 = op1;
35187 else
35188 op1 = op0;
35189 }
35190 /* If both operands are equal, fold to simpler permutation. */
35191 if (rtx_equal_p (op0, op1))
35192 {
35193 perm0 = perm0 & 1;
35194 perm1 = (perm1 & 1) + 2;
35195 }
35196 /* If the first selector comes from the second operand, swap. */
35197 else if (perm0 & 2)
35198 {
35199 if (perm1 & 2)
35200 return false;
35201 perm0 -= 2;
35202 perm1 += 2;
35203 std::swap (op0, op1);
35204 }
35205 /* If the second selector does not come from the second operand, fail. */
35206 else if ((perm1 & 2) == 0)
35207 return false;
35208
35209 /* Success! */
35210 if (target != NULL)
35211 {
35212 machine_mode vmode, dmode;
35213 rtvec v;
35214
35215 vmode = GET_MODE (target);
35216 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35217 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35218 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35219 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35220 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35221 emit_insn (gen_rtx_SET (target, x));
35222 }
35223 return true;
35224 }
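
/* An editor's sketch (not part of GCC) of the selector
   canonicalization above.  Each two-bit selector picks an operand
   (bit 1) and a doubleword within it (bit 0); the folds and the swap
   bring perm0 to index the first operand and perm1 the second, the
   form the two-element VEC_SELECT expects.

      #include <stdbool.h>

      static bool
      permdi_model (unsigned char *perm0, unsigned char *perm1,
                    bool *swap_ops, bool ops_equal)
      {
        *swap_ops = false;
        if ((*perm0 & 2) == (*perm1 & 2))  // same operand: fold to one
          ops_equal = true;
        if (ops_equal)
          {
            *perm0 &= 1;                   // index into {op, op}
            *perm1 = (*perm1 & 1) + 2;
          }
        else if (*perm0 & 2)               // perm0 selects operand 1:
          {
            if (*perm1 & 2)
              return false;
            *perm0 -= 2;                   // swap the operands instead
            *perm1 += 2;
            *swap_ops = true;
          }
        else if ((*perm1 & 2) == 0)
          return false;
        return true;
      }
*/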
35225
35226 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35227
35228 static bool
35229 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35230 rtx op1, const vec_perm_indices &sel)
35231 {
35232 bool testing_p = !target;
35233
35234 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35235 if (TARGET_ALTIVEC && testing_p)
35236 return true;
35237
35238 /* Check for ps_merge* or xxpermdi insns. */
35239 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35240 {
35241 if (testing_p)
35242 {
35243 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35244 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35245 }
35246 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35247 return true;
35248 }
35249
35250 if (TARGET_ALTIVEC)
35251 {
35252 /* Force the target-independent code to lower to V16QImode. */
35253 if (vmode != V16QImode)
35254 return false;
35255 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35256 return true;
35257 }
35258
35259 return false;
35260 }
35261
35262 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35263 OP0 and OP1 are the input vectors and TARGET is the output vector.
35264 PERM specifies the constant permutation vector. */
35265
35266 static void
35267 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35268 machine_mode vmode, const vec_perm_builder &perm)
35269 {
35270 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35271 if (x != target)
35272 emit_move_insn (target, x);
35273 }
35274
35275 /* Expand an extract even operation. */
35276
35277 void
35278 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35279 {
35280 machine_mode vmode = GET_MODE (target);
35281 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35282 vec_perm_builder perm (nelt, nelt, 1);
35283
35284 for (i = 0; i < nelt; i++)
35285 perm.quick_push (i * 2);
35286
35287 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35288 }
35289
35290 /* Expand a vector interleave operation. */
35291
35292 void
35293 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35294 {
35295 machine_mode vmode = GET_MODE (target);
35296 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35297 vec_perm_builder perm (nelt, nelt, 1);
35298
35299 high = (highp ? 0 : nelt / 2);
35300 for (i = 0; i < nelt / 2; i++)
35301 {
35302 perm.quick_push (i + high);
35303 perm.quick_push (i + nelt + high);
35304 }
35305
35306 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35307 }
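
/* An editor's illustration of the selectors the two functions above
   build for a V4SI target (nelt == 4); indices refer to the
   2*nelt-element concatenation of the inputs:

      rs6000_expand_extract_even           { 0, 2, 4, 6 }
      rs6000_expand_interleave, highp      { 0, 4, 1, 5 }
      rs6000_expand_interleave, !highp     { 2, 6, 3, 7 }  */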
35308
35309 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT.  */
35310 void
35311 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35312 {
35313 HOST_WIDE_INT hwi_scale (scale);
35314 REAL_VALUE_TYPE r_pow;
35315 rtvec v = rtvec_alloc (2);
35316 rtx elt;
35317 rtx scale_vec = gen_reg_rtx (V2DFmode);
35318 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35319 elt = const_double_from_real_value (r_pow, DFmode);
35320 RTVEC_ELT (v, 0) = elt;
35321 RTVEC_ELT (v, 1) = elt;
35322 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35323 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35324 }
35325
35326 /* Return an RTX representing where to find the function value of a
35327 function returning MODE. */
35328 static rtx
35329 rs6000_complex_function_value (machine_mode mode)
35330 {
35331 unsigned int regno;
35332 rtx r1, r2;
35333 machine_mode inner = GET_MODE_INNER (mode);
35334 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35335
35336 if (TARGET_FLOAT128_TYPE
35337 && (mode == KCmode
35338 || (mode == TCmode && TARGET_IEEEQUAD)))
35339 regno = ALTIVEC_ARG_RETURN;
35340
35341 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35342 regno = FP_ARG_RETURN;
35343
35344 else
35345 {
35346 regno = GP_ARG_RETURN;
35347
35348 /* 32-bit is OK since it'll go in r3/r4. */
35349 if (TARGET_32BIT && inner_bytes >= 4)
35350 return gen_rtx_REG (mode, regno);
35351 }
35352
35353 if (inner_bytes >= 8)
35354 return gen_rtx_REG (mode, regno);
35355
35356 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35357 const0_rtx);
35358 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35359 GEN_INT (inner_bytes));
35360 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35361 }
35362
35363 /* Return an rtx describing a return value of MODE as a PARALLEL
35364 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35365 stride REG_STRIDE. */
35366
35367 static rtx
35368 rs6000_parallel_return (machine_mode mode,
35369 int n_elts, machine_mode elt_mode,
35370 unsigned int regno, unsigned int reg_stride)
35371 {
35372 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35373
35374 int i;
35375 for (i = 0; i < n_elts; i++)
35376 {
35377 rtx r = gen_rtx_REG (elt_mode, regno);
35378 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35379 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35380 regno += reg_stride;
35381 }
35382
35383 return par;
35384 }
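
/* For example (an editor's illustration), a DImode value returned
   under -m32 -mpowerpc64 is described as two SImode halves in r3/r4:

      (parallel:DI [(expr_list (reg:SI 3) (const_int 0))
                    (expr_list (reg:SI 4) (const_int 4))])

   i.e. n_elts == 2, elt_mode == SImode, regno == GP_ARG_RETURN,
   reg_stride == 1, with byte offsets 0 and 4.  */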
35385
35386 /* Target hook for TARGET_FUNCTION_VALUE.
35387
35388 An integer value is in r3 and a floating-point value is in fp1,
35389 unless -msoft-float. */
35390
35391 static rtx
35392 rs6000_function_value (const_tree valtype,
35393 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35394 bool outgoing ATTRIBUTE_UNUSED)
35395 {
35396 machine_mode mode;
35397 unsigned int regno;
35398 machine_mode elt_mode;
35399 int n_elts;
35400
35401 /* Special handling for structs in darwin64. */
35402 if (TARGET_MACHO
35403 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35404 {
35405 CUMULATIVE_ARGS valcum;
35406 rtx valret;
35407
35408 valcum.words = 0;
35409 valcum.fregno = FP_ARG_MIN_REG;
35410 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35411 /* Do a trial code generation as if this were going to be passed as
35412 an argument; if any part goes in memory, we return NULL. */
35413 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35414 if (valret)
35415 return valret;
35416 /* Otherwise fall through to standard ABI rules. */
35417 }
35418
35419 mode = TYPE_MODE (valtype);
35420
35421   /* The ELFv2 ABI returns homogeneous floating-point and vector aggregates in registers.  */
35422 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35423 {
35424 int first_reg, n_regs;
35425
35426 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35427 {
35428 /* _Decimal128 must use even/odd register pairs. */
35429 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35430 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35431 }
35432 else
35433 {
35434 first_reg = ALTIVEC_ARG_RETURN;
35435 n_regs = 1;
35436 }
35437
35438 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35439 }
35440
35441   /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI.  */
35442 if (TARGET_32BIT && TARGET_POWERPC64)
35443 switch (mode)
35444 {
35445 default:
35446 break;
35447 case E_DImode:
35448 case E_SCmode:
35449 case E_DCmode:
35450 case E_TCmode:
35451 int count = GET_MODE_SIZE (mode) / 4;
35452 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35453 }
35454
35455 if ((INTEGRAL_TYPE_P (valtype)
35456 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35457 || POINTER_TYPE_P (valtype))
35458 mode = TARGET_32BIT ? SImode : DImode;
35459
35460 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35461 /* _Decimal128 must use an even/odd register pair. */
35462 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35463 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35464 && !FLOAT128_VECTOR_P (mode))
35465 regno = FP_ARG_RETURN;
35466 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35467 && targetm.calls.split_complex_arg)
35468 return rs6000_complex_function_value (mode);
35469 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35470 return register is used in both cases, and we won't see V2DImode/V2DFmode
35471 for pure altivec, combine the two cases. */
35472 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35473 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35474 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35475 regno = ALTIVEC_ARG_RETURN;
35476 else
35477 regno = GP_ARG_RETURN;
35478
35479 return gen_rtx_REG (mode, regno);
35480 }
35481
35482 /* Define how to find the value returned by a library function
35483 assuming the value has mode MODE. */
35484 rtx
35485 rs6000_libcall_value (machine_mode mode)
35486 {
35487 unsigned int regno;
35488
35489   /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI.  */
35490 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35491 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35492
35493 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35494 /* _Decimal128 must use an even/odd register pair. */
35495 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35496 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35497 regno = FP_ARG_RETURN;
35498 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35499 return register is used in both cases, and we won't see V2DImode/V2DFmode
35500 for pure altivec, combine the two cases. */
35501 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35502 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35503 regno = ALTIVEC_ARG_RETURN;
35504 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35505 return rs6000_complex_function_value (mode);
35506 else
35507 regno = GP_ARG_RETURN;
35508
35509 return gen_rtx_REG (mode, regno);
35510 }
35511
35512 /* Compute register pressure classes. We implement the target hook to avoid
35513 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35514    lead to incorrect estimates of the number of available registers and therefore
35515 increased register pressure/spill. */
35516 static int
35517 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35518 {
35519 int n;
35520
35521 n = 0;
35522 pressure_classes[n++] = GENERAL_REGS;
35523 if (TARGET_VSX)
35524 pressure_classes[n++] = VSX_REGS;
35525 else
35526 {
35527 if (TARGET_ALTIVEC)
35528 pressure_classes[n++] = ALTIVEC_REGS;
35529 if (TARGET_HARD_FLOAT)
35530 pressure_classes[n++] = FLOAT_REGS;
35531 }
35532 pressure_classes[n++] = CR_REGS;
35533 pressure_classes[n++] = SPECIAL_REGS;
35534
35535 return n;
35536 }
35537
35538 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35539 Frame pointer elimination is automatically handled.
35540
35541 For the RS/6000, if frame pointer elimination is being done, we would like
35542 to convert ap into fp, not sp.
35543
35544 We need r30 if -mminimal-toc was specified, and there are constant pool
35545 references. */
35546
35547 static bool
35548 rs6000_can_eliminate (const int from, const int to)
35549 {
35550 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35551 ? ! frame_pointer_needed
35552 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35553 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35554 || constant_pool_empty_p ()
35555 : true);
35556 }
35557
35558 /* Define the offset between two registers, FROM to be eliminated and its
35559 replacement TO, at the start of a routine. */
35560 HOST_WIDE_INT
35561 rs6000_initial_elimination_offset (int from, int to)
35562 {
35563 rs6000_stack_t *info = rs6000_stack_info ();
35564 HOST_WIDE_INT offset;
35565
35566 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35567 offset = info->push_p ? 0 : -info->total_size;
35568 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35569 {
35570 offset = info->push_p ? 0 : -info->total_size;
35571 if (FRAME_GROWS_DOWNWARD)
35572 offset += info->fixed_size + info->vars_size + info->parm_size;
35573 }
35574 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35575 offset = FRAME_GROWS_DOWNWARD
35576 ? info->fixed_size + info->vars_size + info->parm_size
35577 : 0;
35578 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35579 offset = info->total_size;
35580 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35581 offset = info->push_p ? info->total_size : 0;
35582 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35583 offset = 0;
35584 else
35585 gcc_unreachable ();
35586
35587 return offset;
35588 }
35589
35590 /* Fill in sizes of registers used by unwinder. */
35591
35592 static void
35593 rs6000_init_dwarf_reg_sizes_extra (tree address)
35594 {
35595 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35596 {
35597 int i;
35598 machine_mode mode = TYPE_MODE (char_type_node);
35599 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35600 rtx mem = gen_rtx_MEM (BLKmode, addr);
35601 rtx value = gen_int_mode (16, mode);
35602
35603 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35604 The unwinder still needs to know the size of Altivec registers. */
35605
35606 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35607 {
35608 int column = DWARF_REG_TO_UNWIND_COLUMN
35609 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35610 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35611
35612 emit_move_insn (adjust_address (mem, mode, offset), value);
35613 }
35614 }
35615 }
35616
35617 /* Map internal gcc register numbers to debug format register numbers.
35618 FORMAT specifies the type of debug register number to use:
35619 0 -- debug information, except for frame-related sections
35620 1 -- DWARF .debug_frame section
35621 2 -- DWARF .eh_frame section */
35622
35623 unsigned int
35624 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35625 {
35626 /* Except for the above, we use the internal number for non-DWARF
35627 debug information, and also for .eh_frame. */
35628 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35629 return regno;
35630
35631 /* On some platforms, we use the standard DWARF register
35632 numbering for .debug_info and .debug_frame. */
35633 #ifdef RS6000_USE_DWARF_NUMBERING
35634 if (regno <= 63)
35635 return regno;
35636 if (regno == LR_REGNO)
35637 return 108;
35638 if (regno == CTR_REGNO)
35639 return 109;
35640 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35641 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35642 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35643 to the DWARF reg for CR. */
35644 if (format == 1 && regno == CR2_REGNO)
35645 return 64;
35646 if (CR_REGNO_P (regno))
35647 return regno - CR0_REGNO + 86;
35648 if (regno == CA_REGNO)
35649 return 101; /* XER */
35650 if (ALTIVEC_REGNO_P (regno))
35651 return regno - FIRST_ALTIVEC_REGNO + 1124;
35652 if (regno == VRSAVE_REGNO)
35653 return 356;
35654 if (regno == VSCR_REGNO)
35655 return 67;
35656 #endif
35657 return regno;
35658 }
35659
35660 /* Target hook for eh_return_filter_mode.  */
35661 static scalar_int_mode
35662 rs6000_eh_return_filter_mode (void)
35663 {
35664 return TARGET_32BIT ? SImode : word_mode;
35665 }
35666
35667 /* Target hook for scalar_mode_supported_p. */
35668 static bool
35669 rs6000_scalar_mode_supported_p (scalar_mode mode)
35670 {
35671 /* -m32 does not support TImode. This is the default, from
35672 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35673 same ABI as for -m32. But default_scalar_mode_supported_p allows
35674 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35675 for -mpowerpc64. */
35676 if (TARGET_32BIT && mode == TImode)
35677 return false;
35678
35679 if (DECIMAL_FLOAT_MODE_P (mode))
35680 return default_decimal_float_supported_p ();
35681 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35682 return true;
35683 else
35684 return default_scalar_mode_supported_p (mode);
35685 }
35686
35687 /* Target hook for vector_mode_supported_p. */
35688 static bool
35689 rs6000_vector_mode_supported_p (machine_mode mode)
35690 {
35691 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35692 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35693 double-double. */
35694 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35695 return true;
35696
35697 else
35698 return false;
35699 }
35700
35701 /* Target hook for floatn_mode. */
35702 static opt_scalar_float_mode
35703 rs6000_floatn_mode (int n, bool extended)
35704 {
35705 if (extended)
35706 {
35707 switch (n)
35708 {
35709 case 32:
35710 return DFmode;
35711
35712 case 64:
35713 if (TARGET_FLOAT128_TYPE)
35714 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35715 else
35716 return opt_scalar_float_mode ();
35717
35718 case 128:
35719 return opt_scalar_float_mode ();
35720
35721 default:
35722 /* Those are the only valid _FloatNx types. */
35723 gcc_unreachable ();
35724 }
35725 }
35726 else
35727 {
35728 switch (n)
35729 {
35730 case 32:
35731 return SFmode;
35732
35733 case 64:
35734 return DFmode;
35735
35736 case 128:
35737 if (TARGET_FLOAT128_TYPE)
35738 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35739 else
35740 return opt_scalar_float_mode ();
35741
35742 default:
35743 return opt_scalar_float_mode ();
35744 }
35745 }
35746
35747 }
35748
35749 /* Target hook for c_mode_for_suffix. */
35750 static machine_mode
35751 rs6000_c_mode_for_suffix (char suffix)
35752 {
35753 if (TARGET_FLOAT128_TYPE)
35754 {
35755 if (suffix == 'q' || suffix == 'Q')
35756 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35757
35758 /* At the moment, we are not defining a suffix for IBM extended double.
35759 If/when the default for -mabi=ieeelongdouble is changed, and we want
35760 to support __ibm128 constants in legacy library code, we may need to
35761 	 re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
35762 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35763 __float80 constants. */
35764 }
35765
35766 return VOIDmode;
35767 }
35768
35769 /* Target hook for invalid_arg_for_unprototyped_fn. */
35770 static const char *
35771 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35772 {
35773 return (!rs6000_darwin64_abi
35774 && typelist == 0
35775 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35776 && (funcdecl == NULL_TREE
35777 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35778 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35779 ? N_("AltiVec argument passed to unprototyped function")
35780 : NULL;
35781 }
35782
35783 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
35784    setup by using the hidden function __stack_chk_fail_local instead of
35785    calling __stack_chk_fail directly.  Otherwise it is better to call
35786    __stack_chk_fail directly.  */
35787
35788 static tree ATTRIBUTE_UNUSED
35789 rs6000_stack_protect_fail (void)
35790 {
35791 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35792 ? default_hidden_stack_protect_fail ()
35793 : default_external_stack_protect_fail ();
35794 }
35795
35796 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35797
35798 #if TARGET_ELF
35799 static unsigned HOST_WIDE_INT
35800 rs6000_asan_shadow_offset (void)
35801 {
35802 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35803 }
35804 #endif
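
/* An editor's note: with the hook above, the sanitizer computes
   shadow = (addr >> 3) + offset, so shadow memory lives at
   1 << 41 (64-bit) or 1 << 29 (32-bit) plus one eighth of the
   application address; only the constant is target-specific.  */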
35805 \f
35806 /* Mask options that we want to support inside of attribute((target)) and
35807 #pragma GCC target operations. Note, we do not include things like
35808 64/32-bit, endianness, hard/soft floating point, etc. that would have
35809 different calling sequences. */
35810
35811 struct rs6000_opt_mask {
35812 const char *name; /* option name */
35813 HOST_WIDE_INT mask; /* mask to set */
35814 bool invert; /* invert sense of mask */
35815 bool valid_target; /* option is a target option */
35816 };
35817
35818 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35819 {
35820 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35821 { "cmpb", OPTION_MASK_CMPB, false, true },
35822 { "crypto", OPTION_MASK_CRYPTO, false, true },
35823 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35824 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35825 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35826 false, true },
35827 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
35828 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
35829 { "fprnd", OPTION_MASK_FPRND, false, true },
35830 { "hard-dfp", OPTION_MASK_DFP, false, true },
35831 { "htm", OPTION_MASK_HTM, false, true },
35832 { "isel", OPTION_MASK_ISEL, false, true },
35833 { "mfcrf", OPTION_MASK_MFCRF, false, true },
35834 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
35835 { "modulo", OPTION_MASK_MODULO, false, true },
35836 { "mulhw", OPTION_MASK_MULHW, false, true },
35837 { "multiple", OPTION_MASK_MULTIPLE, false, true },
35838 { "popcntb", OPTION_MASK_POPCNTB, false, true },
35839 { "popcntd", OPTION_MASK_POPCNTD, false, true },
35840 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
35841 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
35842 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
35843 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
35844 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
35845 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
35846 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
35847 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
35848 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
35849 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
35850 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
35851 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
35852 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
35853 { "string", 0, false, true },
35854 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
35855 { "update", OPTION_MASK_NO_UPDATE, true , true },
35856 { "vsx", OPTION_MASK_VSX, false, true },
35857 #ifdef OPTION_MASK_64BIT
35858 #if TARGET_AIX_OS
35859 { "aix64", OPTION_MASK_64BIT, false, false },
35860 { "aix32", OPTION_MASK_64BIT, true, false },
35861 #else
35862 { "64", OPTION_MASK_64BIT, false, false },
35863 { "32", OPTION_MASK_64BIT, true, false },
35864 #endif
35865 #endif
35866 #ifdef OPTION_MASK_EABI
35867 { "eabi", OPTION_MASK_EABI, false, false },
35868 #endif
35869 #ifdef OPTION_MASK_LITTLE_ENDIAN
35870 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
35871 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
35872 #endif
35873 #ifdef OPTION_MASK_RELOCATABLE
35874 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
35875 #endif
35876 #ifdef OPTION_MASK_STRICT_ALIGN
35877 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
35878 #endif
35879 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
35880 { "string", 0, false, false },
35881 };
35882
35883 /* Builtin mask mapping for printing the flags. */
35884 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
35885 {
35886 { "altivec", RS6000_BTM_ALTIVEC, false, false },
35887 { "vsx", RS6000_BTM_VSX, false, false },
35888 { "fre", RS6000_BTM_FRE, false, false },
35889 { "fres", RS6000_BTM_FRES, false, false },
35890 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
35891 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
35892 { "popcntd", RS6000_BTM_POPCNTD, false, false },
35893 { "cell", RS6000_BTM_CELL, false, false },
35894 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
35895 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
35896 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
35897 { "crypto", RS6000_BTM_CRYPTO, false, false },
35898 { "htm", RS6000_BTM_HTM, false, false },
35899 { "hard-dfp", RS6000_BTM_DFP, false, false },
35900 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
35901 { "long-double-128", RS6000_BTM_LDBL128, false, false },
35902 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
35903 { "float128", RS6000_BTM_FLOAT128, false, false },
35904 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
35905 };
35906
35907 /* Option variables that we want to support inside attribute((target)) and
35908 #pragma GCC target operations. */
35909
35910 struct rs6000_opt_var {
35911 const char *name; /* option name */
35912 size_t global_offset; /* offset of the option in global_options. */
35913 size_t target_offset; /* offset of the option in target options. */
35914 };
35915
35916 static struct rs6000_opt_var const rs6000_opt_vars[] =
35917 {
35918 { "friz",
35919 offsetof (struct gcc_options, x_TARGET_FRIZ),
35920 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
35921 { "avoid-indexed-addresses",
35922 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
35923 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
35924 { "longcall",
35925 offsetof (struct gcc_options, x_rs6000_default_long_calls),
35926 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
35927 { "optimize-swaps",
35928 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
35929 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
35930 { "allow-movmisalign",
35931 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
35932 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
35933 { "sched-groups",
35934 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
35935 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
35936 { "always-hint",
35937 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
35938 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
35939 { "align-branch-targets",
35940 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
35941 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
35942 { "tls-markers",
35943 offsetof (struct gcc_options, x_tls_markers),
35944 offsetof (struct cl_target_option, x_tls_markers), },
35945 { "sched-prolog",
35946 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35947 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35948 { "sched-epilog",
35949 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
35950 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
35951 { "speculate-indirect-jumps",
35952 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
35953 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
35954 };
35955
35956 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
35957 parsing. Return true if there were no errors. */
35958
35959 static bool
35960 rs6000_inner_target_options (tree args, bool attr_p)
35961 {
35962 bool ret = true;
35963
35964 if (args == NULL_TREE)
35965 ;
35966
35967 else if (TREE_CODE (args) == STRING_CST)
35968 {
35969 char *p = ASTRDUP (TREE_STRING_POINTER (args));
35970 char *q;
35971
35972 while ((q = strtok (p, ",")) != NULL)
35973 {
35974 bool error_p = false;
35975 bool not_valid_p = false;
35976 const char *cpu_opt = NULL;
35977
35978 p = NULL;
35979 if (strncmp (q, "cpu=", 4) == 0)
35980 {
35981 int cpu_index = rs6000_cpu_name_lookup (q+4);
35982 if (cpu_index >= 0)
35983 rs6000_cpu_index = cpu_index;
35984 else
35985 {
35986 error_p = true;
35987 cpu_opt = q+4;
35988 }
35989 }
35990 else if (strncmp (q, "tune=", 5) == 0)
35991 {
35992 int tune_index = rs6000_cpu_name_lookup (q+5);
35993 if (tune_index >= 0)
35994 rs6000_tune_index = tune_index;
35995 else
35996 {
35997 error_p = true;
35998 cpu_opt = q+5;
35999 }
36000 }
36001 else
36002 {
36003 size_t i;
36004 bool invert = false;
36005 char *r = q;
36006
36007 error_p = true;
36008 if (strncmp (r, "no-", 3) == 0)
36009 {
36010 invert = true;
36011 r += 3;
36012 }
36013
36014 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36015 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36016 {
36017 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36018
36019 if (!rs6000_opt_masks[i].valid_target)
36020 not_valid_p = true;
36021 else
36022 {
36023 error_p = false;
36024 rs6000_isa_flags_explicit |= mask;
36025
36026 /* VSX needs altivec, so -mvsx automagically sets
36027 altivec and disables -mavoid-indexed-addresses. */
36028 if (!invert)
36029 {
36030 if (mask == OPTION_MASK_VSX)
36031 {
36032 mask |= OPTION_MASK_ALTIVEC;
36033 TARGET_AVOID_XFORM = 0;
36034 }
36035 }
36036
36037 if (rs6000_opt_masks[i].invert)
36038 invert = !invert;
36039
36040 if (invert)
36041 rs6000_isa_flags &= ~mask;
36042 else
36043 rs6000_isa_flags |= mask;
36044 }
36045 break;
36046 }
36047
36048 if (error_p && !not_valid_p)
36049 {
36050 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36051 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36052 {
36053 size_t j = rs6000_opt_vars[i].global_offset;
36054 *((int *) ((char *)&global_options + j)) = !invert;
36055 error_p = false;
36056 not_valid_p = false;
36057 break;
36058 }
36059 }
36060 }
36061
36062 if (error_p)
36063 {
36064 const char *eprefix, *esuffix;
36065
36066 ret = false;
36067 if (attr_p)
36068 {
36069 eprefix = "__attribute__((__target__(";
36070 esuffix = ")))";
36071 }
36072 else
36073 {
36074 eprefix = "#pragma GCC target ";
36075 esuffix = "";
36076 }
36077
36078 if (cpu_opt)
36079 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36080 q, esuffix);
36081 else if (not_valid_p)
36082 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36083 else
36084 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36085 }
36086 }
36087 }
36088
36089 else if (TREE_CODE (args) == TREE_LIST)
36090 {
36091 do
36092 {
36093 tree value = TREE_VALUE (args);
36094 if (value)
36095 {
36096 bool ret2 = rs6000_inner_target_options (value, attr_p);
36097 if (!ret2)
36098 ret = false;
36099 }
36100 args = TREE_CHAIN (args);
36101 }
36102 while (args != NULL_TREE);
36103 }
36104
36105 else
36106 {
36107 error ("attribute %<target%> argument not a string");
36108 return false;
36109 }
36110
36111 return ret;
36112 }
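
/* An editor's illustration of inputs this parser accepts, with option
   names taken from the rs6000_opt_masks and rs6000_opt_vars tables
   above:

      #pragma GCC target ("cpu=power9,htm,no-vsx")
      __attribute__((__target__("tune=power8,avoid-indexed-addresses")))

   Each comma-separated token is cpu=/tune= followed by a processor
   name, a mask name (optionally prefixed with "no-"), or one of the
   option-variable names.  */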
36113
36114 /* Print out the target options as a list for -mdebug=target. */
36115
36116 static void
36117 rs6000_debug_target_options (tree args, const char *prefix)
36118 {
36119 if (args == NULL_TREE)
36120 fprintf (stderr, "%s<NULL>", prefix);
36121
36122 else if (TREE_CODE (args) == STRING_CST)
36123 {
36124 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36125 char *q;
36126
36127 while ((q = strtok (p, ",")) != NULL)
36128 {
36129 p = NULL;
36130 fprintf (stderr, "%s\"%s\"", prefix, q);
36131 prefix = ", ";
36132 }
36133 }
36134
36135 else if (TREE_CODE (args) == TREE_LIST)
36136 {
36137 do
36138 {
36139 tree value = TREE_VALUE (args);
36140 if (value)
36141 {
36142 rs6000_debug_target_options (value, prefix);
36143 prefix = ", ";
36144 }
36145 args = TREE_CHAIN (args);
36146 }
36147 while (args != NULL_TREE);
36148 }
36149
36150 else
36151 gcc_unreachable ();
36152
36153 return;
36154 }
36155
36156 \f
36157 /* Hook to validate attribute((target("..."))). */
36158
36159 static bool
36160 rs6000_valid_attribute_p (tree fndecl,
36161 tree ARG_UNUSED (name),
36162 tree args,
36163 int flags)
36164 {
36165 struct cl_target_option cur_target;
36166 bool ret;
36167 tree old_optimize;
36168 tree new_target, new_optimize;
36169 tree func_optimize;
36170
36171 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36172
36173 if (TARGET_DEBUG_TARGET)
36174 {
36175 tree tname = DECL_NAME (fndecl);
36176 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36177 if (tname)
36178 fprintf (stderr, "function: %.*s\n",
36179 (int) IDENTIFIER_LENGTH (tname),
36180 IDENTIFIER_POINTER (tname));
36181 else
36182 fprintf (stderr, "function: unknown\n");
36183
36184 fprintf (stderr, "args:");
36185 rs6000_debug_target_options (args, " ");
36186 fprintf (stderr, "\n");
36187
36188 if (flags)
36189 fprintf (stderr, "flags: 0x%x\n", flags);
36190
36191 fprintf (stderr, "--------------------\n");
36192 }
36193
36194 /* attribute((target("default"))) does nothing, beyond
36195 affecting multi-versioning. */
36196 if (TREE_VALUE (args)
36197 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36198 && TREE_CHAIN (args) == NULL_TREE
36199 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36200 return true;
36201
36202 old_optimize = build_optimization_node (&global_options);
36203 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36204
36205 /* If the function changed the optimization levels as well as setting target
36206 options, start with the optimizations specified. */
36207 if (func_optimize && func_optimize != old_optimize)
36208 cl_optimization_restore (&global_options,
36209 TREE_OPTIMIZATION (func_optimize));
36210
36211 /* The target attributes may also change some optimization flags, so update
36212 the optimization options if necessary. */
36213 cl_target_option_save (&cur_target, &global_options);
36214 rs6000_cpu_index = rs6000_tune_index = -1;
36215 ret = rs6000_inner_target_options (args, true);
36216
36217 /* Set up any additional state. */
36218 if (ret)
36219 {
36220 ret = rs6000_option_override_internal (false);
36221 new_target = build_target_option_node (&global_options);
36222 }
36223 else
36224 new_target = NULL;
36225
36226 new_optimize = build_optimization_node (&global_options);
36227
36228 if (!new_target)
36229 ret = false;
36230
36231 else if (fndecl)
36232 {
36233 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36234
36235 if (old_optimize != new_optimize)
36236 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36237 }
36238
36239 cl_target_option_restore (&global_options, &cur_target);
36240
36241 if (old_optimize != new_optimize)
36242 cl_optimization_restore (&global_options,
36243 TREE_OPTIMIZATION (old_optimize));
36244
36245 return ret;
36246 }
36247
36248 \f
36249 /* Hook to validate the current #pragma GCC target and set the state, and
36250 update the macros based on what was changed. If ARGS is NULL, then
36251 POP_TARGET is used to reset the options. */
36252
36253 bool
36254 rs6000_pragma_target_parse (tree args, tree pop_target)
36255 {
36256 tree prev_tree = build_target_option_node (&global_options);
36257 tree cur_tree;
36258 struct cl_target_option *prev_opt, *cur_opt;
36259 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36260 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36261
36262 if (TARGET_DEBUG_TARGET)
36263 {
36264 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36265 fprintf (stderr, "args:");
36266 rs6000_debug_target_options (args, " ");
36267 fprintf (stderr, "\n");
36268
36269 if (pop_target)
36270 {
36271 fprintf (stderr, "pop_target:\n");
36272 debug_tree (pop_target);
36273 }
36274 else
36275 fprintf (stderr, "pop_target: <NULL>\n");
36276
36277 fprintf (stderr, "--------------------\n");
36278 }
36279
36280 if (! args)
36281 {
36282 cur_tree = ((pop_target)
36283 ? pop_target
36284 : target_option_default_node);
36285 cl_target_option_restore (&global_options,
36286 TREE_TARGET_OPTION (cur_tree));
36287 }
36288 else
36289 {
36290 rs6000_cpu_index = rs6000_tune_index = -1;
36291 if (!rs6000_inner_target_options (args, false)
36292 || !rs6000_option_override_internal (false)
36293 || (cur_tree = build_target_option_node (&global_options))
36294 == NULL_TREE)
36295 {
36296 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36297 fprintf (stderr, "invalid pragma\n");
36298
36299 return false;
36300 }
36301 }
36302
36303 target_option_current_node = cur_tree;
36304 rs6000_activate_target_options (target_option_current_node);
36305
36306 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36307 change the macros that are defined. */
36308 if (rs6000_target_modify_macros_ptr)
36309 {
36310 prev_opt = TREE_TARGET_OPTION (prev_tree);
36311 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36312 prev_flags = prev_opt->x_rs6000_isa_flags;
36313
36314 cur_opt = TREE_TARGET_OPTION (cur_tree);
36315 cur_flags = cur_opt->x_rs6000_isa_flags;
36316 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36317
36318 diff_bumask = (prev_bumask ^ cur_bumask);
36319 diff_flags = (prev_flags ^ cur_flags);
36320
36321 if ((diff_flags != 0) || (diff_bumask != 0))
36322 {
36323 /* Delete old macros. */
36324 rs6000_target_modify_macros_ptr (false,
36325 prev_flags & diff_flags,
36326 prev_bumask & diff_bumask);
36327
36328 /* Define new macros. */
36329 rs6000_target_modify_macros_ptr (true,
36330 cur_flags & diff_flags,
36331 cur_bumask & diff_bumask);
36332 }
36333 }
36334
36335 return true;
36336 }
36337
36338 \f
36339 /* Remember the last target of rs6000_set_current_function. */
36340 static GTY(()) tree rs6000_previous_fndecl;
36341
36342 /* Restore target's globals from NEW_TREE and invalidate the
36343 rs6000_previous_fndecl cache. */
36344
36345 void
36346 rs6000_activate_target_options (tree new_tree)
36347 {
36348 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36349 if (TREE_TARGET_GLOBALS (new_tree))
36350 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36351 else if (new_tree == target_option_default_node)
36352 restore_target_globals (&default_target_globals);
36353 else
36354 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36355 rs6000_previous_fndecl = NULL_TREE;
36356 }
36357
36358 /* Establish appropriate back-end context for processing the function
36359 FNDECL. The argument might be NULL to indicate processing at top
36360 level, outside of any function scope. */
36361 static void
36362 rs6000_set_current_function (tree fndecl)
36363 {
36364 if (TARGET_DEBUG_TARGET)
36365 {
36366 fprintf (stderr, "\n==================== rs6000_set_current_function");
36367
36368 if (fndecl)
36369 fprintf (stderr, ", fndecl %s (%p)",
36370 (DECL_NAME (fndecl)
36371 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36372 : "<unknown>"), (void *)fndecl);
36373
36374 if (rs6000_previous_fndecl)
36375 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36376
36377 fprintf (stderr, "\n");
36378 }
36379
36380 /* Only change the context if the function changes. This hook is called
36381 several times in the course of compiling a function, and we don't want to
36382 slow things down too much or call target_reinit when it isn't safe. */
36383 if (fndecl == rs6000_previous_fndecl)
36384 return;
36385
36386 tree old_tree;
36387 if (rs6000_previous_fndecl == NULL_TREE)
36388 old_tree = target_option_current_node;
36389 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36390 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36391 else
36392 old_tree = target_option_default_node;
36393
36394 tree new_tree;
36395 if (fndecl == NULL_TREE)
36396 {
36397 if (old_tree != target_option_current_node)
36398 new_tree = target_option_current_node;
36399 else
36400 new_tree = NULL_TREE;
36401 }
36402 else
36403 {
36404 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36405 if (new_tree == NULL_TREE)
36406 new_tree = target_option_default_node;
36407 }
36408
36409 if (TARGET_DEBUG_TARGET)
36410 {
36411 if (new_tree)
36412 {
36413 fprintf (stderr, "\nnew fndecl target specific options:\n");
36414 debug_tree (new_tree);
36415 }
36416
36417 if (old_tree)
36418 {
36419 fprintf (stderr, "\nold fndecl target specific options:\n");
36420 debug_tree (old_tree);
36421 }
36422
36423 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36424 fprintf (stderr, "--------------------\n");
36425 }
36426
36427 if (new_tree && old_tree != new_tree)
36428 rs6000_activate_target_options (new_tree);
36429
36430 if (fndecl)
36431 rs6000_previous_fndecl = fndecl;
36432 }
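/* Illustrative sketch (hypothetical function names): source such as

     __attribute__ ((target ("cpu=power9"))) void fast_path (void) { }
     void generic_path (void) { }    -- uses the command-line options

   makes the hook above switch from fast_path's option tree back to
   target_option_default_node when generic_path is processed.  */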
36433
36434 \f
36435 /* Save the current options */
36436
36437 static void
36438 rs6000_function_specific_save (struct cl_target_option *ptr,
36439 struct gcc_options *opts)
36440 {
36441 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36442 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36443 }
36444
36445 /* Restore the current options */
36446
36447 static void
36448 rs6000_function_specific_restore (struct gcc_options *opts,
36449 struct cl_target_option *ptr)
36450
36451 {
36452 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36453 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36454 (void) rs6000_option_override_internal (false);
36455 }
36456
36457 /* Print the current options */
36458
36459 static void
36460 rs6000_function_specific_print (FILE *file, int indent,
36461 struct cl_target_option *ptr)
36462 {
36463 rs6000_print_isa_options (file, indent, "Isa options set",
36464 ptr->x_rs6000_isa_flags);
36465
36466 rs6000_print_isa_options (file, indent, "Isa options explicit",
36467 ptr->x_rs6000_isa_flags_explicit);
36468 }
36469
36470 /* Helper function to print the current isa or misc options on a line. */
36471
36472 static void
36473 rs6000_print_options_internal (FILE *file,
36474 int indent,
36475 const char *string,
36476 HOST_WIDE_INT flags,
36477 const char *prefix,
36478 const struct rs6000_opt_mask *opts,
36479 size_t num_elements)
36480 {
36481 size_t i;
36482 size_t start_column = 0;
36483 size_t cur_column;
36484 size_t max_column = 120;
36485 size_t prefix_len = strlen (prefix);
36486 size_t comma_len = 0;
36487 const char *comma = "";
36488
36489 if (indent)
36490 start_column += fprintf (file, "%*s", indent, "");
36491
36492 if (!flags)
36493 {
36494 fprintf (file, DEBUG_FMT_S, string, "<none>");
36495 return;
36496 }
36497
36498 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36499
36500 /* Print the various mask options. */
36501 cur_column = start_column;
36502 for (i = 0; i < num_elements; i++)
36503 {
36504 bool invert = opts[i].invert;
36505 const char *name = opts[i].name;
36506 const char *no_str = "";
36507 HOST_WIDE_INT mask = opts[i].mask;
36508 size_t len = comma_len + prefix_len + strlen (name);
36509
36510 if (!invert)
36511 {
36512 if ((flags & mask) == 0)
36513 {
36514 no_str = "no-";
36515 len += sizeof ("no-") - 1;
36516 }
36517
36518 flags &= ~mask;
36519 }
36520
36521 else
36522 {
36523 if ((flags & mask) != 0)
36524 {
36525 no_str = "no-";
36526 len += sizeof ("no-") - 1;
36527 }
36528
36529 flags |= mask;
36530 }
36531
36532 cur_column += len;
36533 if (cur_column > max_column)
36534 {
36535 fprintf (file, ", \\\n%*s", (int)start_column, "");
36536 cur_column = start_column + len;
36537 comma = "";
36538 }
36539
36540 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36541 comma = ", ";
36542 comma_len = sizeof (", ") - 1;
36543 }
36544
36545 fputs ("\n", file);
36546 }
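/* For illustration only: output produced through this helper looks roughly
   like the following (flag names and the exact DEBUG_FMT_* layout vary by
   configuration):

     Isa options set = 0x... -maltivec, -mvsx, -mno-crypto, \
                             -mno-power9-vector, ...

   with long lines wrapped at column 120 via the backslash continuation
   handled above.  */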
36547
36548 /* Helper function to print the current isa options on a line. */
36549
36550 static void
36551 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36552 HOST_WIDE_INT flags)
36553 {
36554 rs6000_print_options_internal (file, indent, string, flags, "-m",
36555 &rs6000_opt_masks[0],
36556 ARRAY_SIZE (rs6000_opt_masks));
36557 }
36558
36559 static void
36560 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36561 HOST_WIDE_INT flags)
36562 {
36563 rs6000_print_options_internal (file, indent, string, flags, "",
36564 &rs6000_builtin_mask_names[0],
36565 ARRAY_SIZE (rs6000_builtin_mask_names));
36566 }
36567
36568 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36569 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36570 -mupper-regs-df, etc.).
36571
36572 If the user used -mno-power8-vector, we need to turn off all of the implicit
36573 ISA 2.07 and 3.0 options that relate to the vector unit.
36574
36575 If the user used -mno-power9-vector, we need to turn off all of the implicit
36576 ISA 3.0 options that relate to the vector unit.
36577
36578 This function does not handle explicit options such as the user specifying
36579 -mdirect-move. These are handled in rs6000_option_override_internal, and
36580 the appropriate error is given if needed.
36581
36582 We return a mask of all of the implicit options that should not be enabled
36583 by default. */
36584
36585 static HOST_WIDE_INT
36586 rs6000_disable_incompatible_switches (void)
36587 {
36588 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36589 size_t i, j;
36590
36591 static const struct {
36592 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36593 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36594 const char *const name; /* name of the switch. */
36595 } flags[] = {
36596 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36597 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36598 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36599 };
36600
36601 for (i = 0; i < ARRAY_SIZE (flags); i++)
36602 {
36603 HOST_WIDE_INT no_flag = flags[i].no_flag;
36604
36605 if ((rs6000_isa_flags & no_flag) == 0
36606 && (rs6000_isa_flags_explicit & no_flag) != 0)
36607 {
36608 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36609 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36610 & rs6000_isa_flags
36611 & dep_flags);
36612
36613 if (set_flags)
36614 {
36615 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36616 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36617 {
36618 set_flags &= ~rs6000_opt_masks[j].mask;
36619 error ("%<-mno-%s%> turns off %<-m%s%>",
36620 flags[i].name,
36621 rs6000_opt_masks[j].name);
36622 }
36623
36624 gcc_assert (!set_flags);
36625 }
36626
36627 rs6000_isa_flags &= ~dep_flags;
36628 ignore_masks |= no_flag | dep_flags;
36629 }
36630 }
36631
36632 return ignore_masks;
36633 }
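/* Example of the cascade above: "-mcpu=power9 -mno-vsx" silently clears the
   implicit -mpower8-vector, -mpower9-vector, -mdirect-move, etc., while an
   explicit conflict such as "-mno-vsx -mpower8-vector" triggers the error
   '-mno-vsx turns off -mpower8-vector' from the loop above.  */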
36634
36635 \f
36636 /* Helper function for printing the function name when debugging. */
36637
36638 static const char *
36639 get_decl_name (tree fn)
36640 {
36641 tree name;
36642
36643 if (!fn)
36644 return "<null>";
36645
36646 name = DECL_NAME (fn);
36647 if (!name)
36648 return "<no-name>";
36649
36650 return IDENTIFIER_POINTER (name);
36651 }
36652
36653 /* Return the clone id of the target we are compiling code for in a target
36654 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36655 the priority list for the target clones (ordered from lowest to
36656 highest). */
36657
36658 static int
36659 rs6000_clone_priority (tree fndecl)
36660 {
36661 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36662 HOST_WIDE_INT isa_masks;
36663 int ret = CLONE_DEFAULT;
36664 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36665 const char *attrs_str = NULL;
36666
36667 attrs = TREE_VALUE (TREE_VALUE (attrs));
36668 attrs_str = TREE_STRING_POINTER (attrs);
36669
36670 /* Return priority zero for the default function. Return the ISA needed for the
36671 function if it is not the default. */
36672 if (strcmp (attrs_str, "default") != 0)
36673 {
36674 if (fn_opts == NULL_TREE)
36675 fn_opts = target_option_default_node;
36676
36677 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36678 isa_masks = rs6000_isa_flags;
36679 else
36680 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36681
36682 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36683 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36684 break;
36685 }
36686
36687 if (TARGET_DEBUG_TARGET)
36688 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
36689 get_decl_name (fndecl), ret);
36690
36691 return ret;
36692 }
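/* Illustrative use (hypothetical declaration):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long foo (long);

   The "default" version gets CLONE_DEFAULT (priority 0); the power9 clone
   gets the highest CLONE_* index whose ISA mask its flags satisfy, so the
   dispatcher tests it first.  */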
36693
36694 /* This compares the priority of target features in function DECL1 and DECL2.
36695 It returns a positive value if DECL1 has higher priority, a negative value
36696 if DECL2 has higher priority, and 0 if they are the same. Note, priorities are
36697 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36698
36699 static int
36700 rs6000_compare_version_priority (tree decl1, tree decl2)
36701 {
36702 int priority1 = rs6000_clone_priority (decl1);
36703 int priority2 = rs6000_clone_priority (decl2);
36704 int ret = priority1 - priority2;
36705
36706 if (TARGET_DEBUG_TARGET)
36707 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
36708 get_decl_name (decl1), get_decl_name (decl2), ret);
36709
36710 return ret;
36711 }
36712
36713 /* Make a dispatcher declaration for the multi-versioned function DECL.
36714 Calls to DECL function will be replaced with calls to the dispatcher
36715 by the front-end. Returns the decl of the dispatcher function. */
36716
36717 static tree
36718 rs6000_get_function_versions_dispatcher (void *decl)
36719 {
36720 tree fn = (tree) decl;
36721 struct cgraph_node *node = NULL;
36722 struct cgraph_node *default_node = NULL;
36723 struct cgraph_function_version_info *node_v = NULL;
36724 struct cgraph_function_version_info *first_v = NULL;
36725
36726 tree dispatch_decl = NULL;
36727
36728 struct cgraph_function_version_info *default_version_info = NULL;
36729 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
36730
36731 if (TARGET_DEBUG_TARGET)
36732 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
36733 get_decl_name (fn));
36734
36735 node = cgraph_node::get (fn);
36736 gcc_assert (node != NULL);
36737
36738 node_v = node->function_version ();
36739 gcc_assert (node_v != NULL);
36740
36741 if (node_v->dispatcher_resolver != NULL)
36742 return node_v->dispatcher_resolver;
36743
36744 /* Find the default version and make it the first node. */
36745 first_v = node_v;
36746 /* Go to the beginning of the chain. */
36747 while (first_v->prev != NULL)
36748 first_v = first_v->prev;
36749
36750 default_version_info = first_v;
36751 while (default_version_info != NULL)
36752 {
36753 const tree decl2 = default_version_info->this_node->decl;
36754 if (is_function_default_version (decl2))
36755 break;
36756 default_version_info = default_version_info->next;
36757 }
36758
36759 /* If there is no default node, just return NULL. */
36760 if (default_version_info == NULL)
36761 return NULL;
36762
36763 /* Make default info the first node. */
36764 if (first_v != default_version_info)
36765 {
36766 default_version_info->prev->next = default_version_info->next;
36767 if (default_version_info->next)
36768 default_version_info->next->prev = default_version_info->prev;
36769 first_v->prev = default_version_info;
36770 default_version_info->next = first_v;
36771 default_version_info->prev = NULL;
36772 }
36773
36774 default_node = default_version_info->this_node;
36775
36776 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
36777 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36778 "target_clones attribute needs GLIBC (2.23 and newer) that "
36779 "exports hardware capability bits");
36780 #else
36781
36782 if (targetm.has_ifunc_p ())
36783 {
36784 struct cgraph_function_version_info *it_v = NULL;
36785 struct cgraph_node *dispatcher_node = NULL;
36786 struct cgraph_function_version_info *dispatcher_version_info = NULL;
36787
36788 /* Right now, the dispatching is done via ifunc. */
36789 dispatch_decl = make_dispatcher_decl (default_node->decl);
36790
36791 dispatcher_node = cgraph_node::get_create (dispatch_decl);
36792 gcc_assert (dispatcher_node != NULL);
36793 dispatcher_node->dispatcher_function = 1;
36794 dispatcher_version_info
36795 = dispatcher_node->insert_new_function_version ();
36796 dispatcher_version_info->next = default_version_info;
36797 dispatcher_node->definition = 1;
36798
36799 /* Set the dispatcher for all the versions. */
36800 it_v = default_version_info;
36801 while (it_v != NULL)
36802 {
36803 it_v->dispatcher_resolver = dispatch_decl;
36804 it_v = it_v->next;
36805 }
36806 }
36807 else
36808 {
36809 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36810 "multiversioning needs ifunc which is not supported "
36811 "on this target");
36812 }
36813 #endif
36814
36815 return dispatch_decl;
36816 }
36817
36818 /* Make the resolver function decl to dispatch the versions of a multi-
36819 versioned function, DEFAULT_DECL. Create an empty basic block in the
36820 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
36821 function. */
36822
36823 static tree
36824 make_resolver_func (const tree default_decl,
36825 const tree dispatch_decl,
36826 basic_block *empty_bb)
36827 {
36828 /* Make the resolver function static. The resolver function returns
36829 void *. */
36830 tree decl_name = clone_function_name (default_decl, "resolver");
36831 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
36832 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
36833 tree decl = build_fn_decl (resolver_name, type);
36834 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
36835
36836 DECL_NAME (decl) = decl_name;
36837 TREE_USED (decl) = 1;
36838 DECL_ARTIFICIAL (decl) = 1;
36839 DECL_IGNORED_P (decl) = 0;
36840 TREE_PUBLIC (decl) = 0;
36841 DECL_UNINLINABLE (decl) = 1;
36842
36843 /* Resolver is not external, body is generated. */
36844 DECL_EXTERNAL (decl) = 0;
36845 DECL_EXTERNAL (dispatch_decl) = 0;
36846
36847 DECL_CONTEXT (decl) = NULL_TREE;
36848 DECL_INITIAL (decl) = make_node (BLOCK);
36849 DECL_STATIC_CONSTRUCTOR (decl) = 0;
36850
36851 /* Build result decl and add to function_decl. */
36852 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
36853 DECL_ARTIFICIAL (t) = 1;
36854 DECL_IGNORED_P (t) = 1;
36855 DECL_RESULT (decl) = t;
36856
36857 gimplify_function_tree (decl);
36858 push_cfun (DECL_STRUCT_FUNCTION (decl));
36859 *empty_bb = init_lowered_empty_function (decl, false,
36860 profile_count::uninitialized ());
36861
36862 cgraph_node::add_new_function (decl, true);
36863 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
36864
36865 pop_cfun ();
36866
36867 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
36868 DECL_ATTRIBUTES (dispatch_decl)
36869 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
36870
36871 cgraph_node::create_same_body_alias (dispatch_decl, decl);
36872
36873 return decl;
36874 }
36875
36876 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
36877 return a pointer to VERSION_DECL if we are running on a machine that
36878 supports the index CLONE_ISA hardware architecture bits. This function will
36879 be called during version dispatch to decide which function version to
36880 execute. It returns the basic block at the end, to which more conditions
36881 can be added. */
36882
36883 static basic_block
36884 add_condition_to_bb (tree function_decl, tree version_decl,
36885 int clone_isa, basic_block new_bb)
36886 {
36887 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
36888
36889 gcc_assert (new_bb != NULL);
36890 gimple_seq gseq = bb_seq (new_bb);
36891
36893 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
36894 build_fold_addr_expr (version_decl));
36895 tree result_var = create_tmp_var (ptr_type_node);
36896 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
36897 gimple *return_stmt = gimple_build_return (result_var);
36898
36899 if (clone_isa == CLONE_DEFAULT)
36900 {
36901 gimple_seq_add_stmt (&gseq, convert_stmt);
36902 gimple_seq_add_stmt (&gseq, return_stmt);
36903 set_bb_seq (new_bb, gseq);
36904 gimple_set_bb (convert_stmt, new_bb);
36905 gimple_set_bb (return_stmt, new_bb);
36906 pop_cfun ();
36907 return new_bb;
36908 }
36909
36910 tree bool_zero = build_int_cst (bool_int_type_node, 0);
36911 tree cond_var = create_tmp_var (bool_int_type_node);
36912 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
36913 const char *arg_str = rs6000_clone_map[clone_isa].name;
36914 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
36915 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
36916 gimple_call_set_lhs (call_cond_stmt, cond_var);
36917
36918 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
36919 gimple_set_bb (call_cond_stmt, new_bb);
36920 gimple_seq_add_stmt (&gseq, call_cond_stmt);
36921
36922 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
36923 NULL_TREE, NULL_TREE);
36924 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
36925 gimple_set_bb (if_else_stmt, new_bb);
36926 gimple_seq_add_stmt (&gseq, if_else_stmt);
36927
36928 gimple_seq_add_stmt (&gseq, convert_stmt);
36929 gimple_seq_add_stmt (&gseq, return_stmt);
36930 set_bb_seq (new_bb, gseq);
36931
36932 basic_block bb1 = new_bb;
36933 edge e12 = split_block (bb1, if_else_stmt);
36934 basic_block bb2 = e12->dest;
36935 e12->flags &= ~EDGE_FALLTHRU;
36936 e12->flags |= EDGE_TRUE_VALUE;
36937
36938 edge e23 = split_block (bb2, return_stmt);
36939 gimple_set_bb (convert_stmt, bb2);
36940 gimple_set_bb (return_stmt, bb2);
36941
36942 basic_block bb3 = e23->dest;
36943 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
36944
36945 remove_edge (e23);
36946 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
36947
36948 pop_cfun ();
36949 return bb3;
36950 }
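/* The generated resolver body therefore looks roughly like this pseudo-C,
   one conditional block per non-default clone, highest priority first
   (function names hypothetical; "arch_3_00" is the kind of string held in
   rs6000_clone_map):

     if (__builtin_cpu_supports ("arch_3_00"))
       return (void *) foo_isa_3_0;
     ...
     return (void *) foo_default;    -- the CLONE_DEFAULT tail  */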
36951
36952 /* This function generates the dispatch function for multi-versioned functions.
36953 DISPATCH_DECL is the function which will contain the dispatch logic.
36954 FNDECLS_P holds the function choices for dispatch; it is actually a vec<tree> *.
36955 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
36956 code is generated. */
36957
36958 static int
36959 dispatch_function_versions (tree dispatch_decl,
36960 void *fndecls_p,
36961 basic_block *empty_bb)
36962 {
36963 int ix;
36964 tree ele;
36965 vec<tree> *fndecls;
36966 tree clones[CLONE_MAX];
36967
36968 if (TARGET_DEBUG_TARGET)
36969 fputs ("dispatch_function_versions, top\n", stderr);
36970
36971 gcc_assert (dispatch_decl != NULL
36972 && fndecls_p != NULL
36973 && empty_bb != NULL);
36974
36975 /* fndecls_p is actually a vector. */
36976 fndecls = static_cast<vec<tree> *> (fndecls_p);
36977
36978 /* At least one more version other than the default. */
36979 gcc_assert (fndecls->length () >= 2);
36980
36981 /* The first version in the vector is the default decl. */
36982 memset ((void *) clones, '\0', sizeof (clones));
36983 clones[CLONE_DEFAULT] = (*fndecls)[0];
36984
36985 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
36986 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
36987 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
36988 recent glibc. If we ever need to call __builtin_cpu_init, we would need
36989 to insert the code here to do the call. */
36990
36991 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
36992 {
36993 int priority = rs6000_clone_priority (ele);
36994 if (!clones[priority])
36995 clones[priority] = ele;
36996 }
36997
36998 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
36999 if (clones[ix])
37000 {
37001 if (TARGET_DEBUG_TARGET)
37002 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37003 ix, get_decl_name (clones[ix]));
37004
37005 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37006 *empty_bb);
37007 }
37008
37009 return 0;
37010 }
37011
37012 /* Generate the dispatching code body to dispatch multi-versioned function
37013 DECL. The target hook is called to process the "target" attributes and
37014 provide the code to dispatch the right function at run-time. NODE points
37015 to the dispatcher decl whose body will be created. */
37016
37017 static tree
37018 rs6000_generate_version_dispatcher_body (void *node_p)
37019 {
37020 tree resolver;
37021 basic_block empty_bb;
37022 struct cgraph_node *node = (cgraph_node *) node_p;
37023 struct cgraph_function_version_info *ninfo = node->function_version ();
37024
37025 if (ninfo->dispatcher_resolver)
37026 return ninfo->dispatcher_resolver;
37027
37028 /* node is going to be an alias, so remove the finalized bit. */
37029 node->definition = false;
37030
37031 /* The first version in the chain corresponds to the default version. */
37032 ninfo->dispatcher_resolver = resolver
37033 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37034
37035 if (TARGET_DEBUG_TARGET)
37036 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37037 get_decl_name (resolver));
37038
37039 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37040 auto_vec<tree, 2> fn_ver_vec;
37041
37042 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37043 vinfo;
37044 vinfo = vinfo->next)
37045 {
37046 struct cgraph_node *version = vinfo->this_node;
37047 /* Check for virtual functions here again, as by this time it should
37048 have been determined if this function needs a vtable index or
37049 not. This happens for methods in derived classes that override
37050 virtual methods in base classes but are not explicitly marked as
37051 virtual. */
37052 if (DECL_VINDEX (version->decl))
37053 sorry ("Virtual function multiversioning not supported");
37054
37055 fn_ver_vec.safe_push (version->decl);
37056 }
37057
37058 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37059 cgraph_edge::rebuild_edges ();
37060 pop_cfun ();
37061 return resolver;
37062 }
37063
37064 \f
37065 /* Hook to determine if one function can safely inline another. */
37066
37067 static bool
37068 rs6000_can_inline_p (tree caller, tree callee)
37069 {
37070 bool ret = false;
37071 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37072 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37073
37074 /* If callee has no option attributes, then it is ok to inline. */
37075 if (!callee_tree)
37076 ret = true;
37077
37078 /* If caller has no option attributes, but callee does then it is not ok to
37079 inline. */
37080 else if (!caller_tree)
37081 ret = false;
37082
37083 else
37084 {
37085 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37086 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37087
37088 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37089 can inline an altivec function but a non-vsx function can't inline a
37090 vsx function. */
37091 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37092 == callee_opts->x_rs6000_isa_flags)
37093 ret = true;
37094 }
37095
37096 if (TARGET_DEBUG_TARGET)
37097 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37098 get_decl_name (caller), get_decl_name (callee),
37099 (ret ? "can" : "cannot"));
37100
37101 return ret;
37102 }
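/* Concretely (hypothetical flag sets): with
     caller = { VSX, ALTIVEC }   and   callee = { ALTIVEC },
   (caller & callee) == callee holds, so the AltiVec callee inlines into the
   VSX caller; swapping the two fails the subset test above.  */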
37103 \f
37104 /* Allocate a stack temp and fixup the address so it meets the particular
37105 memory requirements (either offsettable or REG+REG addressing). */
37106
37107 rtx
37108 rs6000_allocate_stack_temp (machine_mode mode,
37109 bool offsettable_p,
37110 bool reg_reg_p)
37111 {
37112 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37113 rtx addr = XEXP (stack, 0);
37114 int strict_p = reload_completed;
37115
37116 if (!legitimate_indirect_address_p (addr, strict_p))
37117 {
37118 if (offsettable_p
37119 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37120 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37121
37122 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37123 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37124 }
37125
37126 return stack;
37127 }
37128
37129 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37130 to such a form to deal with memory reference instructions like STFIWX that
37131 only take reg+reg addressing. */
37132
37133 rtx
37134 rs6000_address_for_fpconvert (rtx x)
37135 {
37136 rtx addr;
37137
37138 gcc_assert (MEM_P (x));
37139 addr = XEXP (x, 0);
37140 if (can_create_pseudo_p ()
37141 && ! legitimate_indirect_address_p (addr, reload_completed)
37142 && ! legitimate_indexed_address_p (addr, reload_completed))
37143 {
37144 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37145 {
37146 rtx reg = XEXP (addr, 0);
37147 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37148 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37149 gcc_assert (REG_P (reg));
37150 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37151 addr = reg;
37152 }
37153 else if (GET_CODE (addr) == PRE_MODIFY)
37154 {
37155 rtx reg = XEXP (addr, 0);
37156 rtx expr = XEXP (addr, 1);
37157 gcc_assert (REG_P (reg));
37158 gcc_assert (GET_CODE (expr) == PLUS);
37159 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37160 addr = reg;
37161 }
37162
37163 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37164 }
37165
37166 return x;
37167 }
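/* For example (a sketch), a PRE_INC address (pre_inc (reg Rn)) on a 4-byte
   access is rewritten above into an explicit "addi Rn,Rn,4" followed by a
   plain register-indirect reference, which indexed-form instructions such
   as STFIWX can then handle.  */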
37168
37169 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37170
37171 On the RS/6000, all integer constants are acceptable, though most won't be
37172 valid for particular insns. Only easy FP constants are acceptable. */
37173
37174 static bool
37175 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37176 {
37177 if (TARGET_ELF && tls_referenced_p (x))
37178 return false;
37179
37180 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37181 || GET_MODE (x) == VOIDmode
37182 || (TARGET_POWERPC64 && mode == DImode)
37183 || easy_fp_constant (x, mode)
37184 || easy_vector_constant (x, mode));
37185 }
37186
37187 \f
37188 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37189
37190 static bool
37191 chain_already_loaded (rtx_insn *last)
37192 {
37193 for (; last != NULL; last = PREV_INSN (last))
37194 {
37195 if (NONJUMP_INSN_P (last))
37196 {
37197 rtx patt = PATTERN (last);
37198
37199 if (GET_CODE (patt) == SET)
37200 {
37201 rtx lhs = XEXP (patt, 0);
37202
37203 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37204 return true;
37205 }
37206 }
37207 }
37208 return false;
37209 }
37210
37211 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37212
37213 void
37214 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37215 {
37216 const bool direct_call_p
37217 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37218 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37219 rtx toc_load = NULL_RTX;
37220 rtx toc_restore = NULL_RTX;
37221 rtx func_addr;
37222 rtx abi_reg = NULL_RTX;
37223 rtx call[4];
37224 int n_call;
37225 rtx insn;
37226
37227 /* Handle longcall attributes. */
37228 if (INTVAL (cookie) & CALL_LONG)
37229 func_desc = rs6000_longcall_ref (func_desc);
37230
37231 /* Handle indirect calls. */
37232 if (GET_CODE (func_desc) != SYMBOL_REF
37233 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37234 {
37235 /* Save the TOC into its reserved slot before the call,
37236 and prepare to restore it after the call. */
37237 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37238 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37239 rtx stack_toc_mem = gen_frame_mem (Pmode,
37240 gen_rtx_PLUS (Pmode, stack_ptr,
37241 stack_toc_offset));
37242 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37243 gen_rtvec (1, stack_toc_offset),
37244 UNSPEC_TOCSLOT);
37245 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37246
37247 /* Can we optimize saving the TOC in the prologue or
37248 do we need to do it at every call? */
37249 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37250 cfun->machine->save_toc_in_prologue = true;
37251 else
37252 {
37253 MEM_VOLATILE_P (stack_toc_mem) = 1;
37254 emit_move_insn (stack_toc_mem, toc_reg);
37255 }
37256
37257 if (DEFAULT_ABI == ABI_ELFv2)
37258 {
37259 /* A function pointer in the ELFv2 ABI is just a plain address, but
37260 the ABI requires it to be loaded into r12 before the call. */
37261 func_addr = gen_rtx_REG (Pmode, 12);
37262 emit_move_insn (func_addr, func_desc);
37263 abi_reg = func_addr;
37264 }
37265 else
37266 {
37267 /* A function pointer under AIX is a pointer to a data area whose
37268 first word contains the actual address of the function, whose
37269 second word contains a pointer to its TOC, and whose third word
37270 contains a value to place in the static chain register (r11).
37271 Note that if we load the static chain, our "trampoline" need
37272 not have any executable code. */
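	     /* Sketch of that layout, in Pmode-sized words (illustrative):
		  +0  entry-point address    (becomes FUNC_ADDR below)
		  +1  callee's TOC pointer   (used by TOC_LOAD below)
		  +2  static chain value     (loaded into r11 when needed)  */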
37273
37274 /* Load up address of the actual function. */
37275 func_desc = force_reg (Pmode, func_desc);
37276 func_addr = gen_reg_rtx (Pmode);
37277 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37278
37279 /* Prepare to load the TOC of the called function. Note that the
37280 TOC load must happen immediately before the actual call so
37281 that unwinding the TOC registers works correctly. See the
37282 comment in frob_update_context. */
37283 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37284 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37285 gen_rtx_PLUS (Pmode, func_desc,
37286 func_toc_offset));
37287 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37288
37289 /* If we have a static chain, load it up. But, if the call was
37290 originally direct, the 3rd word has not been written since no
37291 trampoline has been built, so we ought not to load it, lest we
37292 override a static chain value. */
37293 if (!direct_call_p
37294 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37295 && !chain_already_loaded (get_current_sequence ()->next->last))
37296 {
37297 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37298 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37299 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37300 gen_rtx_PLUS (Pmode, func_desc,
37301 func_sc_offset));
37302 emit_move_insn (sc_reg, func_sc_mem);
37303 abi_reg = sc_reg;
37304 }
37305 }
37306 }
37307 else
37308 {
37309 /* Direct calls use the TOC: for local calls, the callee will
37310 assume the TOC register is set; for non-local calls, the
37311 PLT stub needs the TOC register. */
37312 abi_reg = toc_reg;
37313 func_addr = func_desc;
37314 }
37315
37316 /* Create the call. */
37317 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37318 if (value != NULL_RTX)
37319 call[0] = gen_rtx_SET (value, call[0]);
37320 n_call = 1;
37321
37322 if (toc_load)
37323 call[n_call++] = toc_load;
37324 if (toc_restore)
37325 call[n_call++] = toc_restore;
37326
37327 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37328
37329 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37330 insn = emit_call_insn (insn);
37331
37332 /* Mention all registers defined by the ABI to hold information
37333 as uses in CALL_INSN_FUNCTION_USAGE. */
37334 if (abi_reg)
37335 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37336 }
37337
37338 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37339
37340 void
37341 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37342 {
37343 rtx call[2];
37344 rtx insn;
37345
37346 gcc_assert (INTVAL (cookie) == 0);
37347
37348 /* Create the call. */
37349 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37350 if (value != NULL_RTX)
37351 call[0] = gen_rtx_SET (value, call[0]);
37352
37353 call[1] = simple_return_rtx;
37354
37355 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37356 insn = emit_call_insn (insn);
37357
37358 /* Note use of the TOC register. */
37359 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37360 }
37361
37362 /* Return whether we need to always update the saved TOC pointer when we update
37363 the stack pointer. */
37364
37365 static bool
37366 rs6000_save_toc_in_prologue_p (void)
37367 {
37368 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37369 }
37370
37371 #ifdef HAVE_GAS_HIDDEN
37372 # define USE_HIDDEN_LINKONCE 1
37373 #else
37374 # define USE_HIDDEN_LINKONCE 0
37375 #endif
37376
37377 /* Fills in the label name that should be used for a 476 link stack thunk. */
37378
37379 void
37380 get_ppc476_thunk_name (char name[32])
37381 {
37382 gcc_assert (TARGET_LINK_STACK);
37383
37384 if (USE_HIDDEN_LINKONCE)
37385 sprintf (name, "__ppc476.get_thunk");
37386 else
37387 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37388 }
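/* A sketch of how the thunk is used (assumed calling sequence): a caller
   that needs its own address does

     bl __ppc476.get_thunk
     mflr rN

   and the thunk body is the single blr emitted by rs6000_code_end below,
   which keeps the 476's branch link stack balanced.  */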
37389
37390 /* This function emits the simple thunk routine that is used to preserve
37391 the link stack on the 476 cpu. */
37392
37393 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37394 static void
37395 rs6000_code_end (void)
37396 {
37397 char name[32];
37398 tree decl;
37399
37400 if (!TARGET_LINK_STACK)
37401 return;
37402
37403 get_ppc476_thunk_name (name);
37404
37405 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37406 build_function_type_list (void_type_node, NULL_TREE));
37407 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37408 NULL_TREE, void_type_node);
37409 TREE_PUBLIC (decl) = 1;
37410 TREE_STATIC (decl) = 1;
37411
37412 #if RS6000_WEAK
37413 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37414 {
37415 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37416 targetm.asm_out.unique_section (decl, 0);
37417 switch_to_section (get_named_section (decl, NULL, 0));
37418 DECL_WEAK (decl) = 1;
37419 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37420 targetm.asm_out.globalize_label (asm_out_file, name);
37421 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37422 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37423 }
37424 else
37425 #endif
37426 {
37427 switch_to_section (text_section);
37428 ASM_OUTPUT_LABEL (asm_out_file, name);
37429 }
37430
37431 DECL_INITIAL (decl) = make_node (BLOCK);
37432 current_function_decl = decl;
37433 allocate_struct_function (decl, false);
37434 init_function_start (decl);
37435 first_function_block_is_cold = false;
37436 /* Make sure unwind info is emitted for the thunk if needed. */
37437 final_start_function (emit_barrier (), asm_out_file, 1);
37438
37439 fputs ("\tblr\n", asm_out_file);
37440
37441 final_end_function ();
37442 init_insn_lengths ();
37443 free_after_compilation (cfun);
37444 set_cfun (NULL);
37445 current_function_decl = NULL;
37446 }
37447
37448 /* Add r30 to hard reg set if the prologue sets it up and it is not
37449 pic_offset_table_rtx. */
37450
37451 static void
37452 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37453 {
37454 if (!TARGET_SINGLE_PIC_BASE
37455 && TARGET_TOC
37456 && TARGET_MINIMAL_TOC
37457 && !constant_pool_empty_p ())
37458 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37459 if (cfun->machine->split_stack_argp_used)
37460 add_to_hard_reg_set (&set->set, Pmode, 12);
37461
37462 /* Make sure the hard reg set doesn't include r2, which was possibly added
37463 via PIC_OFFSET_TABLE_REGNUM. */
37464 if (TARGET_TOC)
37465 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
37466 }
37467
37468 \f
37469 /* Helper function for rs6000_split_logical to emit a logical instruction after
37470 spliting the operation to single GPR registers.
37471
37472 DEST is the destination register.
37473 OP1 and OP2 are the input source registers.
37474 CODE is the base operation (AND, IOR, XOR, NOT).
37475 MODE is the machine mode.
37476 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37477 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37478 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37479
37480 static void
37481 rs6000_split_logical_inner (rtx dest,
37482 rtx op1,
37483 rtx op2,
37484 enum rtx_code code,
37485 machine_mode mode,
37486 bool complement_final_p,
37487 bool complement_op1_p,
37488 bool complement_op2_p)
37489 {
37490 rtx bool_rtx;
37491
37492 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37493 if (op2 && GET_CODE (op2) == CONST_INT
37494 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37495 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37496 {
37497 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37498 HOST_WIDE_INT value = INTVAL (op2) & mask;
37499
37500 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37501 if (code == AND)
37502 {
37503 if (value == 0)
37504 {
37505 emit_insn (gen_rtx_SET (dest, const0_rtx));
37506 return;
37507 }
37508
37509 else if (value == mask)
37510 {
37511 if (!rtx_equal_p (dest, op1))
37512 emit_insn (gen_rtx_SET (dest, op1));
37513 return;
37514 }
37515 }
37516
37517 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37518 into separate ORI/ORIS or XORI/XORIS instructions. */
37519 else if (code == IOR || code == XOR)
37520 {
37521 if (value == 0)
37522 {
37523 if (!rtx_equal_p (dest, op1))
37524 emit_insn (gen_rtx_SET (dest, op1));
37525 return;
37526 }
37527 }
37528 }
37529
37530 if (code == AND && mode == SImode
37531 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37532 {
37533 emit_insn (gen_andsi3 (dest, op1, op2));
37534 return;
37535 }
37536
37537 if (complement_op1_p)
37538 op1 = gen_rtx_NOT (mode, op1);
37539
37540 if (complement_op2_p)
37541 op2 = gen_rtx_NOT (mode, op2);
37542
37543 /* For canonical RTL, if only one arm is inverted it is the first. */
37544 if (!complement_op1_p && complement_op2_p)
37545 std::swap (op1, op2);
37546
37547 bool_rtx = ((code == NOT)
37548 ? gen_rtx_NOT (mode, op1)
37549 : gen_rtx_fmt_ee (code, mode, op1, op2));
37550
37551 if (complement_final_p)
37552 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37553
37554 emit_insn (gen_rtx_SET (dest, bool_rtx));
37555 }
37556
37557 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37558 operations are split immediately during RTL generation to allow for more
37559 optimizations of the AND/IOR/XOR.
37560
37561 OPERANDS is an array containing the destination and two input operands.
37562 CODE is the base operation (AND, IOR, XOR, NOT).
37563 MODE is the machine mode.
37564 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37565 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37566 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37569
37570 static void
37571 rs6000_split_logical_di (rtx operands[3],
37572 enum rtx_code code,
37573 bool complement_final_p,
37574 bool complement_op1_p,
37575 bool complement_op2_p)
37576 {
37577 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37578 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37579 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37580 enum hi_lo { hi = 0, lo = 1 };
37581 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37582 size_t i;
37583
37584 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37585 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37586 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37587 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37588
37589 if (code == NOT)
37590 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37591 else
37592 {
37593 if (GET_CODE (operands[2]) != CONST_INT)
37594 {
37595 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37596 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37597 }
37598 else
37599 {
37600 HOST_WIDE_INT value = INTVAL (operands[2]);
37601 HOST_WIDE_INT value_hi_lo[2];
37602
37603 gcc_assert (!complement_final_p);
37604 gcc_assert (!complement_op1_p);
37605 gcc_assert (!complement_op2_p);
37606
37607 value_hi_lo[hi] = value >> 32;
37608 value_hi_lo[lo] = value & lower_32bits;
37609
37610 for (i = 0; i < 2; i++)
37611 {
37612 HOST_WIDE_INT sub_value = value_hi_lo[i];
37613
37614 if (sub_value & sign_bit)
37615 sub_value |= upper_32bits;
37616
37617 op2_hi_lo[i] = GEN_INT (sub_value);
37618
37619 /* If this is an AND instruction, check to see if we need to load
37620 the value in a register. */
37621 if (code == AND && sub_value != -1 && sub_value != 0
37622 && !and_operand (op2_hi_lo[i], SImode))
37623 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37624 }
37625 }
37626 }
37627
37628 for (i = 0; i < 2; i++)
37629 {
37630 /* Split large IOR/XOR operations. */
37631 if ((code == IOR || code == XOR)
37632 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37633 && !complement_final_p
37634 && !complement_op1_p
37635 && !complement_op2_p
37636 && !logical_const_operand (op2_hi_lo[i], SImode))
37637 {
37638 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37639 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37640 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37641 rtx tmp = gen_reg_rtx (SImode);
37642
37643 /* Make sure the constant is sign extended. */
37644 if ((hi_16bits & sign_bit) != 0)
37645 hi_16bits |= upper_32bits;
37646
37647 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37648 code, SImode, false, false, false);
37649
37650 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37651 code, SImode, false, false, false);
37652 }
37653 else
37654 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37655 code, SImode, complement_final_p,
37656 complement_op1_p, complement_op2_p);
37657 }
37658
37659 return;
37660 }
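/* Example of the IOR/XOR splitting above on a 32-bit target (register
   numbers illustrative): xoring a register with 0x12345678, which is not a
   logical_const_operand by itself, becomes two instructions:

     xoris 3,4,0x1234     -- high 16 bits via the temporary
     xori  3,3,0x5678     -- low 16 bits into the destination  */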
37661
37662 /* Split the insns that make up boolean operations operating on multiple GPR
37663 registers. The boolean MD patterns ensure that the inputs either are
37664 exactly the same as the output registers, or there is no overlap.
37665
37666 OPERANDS is an array containing the destination and two input operands.
37667 CODE is the base operation (AND, IOR, XOR, NOT).
37668 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37669 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37670 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37671
37672 void
37673 rs6000_split_logical (rtx operands[3],
37674 enum rtx_code code,
37675 bool complement_final_p,
37676 bool complement_op1_p,
37677 bool complement_op2_p)
37678 {
37679 machine_mode mode = GET_MODE (operands[0]);
37680 machine_mode sub_mode;
37681 rtx op0, op1, op2;
37682 int sub_size, regno0, regno1, nregs, i;
37683
37684 /* If this is DImode, use the specialized version that can run before
37685 register allocation. */
37686 if (mode == DImode && !TARGET_POWERPC64)
37687 {
37688 rs6000_split_logical_di (operands, code, complement_final_p,
37689 complement_op1_p, complement_op2_p);
37690 return;
37691 }
37692
37693 op0 = operands[0];
37694 op1 = operands[1];
37695 op2 = (code == NOT) ? NULL_RTX : operands[2];
37696 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37697 sub_size = GET_MODE_SIZE (sub_mode);
37698 regno0 = REGNO (op0);
37699 regno1 = REGNO (op1);
37700
37701 gcc_assert (reload_completed);
37702 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37703 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37704
37705 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37706 gcc_assert (nregs > 1);
37707
37708 if (op2 && REG_P (op2))
37709 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37710
37711 for (i = 0; i < nregs; i++)
37712 {
37713 int offset = i * sub_size;
37714 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37715 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37716 rtx sub_op2 = ((code == NOT)
37717 ? NULL_RTX
37718 : simplify_subreg (sub_mode, op2, mode, offset));
37719
37720 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37721 complement_final_p, complement_op1_p,
37722 complement_op2_p);
37723 }
37724
37725 return;
37726 }
37727
37728 \f
37729 /* Return true if the peephole2 can combine an addis instruction and a load
37730 with an offset into a single pair that can be fused together on a
37731 power8. */
37732
37733 bool
37734 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37735 rtx addis_value, /* addis value. */
37736 rtx target, /* target register that is loaded. */
37737 rtx mem) /* bottom part of the memory addr. */
37738 {
37739 rtx addr;
37740 rtx base_reg;
37741
37742 /* Validate arguments. */
37743 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37744 return false;
37745
37746 if (!base_reg_operand (target, GET_MODE (target)))
37747 return false;
37748
37749 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37750 return false;
37751
37752 /* Allow sign/zero extension. */
37753 if (GET_CODE (mem) == ZERO_EXTEND
37754 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37755 mem = XEXP (mem, 0);
37756
37757 if (!MEM_P (mem))
37758 return false;
37759
37760 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37761 return false;
37762
37763 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37764 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37765 return false;
37766
37767 /* Validate that the register used to load the high value is either the
37768 register being loaded, or we can safely replace its use.
37769
37770 This function is only called from the peephole2 pass and we assume that
37771 there are 2 instructions in the peephole (addis and load), so we check
37772 that the target register is not used in the memory address and that the
37773 register holding the addis result is dead after the peephole. */
37774 if (REGNO (addis_reg) != REGNO (target))
37775 {
37776 if (reg_mentioned_p (target, mem))
37777 return false;
37778
37779 if (!peep2_reg_dead_p (2, addis_reg))
37780 return false;
37781
37782 /* If the target register being loaded is the stack pointer, we must
37783 avoid loading any other value into it, even temporarily. */
37784 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37785 return false;
37786 }
37787
37788 base_reg = XEXP (addr, 0);
37789 return REGNO (addis_reg) == REGNO (base_reg);
37790 }
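/* A typical pair that passes the checks above and gets fused on power8
   (registers and symbol illustrative):

     addis 9,2,.LC0@toc@ha
     ld    9,.LC0@toc@l(9)

   i.e. the addis destination, the load's base register, and the loaded
   register all end up being the same GPR.  */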
37791
37792 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37793 sequence. We adjust the addis register to use the target register. If the
37794 load sign extends, we change it to do a zero-extending load followed by an
37795 explicit sign extension, since the fusion only covers zero-extending
37796 loads.
37797
37798 The operands are:
37799 operands[0] register set with addis (to be replaced with target)
37800 operands[1] value set via addis
37801 operands[2] target register being loaded
37802 operands[3] D-form memory reference using operands[0]. */
37803
37804 void
37805 expand_fusion_gpr_load (rtx *operands)
37806 {
37807 rtx addis_value = operands[1];
37808 rtx target = operands[2];
37809 rtx orig_mem = operands[3];
37810 rtx new_addr, new_mem, orig_addr, offset;
37811 enum rtx_code plus_or_lo_sum;
37812 machine_mode target_mode = GET_MODE (target);
37813 machine_mode extend_mode = target_mode;
37814 machine_mode ptr_mode = Pmode;
37815 enum rtx_code extend = UNKNOWN;
37816
37817 if (GET_CODE (orig_mem) == ZERO_EXTEND
37818 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37819 {
37820 extend = GET_CODE (orig_mem);
37821 orig_mem = XEXP (orig_mem, 0);
37822 target_mode = GET_MODE (orig_mem);
37823 }
37824
37825 gcc_assert (MEM_P (orig_mem));
37826
37827 orig_addr = XEXP (orig_mem, 0);
37828 plus_or_lo_sum = GET_CODE (orig_addr);
37829 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
37830
37831 offset = XEXP (orig_addr, 1);
37832 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
37833 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
37834
37835 if (extend != UNKNOWN)
37836 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
37837
37838 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
37839 UNSPEC_FUSION_GPR);
37840 emit_insn (gen_rtx_SET (target, new_mem));
37841
37842 if (extend == SIGN_EXTEND)
37843 {
37844 int sub_off = ((BYTES_BIG_ENDIAN)
37845 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
37846 : 0);
37847 rtx sign_reg
37848 = simplify_subreg (target_mode, target, extend_mode, sub_off);
37849
37850 emit_insn (gen_rtx_SET (target,
37851 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
37852 }
37853
37854 return;
37855 }
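/* For the sign-extending case the expansion above therefore yields, e.g.
   for an SImode load extended to DImode (a sketch, names illustrative):

     addis 9,2,sym@toc@ha
     lwz   9,sym@toc@l(9)     -- fused zero-extending load
     extsw 9,9                -- separate explicit sign extension  */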
37856
37857 /* Emit the addis instruction that will be part of a fused instruction
37858 sequence. */
37859
37860 void
37861 emit_fusion_addis (rtx target, rtx addis_value)
37862 {
37863 rtx fuse_ops[10];
37864 const char *addis_str = NULL;
37865
37866 /* Emit the addis instruction. */
37867 fuse_ops[0] = target;
37868 if (satisfies_constraint_L (addis_value))
37869 {
37870 fuse_ops[1] = addis_value;
37871 addis_str = "lis %0,%v1";
37872 }
37873
37874 else if (GET_CODE (addis_value) == PLUS)
37875 {
37876 rtx op0 = XEXP (addis_value, 0);
37877 rtx op1 = XEXP (addis_value, 1);
37878
37879 if (REG_P (op0) && CONST_INT_P (op1)
37880 && satisfies_constraint_L (op1))
37881 {
37882 fuse_ops[1] = op0;
37883 fuse_ops[2] = op1;
37884 addis_str = "addis %0,%1,%v2";
37885 }
37886 }
37887
37888 else if (GET_CODE (addis_value) == HIGH)
37889 {
37890 rtx value = XEXP (addis_value, 0);
37891 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
37892 {
37893 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
37894 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
37895 if (TARGET_ELF)
37896 addis_str = "addis %0,%2,%1@toc@ha";
37897
37898 else if (TARGET_XCOFF)
37899 addis_str = "addis %0,%1@u(%2)";
37900
37901 else
37902 gcc_unreachable ();
37903 }
37904
37905 else if (GET_CODE (value) == PLUS)
37906 {
37907 rtx op0 = XEXP (value, 0);
37908 rtx op1 = XEXP (value, 1);
37909
37910 if (GET_CODE (op0) == UNSPEC
37911 && XINT (op0, 1) == UNSPEC_TOCREL
37912 && CONST_INT_P (op1))
37913 {
37914 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
37915 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
37916 fuse_ops[3] = op1;
37917 if (TARGET_ELF)
37918 addis_str = "addis %0,%2,%1+%3@toc@ha";
37919
37920 else if (TARGET_XCOFF)
37921 addis_str = "addis %0,%1+%3@u(%2)";
37922
37923 else
37924 gcc_unreachable ();
37925 }
37926 }
37927
37928 else if (satisfies_constraint_L (value))
37929 {
37930 fuse_ops[1] = value;
37931 addis_str = "lis %0,%v1";
37932 }
37933
37934 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
37935 {
37936 fuse_ops[1] = value;
37937 addis_str = "lis %0,%1@ha";
37938 }
37939 }
37940
37941 if (!addis_str)
37942 fatal_insn ("Could not generate addis value for fusion", addis_value);
37943
37944 output_asm_insn (addis_str, fuse_ops);
37945 }
37946
37947 /* Emit a D-form load or store instruction that is the second instruction
37948 of a fusion sequence. */
37949
37950 void
37951 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
37952 const char *insn_str)
37953 {
37954 rtx fuse_ops[10];
37955 char insn_template[80];
37956
37957 fuse_ops[0] = load_store_reg;
37958 fuse_ops[1] = addis_reg;
37959
37960 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
37961 {
37962 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
37963 fuse_ops[2] = offset;
37964 output_asm_insn (insn_template, fuse_ops);
37965 }
37966
37967 else if (GET_CODE (offset) == UNSPEC
37968 && XINT (offset, 1) == UNSPEC_TOCREL)
37969 {
37970 if (TARGET_ELF)
37971 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
37972
37973 else if (TARGET_XCOFF)
37974 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
37975
37976 else
37977 gcc_unreachable ();
37978
37979 fuse_ops[2] = XVECEXP (offset, 0, 0);
37980 output_asm_insn (insn_template, fuse_ops);
37981 }
37982
37983 else if (GET_CODE (offset) == PLUS
37984 && GET_CODE (XEXP (offset, 0)) == UNSPEC
37985 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
37986 && CONST_INT_P (XEXP (offset, 1)))
37987 {
37988 rtx tocrel_unspec = XEXP (offset, 0);
37989 if (TARGET_ELF)
37990 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
37991
37992 else if (TARGET_XCOFF)
37993 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
37994
37995 else
37996 gcc_unreachable ();
37997
37998 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
37999 fuse_ops[3] = XEXP (offset, 1);
38000 output_asm_insn (insn_template, fuse_ops);
38001 }
38002
38003 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38004 {
38005 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38006
38007 fuse_ops[2] = offset;
38008 output_asm_insn (insn_template, fuse_ops);
38009 }
38010
38011 else
38012 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38013
38014 return;
38015 }
38016
38017 /* Wrap a TOC address that can be fused to indicate that special fusion
38018 processing is needed. */
38019
38020 rtx
38021 fusion_wrap_memory_address (rtx old_mem)
38022 {
38023 rtx old_addr = XEXP (old_mem, 0);
38024 rtvec v = gen_rtvec (1, old_addr);
38025 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
38026 return replace_equiv_address_nv (old_mem, new_addr, false);
38027 }
38028
38029 /* Given an address, convert it into the addis and load offset parts. Addresses
38030 created during the peephole2 process look like:
38031 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38032 (unspec [(...)] UNSPEC_TOCREL))
38033
38034 Addresses created via toc fusion look like:
38035 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)) */
38036
38037 static void
38038 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38039 {
38040 rtx hi, lo;
38041
38042 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
38043 {
38044 lo = XVECEXP (addr, 0, 0);
38045 hi = gen_rtx_HIGH (Pmode, lo);
38046 }
38047 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38048 {
38049 hi = XEXP (addr, 0);
38050 lo = XEXP (addr, 1);
38051 }
38052 else
38053 gcc_unreachable ();
38054
38055 *p_hi = hi;
38056 *p_lo = lo;
38057 }
38058
38059 /* Return a string to fuse an addis instruction with a gpr load into the same
38060 register that the addis instruction set. The address that is used
38061 is the logical address that was formed during peephole2:
38062 (lo_sum (high) (low-part))
38063
38064 Or the address is the TOC address that is wrapped before register allocation:
38065 (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)
38066
38067 The code is complicated, so we call output_asm_insn directly, and just
38068 return "". */
38069
38070 const char *
38071 emit_fusion_gpr_load (rtx target, rtx mem)
38072 {
38073 rtx addis_value;
38074 rtx addr;
38075 rtx load_offset;
38076 const char *load_str = NULL;
38077 machine_mode mode;
38078
38079 if (GET_CODE (mem) == ZERO_EXTEND)
38080 mem = XEXP (mem, 0);
38081
38082 gcc_assert (REG_P (target) && MEM_P (mem));
38083
38084 addr = XEXP (mem, 0);
38085 fusion_split_address (addr, &addis_value, &load_offset);
38086
38087 /* Now emit the load instruction to the same register. */
38088 mode = GET_MODE (mem);
38089 switch (mode)
38090 {
38091 case E_QImode:
38092 load_str = "lbz";
38093 break;
38094
38095 case E_HImode:
38096 load_str = "lhz";
38097 break;
38098
38099 case E_SImode:
38100 case E_SFmode:
38101 load_str = "lwz";
38102 break;
38103
38104 case E_DImode:
38105 case E_DFmode:
38106 gcc_assert (TARGET_POWERPC64);
38107 load_str = "ld";
38108 break;
38109
38110 default:
38111 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38112 }
38113
38114 /* Emit the addis instruction. */
38115 emit_fusion_addis (target, addis_value);
38116
38117 /* Emit the D-form load instruction. */
38118 emit_fusion_load_store (target, target, load_offset, load_str);
38119
38120 return "";
38121 }
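
/* A hedged example of the code this can emit (hypothetical symbol "sym",
   GPR 9 as both the addis target and the load target, medium code model
   on 64-bit ELF):

	addis 9,2,sym@toc@ha
	ld 9,sym@toc@l(9)

   The load overwrites the addis result, which is what lets the pair fuse
   on power8.  */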
38122 \f
38123
38124 /* Return true if peephole2 can fuse a load/store with the addis instruction
38125 that computes the upper part of its address. This form of fusion was added
38126 with the ISA 3.0 (power9) hardware. */
38127
38128 bool
38129 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38130 rtx addis_value, /* addis value. */
38131 rtx dest, /* destination (memory or register). */
38132 rtx src) /* source (register or memory). */
38133 {
38134 rtx addr, mem, offset;
38135 machine_mode mode = GET_MODE (src);
38136
38137 /* Validate arguments. */
38138 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38139 return false;
38140
38141 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38142 return false;
38143
38144 /* Ignore extend operations that are part of the load. */
38145 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38146 src = XEXP (src, 0);
38147
38148 /* Test for memory<-register or register<-memory. */
38149 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38150 {
38151 if (!MEM_P (dest))
38152 return false;
38153
38154 mem = dest;
38155 }
38156
38157 else if (MEM_P (src))
38158 {
38159 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38160 return false;
38161
38162 mem = src;
38163 }
38164
38165 else
38166 return false;
38167
38168 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38169 if (GET_CODE (addr) == PLUS)
38170 {
38171 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38172 return false;
38173
38174 return satisfies_constraint_I (XEXP (addr, 1));
38175 }
38176
38177 else if (GET_CODE (addr) == LO_SUM)
38178 {
38179 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38180 return false;
38181
38182 offset = XEXP (addr, 1);
38183 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38184 return small_toc_ref (offset, GET_MODE (offset));
38185
38186 else if (TARGET_ELF && !TARGET_POWERPC64)
38187 return CONSTANT_P (offset);
38188 }
38189
38190 return false;
38191 }
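
/* A sketch of operands that would satisfy this predicate (all register
   numbers and constants are hypothetical):

	addis_reg   = (reg:DI 9)
	addis_value = (const_int 0x12340000)	-- valid addis immediate
	dest	    = (reg:DI 10)
	src	    = (mem:DI (plus:DI (reg:DI 9) (const_int 8)))

   The PLUS base must match ADDIS_REG and the offset must satisfy
   constraint "I" (a signed 16-bit immediate).  */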
38192
38193 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38194 load sequence.
38195
38196 The operands are:
38197 operands[0] register set with addis
38198 operands[1] value set via addis
38199 operands[2] target register being loaded
38200 operands[3] D-form memory reference using operands[0].
38201
38202 This is similar to the fusion introduced with power8, except it applies to
38203 both loads and stores and does not require the result register to be the
38204 same as the base register. At the moment, we only do this if the register
38205 set with addis is dead. */
38206
38207 void
38208 expand_fusion_p9_load (rtx *operands)
38209 {
38210 rtx tmp_reg = operands[0];
38211 rtx addis_value = operands[1];
38212 rtx target = operands[2];
38213 rtx orig_mem = operands[3];
38214 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38215 enum rtx_code plus_or_lo_sum;
38216 machine_mode target_mode = GET_MODE (target);
38217 machine_mode extend_mode = target_mode;
38218 machine_mode ptr_mode = Pmode;
38219 enum rtx_code extend = UNKNOWN;
38220
38221 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38222 {
38223 extend = GET_CODE (orig_mem);
38224 orig_mem = XEXP (orig_mem, 0);
38225 target_mode = GET_MODE (orig_mem);
38226 }
38227
38228 gcc_assert (MEM_P (orig_mem));
38229
38230 orig_addr = XEXP (orig_mem, 0);
38231 plus_or_lo_sum = GET_CODE (orig_addr);
38232 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38233
38234 offset = XEXP (orig_addr, 1);
38235 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38236 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38237
38238 if (extend != UNKNOWN)
38239 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38240
38241 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38242 UNSPEC_FUSION_P9);
38243
38244 set = gen_rtx_SET (target, new_mem);
38245 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38246 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38247 emit_insn (insn);
38248
38249 return;
38250 }
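
/* Sketch of the parallel this emits (names refer to the locals above):

	(parallel [(set target
			(unspec [(mem (plus_or_lo_sum addis_value offset))]
				UNSPEC_FUSION_P9))
		   (clobber tmp_reg)])

   Any FLOAT_EXTEND/ZERO_EXTEND wraps the inner mem, inside the unspec.  */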
38251
38252 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38253 store sequence.
38254
38255 The operands are:
38256 operands[0] register set with addis
38257 operands[1] value set via addis
38258 operands[2] target D-form memory being stored to
38259 operands[3] register being stored
38260
38261 This is similar to the fusion introduced with power8, except it applies to
38262 both loads and stores and does not require the result register to be the
38263 same as the base register. At the moment, we only do this if the register
38264 set with addis is dead. */
38265
38266 void
38267 expand_fusion_p9_store (rtx *operands)
38268 {
38269 rtx tmp_reg = operands[0];
38270 rtx addis_value = operands[1];
38271 rtx orig_mem = operands[2];
38272 rtx src = operands[3];
38273 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38274 enum rtx_code plus_or_lo_sum;
38275 machine_mode target_mode = GET_MODE (orig_mem);
38276 machine_mode ptr_mode = Pmode;
38277
38278 gcc_assert (MEM_P (orig_mem));
38279
38280 orig_addr = XEXP (orig_mem, 0);
38281 plus_or_lo_sum = GET_CODE (orig_addr);
38282 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38283
38284 offset = XEXP (orig_addr, 1);
38285 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38286 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38287
38288 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38289 UNSPEC_FUSION_P9);
38290
38291 set = gen_rtx_SET (new_mem, new_src);
38292 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38293 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38294 emit_insn (insn);
38295
38296 return;
38297 }
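
/* Sketch of the parallel this emits (names refer to the locals above):

	(parallel [(set (mem (plus_or_lo_sum addis_value offset))
			(unspec [src] UNSPEC_FUSION_P9))
		   (clobber tmp_reg)])  */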
38298
38299 /* Return a string to fuse an addis instruction with a load using extended
38300 fusion. The address that is used is the logical address that was formed
38301 during peephole2: (lo_sum (high) (low-part))
38302
38303 The code is complicated, so we call output_asm_insn directly, and just
38304 return "". */
38305
38306 const char *
38307 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38308 {
38309 machine_mode mode = GET_MODE (reg);
38310 rtx hi;
38311 rtx lo;
38312 rtx addr;
38313 const char *load_string;
38314 int r;
38315
38316 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38317 {
38318 mem = XEXP (mem, 0);
38319 mode = GET_MODE (mem);
38320 }
38321
38322 if (GET_CODE (reg) == SUBREG)
38323 {
38324 gcc_assert (SUBREG_BYTE (reg) == 0);
38325 reg = SUBREG_REG (reg);
38326 }
38327
38328 if (!REG_P (reg))
38329 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38330
38331 r = REGNO (reg);
38332 if (FP_REGNO_P (r))
38333 {
38334 if (mode == SFmode)
38335 load_string = "lfs";
38336 else if (mode == DFmode || mode == DImode)
38337 load_string = "lfd";
38338 else
38339 gcc_unreachable ();
38340 }
38341 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38342 {
38343 if (mode == SFmode)
38344 load_string = "lxssp";
38345 else if (mode == DFmode || mode == DImode)
38346 load_string = "lxsd";
38347 else
38348 gcc_unreachable ();
38349 }
38350 else if (INT_REGNO_P (r))
38351 {
38352 switch (mode)
38353 {
38354 case E_QImode:
38355 load_string = "lbz";
38356 break;
38357 case E_HImode:
38358 load_string = "lhz";
38359 break;
38360 case E_SImode:
38361 case E_SFmode:
38362 load_string = "lwz";
38363 break;
38364 case E_DImode:
38365 case E_DFmode:
38366 if (!TARGET_POWERPC64)
38367 gcc_unreachable ();
38368 load_string = "ld";
38369 break;
38370 default:
38371 gcc_unreachable ();
38372 }
38373 }
38374 else
38375 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38376
38377 if (!MEM_P (mem))
38378 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38379
38380 addr = XEXP (mem, 0);
38381 fusion_split_address (addr, &hi, &lo);
38382
38383 /* Emit the addis instruction. */
38384 emit_fusion_addis (tmp_reg, hi);
38385
38386 /* Emit the D-form load instruction. */
38387 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38388
38389 return "";
38390 }
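
/* A hedged example (hypothetical symbol and register numbers): an SFmode
   load into FPR 1 through temporary GPR 9 might come out as:

	addis 9,2,sym@toc@ha
	lfs 1,sym@toc@l(9)

   Unlike the power8 GPR fusion above, the loaded register need not be the
   addis target.  */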
38391
38392 /* Return a string to fuse an addis instruction with a store using extended
38393 fusion. The address that is used is the logical address that was formed
38394 during peephole2: (lo_sum (high) (low-part))
38395
38396 The code is complicated, so we call output_asm_insn directly, and just
38397 return "". */
38398
38399 const char *
38400 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38401 {
38402 machine_mode mode = GET_MODE (reg);
38403 rtx hi;
38404 rtx lo;
38405 rtx addr;
38406 const char *store_string;
38407 int r;
38408
38409 if (GET_CODE (reg) == SUBREG)
38410 {
38411 gcc_assert (SUBREG_BYTE (reg) == 0);
38412 reg = SUBREG_REG (reg);
38413 }
38414
38415 if (!REG_P (reg))
38416 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38417
38418 r = REGNO (reg);
38419 if (FP_REGNO_P (r))
38420 {
38421 if (mode == SFmode)
38422 store_string = "stfs";
38423 else if (mode == DFmode)
38424 store_string = "stfd";
38425 else
38426 gcc_unreachable ();
38427 }
38428 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38429 {
38430 if (mode == SFmode)
38431 store_string = "stxssp";
38432 else if (mode == DFmode || mode == DImode)
38433 store_string = "stxsd";
38434 else
38435 gcc_unreachable ();
38436 }
38437 else if (INT_REGNO_P (r))
38438 {
38439 switch (mode)
38440 {
38441 case E_QImode:
38442 store_string = "stb";
38443 break;
38444 case E_HImode:
38445 store_string = "sth";
38446 break;
38447 case E_SImode:
38448 case E_SFmode:
38449 store_string = "stw";
38450 break;
38451 case E_DImode:
38452 case E_DFmode:
38453 if (!TARGET_POWERPC64)
38454 gcc_unreachable ();
38455 store_string = "std";
38456 break;
38457 default:
38458 gcc_unreachable ();
38459 }
38460 }
38461 else
38462 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38463
38464 if (!MEM_P (mem))
38465 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38466
38467 addr = XEXP (mem, 0);
38468 fusion_split_address (addr, &hi, &lo);
38469
38470 /* Emit the addis instruction. */
38471 emit_fusion_addis (tmp_reg, hi);
38472
38473 /* Emit the D-form store instruction. */
38474 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38475
38476 return "";
38477 }
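
/* The store side is symmetric; a hedged example (hypothetical operands):

	addis 9,2,sym@toc@ha
	stw 10,sym@toc@l(9)  */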
38478
38479 #ifdef RS6000_GLIBC_ATOMIC_FENV
38480 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38481 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38482 #endif
38483
38484 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38485
38486 static void
38487 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38488 {
38489 if (!TARGET_HARD_FLOAT)
38490 {
38491 #ifdef RS6000_GLIBC_ATOMIC_FENV
38492 if (atomic_hold_decl == NULL_TREE)
38493 {
38494 atomic_hold_decl
38495 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38496 get_identifier ("__atomic_feholdexcept"),
38497 build_function_type_list (void_type_node,
38498 double_ptr_type_node,
38499 NULL_TREE));
38500 TREE_PUBLIC (atomic_hold_decl) = 1;
38501 DECL_EXTERNAL (atomic_hold_decl) = 1;
38502 }
38503
38504 if (atomic_clear_decl == NULL_TREE)
38505 {
38506 atomic_clear_decl
38507 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38508 get_identifier ("__atomic_feclearexcept"),
38509 build_function_type_list (void_type_node,
38510 NULL_TREE));
38511 TREE_PUBLIC (atomic_clear_decl) = 1;
38512 DECL_EXTERNAL (atomic_clear_decl) = 1;
38513 }
38514
38515 tree const_double = build_qualified_type (double_type_node,
38516 TYPE_QUAL_CONST);
38517 tree const_double_ptr = build_pointer_type (const_double);
38518 if (atomic_update_decl == NULL_TREE)
38519 {
38520 atomic_update_decl
38521 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38522 get_identifier ("__atomic_feupdateenv"),
38523 build_function_type_list (void_type_node,
38524 const_double_ptr,
38525 NULL_TREE));
38526 TREE_PUBLIC (atomic_update_decl) = 1;
38527 DECL_EXTERNAL (atomic_update_decl) = 1;
38528 }
38529
38530 tree fenv_var = create_tmp_var_raw (double_type_node);
38531 TREE_ADDRESSABLE (fenv_var) = 1;
38532 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38533
38534 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38535 *clear = build_call_expr (atomic_clear_decl, 0);
38536 *update = build_call_expr (atomic_update_decl, 1,
38537 fold_convert (const_double_ptr, fenv_addr));
38538 #endif
38539 return;
38540 }
38541
38542 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38543 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38544 tree call_mffs = build_call_expr (mffs, 0);
38545
38546 /* Generates the equivalent of feholdexcept (&fenv_var)
38547
38548 fenv_var = __builtin_mffs ();
38549 double fenv_hold;
38550 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
38551 __builtin_mtfsf (0xff, fenv_hold); */
38552
38553 /* Mask to clear everything except for the rounding modes and non-IEEE
38554 arithmetic flag. */
38555 const unsigned HOST_WIDE_INT hold_exception_mask =
38556 HOST_WIDE_INT_C (0xffffffff00000007);
38557
38558 tree fenv_var = create_tmp_var_raw (double_type_node);
38559
38560 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38561
38562 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38563 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38564 build_int_cst (uint64_type_node,
38565 hold_exception_mask));
38566
38567 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38568 fenv_llu_and);
38569
38570 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38571 build_int_cst (unsigned_type_node, 0xff),
38572 fenv_hold_mtfsf);
38573
38574 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38575
38576 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38577
38578 double fenv_clear = __builtin_mffs ();
38579 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38580 __builtin_mtfsf (0xff, fenv_clear); */
38581
38582 /* Mask to clear the entire lower word of the FPSCR image, including the
38583 exception bits and the rounding modes. */
38584 const unsigned HOST_WIDE_INT clear_exception_mask =
38585 HOST_WIDE_INT_C (0xffffffff00000000);
38586
38587 tree fenv_clear = create_tmp_var_raw (double_type_node);
38588
38589 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38590
38591 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38592 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38593 fenv_clean_llu,
38594 build_int_cst (uint64_type_node,
38595 clear_exception_mask));
38596
38597 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38598 fenv_clear_llu_and);
38599
38600 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38601 build_int_cst (unsigned_type_node, 0xff),
38602 fenv_clear_mtfsf);
38603
38604 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38605
38606 /* Generates the equivalent of feupdateenv (&fenv_var)
38607
38608 double old_fenv = __builtin_mffs ();
38609 double fenv_update;
38610 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38611 (*(uint64_t*)&fenv_var & 0x1ff80fff);
38612 __builtin_mtfsf (0xff, fenv_update); */
38613
38614 const unsigned HOST_WIDE_INT update_exception_mask =
38615 HOST_WIDE_INT_C (0xffffffff1fffff00);
38616 const unsigned HOST_WIDE_INT new_exception_mask =
38617 HOST_WIDE_INT_C (0x1ff80fff);
38618
38619 tree old_fenv = create_tmp_var_raw (double_type_node);
38620 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38621
38622 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38623 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38624 build_int_cst (uint64_type_node,
38625 update_exception_mask));
38626
38627 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38628 build_int_cst (uint64_type_node,
38629 new_exception_mask));
38630
38631 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38632 old_llu_and, new_llu_and);
38633
38634 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38635 new_llu_mask);
38636
38637 tree update_mtfsf = build_call_expr (mtfsf, 2,
38638 build_int_cst (unsigned_type_node, 0xff),
38639 fenv_update_mtfsf);
38640
38641 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38642 }
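
/* Usage sketch (per the TARGET_ATOMIC_ASSIGN_EXPAND_FENV contract): the
   middle end wraps an atomic floating-point compound assignment roughly as

	HOLD;				-- save env, clear exceptions
     retry:
	... compute; compare-and-exchange ...
	if the exchange failed:  CLEAR;  goto retry;
	UPDATE;				-- restore env, raise new exceptions

   using the three expression sequences built above.  */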
38643
38644 void
38645 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
38646 {
38647 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38648
38649 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38650 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38651
38652 /* The destination layout after the vmrgew instruction is:
38653 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38654 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38655 vmrgew instruction will be correct. */
38656 if (BYTES_BIG_ENDIAN)
38657 {
38658 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
38659 GEN_INT (0)));
38660 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
38661 GEN_INT (3)));
38662 }
38663 else
38664 {
38665 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
38666 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
38667 }
38668
38669 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38670 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38671
38672 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
38673 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
38674
38675 if (BYTES_BIG_ENDIAN)
38676 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38677 else
38678 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38679 }
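
/* Element-flow sketch (big-endian case): with src1 = {a0, a1} and
   src2 = {b0, b1} as V2DF inputs, dst receives the V4SF vector

	{ (float) a0, (float) a1, (float) b0, (float) b1 }

   which matches the documented behavior of the vec_float2 built-in.  */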
38680
38681 void
38682 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
38683 {
38684 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38685
38686 rtx_tmp0 = gen_reg_rtx (V2DImode);
38687 rtx_tmp1 = gen_reg_rtx (V2DImode);
38688
38689 /* The destination layout after the vmrgew instruction is:
38690 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38691 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38692 vmrgew instruction will be correct. */
38693 if (BYTES_BIG_ENDIAN)
38694 {
38695 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
38696 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
38697 }
38698 else
38699 {
38700 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
38701 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
38702 }
38703
38704 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38705 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38706
38707 if (signed_convert)
38708 {
38709 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
38710 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
38711 }
38712 else
38713 {
38714 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
38715 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
38716 }
38717
38718 if (BYTES_BIG_ENDIAN)
38719 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38720 else
38721 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38722 }
38723
38724 void
38725 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
38726 rtx src2)
38727 {
38728 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38729
38730 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38731 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38732
38733 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
38734 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
38735
38736 rtx_tmp2 = gen_reg_rtx (V4SImode);
38737 rtx_tmp3 = gen_reg_rtx (V4SImode);
38738
38739 if (signed_convert)
38740 {
38741 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
38742 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
38743 }
38744 else
38745 {
38746 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
38747 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
38748 }
38749
38750 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
38751 }
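
/* Element-flow sketch: with src1 = {a0, a1} and src2 = {b0, b1} as V2DF
   inputs, dst is intended to receive the V4SI vector

	{ (int) a0, (int) a1, (int) b0, (int) b1 }

   with signed or unsigned conversion selected by SIGNED_CONVERT, per the
   vec_signed2/vec_unsigned2 built-ins this helper backs.  */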
38752
38753 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
38754
38755 static bool
38756 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
38757 optimization_type opt_type)
38758 {
38759 switch (op)
38760 {
38761 case rsqrt_optab:
38762 return (opt_type == OPTIMIZE_FOR_SPEED
38763 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
38764
38765 default:
38766 return true;
38767 }
38768 }
38769
38770 /* Implement TARGET_CONSTANT_ALIGNMENT. */
38771
38772 static HOST_WIDE_INT
38773 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
38774 {
38775 if (TREE_CODE (exp) == STRING_CST
38776 && (STRICT_ALIGNMENT || !optimize_size))
38777 return MAX (align, BITS_PER_WORD);
38778 return align;
38779 }
38780
38781 /* Implement TARGET_STARTING_FRAME_OFFSET. */
38782
38783 static HOST_WIDE_INT
38784 rs6000_starting_frame_offset (void)
38785 {
38786 if (FRAME_GROWS_DOWNWARD)
38787 return 0;
38788 return RS6000_STARTING_FRAME_OFFSET;
38789 }
38790 \f
38791
38792 /* Create an alias for a mangled name where we have changed the mangling (in
38793 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
38794 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
38795
38796 #if TARGET_ELF && RS6000_WEAK
38797 static void
38798 rs6000_globalize_decl_name (FILE * stream, tree decl)
38799 {
38800 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
38801
38802 targetm.asm_out.globalize_label (stream, name);
38803
38804 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
38805 {
38806 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
38807 const char *old_name;
38808
38809 ieee128_mangling_gcc_8_1 = true;
38810 lang_hooks.set_decl_assembler_name (decl);
38811 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
38812 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
38813 ieee128_mangling_gcc_8_1 = false;
38814
38815 if (strcmp (name, old_name) != 0)
38816 {
38817 fprintf (stream, "\t.weak %s\n", old_name);
38818 fprintf (stream, "\t.set %s,%s\n", old_name, name);
38819 }
38820 }
38821 }
38822 #endif
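
/* A hedged example: for a hypothetical C++ function "void f (__ieee128)",
   the alias emitted for the old GCC 8.1 mangling might look like:

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128  */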
38823
38824 \f
38825 struct gcc_target targetm = TARGET_INITIALIZER;
38826
38827 #include "gt-rs6000.h"