/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;       /* stack info won't change from here on */
  int first_gp_reg_save;      /* first callee saved GP register used */
  int first_fp_reg_save;      /* first callee saved FP register used */
  int first_altivec_reg_save; /* first callee saved AltiVec register used */
  int lr_save_p;              /* true if the link reg needs to be saved */
  int cr_save_p;              /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;   /* mask of vec registers to save */
  int push_p;                 /* true if we need to allocate stack space */
  int calls_p;                /* true if the function makes any calls */
  int world_save_p;           /* true if we're saving *everything*:
                                 r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;        /* which ABI to use */
  int gp_save_offset;         /* offset to save GP regs from initial SP */
  int fp_save_offset;         /* offset to save FP regs from initial SP */
  int altivec_save_offset;    /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;         /* offset to save LR from initial SP */
  int cr_save_offset;         /* offset to save CR from initial SP */
  int vrsave_save_offset;     /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;    /* offset to save the varargs registers */
  int ehrd_offset;            /* offset to EH return data */
  int ehcr_offset;            /* offset to EH CR field data */
  int reg_size;               /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;    /* variable save area size */
  int parm_size;              /* outgoing parameter size */
  int save_size;              /* save area size */
  int fixed_size;             /* fixed size of stack frame */
  int gp_size;                /* size of saved GP registers */
  int fp_size;                /* size of saved FP registers */
  int altivec_size;           /* size of saved AltiVec registers */
  int cr_size;                /* size to hold CR if not in fixed area */
  int vrsave_size;            /* size to hold VRSAVE */
  int altivec_padding_size;   /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;   /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to, so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV = 0x001,     /* Use divide estimate */
  RECIP_DF_DIV = 0x002,
  RECIP_V4SF_DIV = 0x004,
  RECIP_V2DF_DIV = 0x008,

  RECIP_SF_RSQRT = 0x010,   /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE = 0,
  RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
               | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
               | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;  /* option name */
  unsigned int mask;   /* mask bits to set */
} recip_options[] = {
  { "all", RECIP_ALL },
  { "none", RECIP_NONE },
  { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
            | RECIP_V2DF_DIV) },
  { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
              | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
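
/* An illustrative example (not from the original source): a command line of
   -mrecip=divf,rsqrtd would look up the "divf" and "rsqrtd" entries above
   and OR their masks together, enabling
   RECIP_SF_DIV | RECIP_V4SF_DIV | RECIP_DF_RSQRT | RECIP_V2DF_RSQRT.  */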

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9", PPC_PLATFORM_POWER9 },
  { "power8", PPC_PLATFORM_POWER8 },
  { "power7", PPC_PLATFORM_POWER7 },
  { "power6x", PPC_PLATFORM_POWER6X },
  { "power6", PPC_PLATFORM_POWER6 },
  { "power5+", PPC_PLATFORM_POWER5_PLUS },
  { "power5", PPC_PLATFORM_POWER5 },
  { "ppc970", PPC_PLATFORM_PPC970 },
  { "power4", PPC_PLATFORM_POWER4 },
  { "ppca2", PPC_PLATFORM_PPCA2 },
  { "ppc476", PPC_PLATFORM_PPC476 },
  { "ppc464", PPC_PLATFORM_PPC464 },
  { "ppc440", PPC_PLATFORM_PPC440 },
  { "ppc405", PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
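
/* A sketch of the user-level usage this table supports: code such as

     if (__builtin_cpu_is ("power9"))
       ...

   is expanded into a compare of the "power9" value above against the
   AT_PLATFORM value that a new enough LIBC caches in the TCB.  */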

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
  { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
  { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
  { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
  { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
  { "booke", PPC_FEATURE_BOOKE, 0 },
  { "cellbe", PPC_FEATURE_CELL_BE, 0 },
  { "dfp", PPC_FEATURE_HAS_DFP, 0 },
  { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
  { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
  { "fpu", PPC_FEATURE_HAS_FPU, 0 },
  { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
  { "mmu", PPC_FEATURE_HAS_MMU, 0 },
  { "notb", PPC_FEATURE_NO_TB, 0 },
  { "pa6t", PPC_FEATURE_PA6T, 0 },
  { "power4", PPC_FEATURE_POWER4, 0 },
  { "power5", PPC_FEATURE_POWER5, 0 },
  { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
  { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
  { "ppc32", PPC_FEATURE_32, 0 },
  { "ppc601", PPC_FEATURE_601_INSTR, 0 },
  { "ppc64", PPC_FEATURE_64, 0 },
  { "ppcle", PPC_FEATURE_PPC_LE, 0 },
  { "smt", PPC_FEATURE_SMT, 0 },
  { "spe", PPC_FEATURE_HAS_SPE, 0 },
  { "true_le", PPC_FEATURE_TRUE_LE, 0 },
  { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
  { "vsx", PPC_FEATURE_HAS_VSX, 0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
  { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
  { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
  { "htm", PPC_FEATURE2_HAS_HTM, 1 },
  { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
  { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
  { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
  { "tar", PPC_FEATURE2_HAS_TAR, 1 },
  { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
  { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
  { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
  { "darn", PPC_FEATURE2_DARN, 1 },
  { "scv", PPC_FEATURE2_SCV, 1 }
};
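
/* Likewise, as an illustration, user code such as

     if (__builtin_cpu_supports ("vsx"))
       ...

   tests the PPC_FEATURE_HAS_VSX bit in the AT_HWCAP word cached in the TCB;
   the id field above selects the word (0 for AT_HWCAP, 1 for AT_HWCAP2).  */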

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,  /* default clone.  */
  CLONE_ISA_2_05,     /* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,     /* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,     /* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,     /* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;  /* rs6000_isa mask */
  const char *name;        /* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },                              /* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },      /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },   /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9).  */
};
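
/* A hypothetical sketch of the function multi-versioning this map serves:
   user code along the lines of

     __attribute__ ((target_clones ("cpu=power9,default")))
     long foo (void);

   gets a "cpu=power9" clone corresponding to CLONE_ISA_3_00, and the
   generated resolver picks between the clones at run time using
   __builtin_cpu_supports with the names above (here, "arch_3_00").  */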


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when determining
   whether an address is legitimate.  We only need to worry about GPR, FPR,
   and Altivec registers here, along with an ANY field that is the OR of the
   3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,  /* General purpose registers.  */
  RELOAD_REG_FPR,  /* Traditional floating point regs.  */
  RELOAD_REG_VMX,  /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,  /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;  /* Register class name.  */
  int reg;           /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },      /* RELOAD_REG_GPR.  */
  { "Fpr", FIRST_FPR_REGNO },      /* RELOAD_REG_FPR.  */
  { "VMX", FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any", -1 },                   /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID       0x01  /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE    0x02  /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED     0x04  /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET      0x08  /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC  0x10  /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY  0x20  /* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16     0x40  /* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET 0x80  /* quad offset is limited.  */
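
/* For instance, a mode that is valid in a register class and supports both
   reg+reg and reg+offset addressing there would carry an addr_mask of
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET).  */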

/* For each mode, the reload insns needed and, for each register type, the
   mask of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;    /* INSN to reload for loading.  */
  enum insn_code reload_store;   /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;          /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
          != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
          != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
          != 0);
}
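
/* As an illustration (background, not from the original source): the ISA 3.0
   quad-word lxv/stxv instructions use a DQ-form encoding whose displacement
   must be a multiple of 16, which is why RELOAD_REG_QUAD_OFFSET is tracked
   separately from the ordinary RELOAD_REG_OFFSET bit.  */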

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),  /* mulsi */
  COSTS_N_INSNS (1),  /* mulsi_const */
  COSTS_N_INSNS (1),  /* mulsi_const9 */
  COSTS_N_INSNS (1),  /* muldi */
  COSTS_N_INSNS (1),  /* divsi */
  COSTS_N_INSNS (1),  /* divdi */
  COSTS_N_INSNS (1),  /* fp */
  COSTS_N_INSNS (1),  /* dmul */
  COSTS_N_INSNS (1),  /* sdiv */
  COSTS_N_INSNS (1),  /* ddiv */
  32,                 /* cache line size */
  0,                  /* l1 cache */
  0,                  /* l2 cache */
  0,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),  /* mulsi */
  COSTS_N_INSNS (1),  /* mulsi_const */
  COSTS_N_INSNS (1),  /* mulsi_const9 */
  COSTS_N_INSNS (1),  /* muldi */
  COSTS_N_INSNS (1),  /* divsi */
  COSTS_N_INSNS (1),  /* divdi */
  COSTS_N_INSNS (1),  /* fp */
  COSTS_N_INSNS (1),  /* dmul */
  COSTS_N_INSNS (1),  /* sdiv */
  COSTS_N_INSNS (1),  /* ddiv */
  128,                /* cache line size */
  0,                  /* l1 cache */
  0,                  /* l2 cache */
  0,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20), /* mulsi */
  COSTS_N_INSNS (12), /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (34), /* muldi */
  COSTS_N_INSNS (65), /* divsi */
  COSTS_N_INSNS (67), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (31), /* sdiv */
  COSTS_N_INSNS (31), /* ddiv */
  128,                /* cache line size */
  128,                /* l1 cache */
  2048,               /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (6),  /* divsi */
  COSTS_N_INSNS (6),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (10), /* sdiv */
  COSTS_N_INSNS (17), /* ddiv */
  32,                 /* cache line size */
  4,                  /* l1 cache */
  16,                 /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (33), /* divsi */
  COSTS_N_INSNS (33), /* divdi */
  COSTS_N_INSNS (11), /* fp */
  COSTS_N_INSNS (11), /* dmul */
  COSTS_N_INSNS (11), /* sdiv */
  COSTS_N_INSNS (11), /* ddiv */
  32,                 /* cache line size */
  4,                  /* l1 cache */
  16,                 /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (35), /* divsi */
  COSTS_N_INSNS (35), /* divdi */
  COSTS_N_INSNS (11), /* fp */
  COSTS_N_INSNS (11), /* dmul */
  COSTS_N_INSNS (11), /* sdiv */
  COSTS_N_INSNS (11), /* ddiv */
  32,                 /* cache line size */
  16,                 /* l1 cache */
  128,                /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (34), /* divsi */
  COSTS_N_INSNS (34), /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (19), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32,                 /* cache line size */
  32,                 /* l1 cache */
  256,                /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (11), /* divsi */
  COSTS_N_INSNS (11), /* divdi */
  COSTS_N_INSNS (6),  /* fp */
  COSTS_N_INSNS (6),  /* dmul */
  COSTS_N_INSNS (19), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32,                 /* l1 cache line size */
  32,                 /* l1 cache */
  512,                /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (36), /* divsi */
  COSTS_N_INSNS (36), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (31), /* ddiv */
  32,                 /* cache line size */
  32,                 /* l1 cache */
  256,                /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (37), /* divsi */
  COSTS_N_INSNS (37), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32,                 /* cache line size */
  8,                  /* l1 cache */
  64,                 /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (20), /* divsi */
  COSTS_N_INSNS (20), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (32), /* ddiv */
  32,                 /* cache line size */
  16,                 /* l1 cache */
  512,                /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (20), /* divsi */
  COSTS_N_INSNS (20), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (32), /* ddiv */
  32,                 /* cache line size */
  32,                 /* l1 cache */
  1024,               /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21), /* divsi */
  COSTS_N_INSNS (37), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (32), /* ddiv */
  128,                /* cache line size */
  32,                 /* l1 cache */
  1024,               /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21), /* divsi */
  COSTS_N_INSNS (37), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (21), /* ddiv */
  128,                /* cache line size */
  64,                 /* l1 cache */
  1024,               /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,   /* mulsi */
  COSTS_N_INSNS (6/2),     /* mulsi_const */
  COSTS_N_INSNS (6/2),     /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,  /* muldi */
  COSTS_N_INSNS (38/2),    /* divsi */
  COSTS_N_INSNS (70/2),    /* divdi */
  COSTS_N_INSNS (10/2),    /* fp */
  COSTS_N_INSNS (10/2),    /* dmul */
  COSTS_N_INSNS (74/2),    /* sdiv */
  COSTS_N_INSNS (74/2),    /* ddiv */
  128,                     /* cache line size */
  32,                      /* l1 cache */
  512,                     /* l2 cache */
  6,                       /* streams */
  0,                       /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (17), /* divsi */
  COSTS_N_INSNS (17), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (31), /* ddiv */
  32,                 /* cache line size */
  32,                 /* l1 cache */
  512,                /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (23), /* divsi */
  COSTS_N_INSNS (23), /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (21), /* sdiv */
  COSTS_N_INSNS (35), /* ddiv */
  32,                 /* cache line size */
  32,                 /* l1 cache */
  1024,               /* l2 cache */
  1,                  /* streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19), /* divsi */
  COSTS_N_INSNS (19), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (29), /* sdiv */
  COSTS_N_INSNS (29), /* ddiv */
  32,                 /* cache line size */
  32,                 /* l1 cache */
  256,                /* l2 cache */
  1,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19), /* divsi */
  COSTS_N_INSNS (19), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18), /* sdiv */
  COSTS_N_INSNS (33), /* ddiv */
  32,                 /* cache line size */
  16,                 /* l1 cache */
  16,                 /* l2 cache */
  1,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (8),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,                 /* cache line size */
  32,                 /* l1 cache */
  128,                /* l2 cache */
  1,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,                 /* cache line size */
  32,                 /* l1 cache */
  128,                /* l2 cache */
  1,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,                 /* cache line size */
  32,                 /* l1 cache */
  128,                /* l2 cache */
  1,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14), /* divsi */
  COSTS_N_INSNS (14), /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (36), /* sdiv */
  COSTS_N_INSNS (66), /* ddiv */
  64,                 /* cache line size */
  32,                 /* l1 cache */
  128,                /* l2 cache */
  1,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (18), /* divsi */
  COSTS_N_INSNS (18), /* divdi */
  COSTS_N_INSNS (10), /* fp */
  COSTS_N_INSNS (10), /* dmul */
  COSTS_N_INSNS (46), /* sdiv */
  COSTS_N_INSNS (72), /* ddiv */
  32,                 /* cache line size */
  32,                 /* l1 cache */
  512,                /* l2 cache */
  1,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (18), /* divsi */
  COSTS_N_INSNS (34), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17), /* sdiv */
  COSTS_N_INSNS (17), /* ddiv */
  128,                /* cache line size */
  32,                 /* l1 cache */
  1024,               /* l2 cache */
  8,                  /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),  /* mulsi */
  COSTS_N_INSNS (8),  /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (8),  /* muldi */
  COSTS_N_INSNS (22), /* divsi */
  COSTS_N_INSNS (28), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13), /* sdiv */
  COSTS_N_INSNS (16), /* ddiv */
  128,                /* cache line size */
  64,                 /* l1 cache */
  2048,               /* l2 cache */
  16,                 /* prefetch streams */
  0,                  /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (18), /* divsi */
  COSTS_N_INSNS (34), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13), /* sdiv */
  COSTS_N_INSNS (16), /* ddiv */
  128,                /* cache line size */
  32,                 /* l1 cache */
  256,                /* l2 cache */
  12,                 /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (19), /* divsi */
  COSTS_N_INSNS (35), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (14), /* sdiv */
  COSTS_N_INSNS (17), /* ddiv */
  128,                /* cache line size */
  32,                 /* l1 cache */
  256,                /* l2 cache */
  12,                 /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (8),  /* divsi */
  COSTS_N_INSNS (12), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13), /* sdiv */
  COSTS_N_INSNS (18), /* ddiv */
  128,                /* cache line size */
  32,                 /* l1 cache */
  512,                /* l2 cache */
  8,                  /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16), /* mulsi */
  COSTS_N_INSNS (16), /* mulsi_const */
  COSTS_N_INSNS (16), /* mulsi_const9 */
  COSTS_N_INSNS (16), /* muldi */
  COSTS_N_INSNS (22), /* divsi */
  COSTS_N_INSNS (28), /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (59), /* sdiv */
  COSTS_N_INSNS (72), /* ddiv */
  64,                 /* cache line size */
  16,                 /* l1 cache */
  2048,               /* l2 cache */
  16,                 /* prefetch streams */
  0,                  /* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];    /* return value + 3 arguments.  */
  unsigned char uns_p[4];  /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec", 1, 1, false, true, false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
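/* So, for example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000
   for %v0, and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001
   for %v31.  */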
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1671
1672 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1673 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1674
1675 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1676 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1677 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1678 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1679 rs6000_builtin_support_vector_misalignment
1680 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1681 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1682 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1683 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1684 rs6000_builtin_vectorization_cost
1685 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1686 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1687 rs6000_preferred_simd_mode
1688 #undef TARGET_VECTORIZE_INIT_COST
1689 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1690 #undef TARGET_VECTORIZE_ADD_STMT_COST
1691 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1692 #undef TARGET_VECTORIZE_FINISH_COST
1693 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1694 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1695 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1696
1697 #undef TARGET_INIT_BUILTINS
1698 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1699 #undef TARGET_BUILTIN_DECL
1700 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1701
1702 #undef TARGET_FOLD_BUILTIN
1703 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1704 #undef TARGET_GIMPLE_FOLD_BUILTIN
1705 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1706
1707 #undef TARGET_EXPAND_BUILTIN
1708 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1709
1710 #undef TARGET_MANGLE_TYPE
1711 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1712
1713 #undef TARGET_INIT_LIBFUNCS
1714 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1715
1716 #if TARGET_MACHO
1717 #undef TARGET_BINDS_LOCAL_P
1718 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1719 #endif
1720
1721 #undef TARGET_MS_BITFIELD_LAYOUT_P
1722 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1723
1724 #undef TARGET_ASM_OUTPUT_MI_THUNK
1725 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1726
1727 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1729
1730 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1731 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1732
1733 #undef TARGET_REGISTER_MOVE_COST
1734 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1735 #undef TARGET_MEMORY_MOVE_COST
1736 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1737 #undef TARGET_CANNOT_COPY_INSN_P
1738 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1739 #undef TARGET_RTX_COSTS
1740 #define TARGET_RTX_COSTS rs6000_rtx_costs
1741 #undef TARGET_ADDRESS_COST
1742 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1743 #undef TARGET_INSN_COST
1744 #define TARGET_INSN_COST rs6000_insn_cost
1745
1746 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1747 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1748
1749 #undef TARGET_PROMOTE_FUNCTION_MODE
1750 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1751
1752 #undef TARGET_RETURN_IN_MEMORY
1753 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1754
1755 #undef TARGET_RETURN_IN_MSB
1756 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1757
1758 #undef TARGET_SETUP_INCOMING_VARARGS
1759 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1760
1761 /* Always strict argument naming on rs6000. */
1762 #undef TARGET_STRICT_ARGUMENT_NAMING
1763 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1765 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1766 #undef TARGET_SPLIT_COMPLEX_ARG
1767 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1768 #undef TARGET_MUST_PASS_IN_STACK
1769 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1770 #undef TARGET_PASS_BY_REFERENCE
1771 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1772 #undef TARGET_ARG_PARTIAL_BYTES
1773 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1774 #undef TARGET_FUNCTION_ARG_ADVANCE
1775 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1776 #undef TARGET_FUNCTION_ARG
1777 #define TARGET_FUNCTION_ARG rs6000_function_arg
1778 #undef TARGET_FUNCTION_ARG_PADDING
1779 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1780 #undef TARGET_FUNCTION_ARG_BOUNDARY
1781 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1782
1783 #undef TARGET_BUILD_BUILTIN_VA_LIST
1784 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1785
1786 #undef TARGET_EXPAND_BUILTIN_VA_START
1787 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1788
1789 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1790 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1791
1792 #undef TARGET_EH_RETURN_FILTER_MODE
1793 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1794
1795 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1796 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1797
1798 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1799 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1800
1801 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1802 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1803
1804 #undef TARGET_FLOATN_MODE
1805 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1806
1807 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1808 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1809
1810 #undef TARGET_MD_ASM_ADJUST
1811 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1812
1813 #undef TARGET_OPTION_OVERRIDE
1814 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1815
1816 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1817 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1818 rs6000_builtin_vectorized_function
1819
1820 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1821 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1822 rs6000_builtin_md_vectorized_function
1823
1824 #undef TARGET_STACK_PROTECT_GUARD
1825 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1826
1827 #if !TARGET_MACHO
1828 #undef TARGET_STACK_PROTECT_FAIL
1829 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1830 #endif
1831
1832 #ifdef HAVE_AS_TLS
1833 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1834 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1835 #endif
1836
1837 /* Use a 32-bit anchor range. This leads to sequences like:
1838
1839 addis tmp,anchor,high
1840 add dest,tmp,low
1841
1842 where tmp itself acts as an anchor, and can be shared between
1843 accesses to the same 64k page. */
1844 #undef TARGET_MIN_ANCHOR_OFFSET
1845 #define TARGET_MIN_ANCHOR_OFFSET (-0x7fffffff - 1)
1846 #undef TARGET_MAX_ANCHOR_OFFSET
1847 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
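/* Sketch of the high/low split behind the addis/add sequence above
   (illustrative only, not part of GCC).  LOW is the sign-extended bottom
   16 bits and HIGH absorbs the rest, so HIGH + LOW == OFF and each half
   fits its instruction field:

     static void
     split_anchor_offset (long long off, long long *high, long long *low)
     {
       *low = (off & 0xffff) - ((off & 0x8000) << 1);
       *high = off - *low;
     }
*/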
1848 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1849 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1850 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1851 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1852
1853 #undef TARGET_BUILTIN_RECIPROCAL
1854 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1855
1856 #undef TARGET_SECONDARY_RELOAD
1857 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED
1859 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1860 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1861 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1862
1863 #undef TARGET_LEGITIMATE_ADDRESS_P
1864 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1865
1866 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1867 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1868
1869 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1870 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1871
1872 #undef TARGET_CAN_ELIMINATE
1873 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1874
1875 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1876 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1877
1878 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1879 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1880
1881 #undef TARGET_TRAMPOLINE_INIT
1882 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1883
1884 #undef TARGET_FUNCTION_VALUE
1885 #define TARGET_FUNCTION_VALUE rs6000_function_value
1886
1887 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1888 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1889
1890 #undef TARGET_OPTION_SAVE
1891 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1892
1893 #undef TARGET_OPTION_RESTORE
1894 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1895
1896 #undef TARGET_OPTION_PRINT
1897 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1898
1899 #undef TARGET_CAN_INLINE_P
1900 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1901
1902 #undef TARGET_SET_CURRENT_FUNCTION
1903 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1904
1905 #undef TARGET_LEGITIMATE_CONSTANT_P
1906 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1907
1908 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1909 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1910
1911 #undef TARGET_CAN_USE_DOLOOP_P
1912 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1913
1914 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1915 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1916
1917 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1918 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1919 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1920 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1921 #undef TARGET_UNWIND_WORD_MODE
1922 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1923
1924 #undef TARGET_OFFLOAD_OPTIONS
1925 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1926
1927 #undef TARGET_C_MODE_FOR_SUFFIX
1928 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1929
1930 #undef TARGET_INVALID_BINARY_OP
1931 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1932
1933 #undef TARGET_OPTAB_SUPPORTED_P
1934 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1935
1936 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1937 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1938
1939 #undef TARGET_COMPARE_VERSION_PRIORITY
1940 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1941
1942 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1943 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1944 rs6000_generate_version_dispatcher_body
1945
1946 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1947 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1948 rs6000_get_function_versions_dispatcher
1949
1950 #undef TARGET_OPTION_FUNCTION_VERSIONS
1951 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1952
1953 #undef TARGET_HARD_REGNO_NREGS
1954 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1955 #undef TARGET_HARD_REGNO_MODE_OK
1956 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1957
1958 #undef TARGET_MODES_TIEABLE_P
1959 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1960
1961 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1962 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1963 rs6000_hard_regno_call_part_clobbered
1964
1965 #undef TARGET_SLOW_UNALIGNED_ACCESS
1966 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1967
1968 #undef TARGET_CAN_CHANGE_MODE_CLASS
1969 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1970
1971 #undef TARGET_CONSTANT_ALIGNMENT
1972 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1973
1974 #undef TARGET_STARTING_FRAME_OFFSET
1975 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1976
1977 #if TARGET_ELF && RS6000_WEAK
1978 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1979 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1980 #endif
1981
1982 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1983 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1984
1985 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1986 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1987 \f
1988
1989 /* Processor table. */
1990 struct rs6000_ptt
1991 {
1992 const char *const name; /* Canonical processor name. */
1993 const enum processor_type processor; /* Processor type enum value. */
1994 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1995 };
1996
1997 static struct rs6000_ptt const processor_target_table[] =
1998 {
1999 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2000 #include "rs6000-cpus.def"
2001 #undef RS6000_CPU
2002 };
2003
2004 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2005 name is invalid. */
2006
2007 static int
2008 rs6000_cpu_name_lookup (const char *name)
2009 {
2010 size_t i;
2011
2012 if (name != NULL)
2013 {
2014 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2015 if (! strcmp (name, processor_target_table[i].name))
2016 return (int)i;
2017 }
2018
2019 return -1;
2020 }
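/* Usage sketch (illustrative; the variable opt_cpu_name is hypothetical,
   not part of GCC):

     int cpu_index = rs6000_cpu_name_lookup (opt_cpu_name);
     if (cpu_index < 0)
       error ("unrecognized processor name %qs", opt_cpu_name);
*/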
2021
2022 \f
2023 /* Return number of consecutive hard regs needed starting at reg REGNO
2024 to hold something of mode MODE.
2025 This is ordinarily the length in words of a value of mode MODE
2026 but can be less for certain modes in special long registers.
2027
2028 POWER and PowerPC GPRs hold 32 bits worth;
2029 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2030
2031 static int
2032 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2033 {
2034 unsigned HOST_WIDE_INT reg_size;
2035
2036 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2037 128-bit floating point that can go in vector registers, which has VSX
2038 memory addressing. */
2039 if (FP_REGNO_P (regno))
2040 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2041 ? UNITS_PER_VSX_WORD
2042 : UNITS_PER_FP_WORD);
2043
2044 else if (ALTIVEC_REGNO_P (regno))
2045 reg_size = UNITS_PER_ALTIVEC_WORD;
2046
2047 else
2048 reg_size = UNITS_PER_WORD;
2049
2050 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2051 }
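/* Worked example of the ceiling division above (illustrative, not part
   of GCC): a 16-byte mode needs (16 + 4 - 1) / 4 == 4 GPRs on a 32-bit
   target, (16 + 8 - 1) / 8 == 2 GPRs on a 64-bit target, and
   (16 + 16 - 1) / 16 == 1 register when it can live in a VSX register.  */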
2052
2053 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2054 MODE. */
2055 static int
2056 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2057 {
2058 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2059
2060 if (COMPLEX_MODE_P (mode))
2061 mode = GET_MODE_INNER (mode);
2062
2063 /* PTImode can only go in GPRs. Quad word memory operations require
2064 even/odd register combinations, and we use PTImode where we need to
2065 deal with them. Don't allow quad words in the argument or frame
2066 pointer registers, just registers 0..31. */
2067 if (mode == PTImode)
2068 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2069 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2070 && ((regno & 1) == 0));
2071
2072 /* VSX registers that overlap the FPR registers are larger than on non-VSX
2073 implementations. Don't allow an item to be split between an FP register
2074 and an Altivec register. Allow TImode in all VSX registers if the user
2075 asked for it. */
2076 if (TARGET_VSX && VSX_REGNO_P (regno)
2077 && (VECTOR_MEM_VSX_P (mode)
2078 || FLOAT128_VECTOR_P (mode)
2079 || reg_addr[mode].scalar_in_vmx_p
2080 || mode == TImode
2081 || (TARGET_VADDUQM && mode == V1TImode)))
2082 {
2083 if (FP_REGNO_P (regno))
2084 return FP_REGNO_P (last_regno);
2085
2086 if (ALTIVEC_REGNO_P (regno))
2087 {
2088 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2089 return 0;
2090
2091 return ALTIVEC_REGNO_P (last_regno);
2092 }
2093 }
2094
2095 /* The GPRs can hold any mode, but values bigger than one register
2096 cannot go past R31. */
2097 if (INT_REGNO_P (regno))
2098 return INT_REGNO_P (last_regno);
2099
2100 /* The float registers (except for VSX vector modes) can only hold floating
2101 modes and DImode. */
2102 if (FP_REGNO_P (regno))
2103 {
2104 if (FLOAT128_VECTOR_P (mode))
2105 return false;
2106
2107 if (SCALAR_FLOAT_MODE_P (mode)
2108 && (mode != TDmode || (regno % 2) == 0)
2109 && FP_REGNO_P (last_regno))
2110 return 1;
2111
2112 if (GET_MODE_CLASS (mode) == MODE_INT)
2113 {
2114 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2115 return 1;
2116
2117 if (TARGET_P8_VECTOR && (mode == SImode))
2118 return 1;
2119
2120 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2121 return 1;
2122 }
2123
2124 return 0;
2125 }
2126
2127 /* The CR register can only hold CC modes. */
2128 if (CR_REGNO_P (regno))
2129 return GET_MODE_CLASS (mode) == MODE_CC;
2130
2131 if (CA_REGNO_P (regno))
2132 return mode == Pmode || mode == SImode;
2133
2134 /* AltiVec modes go only in AltiVec registers. */
2135 if (ALTIVEC_REGNO_P (regno))
2136 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2137 || mode == V1TImode);
2138
2139 /* We cannot put non-VSX TImode or PTImode anywhere except general
2140 registers, and it must be able to fit within the register set. */
2141
2142 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2143 }
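/* Illustrative probes of the PTImode rule above (a sketch, not part of
   GCC; it assumes GPRs occupy hard registers 0-31):

     rs6000_hard_regno_mode_ok_uncached (10, PTImode);   returns 1: even pair
     rs6000_hard_regno_mode_ok_uncached (11, PTImode);   returns 0: odd start
     rs6000_hard_regno_mode_ok_uncached (31, PTImode);   returns 0: pair leaves GPRs
*/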
2144
2145 /* Implement TARGET_HARD_REGNO_NREGS. */
2146
2147 static unsigned int
2148 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2149 {
2150 return rs6000_hard_regno_nregs[mode][regno];
2151 }
2152
2153 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2154
2155 static bool
2156 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2157 {
2158 return rs6000_hard_regno_mode_ok_p[mode][regno];
2159 }
2160
2161 /* Implement TARGET_MODES_TIEABLE_P.
2162
2163 PTImode cannot tie with other modes because PTImode is restricted to even
2164 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2165 57744).
2166
2167 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2168 128-bit floating point on VSX systems ties with other vectors. */
2169
2170 static bool
2171 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2172 {
2173 if (mode1 == PTImode)
2174 return mode2 == PTImode;
2175 if (mode2 == PTImode)
2176 return false;
2177
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2179 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2180 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2181 return false;
2182
2183 if (SCALAR_FLOAT_MODE_P (mode1))
2184 return SCALAR_FLOAT_MODE_P (mode2);
2185 if (SCALAR_FLOAT_MODE_P (mode2))
2186 return false;
2187
2188 if (GET_MODE_CLASS (mode1) == MODE_CC)
2189 return GET_MODE_CLASS (mode2) == MODE_CC;
2190 if (GET_MODE_CLASS (mode2) == MODE_CC)
2191 return false;
2192
2193 return true;
2194 }
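/* Illustrative consequences of the ordering above (not part of GCC):
   DImode ties with SImode (neither is vector, float, nor CC), DFmode
   ties with SFmode, V2DFmode ties with V4SImode, but DFmode does not
   tie with V2DFmode, and CCmode ties only with other CC modes.  */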
2195
2196 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2197
2198 static bool
2199 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2200 {
2201 if (TARGET_32BIT
2202 && TARGET_POWERPC64
2203 && GET_MODE_SIZE (mode) > 4
2204 && INT_REGNO_P (regno))
2205 return true;
2206
2207 if (TARGET_VSX
2208 && FP_REGNO_P (regno)
2209 && GET_MODE_SIZE (mode) > 8
2210 && !FLOAT128_2REG_P (mode))
2211 return true;
2212
2213 return false;
2214 }
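/* Example of the first test above (illustrative, not part of GCC): on a
   32-bit ABI with -mpowerpc64, a DImode value in a GPR is treated as
   part-clobbered by calls, since the ABI only preserves the low 32 bits
   of nonvolatile GPRs.  */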
2215
2216 /* Print interesting facts about registers. */
2217 static void
2218 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2219 {
2220 int r, m;
2221
2222 for (r = first_regno; r <= last_regno; ++r)
2223 {
2224 const char *comma = "";
2225 int len;
2226
2227 if (first_regno == last_regno)
2228 fprintf (stderr, "%s:\t", reg_name);
2229 else
2230 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2231
2232 len = 8;
2233 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2234 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2235 {
2236 if (len > 70)
2237 {
2238 fprintf (stderr, ",\n\t");
2239 len = 8;
2240 comma = "";
2241 }
2242
2243 if (rs6000_hard_regno_nregs[m][r] > 1)
2244 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2245 rs6000_hard_regno_nregs[m][r]);
2246 else
2247 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2248
2249 comma = ", ";
2250 }
2251
2252 if (call_used_regs[r])
2253 {
2254 if (len > 70)
2255 {
2256 fprintf (stderr, ",\n\t");
2257 len = 8;
2258 comma = "";
2259 }
2260
2261 len += fprintf (stderr, "%s%s", comma, "call-used");
2262 comma = ", ";
2263 }
2264
2265 if (fixed_regs[r])
2266 {
2267 if (len > 70)
2268 {
2269 fprintf (stderr, ",\n\t");
2270 len = 8;
2271 comma = "";
2272 }
2273
2274 len += fprintf (stderr, "%s%s", comma, "fixed");
2275 comma = ", ";
2276 }
2277
2278 if (len > 70)
2279 {
2280 fprintf (stderr, ",\n\t");
2281 comma = "";
2282 }
2283
2284 len += fprintf (stderr, "%sreg-class = %s", comma,
2285 reg_class_names[(int)rs6000_regno_regclass[r]]);
2286 comma = ", ";
2287
2288 if (len > 70)
2289 {
2290 fprintf (stderr, ",\n\t");
2291 comma = "";
2292 }
2293
2294 fprintf (stderr, "%sregno = %d\n", comma, r);
2295 }
2296 }
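/* Sample line of output from the function above (an illustration only;
   the exact mode list varies with the configuration):

     gr10:	SI, DI/2, ..., reg-class = BASE_REGS, regno = 10
*/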
2297
2298 static const char *
2299 rs6000_debug_vector_unit (enum rs6000_vector v)
2300 {
2301 const char *ret;
2302
2303 switch (v)
2304 {
2305 case VECTOR_NONE: ret = "none"; break;
2306 case VECTOR_ALTIVEC: ret = "altivec"; break;
2307 case VECTOR_VSX: ret = "vsx"; break;
2308 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2309 default: ret = "unknown"; break;
2310 }
2311
2312 return ret;
2313 }
2314
2315 /* Inner function printing just the address mask for a particular reload
2316 register class. */
2317 DEBUG_FUNCTION char *
2318 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2319 {
2320 static char ret[8];
2321 char *p = ret;
2322
2323 if ((mask & RELOAD_REG_VALID) != 0)
2324 *p++ = 'v';
2325 else if (keep_spaces)
2326 *p++ = ' ';
2327
2328 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2329 *p++ = 'm';
2330 else if (keep_spaces)
2331 *p++ = ' ';
2332
2333 if ((mask & RELOAD_REG_INDEXED) != 0)
2334 *p++ = 'i';
2335 else if (keep_spaces)
2336 *p++ = ' ';
2337
2338 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2339 *p++ = 'O';
2340 else if ((mask & RELOAD_REG_OFFSET) != 0)
2341 *p++ = 'o';
2342 else if (keep_spaces)
2343 *p++ = ' ';
2344
2345 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2346 *p++ = '+';
2347 else if (keep_spaces)
2348 *p++ = ' ';
2349
2350 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2351 *p++ = '+';
2352 else if (keep_spaces)
2353 *p++ = ' ';
2354
2355 if ((mask & RELOAD_REG_AND_M16) != 0)
2356 *p++ = '&';
2357 else if (keep_spaces)
2358 *p++ = ' ';
2359
2360 *p = '\0';
2361
2362 return ret;
2363 }
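/* Sample output (illustrative, not part of GCC): a mask with
   RELOAD_REG_VALID, RELOAD_REG_INDEXED and RELOAD_REG_OFFSET set prints
   as "v io   " when KEEP_SPACES is true (one column per flag) and as
   "vio" when it is false.  */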
2364
2365 /* Print the address masks in a human readable fashion. */
2366 DEBUG_FUNCTION void
2367 rs6000_debug_print_mode (ssize_t m)
2368 {
2369 ssize_t rc;
2370 int spaces = 0;
2371
2372 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2373 for (rc = 0; rc < N_RELOAD_REG; rc++)
2374 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2375 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2376
2377 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2378 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2379 {
2380 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2381 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2382 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2383 spaces = 0;
2384 }
2385 else
2386 spaces += sizeof (" Reload=sl") - 1;
2387
2388 if (reg_addr[m].scalar_in_vmx_p)
2389 {
2390 fprintf (stderr, "%*s Upper=y", spaces, "");
2391 spaces = 0;
2392 }
2393 else
2394 spaces += sizeof (" Upper=y") - 1;
2395
2396 if (rs6000_vector_unit[m] != VECTOR_NONE
2397 || rs6000_vector_mem[m] != VECTOR_NONE)
2398 {
2399 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2400 spaces, "",
2401 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2402 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2403 }
2404
2405 fputs ("\n", stderr);
2406 }
2407
2408 #define DEBUG_FMT_ID "%-32s= "
2409 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2410 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2411 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
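/* Example of the formats above (illustrative, not part of GCC):
   fprintf (stderr, DEBUG_FMT_D, "tls_size", 16) left-justifies the name
   in a 32-column field before the value, printing:

     tls_size                        = 16
*/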
2412
2413 /* Print various interesting information with -mdebug=reg. */
2414 static void
2415 rs6000_debug_reg_global (void)
2416 {
2417 static const char *const tf[2] = { "false", "true" };
2418 const char *nl = (const char *)0;
2419 int m;
2420 size_t m1, m2, v;
2421 char costly_num[20];
2422 char nop_num[20];
2423 char flags_buffer[40];
2424 const char *costly_str;
2425 const char *nop_str;
2426 const char *trace_str;
2427 const char *abi_str;
2428 const char *cmodel_str;
2429 struct cl_target_option cl_opts;
2430
2431 /* Modes we want tieable information on. */
2432 static const machine_mode print_tieable_modes[] = {
2433 QImode,
2434 HImode,
2435 SImode,
2436 DImode,
2437 TImode,
2438 PTImode,
2439 SFmode,
2440 DFmode,
2441 TFmode,
2442 IFmode,
2443 KFmode,
2444 SDmode,
2445 DDmode,
2446 TDmode,
2447 V16QImode,
2448 V8HImode,
2449 V4SImode,
2450 V2DImode,
2451 V1TImode,
2452 V32QImode,
2453 V16HImode,
2454 V8SImode,
2455 V4DImode,
2456 V2TImode,
2457 V4SFmode,
2458 V2DFmode,
2459 V8SFmode,
2460 V4DFmode,
2461 CCmode,
2462 CCUNSmode,
2463 CCEQmode,
2464 };
2465
2466 /* Virtual regs we are interested in. */
2467 static const struct {
2468 int regno; /* register number. */
2469 const char *name; /* register name. */
2470 } virtual_regs[] = {
2471 { STACK_POINTER_REGNUM, "stack pointer:" },
2472 { TOC_REGNUM, "toc: " },
2473 { STATIC_CHAIN_REGNUM, "static chain: " },
2474 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2475 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2476 { ARG_POINTER_REGNUM, "arg pointer: " },
2477 { FRAME_POINTER_REGNUM, "frame pointer:" },
2478 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2479 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2480 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2481 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2482 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2483 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2484 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2485 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2486 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2487 };
2488
2489 fputs ("\nHard register information:\n", stderr);
2490 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2491 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2492 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2493 LAST_ALTIVEC_REGNO,
2494 "vs");
2495 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2496 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2497 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2498 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2499 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2500 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2501
2502 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2503 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2504 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2505
2506 fprintf (stderr,
2507 "\n"
2508 "d reg_class = %s\n"
2509 "f reg_class = %s\n"
2510 "v reg_class = %s\n"
2511 "wa reg_class = %s\n"
2512 "wb reg_class = %s\n"
2513 "wd reg_class = %s\n"
2514 "we reg_class = %s\n"
2515 "wf reg_class = %s\n"
2516 "wg reg_class = %s\n"
2517 "wh reg_class = %s\n"
2518 "wi reg_class = %s\n"
2519 "wj reg_class = %s\n"
2520 "wk reg_class = %s\n"
2521 "wl reg_class = %s\n"
2522 "wm reg_class = %s\n"
2523 "wo reg_class = %s\n"
2524 "wp reg_class = %s\n"
2525 "wq reg_class = %s\n"
2526 "wr reg_class = %s\n"
2527 "ws reg_class = %s\n"
2528 "wt reg_class = %s\n"
2529 "wu reg_class = %s\n"
2530 "wv reg_class = %s\n"
2531 "ww reg_class = %s\n"
2532 "wx reg_class = %s\n"
2533 "wy reg_class = %s\n"
2534 "wz reg_class = %s\n"
2535 "wA reg_class = %s\n"
2536 "wH reg_class = %s\n"
2537 "wI reg_class = %s\n"
2538 "wJ reg_class = %s\n"
2539 "wK reg_class = %s\n"
2540 "\n",
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2573
2574 nl = "\n";
2575 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2576 rs6000_debug_print_mode (m);
2577
2578 fputs ("\n", stderr);
2579
2580 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2581 {
2582 machine_mode mode1 = print_tieable_modes[m1];
2583 bool first_time = true;
2584
2585 nl = (const char *)0;
2586 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2587 {
2588 machine_mode mode2 = print_tieable_modes[m2];
2589 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2590 {
2591 if (first_time)
2592 {
2593 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2594 nl = "\n";
2595 first_time = false;
2596 }
2597
2598 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2599 }
2600 }
2601
2602 if (!first_time)
2603 fputs ("\n", stderr);
2604 }
2605
2606 if (nl)
2607 fputs (nl, stderr);
2608
2609 if (rs6000_recip_control)
2610 {
2611 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2612
2613 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2614 if (rs6000_recip_bits[m])
2615 {
2616 fprintf (stderr,
2617 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2618 GET_MODE_NAME (m),
2619 (RS6000_RECIP_AUTO_RE_P (m)
2620 ? "auto"
2621 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2622 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2623 ? "auto"
2624 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2625 }
2626
2627 fputs ("\n", stderr);
2628 }
2629
2630 if (rs6000_cpu_index >= 0)
2631 {
2632 const char *name = processor_target_table[rs6000_cpu_index].name;
2633 HOST_WIDE_INT flags
2634 = processor_target_table[rs6000_cpu_index].target_enable;
2635
2636 sprintf (flags_buffer, "-mcpu=%s flags", name);
2637 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2638 }
2639 else
2640 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2641
2642 if (rs6000_tune_index >= 0)
2643 {
2644 const char *name = processor_target_table[rs6000_tune_index].name;
2645 HOST_WIDE_INT flags
2646 = processor_target_table[rs6000_tune_index].target_enable;
2647
2648 sprintf (flags_buffer, "-mtune=%s flags", name);
2649 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2650 }
2651 else
2652 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2653
2654 cl_target_option_save (&cl_opts, &global_options);
2655 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2656 rs6000_isa_flags);
2657
2658 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2659 rs6000_isa_flags_explicit);
2660
2661 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2662 rs6000_builtin_mask);
2663
2664 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2665
2666 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2667 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2668
2669 switch (rs6000_sched_costly_dep)
2670 {
2671 case max_dep_latency:
2672 costly_str = "max_dep_latency";
2673 break;
2674
2675 case no_dep_costly:
2676 costly_str = "no_dep_costly";
2677 break;
2678
2679 case all_deps_costly:
2680 costly_str = "all_deps_costly";
2681 break;
2682
2683 case true_store_to_load_dep_costly:
2684 costly_str = "true_store_to_load_dep_costly";
2685 break;
2686
2687 case store_to_load_dep_costly:
2688 costly_str = "store_to_load_dep_costly";
2689 break;
2690
2691 default:
2692 costly_str = costly_num;
2693 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2694 break;
2695 }
2696
2697 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2698
2699 switch (rs6000_sched_insert_nops)
2700 {
2701 case sched_finish_regroup_exact:
2702 nop_str = "sched_finish_regroup_exact";
2703 break;
2704
2705 case sched_finish_pad_groups:
2706 nop_str = "sched_finish_pad_groups";
2707 break;
2708
2709 case sched_finish_none:
2710 nop_str = "sched_finish_none";
2711 break;
2712
2713 default:
2714 nop_str = nop_num;
2715 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2716 break;
2717 }
2718
2719 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2720
2721 switch (rs6000_sdata)
2722 {
2723 default:
2724 case SDATA_NONE:
2725 break;
2726
2727 case SDATA_DATA:
2728 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2729 break;
2730
2731 case SDATA_SYSV:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2733 break;
2734
2735 case SDATA_EABI:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2737 break;
2738
2739 }
2740
2741 switch (rs6000_traceback)
2742 {
2743 case traceback_default: trace_str = "default"; break;
2744 case traceback_none: trace_str = "none"; break;
2745 case traceback_part: trace_str = "part"; break;
2746 case traceback_full: trace_str = "full"; break;
2747 default: trace_str = "unknown"; break;
2748 }
2749
2750 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2751
2752 switch (rs6000_current_cmodel)
2753 {
2754 case CMODEL_SMALL: cmodel_str = "small"; break;
2755 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2756 case CMODEL_LARGE: cmodel_str = "large"; break;
2757 default: cmodel_str = "unknown"; break;
2758 }
2759
2760 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2761
2762 switch (rs6000_current_abi)
2763 {
2764 case ABI_NONE: abi_str = "none"; break;
2765 case ABI_AIX: abi_str = "aix"; break;
2766 case ABI_ELFv2: abi_str = "ELFv2"; break;
2767 case ABI_V4: abi_str = "V4"; break;
2768 case ABI_DARWIN: abi_str = "darwin"; break;
2769 default: abi_str = "unknown"; break;
2770 }
2771
2772 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2773
2774 if (rs6000_altivec_abi)
2775 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2776
2777 if (rs6000_darwin64_abi)
2778 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2779
2780 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2781 (TARGET_SOFT_FLOAT ? "true" : "false"));
2782
2783 if (TARGET_LINK_STACK)
2784 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2785
2786 if (TARGET_P8_FUSION)
2787 {
2788 char options[80];
2789
2790 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2791 if (TARGET_P8_FUSION_SIGN)
2792 strcat (options, ", sign");
2793
2794 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2795 }
2796
2797 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2798 TARGET_SECURE_PLT ? "secure" : "bss");
2799 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2800 aix_struct_return ? "aix" : "sysv");
2801 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2802 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2803 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2804 tf[!!rs6000_align_branch_targets]);
2805 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2806 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2807 rs6000_long_double_type_size);
2808 if (rs6000_long_double_type_size > 64)
2809 {
2810 fprintf (stderr, DEBUG_FMT_S, "long double type",
2811 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2812 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2813 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2814 }
2815 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2816 (int)rs6000_sched_restricted_insns_priority);
2817 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2818 (int)END_BUILTINS);
2819 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2820 (int)RS6000_BUILTIN_COUNT);
2821
2822 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2823 (int)TARGET_FLOAT128_ENABLE_TYPE);
2824
2825 if (TARGET_VSX)
2826 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2827 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2828
2829 if (TARGET_DIRECT_MOVE_128)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2831 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2832 }
2833
2834 \f
2835 /* Update the addr mask bits in reg_addr to help secondary reload and the
2836 legitimate address support figure out the appropriate addressing to
2837 use. */
2838
2839 static void
2840 rs6000_setup_reg_addr_masks (void)
2841 {
2842 ssize_t rc, reg, m, nregs;
2843 addr_mask_type any_addr_mask, addr_mask;
2844
2845 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2846 {
2847 machine_mode m2 = (machine_mode) m;
2848 bool complex_p = false;
2849 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2850 size_t msize;
2851
2852 if (COMPLEX_MODE_P (m2))
2853 {
2854 complex_p = true;
2855 m2 = GET_MODE_INNER (m2);
2856 }
2857
2858 msize = GET_MODE_SIZE (m2);
2859
2860 /* SDmode is special in that we want to access it only via REG+REG
2861 addressing on power7 and above, since we want to use the LFIWZX and
2862 STFIWZX instructions to load it. */
2863 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2864
2865 any_addr_mask = 0;
2866 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2867 {
2868 addr_mask = 0;
2869 reg = reload_reg_map[rc].reg;
2870
2871 /* Can mode values go in the GPR/FPR/Altivec registers? */
2872 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2873 {
2874 bool small_int_vsx_p = (small_int_p
2875 && (rc == RELOAD_REG_FPR
2876 || rc == RELOAD_REG_VMX));
2877
2878 nregs = rs6000_hard_regno_nregs[m][reg];
2879 addr_mask |= RELOAD_REG_VALID;
2880
2881 /* Indicate if the mode takes more than 1 physical register. If
2882 it takes a single register, indicate it can do REG+REG
2883 addressing. Small integers in VSX registers can only do
2884 REG+REG addressing. */
2885 if (small_int_vsx_p)
2886 addr_mask |= RELOAD_REG_INDEXED;
2887 else if (nregs > 1 || m == BLKmode || complex_p)
2888 addr_mask |= RELOAD_REG_MULTIPLE;
2889 else
2890 addr_mask |= RELOAD_REG_INDEXED;
2891
2892 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2893 addressing. If we allow scalars into Altivec registers,
2894 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2895
2896 For VSX systems, we don't allow update addressing for
2897 DFmode/SFmode if those registers can go in both the
2898 traditional floating point registers and Altivec registers.
2899 The load/store instructions for the Altivec registers do not
2900 have update forms. If we allowed update addressing, it seems
2901 to break IV-OPT code using floating point if the index type is
2902 int instead of long (PR target/81550 and target/84042). */
2903
2904 if (TARGET_UPDATE
2905 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2906 && msize <= 8
2907 && !VECTOR_MODE_P (m2)
2908 && !FLOAT128_VECTOR_P (m2)
2909 && !complex_p
2910 && (m != E_DFmode || !TARGET_VSX)
2911 && (m != E_SFmode || !TARGET_P8_VECTOR)
2912 && !small_int_vsx_p)
2913 {
2914 addr_mask |= RELOAD_REG_PRE_INCDEC;
2915
2916 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2917 we don't allow PRE_MODIFY for some multi-register
2918 operations. */
2919 switch (m)
2920 {
2921 default:
2922 addr_mask |= RELOAD_REG_PRE_MODIFY;
2923 break;
2924
2925 case E_DImode:
2926 if (TARGET_POWERPC64)
2927 addr_mask |= RELOAD_REG_PRE_MODIFY;
2928 break;
2929
2930 case E_DFmode:
2931 case E_DDmode:
2932 if (TARGET_HARD_FLOAT)
2933 addr_mask |= RELOAD_REG_PRE_MODIFY;
2934 break;
2935 }
2936 }
2937 }
2938
2939 /* GPR and FPR registers can do REG+OFFSET addressing, except
2940 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2941 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2942 if ((addr_mask != 0) && !indexed_only_p
2943 && msize <= 8
2944 && (rc == RELOAD_REG_GPR
2945 || ((msize == 8 || m2 == SFmode)
2946 && (rc == RELOAD_REG_FPR
2947 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2948 addr_mask |= RELOAD_REG_OFFSET;
2949
2950 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2951 instructions are enabled. The offset for 128-bit VSX registers is
2952 only 12 bits. While GPRs can handle the full offset range, VSX
2953 registers can only handle the restricted range. */
2954 else if ((addr_mask != 0) && !indexed_only_p
2955 && msize == 16 && TARGET_P9_VECTOR
2956 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2957 || (m2 == TImode && TARGET_VSX)))
2958 {
2959 addr_mask |= RELOAD_REG_OFFSET;
2960 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2961 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2962 }
2963
2964 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2965 addressing on 128-bit types. */
2966 if (rc == RELOAD_REG_VMX && msize == 16
2967 && (addr_mask & RELOAD_REG_VALID) != 0)
2968 addr_mask |= RELOAD_REG_AND_M16;
2969
2970 reg_addr[m].addr_mask[rc] = addr_mask;
2971 any_addr_mask |= addr_mask;
2972 }
2973
2974 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2975 }
2976 }
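/* Shape of the table built above (an illustrative sketch, not part of
   GCC): once this runs, callers can test a candidate addressing form in
   O(1), e.g.

     if (reg_addr[mode].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_OFFSET)
       ... a D-form (REG+OFFSET) GPR address is legitimate here ...
*/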
2977
2978 \f
2979 /* Initialize the various global tables that are based on register size. */
2980 static void
2981 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2982 {
2983 ssize_t r, m, c;
2984 int align64;
2985 int align32;
2986
2987 /* Precalculate REGNO_REG_CLASS. */
2988 rs6000_regno_regclass[0] = GENERAL_REGS;
2989 for (r = 1; r < 32; ++r)
2990 rs6000_regno_regclass[r] = BASE_REGS;
2991
2992 for (r = 32; r < 64; ++r)
2993 rs6000_regno_regclass[r] = FLOAT_REGS;
2994
2995 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2996 rs6000_regno_regclass[r] = NO_REGS;
2997
2998 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2999 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3000
3001 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3002 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3003 rs6000_regno_regclass[r] = CR_REGS;
3004
3005 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3006 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3007 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3008 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3009 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3010 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3011 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3012 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3013 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3014 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3015
3016 /* Precalculate register class to simpler reload register class. We don't
3017 need all of the register classes that are combinations of different
3018 classes, just the simple ones that have constraint letters. */
3019 for (c = 0; c < N_REG_CLASSES; c++)
3020 reg_class_to_reg_type[c] = NO_REG_TYPE;
3021
3022 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3023 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3025 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3026 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3029 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3031 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3032
3033 if (TARGET_VSX)
3034 {
3035 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3036 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3037 }
3038 else
3039 {
3040 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3041 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3042 }
3043
3044 /* Precalculate the valid memory formats as well as the vector information;
3045 this must be set up before the rs6000_hard_regno_nregs_internal calls
3046 below. */
3047 gcc_assert ((int)VECTOR_NONE == 0);
3048 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3049 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3050
3051 gcc_assert ((int)CODE_FOR_nothing == 0);
3052 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3053
3054 gcc_assert ((int)NO_REGS == 0);
3055 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3056
3057 /* The VSX hardware allows native alignment for vectors; -mvsx-align-128
3058 controls whether the compiler uses it or keeps 128-bit alignment. */
3059 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3060 {
3061 align64 = 64;
3062 align32 = 32;
3063 }
3064 else
3065 {
3066 align64 = 128;
3067 align32 = 128;
3068 }
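/* Consequence of the choice above (illustrative, not part of GCC): with
   VSX and the default alignment, V2DFmode gets 64-bit and V4SFmode
   32-bit alignment below; with -mvsx-align-128, or without VSX, both
   remain 128-bit aligned.  */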
3069
3070 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3071 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3072 if (TARGET_FLOAT128_TYPE)
3073 {
3074 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3075 rs6000_vector_align[KFmode] = 128;
3076
3077 if (FLOAT128_IEEE_P (TFmode))
3078 {
3079 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3080 rs6000_vector_align[TFmode] = 128;
3081 }
3082 }
3083
3084 /* V2DF mode, VSX only. */
3085 if (TARGET_VSX)
3086 {
3087 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3088 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3089 rs6000_vector_align[V2DFmode] = align64;
3090 }
3091
3092 /* V4SF mode, either VSX or Altivec. */
3093 if (TARGET_VSX)
3094 {
3095 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3096 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3097 rs6000_vector_align[V4SFmode] = align32;
3098 }
3099 else if (TARGET_ALTIVEC)
3100 {
3101 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3102 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3103 rs6000_vector_align[V4SFmode] = align32;
3104 }
3105
3106 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3107 and stores. */
3108 if (TARGET_ALTIVEC)
3109 {
3110 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3111 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3112 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3113 rs6000_vector_align[V4SImode] = align32;
3114 rs6000_vector_align[V8HImode] = align32;
3115 rs6000_vector_align[V16QImode] = align32;
3116
3117 if (TARGET_VSX)
3118 {
3119 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3120 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3121 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3122 }
3123 else
3124 {
3125 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3126 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3127 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3128 }
3129 }
3130
3131 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3132 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3133 if (TARGET_VSX)
3134 {
3135 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3136 rs6000_vector_unit[V2DImode]
3137 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3138 rs6000_vector_align[V2DImode] = align64;
3139
3140 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3141 rs6000_vector_unit[V1TImode]
3142 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3143 rs6000_vector_align[V1TImode] = 128;
3144 }
3145
3146 /* DFmode, see if we want to use the VSX unit. Memory is handled
3147 differently, so don't set rs6000_vector_mem. */
3148 if (TARGET_VSX)
3149 {
3150 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3151 rs6000_vector_align[DFmode] = 64;
3152 }
3153
3154 /* SFmode, see if we want to use the VSX unit. */
3155 if (TARGET_P8_VECTOR)
3156 {
3157 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3158 rs6000_vector_align[SFmode] = 32;
3159 }
3160
3161 /* Allow TImode in VSX register and set the VSX memory macros. */
3162 if (TARGET_VSX)
3163 {
3164 rs6000_vector_mem[TImode] = VECTOR_VSX;
3165 rs6000_vector_align[TImode] = align64;
3166 }
3167
3168 /* Register class constraints for the constraints that depend on compile
3169 switches. When the VSX code was added, different constraints were added
3170 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3171 of the VSX registers are used. The register classes for scalar floating
3172 point types is set, based on whether we allow that type into the upper
3173 (Altivec) registers. GCC has register classes to target the Altivec
3174 registers for load/store operations, to select using a VSX memory
3175 operation instead of the traditional floating point operation. The
3176 constraints are:
3177
3178 d - Register class to use with traditional DFmode instructions.
3179 f - Register class to use with traditional SFmode instructions.
3180 v - Altivec register.
3181 wa - Any VSX register.
3182 wc - Reserved to represent individual CR bits (used in LLVM).
3183 wd - Preferred register class for V2DFmode.
3184 wf - Preferred register class for V4SFmode.
3185 wg - Float register for power6x move insns.
3186 wh - FP register for direct move instructions.
3187 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3188 wj - FP or VSX register to hold 64-bit integers for direct moves.
3189 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3190 wl - Float register if we can do 32-bit signed int loads.
3191 wm - VSX register for ISA 2.07 direct move operations.
3192 wn - always NO_REGS.
3193 wr - GPR if 64-bit mode is permitted.
3194 ws - Register class to do ISA 2.06 DF operations.
3195 wt - VSX register for TImode in VSX registers.
3196 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3197 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3198 ww - Register class to do SF conversions in with VSX operations.
3199 wx - Float register if we can do 32-bit int stores.
3200 wy - Register class to do ISA 2.07 SF operations.
3201 wz - Float register if we can do 32-bit unsigned int loads.
3202 wH - Altivec register if SImode is allowed in VSX registers.
3203 wI - VSX register if SImode is allowed in VSX registers.
3204 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3205 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3206
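/* Usage sketch (illustrative, not itself part of this file): a pattern
   in rs6000.md can then write a constraint such as "wa", and the operand
   is validated against the register class recorded in
   rs6000_constraints[RS6000_CONSTRAINT_wa] below.  */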
3207 if (TARGET_HARD_FLOAT)
3208 {
3209 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3210 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3211 }
3212
3213 if (TARGET_VSX)
3214 {
3215 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3216 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3217 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3218 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3219 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3220 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3222 }
3223
3224 /* Add conditional constraints based on various options, to allow us to
3225 collapse multiple insn patterns. */
3226 if (TARGET_ALTIVEC)
3227 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3228
3229 if (TARGET_MFPGPR) /* DFmode */
3230 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3231
3232 if (TARGET_LFIWAX)
3233 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3234
3235 if (TARGET_DIRECT_MOVE)
3236 {
3237 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3238 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3239 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3240 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3241 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3242 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3243 }
3244
3245 if (TARGET_POWERPC64)
3246 {
3247 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3248 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3249 }
3250
3251 if (TARGET_P8_VECTOR) /* SFmode */
3252 {
3253 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3254 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3255 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3256 }
3257 else if (TARGET_VSX)
3258 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3259
3260 if (TARGET_STFIWX)
3261 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3262
3263 if (TARGET_LFIWZX)
3264 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3265
3266 if (TARGET_FLOAT128_TYPE)
3267 {
3268 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3269 if (FLOAT128_IEEE_P (TFmode))
3270 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3271 }
3272
3273 if (TARGET_P9_VECTOR)
3274 {
3275 /* Support for new D-form instructions. */
3276 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3277
3278 /* Support for ISA 3.0 (power9) vectors. */
3279 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3280 }
3281
3282 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3283 if (TARGET_DIRECT_MOVE_128)
3284 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3285
3286 /* Support small integers in VSX registers. */
3287 if (TARGET_P8_VECTOR)
3288 {
3289 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3290 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3291 if (TARGET_P9_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3295 }
3296 }
3297
3298 /* Set up the reload helper and direct move functions. */
3299 if (TARGET_VSX || TARGET_ALTIVEC)
3300 {
3301 if (TARGET_64BIT)
3302 {
3303 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3304 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3305 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3306 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3307 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3308 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3309 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3310 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3311 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3312 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3313 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3314 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3315 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3316 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3317 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3318 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3319 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3320 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3321 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3322 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3323
3324 if (FLOAT128_VECTOR_P (KFmode))
3325 {
3326 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3327 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3328 }
3329
3330 if (FLOAT128_VECTOR_P (TFmode))
3331 {
3332 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3333 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3334 }
3335
3336 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3337 available. */
3338 if (TARGET_NO_SDMODE_STACK)
3339 {
3340 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3341 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3342 }
3343
3344 if (TARGET_VSX)
3345 {
3346 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3347 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3348 }
3349
3350 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3351 {
3352 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3353 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3354 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3355 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3356 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3357 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3358 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3359 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3360 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3361
3362 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3363 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3364 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3365 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3366 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3367 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3368 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3369 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3370 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3371
3372 if (FLOAT128_VECTOR_P (KFmode))
3373 {
3374 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3375 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3376 }
3377
3378 if (FLOAT128_VECTOR_P (TFmode))
3379 {
3380 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3381 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3382 }
3383 }
3384 }
3385 else
3386 {
3387 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3388 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3389 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3390 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3391 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3392 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3393 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3394 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3395 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3396 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3397 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3398 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3399 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3400 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3401 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3402 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3403 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3404 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3405 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3406 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3407
3408 if (FLOAT128_VECTOR_P (KFmode))
3409 {
3410 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3411 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3412 }
3413
3414 if (FLOAT128_IEEE_P (TFmode))
3415 {
3416 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3417 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3418 }
3419
3420 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3421 available. */
3422 if (TARGET_NO_SDMODE_STACK)
3423 {
3424 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3425 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3426 }
3427
3428 if (TARGET_VSX)
3429 {
3430 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3431 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3432 }
3433
3434 if (TARGET_DIRECT_MOVE)
3435 {
3436 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3437 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3438 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3439 }
3440 }
3441
3442 reg_addr[DFmode].scalar_in_vmx_p = true;
3443 reg_addr[DImode].scalar_in_vmx_p = true;
3444
3445 if (TARGET_P8_VECTOR)
3446 {
3447 reg_addr[SFmode].scalar_in_vmx_p = true;
3448 reg_addr[SImode].scalar_in_vmx_p = true;
3449
3450 if (TARGET_P9_VECTOR)
3451 {
3452 reg_addr[HImode].scalar_in_vmx_p = true;
3453 reg_addr[QImode].scalar_in_vmx_p = true;
3454 }
3455 }
3456 }
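 
/* Roughly speaking, the CODE_FOR_reload_* entries registered above give
   rs6000_secondary_reload an insn to fall back on: when a vector or FP
   load/store is handed an address form that the registers involved
   cannot use directly, the reload pattern rebuilds the address in a
   scratch register first.  The _di/_si variants simply match the
   pointer size selected for the target.  */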
3457
3458 /* Precalculate HARD_REGNO_NREGS. */
3459 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3460 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3461 rs6000_hard_regno_nregs[m][r]
3462 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3463
3464 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3465 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3466 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3467 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3468 rs6000_hard_regno_mode_ok_p[m][r] = true;
3469
3470 /* Precalculate CLASS_MAX_NREGS sizes. */
3471 for (c = 0; c < LIM_REG_CLASSES; ++c)
3472 {
3473 int reg_size;
3474
3475 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3476 reg_size = UNITS_PER_VSX_WORD;
3477
3478 else if (c == ALTIVEC_REGS)
3479 reg_size = UNITS_PER_ALTIVEC_WORD;
3480
3481 else if (c == FLOAT_REGS)
3482 reg_size = UNITS_PER_FP_WORD;
3483
3484 else
3485 reg_size = UNITS_PER_WORD;
3486
3487 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3488 {
3489 machine_mode m2 = (machine_mode)m;
3490 int reg_size2 = reg_size;
3491
3492 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3493 in VSX. */
3494 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3495 reg_size2 = UNITS_PER_FP_WORD;
3496
3497 rs6000_class_max_nregs[m][c]
3498 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3499 }
3500 }
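 
/* A worked example of the calculation above: V2DFmode is 16 bytes, so
   in FLOAT_REGS (8-byte registers) it needs (16 + 8 - 1) / 8 = 2
   registers, while in VSX_REGS (16-byte registers) it needs exactly 1.
   An IBM 128-bit float, being FLOAT128_2REG_P, is forced back to the
   8-byte register size even for VSX classes and therefore always
   occupies 2 registers.  */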
3501
3502 /* Calculate which modes to automatically generate code to use the
3503 reciprocal divide and square root instructions. In the future, possibly
3504 automatically generate the instructions even if the user did not specify
3505 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3506 not accurate enough. */
3507 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3508 if (TARGET_FRES)
3509 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3510 if (TARGET_FRE)
3511 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3512 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3513 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (VECTOR_UNIT_VSX_P (V2DFmode))
3515 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516
3517 if (TARGET_FRSQRTES)
3518 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3519 if (TARGET_FRSQRTE)
3520 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3521 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3522 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (VECTOR_UNIT_VSX_P (V2DFmode))
3524 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3525
3526 if (rs6000_recip_control)
3527 {
3528 if (!flag_finite_math_only)
3529 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3530 "-ffast-math");
3531 if (flag_trapping_math)
3532 warning (0, "%qs requires %qs or %qs", "-mrecip",
3533 "-fno-trapping-math", "-ffast-math");
3534 if (!flag_reciprocal_math)
3535 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3536 "-ffast-math");
3537 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3538 {
3539 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3540 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3541 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3542
3543 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3544 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3545 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3548 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3549 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3552 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3553 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3556 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3557 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3560 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3561 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3564 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3565 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3568 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3569 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570 }
3571 }
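 
/* For example, a command line such as "-O2 -mrecip -ffast-math" (a
   hypothetical invocation) satisfies the three checks above, since
   -ffast-math implies finite-math-only, no-trapping-math and
   reciprocal-math.  The AUTO_RE/AUTO_RSQRTE bits are then set, and
   divisions and square roots can later be expanded with the estimate
   instructions plus Newton-Raphson refinement instead of
   full-precision divides.  */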
3572
3573 /* Update the addr mask bits in reg_addr to help the secondary reload and
3574 legitimate address support figure out the appropriate addressing to
3575 use. */
3576 rs6000_setup_reg_addr_masks ();
3577
3578 if (global_init_p || TARGET_DEBUG_TARGET)
3579 {
3580 if (TARGET_DEBUG_REG)
3581 rs6000_debug_reg_global ();
3582
3583 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3584 fprintf (stderr,
3585 "SImode variable mult cost = %d\n"
3586 "SImode constant mult cost = %d\n"
3587 "SImode short constant mult cost = %d\n"
3588 "DImode multipliciation cost = %d\n"
3589 "SImode division cost = %d\n"
3590 "DImode division cost = %d\n"
3591 "Simple fp operation cost = %d\n"
3592 "DFmode multiplication cost = %d\n"
3593 "SFmode division cost = %d\n"
3594 "DFmode division cost = %d\n"
3595 "cache line size = %d\n"
3596 "l1 cache size = %d\n"
3597 "l2 cache size = %d\n"
3598 "simultaneous prefetches = %d\n"
3599 "\n",
3600 rs6000_cost->mulsi,
3601 rs6000_cost->mulsi_const,
3602 rs6000_cost->mulsi_const9,
3603 rs6000_cost->muldi,
3604 rs6000_cost->divsi,
3605 rs6000_cost->divdi,
3606 rs6000_cost->fp,
3607 rs6000_cost->dmul,
3608 rs6000_cost->sdiv,
3609 rs6000_cost->ddiv,
3610 rs6000_cost->cache_line_size,
3611 rs6000_cost->l1_cache_size,
3612 rs6000_cost->l2_cache_size,
3613 rs6000_cost->simultaneous_prefetches);
3614 }
3615 }
3616
3617 #if TARGET_MACHO
3618 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3619
3620 static void
3621 darwin_rs6000_override_options (void)
3622 {
3623 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3624 off. */
3625 rs6000_altivec_abi = 1;
3626 TARGET_ALTIVEC_VRSAVE = 1;
3627 rs6000_current_abi = ABI_DARWIN;
3628
3629 if (DEFAULT_ABI == ABI_DARWIN
3630 && TARGET_64BIT)
3631 darwin_one_byte_bool = 1;
3632
3633 if (TARGET_64BIT && ! TARGET_POWERPC64)
3634 {
3635 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3636 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3637 }
3638 if (flag_mkernel)
3639 {
3640 rs6000_default_long_calls = 1;
3641 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3642 }
3643
3644 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3645 Altivec. */
3646 if (!flag_mkernel && !flag_apple_kext
3647 && TARGET_64BIT
3648 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3649 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3650
3651 /* Unless the user (not the configurer) has explicitly overridden
3652 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3653 G4 unless targeting the kernel. */
3654 if (!flag_mkernel
3655 && !flag_apple_kext
3656 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3657 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3658 && ! global_options_set.x_rs6000_cpu_index)
3659 {
3660 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3661 }
3662 }
3663 #endif
3664
3665 /* If not otherwise specified by a target, make 'long double' equivalent to
3666 'double'. */
3667
3668 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3669 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3670 #endif
3671
3672 /* Return the builtin mask of the various options used that could affect
3673 which builtins are enabled. In the past we used target_flags, but we've
3674 run out of bits, and some options are no longer in target_flags. */
3675
3676 HOST_WIDE_INT
3677 rs6000_builtin_mask_calculate (void)
3678 {
3679 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3680 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3681 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3682 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3683 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3684 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3685 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3686 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3687 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3688 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3689 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3690 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3691 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3692 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3693 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3694 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3695 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3696 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3697 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3698 | ((TARGET_LONG_DOUBLE_128
3699 && TARGET_HARD_FLOAT
3700 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3701 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3702 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3703 }
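 
/* For instance, building for a power8 server target would typically
   produce a mask with RS6000_BTM_ALTIVEC, RS6000_BTM_VSX,
   RS6000_BTM_P8_VECTOR, RS6000_BTM_CRYPTO and RS6000_BTM_HTM set
   (among others); builtins gated on a bit not present in the mask are
   rejected when their expansion is attempted.  */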
3704
3705 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3706 to clobber the XER[CA] bit because clobbering that bit without telling
3707 the compiler worked just fine with versions of GCC before GCC 5, and
3708 breaking a lot of older code in ways that are hard to track down is
3709 not such a great idea. */
3710
3711 static rtx_insn *
3712 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3713 vec<const char *> &/*constraints*/,
3714 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3715 {
3716 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3717 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3718 return NULL;
3719 }
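 
/* As an example of why: user code along the lines of

       asm ("addic %0,%1,1" : "=r" (out) : "r" (in));

   modifies XER[CA] without declaring it.  Pushing the clobber above
   makes the compiler assume the worst for every asm statement, which
   keeps such pre-GCC-5 code working.  */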
3720
3721 /* Override command line options.
3722
3723 Combine build-specific configuration information with options
3724 specified on the command line to set various state variables which
3725 influence code generation, optimization, and expansion of built-in
3726 functions. Assure that command-line configuration preferences are
3727 compatible with each other and with the build configuration; issue
3728 warnings while adjusting configuration or error messages while
3729 rejecting configuration.
3730
3731 Upon entry to this function:
3732
3733 This function is called once at the beginning of
3734 compilation, and then again at the start and end of compiling
3735 each section of code that has a different configuration, as
3736 indicated, for example, by adding the
3737
3738 __attribute__((__target__("cpu=power9")))
3739
3740 qualifier to a function definition or, for example, by bracketing
3741 code between
3742
3743 #pragma GCC target("altivec")
3744
3745 and
3746
3747 #pragma GCC reset_options
3748
3749 directives. Parameter global_init_p is true for the initial
3750 invocation, which initializes global variables, and false for all
3751 subsequent invocations.
3752
3753
3754 Various global state information is assumed to be valid. This
3755 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3756 default CPU specified at build configure time, TARGET_DEFAULT,
3757 representing the default set of option flags for the default
3758 target, and global_options_set.x_rs6000_isa_flags, representing
3759 which options were requested on the command line.
3760
3761 Upon return from this function:
3762
3763 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3764 was set by name on the command line. Additionally, if certain
3765 attributes are automatically enabled or disabled by this function
3766 in order to assure compatibility between options and
3767 configuration, the flags associated with those attributes are
3768 also set. By setting these "explicit bits", we avoid the risk
3769 that other code might accidentally overwrite these particular
3770 attributes with "default values".
3771
3772 The various bits of rs6000_isa_flags are set to indicate the
3773 target options that have been selected for the most current
3774 compilation efforts. This has the effect of also turning on the
3775 associated TARGET_XXX values since these are macros which are
3776 generally defined to test the corresponding bit of the
3777 rs6000_isa_flags variable.
3778
3779 The variable rs6000_builtin_mask is set to represent the target
3780 options for the most current compilation efforts, consistent with
3781 the current contents of rs6000_isa_flags. This variable controls
3782 expansion of built-in functions.
3783
3784 Various other global variables and fields of global structures
3785 (over 50 in all) are initialized to reflect the desired options
3786 for the most current compilation efforts. */
3787
3788 static bool
3789 rs6000_option_override_internal (bool global_init_p)
3790 {
3791 bool ret = true;
3792
3793 HOST_WIDE_INT set_masks;
3794 HOST_WIDE_INT ignore_masks;
3795 int cpu_index = -1;
3796 int tune_index;
3797 struct cl_target_option *main_target_opt
3798 = ((global_init_p || target_option_default_node == NULL)
3799 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3800
3801 /* Print defaults. */
3802 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3803 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3804
3805 /* Remember the explicit arguments. */
3806 if (global_init_p)
3807 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3808
3809 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3810 library functions, so warn about it. The flag may be useful for
3811 performance studies from time to time though, so don't disable it
3812 entirely. */
3813 if (global_options_set.x_rs6000_alignment_flags
3814 && rs6000_alignment_flags == MASK_ALIGN_POWER
3815 && DEFAULT_ABI == ABI_DARWIN
3816 && TARGET_64BIT)
3817 warning (0, "%qs is not supported for 64-bit Darwin;"
3818 " it is incompatible with the installed C and C++ libraries",
3819 "-malign-power");
3820
3821 /* Numerous experiments show that IRA-based loop pressure
3822 calculation works better for RTL loop invariant motion on targets
3823 with enough (>= 32) registers. It is an expensive optimization,
3824 so it is enabled only when optimizing for peak performance. */
3825 if (optimize >= 3 && global_init_p
3826 && !global_options_set.x_flag_ira_loop_pressure)
3827 flag_ira_loop_pressure = 1;
3828
3829 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3830 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3831 options were already specified. */
3832 if (flag_sanitize & SANITIZE_USER_ADDRESS
3833 && !global_options_set.x_flag_asynchronous_unwind_tables)
3834 flag_asynchronous_unwind_tables = 1;
3835
3836 /* Set the pointer size. */
3837 if (TARGET_64BIT)
3838 {
3839 rs6000_pmode = DImode;
3840 rs6000_pointer_size = 64;
3841 }
3842 else
3843 {
3844 rs6000_pmode = SImode;
3845 rs6000_pointer_size = 32;
3846 }
3847
3848 /* Some OSs don't support saving the high part of 64-bit registers on context
3849 switch. Other OSs don't support saving Altivec registers. On those OSs,
3850 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3851 if the user wants either, the user must explicitly specify them and we
3852 won't interfere with the user's specification. */
3853
3854 set_masks = POWERPC_MASKS;
3855 #ifdef OS_MISSING_POWERPC64
3856 if (OS_MISSING_POWERPC64)
3857 set_masks &= ~OPTION_MASK_POWERPC64;
3858 #endif
3859 #ifdef OS_MISSING_ALTIVEC
3860 if (OS_MISSING_ALTIVEC)
3861 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3862 | OTHER_VSX_VECTOR_MASKS);
3863 #endif
3864
3865 /* Don't let the processor default override options given explicitly. */
3866 set_masks &= ~rs6000_isa_flags_explicit;
3867
3868 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3869 the cpu in a target attribute or pragma, but did not specify a tuning
3870 option, use the cpu for the tuning option rather than the option specified
3871 with -mtune on the command line. Process a '--with-cpu' configuration
3872 request as an implicit --cpu. */
3873 if (rs6000_cpu_index >= 0)
3874 cpu_index = rs6000_cpu_index;
3875 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3876 cpu_index = main_target_opt->x_rs6000_cpu_index;
3877 else if (OPTION_TARGET_CPU_DEFAULT)
3878 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3879
3880 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3881 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3882 with those from the cpu, except for options that were explicitly set. If
3883 we don't have a cpu, do not override the target bits set in
3884 TARGET_DEFAULT. */
3885 if (cpu_index >= 0)
3886 {
3887 rs6000_cpu_index = cpu_index;
3888 rs6000_isa_flags &= ~set_masks;
3889 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3890 & set_masks);
3891 }
3892 else
3893 {
3894 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3895 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3896 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3897 to using rs6000_isa_flags, we need to do the initialization here.
3898
3899 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3900 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3901 HOST_WIDE_INT flags;
3902 if (TARGET_DEFAULT)
3903 flags = TARGET_DEFAULT;
3904 else
3905 {
3906 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3907 const char *default_cpu = (!TARGET_POWERPC64
3908 ? "powerpc"
3909 : (BYTES_BIG_ENDIAN
3910 ? "powerpc64"
3911 : "powerpc64le"));
3912 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3913 flags = processor_target_table[default_cpu_index].target_enable;
3914 }
3915 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3916 }
3917
3918 if (rs6000_tune_index >= 0)
3919 tune_index = rs6000_tune_index;
3920 else if (cpu_index >= 0)
3921 rs6000_tune_index = tune_index = cpu_index;
3922 else
3923 {
3924 size_t i;
3925 enum processor_type tune_proc
3926 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3927
3928 tune_index = -1;
3929 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3930 if (processor_target_table[i].processor == tune_proc)
3931 {
3932 tune_index = i;
3933 break;
3934 }
3935 }
3936
3937 if (cpu_index >= 0)
3938 rs6000_cpu = processor_target_table[cpu_index].processor;
3939 else
3940 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3941
3942 gcc_assert (tune_index >= 0);
3943 rs6000_tune = processor_target_table[tune_index].processor;
3944
3945 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3946 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3947 || rs6000_cpu == PROCESSOR_PPCE5500)
3948 {
3949 if (TARGET_ALTIVEC)
3950 error ("AltiVec not supported in this target");
3951 }
3952
3953 /* If we are optimizing big endian systems for space, use the load/store
3954 multiple instructions. */
3955 if (BYTES_BIG_ENDIAN && optimize_size)
3956 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3957
3958 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3959 because the hardware doesn't support the instructions used in little
3960 endian mode, and they cause an alignment trap. The 750 does not cause an
3961 alignment trap (except when the target is unaligned). */
3962
3963 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3964 {
3965 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3966 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3967 warning (0, "%qs is not supported on little endian systems",
3968 "-mmultiple");
3969 }
3970
3971 /* If little-endian, default to -mstrict-align on older processors.
3972 Testing for htm matches power8 and later. */
3973 if (!BYTES_BIG_ENDIAN
3974 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3975 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3976
3977 if (!rs6000_fold_gimple)
3978 fprintf (stderr,
3979 "gimple folding of rs6000 builtins has been disabled.\n");
3980
3981 /* Add some warnings for VSX. */
3982 if (TARGET_VSX)
3983 {
3984 const char *msg = NULL;
3985 if (!TARGET_HARD_FLOAT)
3986 {
3987 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3988 msg = N_("-mvsx requires hardware floating point");
3989 else
3990 {
3991 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3992 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3993 }
3994 }
3995 else if (TARGET_AVOID_XFORM > 0)
3996 msg = N_("-mvsx needs indexed addressing");
3997 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3998 & OPTION_MASK_ALTIVEC))
3999 {
4000 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4001 msg = N_("-mvsx and -mno-altivec are incompatible");
4002 else
4003 msg = N_("-mno-altivec disables vsx");
4004 }
4005
4006 if (msg)
4007 {
4008 warning (0, msg);
4009 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4010 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4011 }
4012 }
4013
4014 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4015 the -mcpu setting to enable options that conflict. */
4016 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4017 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4018 | OPTION_MASK_ALTIVEC
4019 | OPTION_MASK_VSX)) != 0)
4020 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4021 | OPTION_MASK_DIRECT_MOVE)
4022 & ~rs6000_isa_flags_explicit);
4023
4024 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4025 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4026
4027 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4028 off all of the options that depend on those flags. */
4029 ignore_masks = rs6000_disable_incompatible_switches ();
4030
4031 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4032 unless the user explicitly used the -mno-<option> to disable the code. */
4033 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4034 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4035 else if (TARGET_P9_MINMAX)
4036 {
4037 if (cpu_index >= 0)
4038 {
4039 if (cpu_index == PROCESSOR_POWER9)
4040 {
4041 /* legacy behavior: allow -mcpu=power9 with certain
4042 capabilities explicitly disabled. */
4043 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4044 }
4045 else
4046 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4047 "for <xxx> less than power9", "-mcpu");
4048 }
4049 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4050 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4051 & rs6000_isa_flags_explicit))
4052 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4053 were explicitly cleared. */
4054 error ("%qs incompatible with explicitly disabled options",
4055 "-mpower9-minmax");
4056 else
4057 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4058 }
4059 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4060 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4061 else if (TARGET_VSX)
4062 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4063 else if (TARGET_POPCNTD)
4064 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4065 else if (TARGET_DFP)
4066 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4067 else if (TARGET_CMPB)
4068 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4069 else if (TARGET_FPRND)
4070 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4071 else if (TARGET_POPCNTB)
4072 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4073 else if (TARGET_ALTIVEC)
4074 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4075
4076 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4077 {
4078 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4079 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4080 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4081 }
4082
4083 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4084 {
4085 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4086 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4087 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4088 }
4089
4090 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4091 {
4092 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4093 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4094 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4095 }
4096
4097 if (TARGET_P8_VECTOR && !TARGET_VSX)
4098 {
4099 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4100 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4101 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4102 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4103 {
4104 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4105 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4106 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4107 }
4108 else
4109 {
4110 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4111 not explicit. */
4112 rs6000_isa_flags |= OPTION_MASK_VSX;
4113 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4114 }
4115 }
4116
4117 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4118 {
4119 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4120 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4121 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4122 }
4123
4124 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4125 silently turn off quad memory mode. */
4126 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4127 {
4128 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4129 warning (0, N_("-mquad-memory requires 64-bit mode"));
4130
4131 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4132 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4133
4134 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4135 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4136 }
4137
4138 /* Non-atomic quad memory load/store instructions are disabled for little
4139 endian, since the words are reversed, but atomic operations can still be
4140 done by swapping the words. */
4141 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4142 {
4143 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4144 warning (0, N_("-mquad-memory is not available in little endian "
4145 "mode"));
4146
4147 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4148 }
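 
/* (For reference: the non-atomic quad memory instructions are lq/stq,
   and the atomic sequences are built from lqarx/stqcx.; all of them
   operate on a 128-bit value in a GPR pair, which is why the two
   blocks above insist on 64-bit mode, and on big-endian word order for
   the non-atomic forms.)  */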
4149
4150 /* Assume that if the user asked for normal quad memory instructions, they
4151 want the atomic versions as well, unless they explicitly told us not to
4152 use quad word atomic instructions. */
4153 if (TARGET_QUAD_MEMORY
4154 && !TARGET_QUAD_MEMORY_ATOMIC
4155 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4156 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4157
4158 /* If we can shrink-wrap the TOC register save separately, then use
4159 -msave-toc-indirect unless explicitly disabled. */
4160 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4161 && flag_shrink_wrap_separate
4162 && optimize_function_for_speed_p (cfun))
4163 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4164
4165 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4166 generating power8 instructions. */
4167 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4168 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4169 & OPTION_MASK_P8_FUSION);
4170
4171 /* Setting additional fusion flags turns on base fusion. */
4172 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4173 {
4174 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4175 {
4176 if (TARGET_P8_FUSION_SIGN)
4177 error ("%qs requires %qs", "-mpower8-fusion-sign",
4178 "-mpower8-fusion");
4179
4180 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4181 }
4182 else
4183 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4184 }
4185
4186 /* Power9 fusion is a superset of power8 fusion. */
4187 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4188 {
4189 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4190 {
4191 /* We prefer to not mention undocumented options in
4192 error messages. However, if users have managed to select
4193 power9-fusion without selecting power8-fusion, they
4194 already know about undocumented flags. */
4195 error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
4196 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4197 }
4198 else
4199 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4200 }
4201
4202 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4203 generating power9 instructions. */
4204 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4205 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4206 & OPTION_MASK_P9_FUSION);
4207
4208 /* Power8 does not fuse sign-extended loads with the addis. If we are
4209 optimizing at high levels for speed, convert a sign-extended load into a
4210 zero-extending load and an explicit sign extension. */
4211 if (TARGET_P8_FUSION
4212 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4213 && optimize_function_for_speed_p (cfun)
4214 && optimize >= 3)
4215 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
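 
/* Concretely (an illustrative instruction sequence, not one emitted
   here): instead of "addis rX,r2,hi16 ; lwa rY,lo16(rX)", which power8
   cannot fuse, prefer "addis rX,r2,hi16 ; lwz rY,lo16(rX) ;
   extsw rY,rY", where the addis/lwz pair is fusible.  */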
4216
4217 /* ISA 3.0 vector instructions include ISA 2.07. */
4218 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4219 {
4220 /* We prefer to not mention undocumented options in
4221 error messages. However, if users have managed to select
4222 power9-vector without selecting power8-vector, they
4223 already know about undocumented flags. */
4224 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4225 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4226 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4227 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4228 {
4229 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4230 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4231 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4232 }
4233 else
4234 {
4235 /* OPTION_MASK_P9_VECTOR is explicit and
4236 OPTION_MASK_P8_VECTOR is not explicit. */
4237 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4238 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4239 }
4240 }
4241
4242 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4243 support. If we only have ISA 2.06 support, and the user did not specify
4244 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4245 but we don't enable the full vectorization support. */
4246 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4247 TARGET_ALLOW_MOVMISALIGN = 1;
4248
4249 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4250 {
4251 if (TARGET_ALLOW_MOVMISALIGN > 0
4252 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4253 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4254
4255 TARGET_ALLOW_MOVMISALIGN = 0;
4256 }
4257
4258 /* Determine when unaligned vector accesses are permitted, and when
4259 they are preferred over masked Altivec loads. Note that if
4260 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4261 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4262 not true. */
4263 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4264 {
4265 if (!TARGET_VSX)
4266 {
4267 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4268 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4269
4270 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4271 }
4272
4273 else if (!TARGET_ALLOW_MOVMISALIGN)
4274 {
4275 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4276 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4277 "-mallow-movmisalign");
4278
4279 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4280 }
4281 }
4282
4283 /* Use long double size to select the appropriate long double. We use
4284 TYPE_PRECISION to differentiate the 3 different long double types. We map
4285 128 into the precision used for TFmode. */
4286 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4287 ? 64
4288 : FLOAT_PRECISION_TFmode);
4289
4290 /* Set long double size before the IEEE 128-bit tests. */
4291 if (!global_options_set.x_rs6000_long_double_type_size)
4292 {
4293 if (main_target_opt != NULL
4294 && (main_target_opt->x_rs6000_long_double_type_size
4295 != default_long_double_size))
4296 error ("target attribute or pragma changes long double size");
4297 else
4298 rs6000_long_double_type_size = default_long_double_size;
4299 }
4300 else if (rs6000_long_double_type_size == 128)
4301 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4302
4303 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4304 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4305 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4306 those systems will not pick up this default. Warn if the user changes the
4307 default unless -Wno-psabi. */
4308 if (!global_options_set.x_rs6000_ieeequad)
4309 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4310
4311 else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4312 {
4313 static bool warned_change_long_double;
4314 if (!warned_change_long_double)
4315 {
4316 warned_change_long_double = true;
4317 if (TARGET_IEEEQUAD)
4318 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4319 else
4320 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4321 }
4322 }
4323
4324 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4325 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4326 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4327 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4328 the keyword and the type. */
4329 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4330
4331 /* IEEE 128-bit floating point requires VSX support. */
4332 if (TARGET_FLOAT128_KEYWORD)
4333 {
4334 if (!TARGET_VSX)
4335 {
4336 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4337 error ("%qs requires VSX support", "-mfloat128");
4338
4339 TARGET_FLOAT128_TYPE = 0;
4340 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4341 | OPTION_MASK_FLOAT128_HW);
4342 }
4343 else if (!TARGET_FLOAT128_TYPE)
4344 {
4345 TARGET_FLOAT128_TYPE = 1;
4346 warning (0, "The -mfloat128 option may not be fully supported");
4347 }
4348 }
4349
4350 /* Enable the __float128 keyword under Linux by default. */
4351 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4352 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4353 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4354
4355 /* If we are supporting the float128 type and full ISA 3.0 support,
4356 enable -mfloat128-hardware by default. However, don't enable the
4357 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4358 because sometimes the compiler wants to put things in an integer
4359 container, and if we don't have __int128 support, it is impossible. */
4360 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4361 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4362 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4363 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4364
4365 if (TARGET_FLOAT128_HW
4366 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4367 {
4368 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4369 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4370
4371 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4372 }
4373
4374 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4375 {
4376 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4377 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4378
4379 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4380 }
4381
4382 /* Print the options after updating the defaults. */
4383 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4384 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4385
4386 /* E500mc does "better" if we inline more aggressively. Respect the
4387 user's opinion, though. */
4388 if (rs6000_block_move_inline_limit == 0
4389 && (rs6000_tune == PROCESSOR_PPCE500MC
4390 || rs6000_tune == PROCESSOR_PPCE500MC64
4391 || rs6000_tune == PROCESSOR_PPCE5500
4392 || rs6000_tune == PROCESSOR_PPCE6500))
4393 rs6000_block_move_inline_limit = 128;
4394
4395 /* store_one_arg depends on expand_block_move to handle at least the
4396 size of reg_parm_stack_space. */
4397 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4398 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
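 
/* A minimal sketch of the effect: with the limit at 128, a call such
   as memcpy (dst, src, 96) can be expanded inline by
   expand_block_move, while larger copies fall back to a library call.
   (The 96 here is an arbitrary illustrative size.)  */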
4399
4400 if (global_init_p)
4401 {
4402 /* If the appropriate debug option is enabled, replace the target hooks
4403 with debug versions that call the real version and then print
4404 debugging information. */
4405 if (TARGET_DEBUG_COST)
4406 {
4407 targetm.rtx_costs = rs6000_debug_rtx_costs;
4408 targetm.address_cost = rs6000_debug_address_cost;
4409 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4410 }
4411
4412 if (TARGET_DEBUG_ADDR)
4413 {
4414 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4415 targetm.legitimize_address = rs6000_debug_legitimize_address;
4416 rs6000_secondary_reload_class_ptr
4417 = rs6000_debug_secondary_reload_class;
4418 targetm.secondary_memory_needed
4419 = rs6000_debug_secondary_memory_needed;
4420 targetm.can_change_mode_class
4421 = rs6000_debug_can_change_mode_class;
4422 rs6000_preferred_reload_class_ptr
4423 = rs6000_debug_preferred_reload_class;
4424 rs6000_legitimize_reload_address_ptr
4425 = rs6000_debug_legitimize_reload_address;
4426 rs6000_mode_dependent_address_ptr
4427 = rs6000_debug_mode_dependent_address;
4428 }
4429
4430 if (rs6000_veclibabi_name)
4431 {
4432 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4433 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4434 else
4435 {
4436 error ("unknown vectorization library ABI type (%qs) for "
4437 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4438 ret = false;
4439 }
4440 }
4441 }
4442
4443 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4444 target attribute or pragma which automatically enables both options,
4445 unless the altivec ABI was set. This is set by default for 64-bit, but
4446 not for 32-bit. */
4447 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4448 {
4449 TARGET_FLOAT128_TYPE = 0;
4450 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4451 | OPTION_MASK_FLOAT128_KEYWORD)
4452 & ~rs6000_isa_flags_explicit);
4453 }
4454
4455 /* Enable Altivec ABI for AIX -maltivec. */
4456 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4457 {
4458 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4459 error ("target attribute or pragma changes AltiVec ABI");
4460 else
4461 rs6000_altivec_abi = 1;
4462 }
4463
4464 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4465 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4466 be explicitly overridden in either case. */
4467 if (TARGET_ELF)
4468 {
4469 if (!global_options_set.x_rs6000_altivec_abi
4470 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4471 {
4472 if (main_target_opt != NULL
4473 && !main_target_opt->x_rs6000_altivec_abi)
4474 error ("target attribute or pragma changes AltiVec ABI");
4475 else
4476 rs6000_altivec_abi = 1;
4477 }
4478 }
4479
4480 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4481 So far, the only darwin64 targets are also Mach-O. */
4482 if (TARGET_MACHO
4483 && DEFAULT_ABI == ABI_DARWIN
4484 && TARGET_64BIT)
4485 {
4486 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4487 error ("target attribute or pragma changes darwin64 ABI");
4488 else
4489 {
4490 rs6000_darwin64_abi = 1;
4491 /* Default to natural alignment, for better performance. */
4492 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4493 }
4494 }
4495
4496 /* Place FP constants in the constant pool instead of the TOC
4497 if section anchors are enabled. */
4498 if (flag_section_anchors
4499 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4500 TARGET_NO_FP_IN_TOC = 1;
4501
4502 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4503 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4504
4505 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4506 SUBTARGET_OVERRIDE_OPTIONS;
4507 #endif
4508 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4509 SUBSUBTARGET_OVERRIDE_OPTIONS;
4510 #endif
4511 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4512 SUB3TARGET_OVERRIDE_OPTIONS;
4513 #endif
4514
4515 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4516 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4517
4518 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4519 && rs6000_tune != PROCESSOR_POWER5
4520 && rs6000_tune != PROCESSOR_POWER6
4521 && rs6000_tune != PROCESSOR_POWER7
4522 && rs6000_tune != PROCESSOR_POWER8
4523 && rs6000_tune != PROCESSOR_POWER9
4524 && rs6000_tune != PROCESSOR_PPCA2
4525 && rs6000_tune != PROCESSOR_CELL
4526 && rs6000_tune != PROCESSOR_PPC476);
4527 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4528 || rs6000_tune == PROCESSOR_POWER5
4529 || rs6000_tune == PROCESSOR_POWER7
4530 || rs6000_tune == PROCESSOR_POWER8);
4531 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4532 || rs6000_tune == PROCESSOR_POWER5
4533 || rs6000_tune == PROCESSOR_POWER6
4534 || rs6000_tune == PROCESSOR_POWER7
4535 || rs6000_tune == PROCESSOR_POWER8
4536 || rs6000_tune == PROCESSOR_POWER9
4537 || rs6000_tune == PROCESSOR_PPCE500MC
4538 || rs6000_tune == PROCESSOR_PPCE500MC64
4539 || rs6000_tune == PROCESSOR_PPCE5500
4540 || rs6000_tune == PROCESSOR_PPCE6500);
4541
4542 /* Allow debug switches to override the above settings. These are set to -1
4543 in rs6000.opt to indicate the user hasn't directly set the switch. */
4544 if (TARGET_ALWAYS_HINT >= 0)
4545 rs6000_always_hint = TARGET_ALWAYS_HINT;
4546
4547 if (TARGET_SCHED_GROUPS >= 0)
4548 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4549
4550 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4551 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4552
4553 rs6000_sched_restricted_insns_priority
4554 = (rs6000_sched_groups ? 1 : 0);
4555
4556 /* Handle -msched-costly-dep option. */
4557 rs6000_sched_costly_dep
4558 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4559
4560 if (rs6000_sched_costly_dep_str)
4561 {
4562 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4563 rs6000_sched_costly_dep = no_dep_costly;
4564 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4565 rs6000_sched_costly_dep = all_deps_costly;
4566 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4567 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4568 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4569 rs6000_sched_costly_dep = store_to_load_dep_costly;
4570 else
4571 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4572 atoi (rs6000_sched_costly_dep_str));
4573 }
4574
4575 /* Handle -minsert-sched-nops option. */
4576 rs6000_sched_insert_nops
4577 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4578
4579 if (rs6000_sched_insert_nops_str)
4580 {
4581 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4582 rs6000_sched_insert_nops = sched_finish_none;
4583 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4584 rs6000_sched_insert_nops = sched_finish_pad_groups;
4585 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4586 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4587 else
4588 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4589 atoi (rs6000_sched_insert_nops_str));
4590 }
4591
4592 /* Handle the stack protector. */
4593 if (!global_options_set.x_rs6000_stack_protector_guard)
4594 #ifdef TARGET_THREAD_SSP_OFFSET
4595 rs6000_stack_protector_guard = SSP_TLS;
4596 #else
4597 rs6000_stack_protector_guard = SSP_GLOBAL;
4598 #endif
4599
4600 #ifdef TARGET_THREAD_SSP_OFFSET
4601 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4602 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4603 #endif
4604
4605 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4606 {
4607 char *endp;
4608 const char *str = rs6000_stack_protector_guard_offset_str;
4609
4610 errno = 0;
4611 long offset = strtol (str, &endp, 0);
4612 if (!*str || *endp || errno)
4613 error ("%qs is not a valid number in %qs", str,
4614 "-mstack-protector-guard-offset=");
4615
4616 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4617 || (TARGET_64BIT && (offset & 3)))
4618 error ("%qs is not a valid offset in %qs", str,
4619 "-mstack-protector-guard-offset=");
4620
4621 rs6000_stack_protector_guard_offset = offset;
4622 }
4623
4624 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4625 {
4626 const char *str = rs6000_stack_protector_guard_reg_str;
4627 int reg = decode_reg_name (str);
4628
4629 if (!IN_RANGE (reg, 1, 31))
4630 error ("%qs is not a valid base register in %qs", str,
4631 "-mstack-protector-guard-reg=");
4632
4633 rs6000_stack_protector_guard_reg = reg;
4634 }
4635
4636 if (rs6000_stack_protector_guard == SSP_TLS
4637 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4638 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
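 
/* A hypothetical invocation exercising the checks above:

       -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
       -mstack-protector-guard-offset=-28672

   decode_reg_name maps "r13" to 13 and the offset parses via strtol,
   so both land in the validated ranges.  */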
4639
4640 if (global_init_p)
4641 {
4642 #ifdef TARGET_REGNAMES
4643 /* If the user desires alternate register names, copy in the
4644 alternate names now. */
4645 if (TARGET_REGNAMES)
4646 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4647 #endif
4648
4649 /* Set aix_struct_return last, after the ABI is determined.
4650 If -maix-struct-return or -msvr4-struct-return was explicitly
4651 used, don't override with the ABI default. */
4652 if (!global_options_set.x_aix_struct_return)
4653 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4654
4655 #if 0
4656 /* IBM XL compiler defaults to unsigned bitfields. */
4657 if (TARGET_XL_COMPAT)
4658 flag_signed_bitfields = 0;
4659 #endif
4660
4661 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4662 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4663
4664 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4665
4666 /* We can only guarantee the availability of DI pseudo-ops when
4667 assembling for 64-bit targets. */
4668 if (!TARGET_64BIT)
4669 {
4670 targetm.asm_out.aligned_op.di = NULL;
4671 targetm.asm_out.unaligned_op.di = NULL;
4672 }
4673
4674
4675 /* Set branch target alignment, if not optimizing for size. */
4676 if (!optimize_size)
4677 {
4678 /* Cell wants to be aligned to 8 bytes for dual issue. Titan wants to be
4679 aligned to 8 bytes to avoid misprediction by the branch predictor. */
4680 if (rs6000_tune == PROCESSOR_TITAN
4681 || rs6000_tune == PROCESSOR_CELL)
4682 {
4683 if (flag_align_functions && !str_align_functions)
4684 str_align_functions = "8";
4685 if (flag_align_jumps && !str_align_jumps)
4686 str_align_jumps = "8";
4687 if (flag_align_loops && !str_align_loops)
4688 str_align_loops = "8";
4689 }
4690 if (rs6000_align_branch_targets)
4691 {
4692 if (flag_align_functions && !str_align_functions)
4693 str_align_functions = "16";
4694 if (flag_align_jumps && !str_align_jumps)
4695 str_align_jumps = "16";
4696 if (flag_align_loops && !str_align_loops)
4697 {
4698 can_override_loop_align = 1;
4699 str_align_loops = "16";
4700 }
4701 }
4702
4703 if (flag_align_jumps && !str_align_jumps)
4704 str_align_jumps = "16";
4705 if (flag_align_loops && !str_align_loops)
4706 str_align_loops = "16";
4707 }
4708
4709 /* Arrange to save and restore machine status around nested functions. */
4710 init_machine_status = rs6000_init_machine_status;
4711
4712 /* We should always be splitting complex arguments, but we can't break
4713 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4714 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4715 targetm.calls.split_complex_arg = NULL;
4716
4717 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4718 if (DEFAULT_ABI == ABI_AIX)
4719 targetm.calls.custom_function_descriptors = 0;
4720 }
4721
4722 /* Initialize rs6000_cost with the appropriate target costs. */
4723 if (optimize_size)
4724 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4725 else
4726 switch (rs6000_tune)
4727 {
4728 case PROCESSOR_RS64A:
4729 rs6000_cost = &rs64a_cost;
4730 break;
4731
4732 case PROCESSOR_MPCCORE:
4733 rs6000_cost = &mpccore_cost;
4734 break;
4735
4736 case PROCESSOR_PPC403:
4737 rs6000_cost = &ppc403_cost;
4738 break;
4739
4740 case PROCESSOR_PPC405:
4741 rs6000_cost = &ppc405_cost;
4742 break;
4743
4744 case PROCESSOR_PPC440:
4745 rs6000_cost = &ppc440_cost;
4746 break;
4747
4748 case PROCESSOR_PPC476:
4749 rs6000_cost = &ppc476_cost;
4750 break;
4751
4752 case PROCESSOR_PPC601:
4753 rs6000_cost = &ppc601_cost;
4754 break;
4755
4756 case PROCESSOR_PPC603:
4757 rs6000_cost = &ppc603_cost;
4758 break;
4759
4760 case PROCESSOR_PPC604:
4761 rs6000_cost = &ppc604_cost;
4762 break;
4763
4764 case PROCESSOR_PPC604e:
4765 rs6000_cost = &ppc604e_cost;
4766 break;
4767
4768 case PROCESSOR_PPC620:
4769 rs6000_cost = &ppc620_cost;
4770 break;
4771
4772 case PROCESSOR_PPC630:
4773 rs6000_cost = &ppc630_cost;
4774 break;
4775
4776 case PROCESSOR_CELL:
4777 rs6000_cost = &ppccell_cost;
4778 break;
4779
4780 case PROCESSOR_PPC750:
4781 case PROCESSOR_PPC7400:
4782 rs6000_cost = &ppc750_cost;
4783 break;
4784
4785 case PROCESSOR_PPC7450:
4786 rs6000_cost = &ppc7450_cost;
4787 break;
4788
4789 case PROCESSOR_PPC8540:
4790 case PROCESSOR_PPC8548:
4791 rs6000_cost = &ppc8540_cost;
4792 break;
4793
4794 case PROCESSOR_PPCE300C2:
4795 case PROCESSOR_PPCE300C3:
4796 rs6000_cost = &ppce300c2c3_cost;
4797 break;
4798
4799 case PROCESSOR_PPCE500MC:
4800 rs6000_cost = &ppce500mc_cost;
4801 break;
4802
4803 case PROCESSOR_PPCE500MC64:
4804 rs6000_cost = &ppce500mc64_cost;
4805 break;
4806
4807 case PROCESSOR_PPCE5500:
4808 rs6000_cost = &ppce5500_cost;
4809 break;
4810
4811 case PROCESSOR_PPCE6500:
4812 rs6000_cost = &ppce6500_cost;
4813 break;
4814
4815 case PROCESSOR_TITAN:
4816 rs6000_cost = &titan_cost;
4817 break;
4818
4819 case PROCESSOR_POWER4:
4820 case PROCESSOR_POWER5:
4821 rs6000_cost = &power4_cost;
4822 break;
4823
4824 case PROCESSOR_POWER6:
4825 rs6000_cost = &power6_cost;
4826 break;
4827
4828 case PROCESSOR_POWER7:
4829 rs6000_cost = &power7_cost;
4830 break;
4831
4832 case PROCESSOR_POWER8:
4833 rs6000_cost = &power8_cost;
4834 break;
4835
4836 case PROCESSOR_POWER9:
4837 rs6000_cost = &power9_cost;
4838 break;
4839
4840 case PROCESSOR_PPCA2:
4841 rs6000_cost = &ppca2_cost;
4842 break;
4843
4844 default:
4845 gcc_unreachable ();
4846 }
4847
4848 if (global_init_p)
4849 {
4850 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4851 rs6000_cost->simultaneous_prefetches,
4852 global_options.x_param_values,
4853 global_options_set.x_param_values);
4854 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4855 global_options.x_param_values,
4856 global_options_set.x_param_values);
4857 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4858 rs6000_cost->cache_line_size,
4859 global_options.x_param_values,
4860 global_options_set.x_param_values);
4861 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4862 global_options.x_param_values,
4863 global_options_set.x_param_values);
4864
4865 /* Increase loop peeling limits based on performance analysis. */
4866 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4867 global_options.x_param_values,
4868 global_options_set.x_param_values);
4869 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4870 global_options.x_param_values,
4871 global_options_set.x_param_values);
4872
4873 /* Use the 'model' -fsched-pressure algorithm by default. */
4874 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4875 SCHED_PRESSURE_MODEL,
4876 global_options.x_param_values,
4877 global_options_set.x_param_values);
4878
4879 /* If using typedef char *va_list, signal that
4880 __builtin_va_start (&ap, 0) can be optimized to
4881 ap = __builtin_next_arg (0). */
4882 if (DEFAULT_ABI != ABI_V4)
4883 targetm.expand_builtin_va_start = NULL;
4884 }
4885
4886 /* If not explicitly specified via option, decide whether to generate indexed
4887 load/store instructions. A value of -1 indicates that the
4888 initial value of this variable has not been overwritten. During
4889 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4890 if (TARGET_AVOID_XFORM == -1)
4891 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4892 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
4893 need indexed accesses and the type used is the scalar type of the element
4894 being loaded or stored. */
4895 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4896 && !TARGET_ALTIVEC);
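/* Illustrative sketch (not from the sources): with TARGET_AVOID_XFORM in
   effect, an indexed (X-form) access such as

	lwzx r10,r3,r4

   is instead emitted as a separate add followed by a D-form load:

	add r9,r3,r4
	lwz r10,0(r9)

   trading one extra instruction for avoiding the DERAT mispredict
   penalty described above. */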
4897
4898 /* Set the -mrecip options. */
4899 if (rs6000_recip_name)
4900 {
4901 char *p = ASTRDUP (rs6000_recip_name);
4902 char *q;
4903 unsigned int mask, i;
4904 bool invert;
4905
4906 while ((q = strtok (p, ",")) != NULL)
4907 {
4908 p = NULL;
4909 if (*q == '!')
4910 {
4911 invert = true;
4912 q++;
4913 }
4914 else
4915 invert = false;
4916
4917 if (!strcmp (q, "default"))
4918 mask = ((TARGET_RECIP_PRECISION)
4919 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4920 else
4921 {
4922 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4923 if (!strcmp (q, recip_options[i].string))
4924 {
4925 mask = recip_options[i].mask;
4926 break;
4927 }
4928
4929 if (i == ARRAY_SIZE (recip_options))
4930 {
4931 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4932 invert = false;
4933 mask = 0;
4934 ret = false;
4935 }
4936 }
4937
4938 if (invert)
4939 rs6000_recip_control &= ~mask;
4940 else
4941 rs6000_recip_control |= mask;
4942 }
4943 }
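/* Usage sketch (hedged; option keywords assumed from the recip_options
   table defined earlier in this file): the parser above accepts a
   comma-separated list where a '!' prefix inverts the named mask, e.g.

	-mrecip=rsqrtd,!divf

   enables reciprocal square-root estimates for double and disables
   reciprocal-divide estimates for float, while

	-mrecip=default

   selects RECIP_HIGH_PRECISION or RECIP_LOW_PRECISION depending on
   TARGET_RECIP_PRECISION. */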
4944
4945 /* Set the builtin mask of the various options used that could affect which
4946 builtins were used. In the past we used target_flags, but we've run out
4947 of bits, and some options are no longer in target_flags. */
4948 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4949 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4950 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4951 rs6000_builtin_mask);
4952
4953 /* Initialize all of the registers. */
4954 rs6000_init_hard_regno_mode_ok (global_init_p);
4955
4956 /* Save the initial options in case the user uses function-specific options. */
4957 if (global_init_p)
4958 target_option_default_node = target_option_current_node
4959 = build_target_option_node (&global_options);
4960
4961 /* If not explicitly specified via option, decide whether to generate the
4962 extra blrs required to preserve the link stack on some cpus (e.g., 476). */
4963 if (TARGET_LINK_STACK == -1)
4964 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4965
4966 /* Deprecate use of -mno-speculate-indirect-jumps. */
4967 if (!rs6000_speculate_indirect_jumps)
4968 warning (0, "%qs is deprecated and not recommended in any circumstances",
4969 "-mno-speculate-indirect-jumps");
4970
4971 return ret;
4972 }
4973
4974 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4975 define the target cpu type. */
4976
4977 static void
4978 rs6000_option_override (void)
4979 {
4980 (void) rs6000_option_override_internal (true);
4981 }
4982
4983 \f
4984 /* Implement targetm.vectorize.builtin_mask_for_load. */
4985 static tree
4986 rs6000_builtin_mask_for_load (void)
4987 {
4988 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4989 if ((TARGET_ALTIVEC && !TARGET_VSX)
4990 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4991 return altivec_builtin_mask_for_load;
4992 else
4993 return 0;
4994 }
4995
4996 /* Implement LOOP_ALIGN. */
4997 align_flags
4998 rs6000_loop_align (rtx label)
4999 {
5000 basic_block bb;
5001 int ninsns;
5002
5003 /* Don't override loop alignment if -falign-loops was specified. */
5004 if (!can_override_loop_align)
5005 return align_loops;
5006
5007 bb = BLOCK_FOR_INSN (label);
5008 ninsns = num_loop_insns (bb->loop_father);
5009
5010 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default alignment. */
5011 if (ninsns > 4 && ninsns <= 8
5012 && (rs6000_tune == PROCESSOR_POWER4
5013 || rs6000_tune == PROCESSOR_POWER5
5014 || rs6000_tune == PROCESSOR_POWER6
5015 || rs6000_tune == PROCESSOR_POWER7
5016 || rs6000_tune == PROCESSOR_POWER8))
5017 return align_flags (5);
5018 else
5019 return align_loops;
5020 }
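/* Worked example (illustrative): align_flags (5) requests 2**5 == 32
   byte alignment, so on Power4..Power8 a loop of 5 to 8 instructions is
   placed at the start of a 32-byte icache sector; larger or smaller
   loops keep the -falign-loops default. */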
5021
5022 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5023 after applying N iterations. This routine does not determine how many
5024 iterations are required to reach the desired alignment. */
5025
5026 static bool
5027 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5028 {
5029 if (is_packed)
5030 return false;
5031
5032 if (TARGET_32BIT)
5033 {
5034 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5035 return true;
5036
5037 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5038 return true;
5039
5040 return false;
5041 }
5042 else
5043 {
5044 if (TARGET_MACHO)
5045 return false;
5046
5047 /* Assume that all other types are naturally aligned. CHECKME! */
5048 return true;
5049 }
5050 }
5051
5052 /* Return true if the vector misalignment factor is supported by the
5053 target. */
5054 static bool
5055 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5056 const_tree type,
5057 int misalignment,
5058 bool is_packed)
5059 {
5060 if (TARGET_VSX)
5061 {
5062 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5063 return true;
5064
5065 /* Return false if the movmisalign pattern is not supported for this mode. */
5066 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5067 return false;
5068
5069 if (misalignment == -1)
5070 {
5071 /* Misalignment factor is unknown at compile time but we know
5072 it's word aligned. */
5073 if (rs6000_vector_alignment_reachable (type, is_packed))
5074 {
5075 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5076
5077 if (element_size == 64 || element_size == 32)
5078 return true;
5079 }
5080
5081 return false;
5082 }
5083
5084 /* VSX supports word-aligned vector. */
5085 if (misalignment % 4 == 0)
5086 return true;
5087 }
5088 return false;
5089 }
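/* Illustrative examples of the policy above on a VSX target without
   TARGET_EFFICIENT_UNALIGNED_VSX:

     misalignment 8 -> supported (8 % 4 == 0, word aligned)
     misalignment 3 -> not supported (not word aligned)
     misalignment -1 -> supported only if the type can reach vector
     alignment and its elements are 32 or 64 bits wide. */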
5090
5091 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5092 static int
5093 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5094 tree vectype, int misalign)
5095 {
5096 unsigned elements;
5097 tree elem_type;
5098
5099 switch (type_of_cost)
5100 {
5101 case scalar_stmt:
5102 case scalar_load:
5103 case scalar_store:
5104 case vector_stmt:
5105 case vector_load:
5106 case vector_store:
5107 case vec_to_scalar:
5108 case scalar_to_vec:
5109 case cond_branch_not_taken:
5110 return 1;
5111
5112 case vec_perm:
5113 if (TARGET_VSX)
5114 return 3;
5115 else
5116 return 1;
5117
5118 case vec_promote_demote:
5119 if (TARGET_VSX)
5120 return 4;
5121 else
5122 return 1;
5123
5124 case cond_branch_taken:
5125 return 3;
5126
5127 case unaligned_load:
5128 case vector_gather_load:
5129 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5130 return 1;
5131
5132 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5133 {
5134 elements = TYPE_VECTOR_SUBPARTS (vectype);
5135 if (elements == 2)
5136 /* Double word aligned. */
5137 return 2;
5138
5139 if (elements == 4)
5140 {
5141 switch (misalign)
5142 {
5143 case 8:
5144 /* Double word aligned. */
5145 return 2;
5146
5147 case -1:
5148 /* Unknown misalignment. */
5149 case 4:
5150 case 12:
5151 /* Word aligned. */
5152 return 22;
5153
5154 default:
5155 gcc_unreachable ();
5156 }
5157 }
5158 }
5159
5160 if (TARGET_ALTIVEC)
5161 /* Misaligned loads are not supported. */
5162 gcc_unreachable ();
5163
5164 return 2;
5165
5166 case unaligned_store:
5167 case vector_scatter_store:
5168 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5169 return 1;
5170
5171 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5172 {
5173 elements = TYPE_VECTOR_SUBPARTS (vectype);
5174 if (elements == 2)
5175 /* Double word aligned. */
5176 return 2;
5177
5178 if (elements == 4)
5179 {
5180 switch (misalign)
5181 {
5182 case 8:
5183 /* Double word aligned. */
5184 return 2;
5185
5186 case -1:
5187 /* Unknown misalignment. */
5188 case 4:
5189 case 12:
5190 /* Word aligned. */
5191 return 23;
5192
5193 default:
5194 gcc_unreachable ();
5195 }
5196 }
5197 }
5198
5199 if (TARGET_ALTIVEC)
5200 /* Misaligned stores are not supported. */
5201 gcc_unreachable ();
5202
5203 return 2;
5204
5205 case vec_construct:
5206 /* This is a rough approximation assuming non-constant elements
5207 constructed into a vector via element insertion. FIXME:
5208 vec_construct is not granular enough for uniformly good
5209 decisions. If the initialization is a splat, this is
5210 cheaper than we estimate. Improve this someday. */
5211 elem_type = TREE_TYPE (vectype);
5212 /* 32-bit vectors loaded into registers are stored as double
5213 precision, so we need 2 permutes, 2 converts, and 1 merge
5214 to construct a vector of short floats from them. */
5215 if (SCALAR_FLOAT_TYPE_P (elem_type)
5216 && TYPE_PRECISION (elem_type) == 32)
5217 return 5;
5218 /* On POWER9, integer vector types are built up in GPRs and then
5219 use a direct move (2 cycles). For POWER8 this is even worse,
5220 as we need two direct moves and a merge, and the direct moves
5221 are five cycles. */
5222 else if (INTEGRAL_TYPE_P (elem_type))
5223 {
5224 if (TARGET_P9_VECTOR)
5225 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5226 else
5227 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5228 }
5229 else
5230 /* V2DFmode doesn't need a direct move. */
5231 return 2;
5232
5233 default:
5234 gcc_unreachable ();
5235 }
5236 }
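/* Worked example (illustrative): on a pre-power9 VSX target with
   TARGET_ALLOW_MOVMISALIGN, a V4SF unaligned_load with misalign == 4
   costs 22, the same load at misalign == 8 (double-word aligned) costs
   2, and an aligned vector_load costs 1; the vectorizer therefore
   strongly prefers peeling or versioning for alignment here. */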
5237
5238 /* Implement targetm.vectorize.preferred_simd_mode. */
5239
5240 static machine_mode
5241 rs6000_preferred_simd_mode (scalar_mode mode)
5242 {
5243 if (TARGET_VSX)
5244 switch (mode)
5245 {
5246 case E_DFmode:
5247 return V2DFmode;
5248 default:;
5249 }
5250 if (TARGET_ALTIVEC || TARGET_VSX)
5251 switch (mode)
5252 {
5253 case E_SFmode:
5254 return V4SFmode;
5255 case E_TImode:
5256 return V1TImode;
5257 case E_DImode:
5258 return V2DImode;
5259 case E_SImode:
5260 return V4SImode;
5261 case E_HImode:
5262 return V8HImode;
5263 case E_QImode:
5264 return V16QImode;
5265 default:;
5266 }
5267 return word_mode;
5268 }
5269
5270 typedef struct _rs6000_cost_data
5271 {
5272 struct loop *loop_info;
5273 unsigned cost[3];
5274 } rs6000_cost_data;
5275
5276 /* Test for likely overcommitment of vector hardware resources. If a
5277 loop iteration is relatively large, and too large a percentage of
5278 instructions in the loop are vectorized, the cost model may not
5279 adequately reflect delays from unavailable vector resources.
5280 Penalize the loop body cost for this case. */
5281
5282 static void
5283 rs6000_density_test (rs6000_cost_data *data)
5284 {
5285 const int DENSITY_PCT_THRESHOLD = 85;
5286 const int DENSITY_SIZE_THRESHOLD = 70;
5287 const int DENSITY_PENALTY = 10;
5288 struct loop *loop = data->loop_info;
5289 basic_block *bbs = get_loop_body (loop);
5290 int nbbs = loop->num_nodes;
5291 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5292 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5293 int i, density_pct;
5294
5295 for (i = 0; i < nbbs; i++)
5296 {
5297 basic_block bb = bbs[i];
5298 gimple_stmt_iterator gsi;
5299
5300 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5301 {
5302 gimple *stmt = gsi_stmt (gsi);
5303 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5304
5305 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5306 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5307 not_vec_cost++;
5308 }
5309 }
5310
5311 free (bbs);
5312 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5313
5314 if (density_pct > DENSITY_PCT_THRESHOLD
5315 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5316 {
5317 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5318 if (dump_enabled_p ())
5319 dump_printf_loc (MSG_NOTE, vect_location,
5320 "density %d%%, cost %d exceeds threshold, penalizing "
5321 "loop body cost by %d%%", density_pct,
5322 vec_cost + not_vec_cost, DENSITY_PENALTY);
5323 }
5324 }
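/* Worked example (illustrative): a loop with vec_cost == 90 and
   not_vec_cost == 10 has density 90 * 100 / (90 + 10) == 90%, above the
   85% threshold, and total size 100 > 70, so the body cost is scaled by
   110/100 from 90 to 99. */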
5325
5326 /* Implement targetm.vectorize.init_cost. */
5327
5328 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5329 instruction is needed by the vectorization. */
5330 static bool rs6000_vect_nonmem;
5331
5332 static void *
5333 rs6000_init_cost (struct loop *loop_info)
5334 {
5335 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5336 data->loop_info = loop_info;
5337 data->cost[vect_prologue] = 0;
5338 data->cost[vect_body] = 0;
5339 data->cost[vect_epilogue] = 0;
5340 rs6000_vect_nonmem = false;
5341 return data;
5342 }
5343
5344 /* Implement targetm.vectorize.add_stmt_cost. */
5345
5346 static unsigned
5347 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5348 struct _stmt_vec_info *stmt_info, int misalign,
5349 enum vect_cost_model_location where)
5350 {
5351 rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
5352 unsigned retval = 0;
5353
5354 if (flag_vect_cost_model)
5355 {
5356 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5357 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5358 misalign);
5359 /* Statements in an inner loop relative to the loop being
5360 vectorized are weighted more heavily. The value here is
5361 arbitrary and could potentially be improved with analysis. */
5362 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5363 count *= 50; /* FIXME. */
5364
5365 retval = (unsigned) (count * stmt_cost);
5366 cost_data->cost[where] += retval;
5367
5368 /* Check whether we're doing something other than just a copy loop.
5369 Not all such loops may be profitably vectorized; see
5370 rs6000_finish_cost. */
5371 if ((kind == vec_to_scalar || kind == vec_perm
5372 || kind == vec_promote_demote || kind == vec_construct
5373 || kind == scalar_to_vec)
5374 || (where == vect_body && kind == vector_stmt))
5375 rs6000_vect_nonmem = true;
5376 }
5377
5378 return retval;
5379 }
5380
5381 /* Implement targetm.vectorize.finish_cost. */
5382
5383 static void
5384 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5385 unsigned *body_cost, unsigned *epilogue_cost)
5386 {
5387 rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
5388
5389 if (cost_data->loop_info)
5390 rs6000_density_test (cost_data);
5391
5392 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5393 that require versioning for any reason. The vectorization is at
5394 best a wash inside the loop, and the versioning checks make
5395 profitability highly unlikely and potentially quite harmful. */
5396 if (cost_data->loop_info)
5397 {
5398 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5399 if (!rs6000_vect_nonmem
5400 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5401 && LOOP_REQUIRES_VERSIONING (vec_info))
5402 cost_data->cost[vect_body] += 10000;
5403 }
5404
5405 *prologue_cost = cost_data->cost[vect_prologue];
5406 *body_cost = cost_data->cost[vect_body];
5407 *epilogue_cost = cost_data->cost[vect_epilogue];
5408 }
5409
5410 /* Implement targetm.vectorize.destroy_cost_data. */
5411
5412 static void
5413 rs6000_destroy_cost_data (void *data)
5414 {
5415 free (data);
5416 }
5417
5418 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5419 library with vectorized intrinsics. */
5420
5421 static tree
5422 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5423 tree type_in)
5424 {
5425 char name[32];
5426 const char *suffix = NULL;
5427 tree fntype, new_fndecl, bdecl = NULL_TREE;
5428 int n_args = 1;
5429 const char *bname;
5430 machine_mode el_mode, in_mode;
5431 int n, in_n;
5432
5433 /* Libmass is suitable for unsafe math only as it does not correctly support
5434 parts of IEEE with the required precision such as denormals. Only support
5435 it if we have VSX to use the simd d2 or f4 functions.
5436 XXX: Add variable length support. */
5437 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5438 return NULL_TREE;
5439
5440 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5441 n = TYPE_VECTOR_SUBPARTS (type_out);
5442 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5443 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5444 if (el_mode != in_mode
5445 || n != in_n)
5446 return NULL_TREE;
5447
5448 switch (fn)
5449 {
5450 CASE_CFN_ATAN2:
5451 CASE_CFN_HYPOT:
5452 CASE_CFN_POW:
5453 n_args = 2;
5454 gcc_fallthrough ();
5455
5456 CASE_CFN_ACOS:
5457 CASE_CFN_ACOSH:
5458 CASE_CFN_ASIN:
5459 CASE_CFN_ASINH:
5460 CASE_CFN_ATAN:
5461 CASE_CFN_ATANH:
5462 CASE_CFN_CBRT:
5463 CASE_CFN_COS:
5464 CASE_CFN_COSH:
5465 CASE_CFN_ERF:
5466 CASE_CFN_ERFC:
5467 CASE_CFN_EXP2:
5468 CASE_CFN_EXP:
5469 CASE_CFN_EXPM1:
5470 CASE_CFN_LGAMMA:
5471 CASE_CFN_LOG10:
5472 CASE_CFN_LOG1P:
5473 CASE_CFN_LOG2:
5474 CASE_CFN_LOG:
5475 CASE_CFN_SIN:
5476 CASE_CFN_SINH:
5477 CASE_CFN_SQRT:
5478 CASE_CFN_TAN:
5479 CASE_CFN_TANH:
5480 if (el_mode == DFmode && n == 2)
5481 {
5482 bdecl = mathfn_built_in (double_type_node, fn);
5483 suffix = "d2"; /* pow -> powd2 */
5484 }
5485 else if (el_mode == SFmode && n == 4)
5486 {
5487 bdecl = mathfn_built_in (float_type_node, fn);
5488 suffix = "4"; /* powf -> powf4 */
5489 }
5490 else
5491 return NULL_TREE;
5492 if (!bdecl)
5493 return NULL_TREE;
5494 break;
5495
5496 default:
5497 return NULL_TREE;
5498 }
5499
5500 gcc_assert (suffix != NULL);
5501 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5502 if (!bname)
5503 return NULL_TREE;
5504
5505 strcpy (name, bname + sizeof ("__builtin_") - 1);
5506 strcat (name, suffix);
5507
5508 if (n_args == 1)
5509 fntype = build_function_type_list (type_out, type_in, NULL);
5510 else if (n_args == 2)
5511 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5512 else
5513 gcc_unreachable ();
5514
5515 /* Build a function declaration for the vectorized function. */
5516 new_fndecl = build_decl (BUILTINS_LOCATION,
5517 FUNCTION_DECL, get_identifier (name), fntype);
5518 TREE_PUBLIC (new_fndecl) = 1;
5519 DECL_EXTERNAL (new_fndecl) = 1;
5520 DECL_IS_NOVOPS (new_fndecl) = 1;
5521 TREE_READONLY (new_fndecl) = 1;
5522
5523 return new_fndecl;
5524 }
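/* Illustrative sketch (MASS names assumed from the suffix rule above):
   for CFN_POW on V2DFmode, bdecl is __builtin_pow, so the code above
   declares

	vector double powd2 (vector double, vector double);

   and for CFN_SIN on V4SFmode it declares

	vector float sinf4 (vector float);

   i.e. the scalar builtin name with "__builtin_" stripped and the
   "d2"/"4" suffix appended. */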
5525
5526 /* Returns a function decl for a vectorized version of the builtin function
5527 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5528 if it is not available. */
5529
5530 static tree
5531 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5532 tree type_in)
5533 {
5534 machine_mode in_mode, out_mode;
5535 int in_n, out_n;
5536
5537 if (TARGET_DEBUG_BUILTIN)
5538 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5539 combined_fn_name (combined_fn (fn)),
5540 GET_MODE_NAME (TYPE_MODE (type_out)),
5541 GET_MODE_NAME (TYPE_MODE (type_in)));
5542
5543 if (TREE_CODE (type_out) != VECTOR_TYPE
5544 || TREE_CODE (type_in) != VECTOR_TYPE)
5545 return NULL_TREE;
5546
5547 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5548 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5549 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5550 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5551
5552 switch (fn)
5553 {
5554 CASE_CFN_COPYSIGN:
5555 if (VECTOR_UNIT_VSX_P (V2DFmode)
5556 && out_mode == DFmode && out_n == 2
5557 && in_mode == DFmode && in_n == 2)
5558 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5559 if (VECTOR_UNIT_VSX_P (V4SFmode)
5560 && out_mode == SFmode && out_n == 4
5561 && in_mode == SFmode && in_n == 4)
5562 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5563 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5564 && out_mode == SFmode && out_n == 4
5565 && in_mode == SFmode && in_n == 4)
5566 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5567 break;
5568 CASE_CFN_CEIL:
5569 if (VECTOR_UNIT_VSX_P (V2DFmode)
5570 && out_mode == DFmode && out_n == 2
5571 && in_mode == DFmode && in_n == 2)
5572 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5573 if (VECTOR_UNIT_VSX_P (V4SFmode)
5574 && out_mode == SFmode && out_n == 4
5575 && in_mode == SFmode && in_n == 4)
5576 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5577 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5578 && out_mode == SFmode && out_n == 4
5579 && in_mode == SFmode && in_n == 4)
5580 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5581 break;
5582 CASE_CFN_FLOOR:
5583 if (VECTOR_UNIT_VSX_P (V2DFmode)
5584 && out_mode == DFmode && out_n == 2
5585 && in_mode == DFmode && in_n == 2)
5586 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5587 if (VECTOR_UNIT_VSX_P (V4SFmode)
5588 && out_mode == SFmode && out_n == 4
5589 && in_mode == SFmode && in_n == 4)
5590 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5591 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5595 break;
5596 CASE_CFN_FMA:
5597 if (VECTOR_UNIT_VSX_P (V2DFmode)
5598 && out_mode == DFmode && out_n == 2
5599 && in_mode == DFmode && in_n == 2)
5600 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5601 if (VECTOR_UNIT_VSX_P (V4SFmode)
5602 && out_mode == SFmode && out_n == 4
5603 && in_mode == SFmode && in_n == 4)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5605 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5609 break;
5610 CASE_CFN_TRUNC:
5611 if (VECTOR_UNIT_VSX_P (V2DFmode)
5612 && out_mode == DFmode && out_n == 2
5613 && in_mode == DFmode && in_n == 2)
5614 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5615 if (VECTOR_UNIT_VSX_P (V4SFmode)
5616 && out_mode == SFmode && out_n == 4
5617 && in_mode == SFmode && in_n == 4)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5619 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5623 break;
5624 CASE_CFN_NEARBYINT:
5625 if (VECTOR_UNIT_VSX_P (V2DFmode)
5626 && flag_unsafe_math_optimizations
5627 && out_mode == DFmode && out_n == 2
5628 && in_mode == DFmode && in_n == 2)
5629 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5630 if (VECTOR_UNIT_VSX_P (V4SFmode)
5631 && flag_unsafe_math_optimizations
5632 && out_mode == SFmode && out_n == 4
5633 && in_mode == SFmode && in_n == 4)
5634 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5635 break;
5636 CASE_CFN_RINT:
5637 if (VECTOR_UNIT_VSX_P (V2DFmode)
5638 && !flag_trapping_math
5639 && out_mode == DFmode && out_n == 2
5640 && in_mode == DFmode && in_n == 2)
5641 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5642 if (VECTOR_UNIT_VSX_P (V4SFmode)
5643 && !flag_trapping_math
5644 && out_mode == SFmode && out_n == 4
5645 && in_mode == SFmode && in_n == 4)
5646 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5647 break;
5648 default:
5649 break;
5650 }
5651
5652 /* Generate calls to libmass if appropriate. */
5653 if (rs6000_veclib_handler)
5654 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5655
5656 return NULL_TREE;
5657 }
5658
5659 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5660
5661 static tree
5662 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5663 tree type_in)
5664 {
5665 machine_mode in_mode, out_mode;
5666 int in_n, out_n;
5667
5668 if (TARGET_DEBUG_BUILTIN)
5669 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5670 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5671 GET_MODE_NAME (TYPE_MODE (type_out)),
5672 GET_MODE_NAME (TYPE_MODE (type_in)));
5673
5674 if (TREE_CODE (type_out) != VECTOR_TYPE
5675 || TREE_CODE (type_in) != VECTOR_TYPE)
5676 return NULL_TREE;
5677
5678 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5679 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5680 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5681 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5682
5683 enum rs6000_builtins fn
5684 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5685 switch (fn)
5686 {
5687 case RS6000_BUILTIN_RSQRTF:
5688 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5689 && out_mode == SFmode && out_n == 4
5690 && in_mode == SFmode && in_n == 4)
5691 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5692 break;
5693 case RS6000_BUILTIN_RSQRT:
5694 if (VECTOR_UNIT_VSX_P (V2DFmode)
5695 && out_mode == DFmode && out_n == 2
5696 && in_mode == DFmode && in_n == 2)
5697 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5698 break;
5699 case RS6000_BUILTIN_RECIPF:
5700 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5701 && out_mode == SFmode && out_n == 4
5702 && in_mode == SFmode && in_n == 4)
5703 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5704 break;
5705 case RS6000_BUILTIN_RECIP:
5706 if (VECTOR_UNIT_VSX_P (V2DFmode)
5707 && out_mode == DFmode && out_n == 2
5708 && in_mode == DFmode && in_n == 2)
5709 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5710 break;
5711 default:
5712 break;
5713 }
5714 return NULL_TREE;
5715 }
5716 \f
5717 /* Default CPU string for rs6000*_file_start functions. */
5718 static const char *rs6000_default_cpu;
5719
5720 /* Do anything needed at the start of the asm file. */
5721
5722 static void
5723 rs6000_file_start (void)
5724 {
5725 char buffer[80];
5726 const char *start = buffer;
5727 FILE *file = asm_out_file;
5728
5729 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5730
5731 default_file_start ();
5732
5733 if (flag_verbose_asm)
5734 {
5735 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5736
5737 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5738 {
5739 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5740 start = "";
5741 }
5742
5743 if (global_options_set.x_rs6000_cpu_index)
5744 {
5745 fprintf (file, "%s -mcpu=%s", start,
5746 processor_target_table[rs6000_cpu_index].name);
5747 start = "";
5748 }
5749
5750 if (global_options_set.x_rs6000_tune_index)
5751 {
5752 fprintf (file, "%s -mtune=%s", start,
5753 processor_target_table[rs6000_tune_index].name);
5754 start = "";
5755 }
5756
5757 if (PPC405_ERRATUM77)
5758 {
5759 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5760 start = "";
5761 }
5762
5763 #ifdef USING_ELFOS_H
5764 switch (rs6000_sdata)
5765 {
5766 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5767 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5768 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5769 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5770 }
5771
5772 if (rs6000_sdata && g_switch_value)
5773 {
5774 fprintf (file, "%s -G %d", start,
5775 g_switch_value);
5776 start = "";
5777 }
5778 #endif
5779
5780 if (*start == '\0')
5781 putc ('\n', file);
5782 }
5783
5784 #ifdef USING_ELFOS_H
5785 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5786 && !global_options_set.x_rs6000_cpu_index)
5787 {
5788 fputs ("\t.machine ", asm_out_file);
5789 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5790 fputs ("power9\n", asm_out_file);
5791 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5792 fputs ("power8\n", asm_out_file);
5793 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5794 fputs ("power7\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5796 fputs ("power6\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5798 fputs ("power5\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5800 fputs ("power4\n", asm_out_file);
5801 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5802 fputs ("ppc64\n", asm_out_file);
5803 else
5804 fputs ("ppc\n", asm_out_file);
5805 }
5806 #endif
5807
5808 if (DEFAULT_ABI == ABI_ELFv2)
5809 fprintf (file, "\t.abiversion 2\n");
5810 }
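/* Illustrative output sketch (assumed configuration): on an ELF target
   with no configured default cpu and no -mcpu= given, compiling with
   -fverbose-asm -mtune=power9 while the MODULO (ISA 3.0) flag is
   enabled would emit something like

	# rs6000/powerpc options: -mtune=power9
	.machine power9
	.abiversion 2

   where the ".abiversion 2" line appears only for the ELFv2 ABI. */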
5811
5812 \f
5813 /* Return nonzero if this function is known to have a null epilogue. */
5814
5815 int
5816 direct_return (void)
5817 {
5818 if (reload_completed)
5819 {
5820 rs6000_stack_t *info = rs6000_stack_info ();
5821
5822 if (info->first_gp_reg_save == 32
5823 && info->first_fp_reg_save == 64
5824 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5825 && ! info->lr_save_p
5826 && ! info->cr_save_p
5827 && info->vrsave_size == 0
5828 && ! info->push_p)
5829 return 1;
5830 }
5831
5832 return 0;
5833 }
5834
5835 /* Return the number of instructions it takes to form a constant in an
5836 integer register. */
5837
5838 int
5839 num_insns_constant_wide (HOST_WIDE_INT value)
5840 {
5841 /* signed constant loadable with addi */
5842 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5843 return 1;
5844
5845 /* constant loadable with addis */
5846 else if ((value & 0xffff) == 0
5847 && (value >> 31 == -1 || value >> 31 == 0))
5848 return 1;
5849
5850 else if (TARGET_POWERPC64)
5851 {
5852 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5853 HOST_WIDE_INT high = value >> 31;
5854
5855 if (high == 0 || high == -1)
5856 return 2;
5857
5858 high >>= 1;
5859
5860 if (low == 0)
5861 return num_insns_constant_wide (high) + 1;
5862 else if (high == 0)
5863 return num_insns_constant_wide (low) + 1;
5864 else
5865 return (num_insns_constant_wide (high)
5866 + num_insns_constant_wide (low) + 1);
5867 }
5868
5869 else
5870 return 2;
5871 }
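/* Worked examples (illustrative):

     0x7fff	 -> 1 insn (li: fits a signed 16-bit immediate)
     0x12340000	 -> 1 insn (lis: low 16 bits zero, fits in 32 bits)
     0x12345678	 -> 2 insns (lis + ori)
     0x100000000 -> 2 insns on 64-bit (li 1 + sldi 32: low word zero). */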
5872
5873 int
5874 num_insns_constant (rtx op, machine_mode mode)
5875 {
5876 HOST_WIDE_INT low, high;
5877
5878 switch (GET_CODE (op))
5879 {
5880 case CONST_INT:
5881 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5882 && rs6000_is_valid_and_mask (op, mode))
5883 return 2;
5884 else
5885 return num_insns_constant_wide (INTVAL (op));
5886
5887 case CONST_WIDE_INT:
5888 {
5889 int i;
5890 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5891 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5892 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5893 return ins;
5894 }
5895
5896 case CONST_DOUBLE:
5897 if (mode == SFmode || mode == SDmode)
5898 {
5899 long l;
5900
5901 if (DECIMAL_FLOAT_MODE_P (mode))
5902 REAL_VALUE_TO_TARGET_DECIMAL32
5903 (*CONST_DOUBLE_REAL_VALUE (op), l);
5904 else
5905 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5906 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5907 }
5908
5909 long l[2];
5910 if (DECIMAL_FLOAT_MODE_P (mode))
5911 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
5912 else
5913 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5914 high = l[WORDS_BIG_ENDIAN == 0];
5915 low = l[WORDS_BIG_ENDIAN != 0];
5916
5917 if (TARGET_32BIT)
5918 return (num_insns_constant_wide (low)
5919 + num_insns_constant_wide (high));
5920 else
5921 {
5922 if ((high == 0 && low >= 0)
5923 || (high == -1 && low < 0))
5924 return num_insns_constant_wide (low);
5925
5926 else if (rs6000_is_valid_and_mask (op, mode))
5927 return 2;
5928
5929 else if (low == 0)
5930 return num_insns_constant_wide (high) + 1;
5931
5932 else
5933 return (num_insns_constant_wide (high)
5934 + num_insns_constant_wide (low) + 1);
5935 }
5936
5937 default:
5938 gcc_unreachable ();
5939 }
5940 }
5941
5942 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5943 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5944 corresponding element of the vector, but for V4SFmode, the
5945 corresponding "float" is interpreted as an SImode integer. */
5946
5947 HOST_WIDE_INT
5948 const_vector_elt_as_int (rtx op, unsigned int elt)
5949 {
5950 rtx tmp;
5951
5952 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5953 gcc_assert (GET_MODE (op) != V2DImode
5954 && GET_MODE (op) != V2DFmode);
5955
5956 tmp = CONST_VECTOR_ELT (op, elt);
5957 if (GET_MODE (op) == V4SFmode)
5958 tmp = gen_lowpart (SImode, tmp);
5959 return INTVAL (tmp);
5960 }
5961
5962 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5963 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5964 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5965 all items are set to the same value and contain COPIES replicas of the
5966 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5967 operand and the others are set to the value of the operand's msb. */
5968
5969 static bool
5970 vspltis_constant (rtx op, unsigned step, unsigned copies)
5971 {
5972 machine_mode mode = GET_MODE (op);
5973 machine_mode inner = GET_MODE_INNER (mode);
5974
5975 unsigned i;
5976 unsigned nunits;
5977 unsigned bitsize;
5978 unsigned mask;
5979
5980 HOST_WIDE_INT val;
5981 HOST_WIDE_INT splat_val;
5982 HOST_WIDE_INT msb_val;
5983
5984 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5985 return false;
5986
5987 nunits = GET_MODE_NUNITS (mode);
5988 bitsize = GET_MODE_BITSIZE (inner);
5989 mask = GET_MODE_MASK (inner);
5990
5991 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5992 splat_val = val;
5993 msb_val = val >= 0 ? 0 : -1;
5994
5995 /* Construct the value to be splatted, if possible. If not, return 0. */
5996 for (i = 2; i <= copies; i *= 2)
5997 {
5998 HOST_WIDE_INT small_val;
5999 bitsize /= 2;
6000 small_val = splat_val >> bitsize;
6001 mask >>= bitsize;
6002 if (splat_val != ((HOST_WIDE_INT)
6003 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6004 | (small_val & mask)))
6005 return false;
6006 splat_val = small_val;
6007 }
6008
6009 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6010 if (EASY_VECTOR_15 (splat_val))
6011 ;
6012
6013 /* Also check if we can splat, and then add the result to itself. Do so if
6014 the value is positive, or if the splat instruction is using OP's mode;
6015 for splat_val < 0, the splat and the add should use the same mode. */
6016 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6017 && (splat_val >= 0 || (step == 1 && copies == 1)))
6018 ;
6019
6020 /* Also check if we are loading up the most significant bit, which can be
6021 done by loading up -1 and shifting the value left by -1. */
6022 else if (EASY_VECTOR_MSB (splat_val, inner))
6023 ;
6024
6025 else
6026 return false;
6027
6028 /* Check if VAL is present in every STEP-th element, and the
6029 other elements are filled with its most significant bit. */
6030 for (i = 1; i < nunits; ++i)
6031 {
6032 HOST_WIDE_INT desired_val;
6033 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6034 if ((i & (step - 1)) == 0)
6035 desired_val = val;
6036 else
6037 desired_val = msb_val;
6038
6039 if (desired_val != const_vector_elt_as_int (op, elt))
6040 return false;
6041 }
6042
6043 return true;
6044 }
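/* Worked examples (illustrative): a V4SImode vector of four 5s matches
   with step == 1 and copies == 1 and is emitted as "vspltisw 5"; a
   V8HImode vector whose every halfword is 0x0101 matches with
   copies == 2, since splatting byte 1 with "vspltisb 1" replicates it
   into both halves of each halfword. */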
6045
6046 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6047 instruction, filling in the bottom elements with 0 or -1.
6048
6049 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6050 for the number of zeroes to shift in, or negative for the number of 0xff
6051 bytes to shift in.
6052
6053 OP is a CONST_VECTOR. */
6054
6055 int
6056 vspltis_shifted (rtx op)
6057 {
6058 machine_mode mode = GET_MODE (op);
6059 machine_mode inner = GET_MODE_INNER (mode);
6060
6061 unsigned i, j;
6062 unsigned nunits;
6063 unsigned mask;
6064
6065 HOST_WIDE_INT val;
6066
6067 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6068 return false;
6069
6070 /* We need to create pseudo registers to do the shift, so don't recognize
6071 shift vector constants after reload. */
6072 if (!can_create_pseudo_p ())
6073 return false;
6074
6075 nunits = GET_MODE_NUNITS (mode);
6076 mask = GET_MODE_MASK (inner);
6077
6078 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6079
6080 /* Check if the value can really be the operand of a vspltis[bhw]. */
6081 if (EASY_VECTOR_15 (val))
6082 ;
6083
6084 /* Also check if we are loading up the most significant bit, which can be
6085 done by loading up -1 and shifting the value left by -1. */
6086 else if (EASY_VECTOR_MSB (val, inner))
6087 ;
6088
6089 else
6090 return 0;
6091
6092 /* Check if VAL is present in every STEP-th element until we find elements
6093 that are 0 or all 1 bits. */
6094 for (i = 1; i < nunits; ++i)
6095 {
6096 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6097 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6098
6099 /* If the value isn't the splat value, check for the remaining elements
6100 being 0/-1. */
6101 if (val != elt_val)
6102 {
6103 if (elt_val == 0)
6104 {
6105 for (j = i+1; j < nunits; ++j)
6106 {
6107 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6108 if (const_vector_elt_as_int (op, elt2) != 0)
6109 return 0;
6110 }
6111
6112 return (nunits - i) * GET_MODE_SIZE (inner);
6113 }
6114
6115 else if ((elt_val & mask) == mask)
6116 {
6117 for (j = i+1; j < nunits; ++j)
6118 {
6119 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6120 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6121 return 0;
6122 }
6123
6124 return -((nunits - i) * GET_MODE_SIZE (inner));
6125 }
6126
6127 else
6128 return 0;
6129 }
6130 }
6131
6132 /* If all elements are equal, we don't need to do VSLDOI. */
6133 return 0;
6134 }
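/* Worked example (illustrative): on a big-endian target the V4SImode
   constant { 5, 0, 0, 0 } returns (4 - 1) * 4 == 12 here: splat 5 with
   vspltisw, then shift 12 zero bytes in with VSLDOI. A negative result
   such as -12 would mean the trailing elements are all 1 bits and 0xff
   bytes are shifted in instead. */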
6135
6136
6137 /* Return true if OP is of the given MODE and can be synthesized
6138 with a vspltisb, vspltish or vspltisw. */
6139
6140 bool
6141 easy_altivec_constant (rtx op, machine_mode mode)
6142 {
6143 unsigned step, copies;
6144
6145 if (mode == VOIDmode)
6146 mode = GET_MODE (op);
6147 else if (mode != GET_MODE (op))
6148 return false;
6149
6150 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6151 constants. */
6152 if (mode == V2DFmode)
6153 return zero_constant (op, mode);
6154
6155 else if (mode == V2DImode)
6156 {
6157 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6158 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6159 return false;
6160
6161 if (zero_constant (op, mode))
6162 return true;
6163
6164 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6165 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6166 return true;
6167
6168 return false;
6169 }
6170
6171 /* V1TImode is a special container for TImode. Ignore for now. */
6172 else if (mode == V1TImode)
6173 return false;
6174
6175 /* Start with a vspltisw. */
6176 step = GET_MODE_NUNITS (mode) / 4;
6177 copies = 1;
6178
6179 if (vspltis_constant (op, step, copies))
6180 return true;
6181
6182 /* Then try with a vspltish. */
6183 if (step == 1)
6184 copies <<= 1;
6185 else
6186 step >>= 1;
6187
6188 if (vspltis_constant (op, step, copies))
6189 return true;
6190
6191 /* And finally a vspltisb. */
6192 if (step == 1)
6193 copies <<= 1;
6194 else
6195 step >>= 1;
6196
6197 if (vspltis_constant (op, step, copies))
6198 return true;
6199
6200 if (vspltis_shifted (op) != 0)
6201 return true;
6202
6203 return false;
6204 }
6205
6206 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6207 result is OP. Abort if it is not possible. */
6208
6209 rtx
6210 gen_easy_altivec_constant (rtx op)
6211 {
6212 machine_mode mode = GET_MODE (op);
6213 int nunits = GET_MODE_NUNITS (mode);
6214 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6215 unsigned step = nunits / 4;
6216 unsigned copies = 1;
6217
6218 /* Start with a vspltisw. */
6219 if (vspltis_constant (op, step, copies))
6220 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6221
6222 /* Then try with a vspltish. */
6223 if (step == 1)
6224 copies <<= 1;
6225 else
6226 step >>= 1;
6227
6228 if (vspltis_constant (op, step, copies))
6229 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6230
6231 /* And finally a vspltisb. */
6232 if (step == 1)
6233 copies <<= 1;
6234 else
6235 step >>= 1;
6236
6237 if (vspltis_constant (op, step, copies))
6238 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6239
6240 gcc_unreachable ();
6241 }
6242
6243 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6244 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6245
6246 Return the number of instructions needed (1 or 2) into the address pointed
6247 via NUM_INSNS_PTR.
6248
6249 Return the constant that is being split via CONSTANT_PTR. */
6250
6251 bool
6252 xxspltib_constant_p (rtx op,
6253 machine_mode mode,
6254 int *num_insns_ptr,
6255 int *constant_ptr)
6256 {
6257 size_t nunits = GET_MODE_NUNITS (mode);
6258 size_t i;
6259 HOST_WIDE_INT value;
6260 rtx element;
6261
6262 /* Set the returned values to out of bound values. */
6263 *num_insns_ptr = -1;
6264 *constant_ptr = 256;
6265
6266 if (!TARGET_P9_VECTOR)
6267 return false;
6268
6269 if (mode == VOIDmode)
6270 mode = GET_MODE (op);
6271
6272 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6273 return false;
6274
6275 /* Handle (vec_duplicate <constant>). */
6276 if (GET_CODE (op) == VEC_DUPLICATE)
6277 {
6278 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6279 && mode != V2DImode)
6280 return false;
6281
6282 element = XEXP (op, 0);
6283 if (!CONST_INT_P (element))
6284 return false;
6285
6286 value = INTVAL (element);
6287 if (!IN_RANGE (value, -128, 127))
6288 return false;
6289 }
6290
6291 /* Handle (const_vector [...]). */
6292 else if (GET_CODE (op) == CONST_VECTOR)
6293 {
6294 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6295 && mode != V2DImode)
6296 return false;
6297
6298 element = CONST_VECTOR_ELT (op, 0);
6299 if (!CONST_INT_P (element))
6300 return false;
6301
6302 value = INTVAL (element);
6303 if (!IN_RANGE (value, -128, 127))
6304 return false;
6305
6306 for (i = 1; i < nunits; i++)
6307 {
6308 element = CONST_VECTOR_ELT (op, i);
6309 if (!CONST_INT_P (element))
6310 return false;
6311
6312 if (value != INTVAL (element))
6313 return false;
6314 }
6315 }
6316
6317 /* Handle integer constants being loaded into the upper part of the VSX
6318 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6319 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6320 else if (CONST_INT_P (op))
6321 {
6322 if (!SCALAR_INT_MODE_P (mode))
6323 return false;
6324
6325 value = INTVAL (op);
6326 if (!IN_RANGE (value, -128, 127))
6327 return false;
6328
6329 if (!IN_RANGE (value, -1, 0))
6330 {
6331 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6332 return false;
6333
6334 if (EASY_VECTOR_15 (value))
6335 return false;
6336 }
6337 }
6338
6339 else
6340 return false;
6341
6342 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6343 sign extend. Special case 0/-1 to allow getting any VSX register instead
6344 of an Altivec register. */
6345 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6346 && EASY_VECTOR_15 (value))
6347 return false;
6348
6349 /* Return # of instructions and the constant byte for XXSPLTIB. */
6350 if (mode == V16QImode)
6351 *num_insns_ptr = 1;
6352
6353 else if (IN_RANGE (value, -1, 0))
6354 *num_insns_ptr = 1;
6355
6356 else
6357 *num_insns_ptr = 2;
6358
6359 *constant_ptr = (int) value;
6360 return true;
6361 }
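/* Illustrative examples of the predicate above with ISA 3.0:

     V16QImode, all bytes 20	 -> 1 insn (xxspltib %x0,20)
     V8HImode, all elements -100 -> 2 insns (xxspltib + sign extension)
     V8HImode, all elements 5	 -> rejected; vspltish 5 is preferred. */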
6362
6363 const char *
6364 output_vec_const_move (rtx *operands)
6365 {
6366 int shift;
6367 machine_mode mode;
6368 rtx dest, vec;
6369
6370 dest = operands[0];
6371 vec = operands[1];
6372 mode = GET_MODE (dest);
6373
6374 if (TARGET_VSX)
6375 {
6376 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6377 int xxspltib_value = 256;
6378 int num_insns = -1;
6379
6380 if (zero_constant (vec, mode))
6381 {
6382 if (TARGET_P9_VECTOR)
6383 return "xxspltib %x0,0";
6384
6385 else if (dest_vmx_p)
6386 return "vspltisw %0,0";
6387
6388 else
6389 return "xxlxor %x0,%x0,%x0";
6390 }
6391
6392 if (all_ones_constant (vec, mode))
6393 {
6394 if (TARGET_P9_VECTOR)
6395 return "xxspltib %x0,255";
6396
6397 else if (dest_vmx_p)
6398 return "vspltisw %0,-1";
6399
6400 else if (TARGET_P8_VECTOR)
6401 return "xxlorc %x0,%x0,%x0";
6402
6403 else
6404 gcc_unreachable ();
6405 }
6406
6407 if (TARGET_P9_VECTOR
6408 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6409 {
6410 if (num_insns == 1)
6411 {
6412 operands[2] = GEN_INT (xxspltib_value & 0xff);
6413 return "xxspltib %x0,%2";
6414 }
6415
6416 return "#";
6417 }
6418 }
6419
6420 if (TARGET_ALTIVEC)
6421 {
6422 rtx splat_vec;
6423
6424 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6425 if (zero_constant (vec, mode))
6426 return "vspltisw %0,0";
6427
6428 if (all_ones_constant (vec, mode))
6429 return "vspltisw %0,-1";
6430
6431 /* Do we need to construct a value using VSLDOI? */
6432 shift = vspltis_shifted (vec);
6433 if (shift != 0)
6434 return "#";
6435
6436 splat_vec = gen_easy_altivec_constant (vec);
6437 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6438 operands[1] = XEXP (splat_vec, 0);
6439 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6440 return "#";
6441
6442 switch (GET_MODE (splat_vec))
6443 {
6444 case E_V4SImode:
6445 return "vspltisw %0,%1";
6446
6447 case E_V8HImode:
6448 return "vspltish %0,%1";
6449
6450 case E_V16QImode:
6451 return "vspltisb %0,%1";
6452
6453 default:
6454 gcc_unreachable ();
6455 }
6456 }
6457
6458 gcc_unreachable ();
6459 }
6460
6461 /* Initialize vector TARGET to VALS. */
6462
6463 void
6464 rs6000_expand_vector_init (rtx target, rtx vals)
6465 {
6466 machine_mode mode = GET_MODE (target);
6467 machine_mode inner_mode = GET_MODE_INNER (mode);
6468 int n_elts = GET_MODE_NUNITS (mode);
6469 int n_var = 0, one_var = -1;
6470 bool all_same = true, all_const_zero = true;
6471 rtx x, mem;
6472 int i;
6473
6474 for (i = 0; i < n_elts; ++i)
6475 {
6476 x = XVECEXP (vals, 0, i);
6477 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6478 ++n_var, one_var = i;
6479 else if (x != CONST0_RTX (inner_mode))
6480 all_const_zero = false;
6481
6482 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6483 all_same = false;
6484 }
6485
6486 if (n_var == 0)
6487 {
6488 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6489 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6490 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6491 {
6492 /* Zero register. */
6493 emit_move_insn (target, CONST0_RTX (mode));
6494 return;
6495 }
6496 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6497 {
6498 /* Splat immediate. */
6499 emit_insn (gen_rtx_SET (target, const_vec));
6500 return;
6501 }
6502 else
6503 {
6504 /* Load from constant pool. */
6505 emit_move_insn (target, const_vec);
6506 return;
6507 }
6508 }
6509
6510 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6511 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6512 {
6513 rtx op[2];
6514 size_t i;
6515 size_t num_elements = all_same ? 1 : 2;
6516 for (i = 0; i < num_elements; i++)
6517 {
6518 op[i] = XVECEXP (vals, 0, i);
6519 /* Just in case there is a SUBREG with a smaller mode, do a
6520 conversion. */
6521 if (GET_MODE (op[i]) != inner_mode)
6522 {
6523 rtx tmp = gen_reg_rtx (inner_mode);
6524 convert_move (tmp, op[i], 0);
6525 op[i] = tmp;
6526 }
6527 /* Allow load with splat double word. */
6528 else if (MEM_P (op[i]))
6529 {
6530 if (!all_same)
6531 op[i] = force_reg (inner_mode, op[i]);
6532 }
6533 else if (!REG_P (op[i]))
6534 op[i] = force_reg (inner_mode, op[i]);
6535 }
6536
6537 if (all_same)
6538 {
6539 if (mode == V2DFmode)
6540 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6541 else
6542 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6543 }
6544 else
6545 {
6546 if (mode == V2DFmode)
6547 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6548 else
6549 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6550 }
6551 return;
6552 }
6553
6554 /* Special case initializing vector int if we are on 64-bit systems with
6555 direct move or we have the ISA 3.0 instructions. */
6556 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6557 && TARGET_DIRECT_MOVE_64BIT)
6558 {
6559 if (all_same)
6560 {
6561 rtx element0 = XVECEXP (vals, 0, 0);
6562 if (MEM_P (element0))
6563 element0 = rs6000_address_for_fpconvert (element0);
6564 else
6565 element0 = force_reg (SImode, element0);
6566
6567 if (TARGET_P9_VECTOR)
6568 emit_insn (gen_vsx_splat_v4si (target, element0));
6569 else
6570 {
6571 rtx tmp = gen_reg_rtx (DImode);
6572 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6573 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6574 }
6575 return;
6576 }
6577 else
6578 {
6579 rtx elements[4];
6580 size_t i;
6581
6582 for (i = 0; i < 4; i++)
6583 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6584
6585 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6586 elements[2], elements[3]));
6587 return;
6588 }
6589 }
6590
6591 /* With single-precision floating point on VSX, we know that internally
6592 single precision is actually represented as a double, so either make two
6593 V2DF vectors and convert them to single precision, or do one conversion
6594 and splat the result to the other elements. */
6595 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6596 {
6597 if (all_same)
6598 {
6599 rtx element0 = XVECEXP (vals, 0, 0);
6600
6601 if (TARGET_P9_VECTOR)
6602 {
6603 if (MEM_P (element0))
6604 element0 = rs6000_address_for_fpconvert (element0);
6605
6606 emit_insn (gen_vsx_splat_v4sf (target, element0));
6607 }
6608
6609 else
6610 {
6611 rtx freg = gen_reg_rtx (V4SFmode);
6612 rtx sreg = force_reg (SFmode, element0);
6613 rtx cvt = (TARGET_XSCVDPSPN
6614 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6615 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6616
6617 emit_insn (cvt);
6618 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6619 const0_rtx));
6620 }
6621 }
6622 else
6623 {
6624 rtx dbl_even = gen_reg_rtx (V2DFmode);
6625 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6626 rtx flt_even = gen_reg_rtx (V4SFmode);
6627 rtx flt_odd = gen_reg_rtx (V4SFmode);
6628 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6629 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6630 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6631 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6632
6633 /* Use VMRGEW if we can instead of doing a permute. */
6634 if (TARGET_P8_VECTOR)
6635 {
6636 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6637 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6638 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6639 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6640 if (BYTES_BIG_ENDIAN)
6641 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6642 else
6643 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6644 }
6645 else
6646 {
6647 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6648 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6649 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6650 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6651 rs6000_expand_extract_even (target, flt_even, flt_odd);
6652 }
6653 }
6654 return;
6655 }
6656
6657 /* Special case initializing vector short/char that are splats if we are on
6658 64-bit systems with direct move. */
6659 if (all_same && TARGET_DIRECT_MOVE_64BIT
6660 && (mode == V16QImode || mode == V8HImode))
6661 {
6662 rtx op0 = XVECEXP (vals, 0, 0);
6663 rtx di_tmp = gen_reg_rtx (DImode);
6664
6665 if (!REG_P (op0))
6666 op0 = force_reg (GET_MODE_INNER (mode), op0);
6667
6668 if (mode == V16QImode)
6669 {
6670 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6671 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6672 return;
6673 }
6674
6675 if (mode == V8HImode)
6676 {
6677 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6678 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6679 return;
6680 }
6681 }
6682
6683 /* Store value to stack temp. Load vector element. Splat. However, splat
6684 of 64-bit items is not supported on Altivec. */
6685 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6686 {
6687 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6688 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6689 XVECEXP (vals, 0, 0));
6690 x = gen_rtx_UNSPEC (VOIDmode,
6691 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6692 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6693 gen_rtvec (2,
6694 gen_rtx_SET (target, mem),
6695 x)));
6696 x = gen_rtx_VEC_SELECT (inner_mode, target,
6697 gen_rtx_PARALLEL (VOIDmode,
6698 gen_rtvec (1, const0_rtx)));
6699 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6700 return;
6701 }
6702
6703 /* One field is non-constant. Load constant then overwrite
6704 varying field. */
6705 if (n_var == 1)
6706 {
6707 rtx copy = copy_rtx (vals);
6708
6709 /* Load constant part of vector, substitute neighboring value for
6710 varying element. */
6711 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6712 rs6000_expand_vector_init (target, copy);
6713
6714 /* Insert variable. */
6715 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6716 return;
6717 }
6718
6719 /* Construct the vector in memory one field at a time
6720 and load the whole vector. */
6721 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6722 for (i = 0; i < n_elts; i++)
6723 emit_move_insn (adjust_address_nv (mem, inner_mode,
6724 i * GET_MODE_SIZE (inner_mode)),
6725 XVECEXP (vals, 0, i));
6726 emit_move_insn (target, mem);
6727 }
6728
6729 /* Set field ELT of TARGET to VAL. */
6730
6731 void
6732 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6733 {
6734 machine_mode mode = GET_MODE (target);
6735 machine_mode inner_mode = GET_MODE_INNER (mode);
6736 rtx reg = gen_reg_rtx (mode);
6737 rtx mask, mem, x;
6738 int width = GET_MODE_SIZE (inner_mode);
6739 int i;
6740
6741 val = force_reg (GET_MODE (val), val);
6742
6743 if (VECTOR_MEM_VSX_P (mode))
6744 {
6745 rtx insn = NULL_RTX;
6746 rtx elt_rtx = GEN_INT (elt);
6747
6748 if (mode == V2DFmode)
6749 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6750
6751 else if (mode == V2DImode)
6752 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6753
6754 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6755 {
6756 if (mode == V4SImode)
6757 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6758 else if (mode == V8HImode)
6759 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6760 else if (mode == V16QImode)
6761 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6762 else if (mode == V4SFmode)
6763 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6764 }
6765
6766 if (insn)
6767 {
6768 emit_insn (insn);
6769 return;
6770 }
6771 }
6772
6773 /* Simplify setting single element vectors like V1TImode. */
6774 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6775 {
6776 emit_move_insn (target, gen_lowpart (mode, val));
6777 return;
6778 }
6779
6780 /* Load single variable value. */
6781 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6782 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6783 x = gen_rtx_UNSPEC (VOIDmode,
6784 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6785 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6786 gen_rtvec (2,
6787 gen_rtx_SET (reg, mem),
6788 x)));
6789
6790 /* Linear sequence. */
6791 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6792 for (i = 0; i < 16; ++i)
6793 XVECEXP (mask, 0, i) = GEN_INT (i);
6794
6795 /* Set permute mask to insert element into target. */
6796 for (i = 0; i < width; ++i)
6797 XVECEXP (mask, 0, elt*width + i)
6798 = GEN_INT (i + 0x10);
6799 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6800
6801 if (BYTES_BIG_ENDIAN)
6802 x = gen_rtx_UNSPEC (mode,
6803 gen_rtvec (3, target, reg,
6804 force_reg (V16QImode, x)),
6805 UNSPEC_VPERM);
6806 else
6807 {
6808 if (TARGET_P9_VECTOR)
6809 x = gen_rtx_UNSPEC (mode,
6810 gen_rtvec (3, reg, target,
6811 force_reg (V16QImode, x)),
6812 UNSPEC_VPERMR);
6813 else
6814 {
6815 /* Invert selector. We prefer to generate VNAND on P8 so
6816 that future fusion opportunities can kick in, but must
6817 generate VNOR elsewhere. */
6818 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6819 rtx iorx = (TARGET_P8_VECTOR
6820 ? gen_rtx_IOR (V16QImode, notx, notx)
6821 : gen_rtx_AND (V16QImode, notx, notx));
6822 rtx tmp = gen_reg_rtx (V16QImode);
6823 emit_insn (gen_rtx_SET (tmp, iorx));
6824
6825 /* Permute with operands reversed and adjusted selector. */
6826 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6827 UNSPEC_VPERM);
6828 }
6829 }
6830
6831 emit_insn (gen_rtx_SET (target, x));
6832 }
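/* For illustration only: a hypothetical sketch (names invented, not
   part of this file) of the VPERM selector built above.  Selector
   bytes index into the 32-byte concatenation of the two inputs, so
   values 0x00-0x0f keep the target's bytes and 0x10-0x1f splice in
   bytes of the new element.  */
#if 0
static void
build_insert_selector (unsigned char sel[16], int elt, int width)
{
  int i;

  for (i = 0; i < 16; i++)
    sel[i] = i;				/* identity: keep target bytes */

  for (i = 0; i < width; i++)
    sel[elt * width + i] = 0x10 + i;	/* take bytes from the new value */
}
#endif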
6833
6834 /* Extract field ELT from VEC into TARGET. */
6835
6836 void
6837 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6838 {
6839 machine_mode mode = GET_MODE (vec);
6840 machine_mode inner_mode = GET_MODE_INNER (mode);
6841 rtx mem;
6842
6843 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6844 {
6845 switch (mode)
6846 {
6847 default:
6848 break;
6849 case E_V1TImode:
6850 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6851 emit_move_insn (target, gen_lowpart (TImode, vec));
6852 break;
6853 case E_V2DFmode:
6854 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6855 return;
6856 case E_V2DImode:
6857 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6858 return;
6859 case E_V4SFmode:
6860 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6861 return;
6862 case E_V16QImode:
6863 if (TARGET_DIRECT_MOVE_64BIT)
6864 {
6865 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6866 return;
6867 }
6868 else
6869 break;
6870 case E_V8HImode:
6871 if (TARGET_DIRECT_MOVE_64BIT)
6872 {
6873 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6874 return;
6875 }
6876 else
6877 break;
6878 case E_V4SImode:
6879 if (TARGET_DIRECT_MOVE_64BIT)
6880 {
6881 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6882 return;
6883 }
6884 break;
6885 }
6886 }
6887 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6888 && TARGET_DIRECT_MOVE_64BIT)
6889 {
6890 if (GET_MODE (elt) != DImode)
6891 {
6892 rtx tmp = gen_reg_rtx (DImode);
6893 convert_move (tmp, elt, 0);
6894 elt = tmp;
6895 }
6896 else if (!REG_P (elt))
6897 elt = force_reg (DImode, elt);
6898
6899 switch (mode)
6900 {
6901 case E_V2DFmode:
6902 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6903 return;
6904
6905 case E_V2DImode:
6906 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6907 return;
6908
6909 case E_V4SFmode:
6910 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6911 return;
6912
6913 case E_V4SImode:
6914 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6915 return;
6916
6917 case E_V8HImode:
6918 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6919 return;
6920
6921 case E_V16QImode:
6922 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6923 return;
6924
6925 default:
6926 gcc_unreachable ();
6927 }
6928 }
6929
6930 gcc_assert (CONST_INT_P (elt));
6931
6932 /* Allocate mode-sized buffer. */
6933 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6934
6935 emit_move_insn (mem, vec);
6936
6937 /* Add offset to field within buffer matching vector element. */
6938 mem = adjust_address_nv (mem, inner_mode,
6939 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6940
6941 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6942 }
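/* For illustration only: the memory fallback above behaves like this
   hypothetical C-level sketch (names invented for the example).  */
#if 0
typedef int v4si __attribute__ ((vector_size (16)));

static int
extract_v4si_via_memory (v4si v, int elt)
{
  int buf[4];
  __builtin_memcpy (buf, &v, sizeof (buf));	/* spill the whole vector */
  return buf[elt];				/* read the one field back */
}
#endif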
6943
6944 /* Helper function to return the register number of an RTX. */
6945 static inline int
6946 regno_or_subregno (rtx op)
6947 {
6948 if (REG_P (op))
6949 return REGNO (op);
6950 else if (SUBREG_P (op))
6951 return subreg_regno (op);
6952 else
6953 gcc_unreachable ();
6954 }
6955
6956 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6957 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6958 temporary (BASE_TMP) to fix up the address. Return the new memory address
6959 that is valid for reads or writes to a given register (SCALAR_REG). */
6960
6961 rtx
6962 rs6000_adjust_vec_address (rtx scalar_reg,
6963 rtx mem,
6964 rtx element,
6965 rtx base_tmp,
6966 machine_mode scalar_mode)
6967 {
6968 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6969 rtx addr = XEXP (mem, 0);
6970 rtx element_offset;
6971 rtx new_addr;
6972 bool valid_addr_p;
6973
6974 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6975 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6976
6977 /* Calculate what we need to add to the address to get the element
6978 address. */
6979 if (CONST_INT_P (element))
6980 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6981 else
6982 {
6983 int byte_shift = exact_log2 (scalar_size);
6984 gcc_assert (byte_shift >= 0);
6985
6986 if (byte_shift == 0)
6987 element_offset = element;
6988
6989 else
6990 {
6991 if (TARGET_POWERPC64)
6992 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6993 else
6994 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6995
6996 element_offset = base_tmp;
6997 }
6998 }
6999
7000 /* Create the new address pointing to the element within the vector. If we
7001 are adding 0, we don't have to change the address. */
7002 if (element_offset == const0_rtx)
7003 new_addr = addr;
7004
7005 /* A simple indirect address can be converted into a reg + offset
7006 address. */
7007 else if (REG_P (addr) || SUBREG_P (addr))
7008 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7009
7010 /* Optimize D-FORM addresses with a constant offset and a constant element
7011 number, folding the element offset into the address directly. */
7012 else if (GET_CODE (addr) == PLUS)
7013 {
7014 rtx op0 = XEXP (addr, 0);
7015 rtx op1 = XEXP (addr, 1);
7016 rtx insn;
7017
7018 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7019 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7020 {
7021 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7022 rtx offset_rtx = GEN_INT (offset);
7023
7024 if (IN_RANGE (offset, -32768, 32767)
7025 && (scalar_size < 8 || (offset & 0x3) == 0))
7026 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7027 else
7028 {
7029 emit_move_insn (base_tmp, offset_rtx);
7030 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7031 }
7032 }
7033 else
7034 {
7035 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7036 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7037
7038 /* Note, ADDI requires the register being added to be a base
7039 register. If the register was R0, load it up into the temporary
7040 and do the add. */
7041 if (op1_reg_p
7042 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7043 {
7044 insn = gen_add3_insn (base_tmp, op1, element_offset);
7045 gcc_assert (insn != NULL_RTX);
7046 emit_insn (insn);
7047 }
7048
7049 else if (ele_reg_p
7050 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7051 {
7052 insn = gen_add3_insn (base_tmp, element_offset, op1);
7053 gcc_assert (insn != NULL_RTX);
7054 emit_insn (insn);
7055 }
7056
7057 else
7058 {
7059 emit_move_insn (base_tmp, op1);
7060 emit_insn (gen_add2_insn (base_tmp, element_offset));
7061 }
7062
7063 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7064 }
7065 }
7066
7067 else
7068 {
7069 emit_move_insn (base_tmp, addr);
7070 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7071 }
7072
7073 /* If we have a PLUS, we need to see whether the particular register class
7074 allows for D-FORM or X-FORM addressing. */
7075 if (GET_CODE (new_addr) == PLUS)
7076 {
7077 rtx op1 = XEXP (new_addr, 1);
7078 addr_mask_type addr_mask;
7079 int scalar_regno = regno_or_subregno (scalar_reg);
7080
7081 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7082 if (INT_REGNO_P (scalar_regno))
7083 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7084
7085 else if (FP_REGNO_P (scalar_regno))
7086 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7087
7088 else if (ALTIVEC_REGNO_P (scalar_regno))
7089 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7090
7091 else
7092 gcc_unreachable ();
7093
7094 if (REG_P (op1) || SUBREG_P (op1))
7095 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7096 else
7097 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7098 }
7099
7100 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7101 valid_addr_p = true;
7102
7103 else
7104 valid_addr_p = false;
7105
7106 if (!valid_addr_p)
7107 {
7108 emit_move_insn (base_tmp, new_addr);
7109 new_addr = base_tmp;
7110 }
7111
7112 return change_address (mem, scalar_mode, new_addr);
7113 }
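/* Worked example of the constant folding above: if MEM's address is
   (plus (reg 9) (const_int 48)), ELEMENT is 2 and SCALAR_MODE is
   DImode (scalar_size 8), then element_offset = 2 * 8 = 16 and the
   combined offset is 48 + 16 = 64.  Since 64 fits in a signed 16-bit
   immediate and (64 & 0x3) == 0, the result is simply
   (plus (reg 9) (const_int 64)), which is still a valid D-form
   address.  */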
7114
7115 /* Split a variable vec_extract operation into the component instructions. */
7116
7117 void
7118 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7119 rtx tmp_altivec)
7120 {
7121 machine_mode mode = GET_MODE (src);
7122 machine_mode scalar_mode = GET_MODE (dest);
7123 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7124 int byte_shift = exact_log2 (scalar_size);
7125
7126 gcc_assert (byte_shift >= 0);
7127
7128 /* If we are given a memory address, optimize to load just the element. We
7129 don't have to adjust the vector element number on little endian
7130 systems. */
7131 if (MEM_P (src))
7132 {
7133 gcc_assert (REG_P (tmp_gpr));
7134 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7135 tmp_gpr, scalar_mode));
7136 return;
7137 }
7138
7139 else if (REG_P (src) || SUBREG_P (src))
7140 {
7141 int bit_shift = byte_shift + 3;
7142 rtx element2;
7143 int dest_regno = regno_or_subregno (dest);
7144 int src_regno = regno_or_subregno (src);
7145 int element_regno = regno_or_subregno (element);
7146
7147 gcc_assert (REG_P (tmp_gpr));
7148
7149 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7150 a general purpose register. */
7151 if (TARGET_P9_VECTOR
7152 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7153 && INT_REGNO_P (dest_regno)
7154 && ALTIVEC_REGNO_P (src_regno)
7155 && INT_REGNO_P (element_regno))
7156 {
7157 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7158 rtx element_si = gen_rtx_REG (SImode, element_regno);
7159
7160 if (mode == V16QImode)
7161 emit_insn (BYTES_BIG_ENDIAN
7162 ? gen_vextublx (dest_si, element_si, src)
7163 : gen_vextubrx (dest_si, element_si, src));
7164
7165 else if (mode == V8HImode)
7166 {
7167 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7168 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7169 emit_insn (BYTES_BIG_ENDIAN
7170 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7171 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7172 }
7173
7174
7175 else
7176 {
7177 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7178 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7179 emit_insn (BYTES_BIG_ENDIAN
7180 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7181 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7182 }
7183
7184 return;
7185 }
7186
7187
7188 gcc_assert (REG_P (tmp_altivec));
7189
7190 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7191 an XOR, otherwise we need to subtract. The shift amount is chosen so
7192 that VSLO will shift the element into the upper position (adding 3 to
7193 convert a byte shift into a bit shift). */
7194 if (scalar_size == 8)
7195 {
7196 if (!BYTES_BIG_ENDIAN)
7197 {
7198 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7199 element2 = tmp_gpr;
7200 }
7201 else
7202 element2 = element;
7203
7204 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7205 bit. */
7206 emit_insn (gen_rtx_SET (tmp_gpr,
7207 gen_rtx_AND (DImode,
7208 gen_rtx_ASHIFT (DImode,
7209 element2,
7210 GEN_INT (6)),
7211 GEN_INT (64))));
7212 }
7213 else
7214 {
7215 if (!BYTES_BIG_ENDIAN)
7216 {
7217 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7218
7219 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7220 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7221 element2 = tmp_gpr;
7222 }
7223 else
7224 element2 = element;
7225
7226 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7227 }
7228
7229 /* Get the value into the lower byte of the Altivec register where VSLO
7230 expects it. */
7231 if (TARGET_P9_VECTOR)
7232 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7233 else if (can_create_pseudo_p ())
7234 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7235 else
7236 {
7237 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7238 emit_move_insn (tmp_di, tmp_gpr);
7239 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7240 }
7241
7242 /* Do the VSLO to get the value into the final location. */
7243 switch (mode)
7244 {
7245 case E_V2DFmode:
7246 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7247 return;
7248
7249 case E_V2DImode:
7250 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7251 return;
7252
7253 case E_V4SFmode:
7254 {
7255 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7256 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7257 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7258 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7259 tmp_altivec));
7260
7261 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7262 return;
7263 }
7264
7265 case E_V4SImode:
7266 case E_V8HImode:
7267 case E_V16QImode:
7268 {
7269 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7270 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7271 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7272 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7273 tmp_altivec));
7274 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7275 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7276 GEN_INT (64 - (8 * scalar_size))));
7277 return;
7278 }
7279
7280 default:
7281 gcc_unreachable ();
7282 }
7283
7284 return;
7285 }
7286 else
7287 gcc_unreachable ();
7288 }
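/* Worked example of the little-endian adjustment above: for V4SImode,
   GET_MODE_NUNITS is 4, so num_ele_m1 = 3.  Extracting element 1 on a
   little-endian system computes 3 - (1 & 3) = 2, and with
   bit_shift = 2 + 3 = 5 the shift amount handed to VSLO is
   2 << 5 = 64 bits, i.e. 8 bytes -- moving the requested element into
   the upper position.  */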
7289
7290 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7291 selects whether the alignment is ABI-mandated, optional, or
7292 both ABI-mandated and optional. */
7293
7294 unsigned int
7295 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7296 {
7297 if (how != align_opt)
7298 {
7299 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7300 align = 128;
7301 }
7302
7303 if (how != align_abi)
7304 {
7305 if (TREE_CODE (type) == ARRAY_TYPE
7306 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7307 {
7308 if (align < BITS_PER_WORD)
7309 align = BITS_PER_WORD;
7310 }
7311 }
7312
7313 return align;
7314 }
7315
7316 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7317 instructions simply ignore the low bits; VSX memory instructions
7318 are aligned to 4 or 8 bytes. */
7319
7320 static bool
7321 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7322 {
7323 return (STRICT_ALIGNMENT
7324 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7325 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7326 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7327 && (int) align < VECTOR_ALIGN (mode)))));
7328 }
7329
7330 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7331
7332 bool
7333 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7334 {
7335 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7336 {
7337 if (computed != 128)
7338 {
7339 static bool warned;
7340 if (!warned && warn_psabi)
7341 {
7342 warned = true;
7343 inform (input_location,
7344 "the layout of aggregates containing vectors with"
7345 " %d-byte alignment has changed in GCC 5",
7346 computed / BITS_PER_UNIT);
7347 }
7348 }
7349 /* In current GCC there is no special case. */
7350 return false;
7351 }
7352
7353 return false;
7354 }
7355
7356 /* AIX increases natural record alignment to doubleword if the first
7357 field is an FP double while the FP fields remain word aligned. */
7358
7359 unsigned int
7360 rs6000_special_round_type_align (tree type, unsigned int computed,
7361 unsigned int specified)
7362 {
7363 unsigned int align = MAX (computed, specified);
7364 tree field = TYPE_FIELDS (type);
7365
7366 /* Skip all non-field decls. */
7367 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7368 field = DECL_CHAIN (field);
7369
7370 if (field != NULL && field != type)
7371 {
7372 type = TREE_TYPE (field);
7373 while (TREE_CODE (type) == ARRAY_TYPE)
7374 type = TREE_TYPE (type);
7375
7376 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7377 align = MAX (align, 64);
7378 }
7379
7380 return align;
7381 }
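/* Example of the rule above (illustrative only):

     struct s { double d; int i; };

   The first field has DFmode, so the whole record is raised to 64-bit
   (doubleword) alignment, while the FP field itself stays word
   aligned within the record.  */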
7382
7383 /* Darwin increases record alignment to the natural alignment of
7384 the first field. */
7385
7386 unsigned int
7387 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7388 unsigned int specified)
7389 {
7390 unsigned int align = MAX (computed, specified);
7391
7392 if (TYPE_PACKED (type))
7393 return align;
7394
7395 /* Find the first field, looking down into aggregates. */
7396 do {
7397 tree field = TYPE_FIELDS (type);
7398 /* Skip all non-field decls. */
7399 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7400 field = DECL_CHAIN (field);
7401 if (! field)
7402 break;
7403 /* A packed field does not contribute any extra alignment. */
7404 if (DECL_PACKED (field))
7405 return align;
7406 type = TREE_TYPE (field);
7407 while (TREE_CODE (type) == ARRAY_TYPE)
7408 type = TREE_TYPE (type);
7409 } while (AGGREGATE_TYPE_P (type));
7410
7411 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7412 align = MAX (align, TYPE_ALIGN (type));
7413
7414 return align;
7415 }
7416
7417 /* Return 1 for an operand in small memory on V.4/eabi. */
7418
7419 int
7420 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7421 machine_mode mode ATTRIBUTE_UNUSED)
7422 {
7423 #if TARGET_ELF
7424 rtx sym_ref;
7425
7426 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7427 return 0;
7428
7429 if (DEFAULT_ABI != ABI_V4)
7430 return 0;
7431
7432 if (GET_CODE (op) == SYMBOL_REF)
7433 sym_ref = op;
7434
7435 else if (GET_CODE (op) != CONST
7436 || GET_CODE (XEXP (op, 0)) != PLUS
7437 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7438 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7439 return 0;
7440
7441 else
7442 {
7443 rtx sum = XEXP (op, 0);
7444 HOST_WIDE_INT summand;
7445
7446 /* We have to be careful here, because it is the referenced address
7447 that must be within 32k of _SDA_BASE_, not just the symbol. */
7448 summand = INTVAL (XEXP (sum, 1));
7449 if (summand < 0 || summand > g_switch_value)
7450 return 0;
7451
7452 sym_ref = XEXP (sum, 0);
7453 }
7454
7455 return SYMBOL_REF_SMALL_P (sym_ref);
7456 #else
7457 return 0;
7458 #endif
7459 }
7460
7461 /* Return true if either operand is a general purpose register. */
7462
7463 bool
7464 gpr_or_gpr_p (rtx op0, rtx op1)
7465 {
7466 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7467 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7468 }
7469
7470 /* Return true if this is a move direct operation between GPR registers and
7471 floating point/VSX registers. */
7472
7473 bool
7474 direct_move_p (rtx op0, rtx op1)
7475 {
7476 int regno0, regno1;
7477
7478 if (!REG_P (op0) || !REG_P (op1))
7479 return false;
7480
7481 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7482 return false;
7483
7484 regno0 = REGNO (op0);
7485 regno1 = REGNO (op1);
7486 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7487 return false;
7488
7489 if (INT_REGNO_P (regno0))
7490 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7491
7492 else if (INT_REGNO_P (regno1))
7493 {
7494 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7495 return true;
7496
7497 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7498 return true;
7499 }
7500
7501 return false;
7502 }
7503
7504 /* Return true if the OFFSET is valid for the quad address instructions that
7505 use d-form (register + offset) addressing. */
7506
7507 static inline bool
7508 quad_address_offset_p (HOST_WIDE_INT offset)
7509 {
7510 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7511 }
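/* E.g. offsets 0, 16 and -32768 are accepted here, while 8 (not a
   multiple of 16) and 32768 (outside the signed 16-bit range) are
   rejected.  */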
7512
7513 /* Return true if the ADDR is an acceptable address for a quad memory
7514 operation of mode MODE (either LQ/STQ for general purpose registers, or
7515 LXV/STXV for vector registers under ISA 3.0). STRICT requires that any
7516 base register satisfy the strict form of the RTL address checks. */
7518
7519 bool
7520 quad_address_p (rtx addr, machine_mode mode, bool strict)
7521 {
7522 rtx op0, op1;
7523
7524 if (GET_MODE_SIZE (mode) != 16)
7525 return false;
7526
7527 if (legitimate_indirect_address_p (addr, strict))
7528 return true;
7529
7530 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7531 return false;
7532
7533 if (GET_CODE (addr) != PLUS)
7534 return false;
7535
7536 op0 = XEXP (addr, 0);
7537 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7538 return false;
7539
7540 op1 = XEXP (addr, 1);
7541 if (!CONST_INT_P (op1))
7542 return false;
7543
7544 return quad_address_offset_p (INTVAL (op1));
7545 }
7546
7547 /* Return true if this is a load or store quad operation. This function does
7548 not handle the atomic quad memory instructions. */
7549
7550 bool
7551 quad_load_store_p (rtx op0, rtx op1)
7552 {
7553 bool ret;
7554
7555 if (!TARGET_QUAD_MEMORY)
7556 ret = false;
7557
7558 else if (REG_P (op0) && MEM_P (op1))
7559 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7560 && quad_memory_operand (op1, GET_MODE (op1))
7561 && !reg_overlap_mentioned_p (op0, op1));
7562
7563 else if (MEM_P (op0) && REG_P (op1))
7564 ret = (quad_memory_operand (op0, GET_MODE (op0))
7565 && quad_int_reg_operand (op1, GET_MODE (op1)));
7566
7567 else
7568 ret = false;
7569
7570 if (TARGET_DEBUG_ADDR)
7571 {
7572 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7573 ret ? "true" : "false");
7574 debug_rtx (gen_rtx_SET (op0, op1));
7575 }
7576
7577 return ret;
7578 }
7579
7580 /* Given an address, return a constant offset term if one exists. */
7581
7582 static rtx
7583 address_offset (rtx op)
7584 {
7585 if (GET_CODE (op) == PRE_INC
7586 || GET_CODE (op) == PRE_DEC)
7587 op = XEXP (op, 0);
7588 else if (GET_CODE (op) == PRE_MODIFY
7589 || GET_CODE (op) == LO_SUM)
7590 op = XEXP (op, 1);
7591
7592 if (GET_CODE (op) == CONST)
7593 op = XEXP (op, 0);
7594
7595 if (GET_CODE (op) == PLUS)
7596 op = XEXP (op, 1);
7597
7598 if (CONST_INT_P (op))
7599 return op;
7600
7601 return NULL_RTX;
7602 }
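/* Examples (illustrative):
     (plus (reg 3) (const_int 8))                  --> (const_int 8)
     (lo_sum (reg 3) (const (plus (symbol_ref "x")
                                  (const_int 4)))) --> (const_int 4)
     (reg 3)                                       --> NULL_RTX  */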
7603
7604 /* Return true if the MEM operand is a memory operand suitable for use
7605 with a (full width, possibly multiple) gpr load/store. On
7606 powerpc64 this means the offset must be divisible by 4.
7607 Implements 'Y' constraint.
7608
7609 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7610 a constraint function we know the operand has satisfied a suitable
7611 memory predicate. Also accept some odd rtl generated by reload
7612 (see rs6000_legitimize_reload_address for various forms). It is
7613 important that reload rtl be accepted by appropriate constraints
7614 but not by the operand predicate.
7615
7616 Offsetting a lo_sum should not be allowed, except where we know by
7617 alignment that a 32k boundary is not crossed, but see the ???
7618 comment in rs6000_legitimize_reload_address. Note that by
7619 "offsetting" here we mean a further offset to access parts of the
7620 MEM. It's fine to have a lo_sum where the inner address is offset
7621 from a sym, since the same sym+offset will appear in the high part
7622 of the address calculation. */
7623
7624 bool
7625 mem_operand_gpr (rtx op, machine_mode mode)
7626 {
7627 unsigned HOST_WIDE_INT offset;
7628 int extra;
7629 rtx addr = XEXP (op, 0);
7630
7631 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7632 if (TARGET_UPDATE
7633 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7634 && mode_supports_pre_incdec_p (mode)
7635 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7636 return true;
7637
7638 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7639 if (!rs6000_offsettable_memref_p (op, mode, false))
7640 return false;
7641
7642 op = address_offset (addr);
7643 if (op == NULL_RTX)
7644 return true;
7645
7646 offset = INTVAL (op);
7647 if (TARGET_POWERPC64 && (offset & 3) != 0)
7648 return false;
7649
7650 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7651 if (extra < 0)
7652 extra = 0;
7653
7654 if (GET_CODE (addr) == LO_SUM)
7655 /* For lo_sum addresses, we must allow any offset except one that
7656 causes a wrap, so test only the low 16 bits. */
7657 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7658
7659 return offset + 0x8000 < 0x10000u - extra;
7660 }
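/* Worked example of the checks above: a DImode access on powerpc64
   has extra = 0, so the offset must satisfy (offset & 3) == 0 and lie
   in [-0x8000, 0x7fff].  For a lo_sum address only the low 16 bits
   matter: ((offset & 0xffff) ^ 0x8000) - 0x8000 sign-extends them, so
   a low half of 0xfffc becomes -4 and passes.  A 16-byte access
   (extra = 8) at offset 0x7ffc is rejected, since 0xfffc is not less
   than 0x10000 - 8 -- the tail would wrap past the 32k boundary.  */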
7661
7662 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7663 enforce an offset divisible by 4 even for 32-bit. */
7664
7665 bool
7666 mem_operand_ds_form (rtx op, machine_mode mode)
7667 {
7668 unsigned HOST_WIDE_INT offset;
7669 int extra;
7670 rtx addr = XEXP (op, 0);
7671
7672 if (!offsettable_address_p (false, mode, addr))
7673 return false;
7674
7675 op = address_offset (addr);
7676 if (op == NULL_RTX)
7677 return true;
7678
7679 offset = INTVAL (op);
7680 if ((offset & 3) != 0)
7681 return false;
7682
7683 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7684 if (extra < 0)
7685 extra = 0;
7686
7687 if (GET_CODE (addr) == LO_SUM)
7688 /* For lo_sum addresses, we must allow any offset except one that
7689 causes a wrap, so test only the low 16 bits. */
7690 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7691
7692 return offset + 0x8000 < 0x10000u - extra;
7693 }
7694 \f
7695 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7696
7697 static bool
7698 reg_offset_addressing_ok_p (machine_mode mode)
7699 {
7700 switch (mode)
7701 {
7702 case E_V16QImode:
7703 case E_V8HImode:
7704 case E_V4SFmode:
7705 case E_V4SImode:
7706 case E_V2DFmode:
7707 case E_V2DImode:
7708 case E_V1TImode:
7709 case E_TImode:
7710 case E_TFmode:
7711 case E_KFmode:
7712 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7713 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7714 a vector mode, if we want to use the VSX registers to move it around,
7715 we need to restrict ourselves to reg+reg addressing. Similarly for
7716 IEEE 128-bit floating point that is passed in a single vector
7717 register. */
7718 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7719 return mode_supports_dq_form (mode);
7720 break;
7721
7722 case E_SDmode:
7723 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7724 addressing for the LFIWZX and STFIWX instructions. */
7725 if (TARGET_NO_SDMODE_STACK)
7726 return false;
7727 break;
7728
7729 default:
7730 break;
7731 }
7732
7733 return true;
7734 }
7735
7736 static bool
7737 virtual_stack_registers_memory_p (rtx op)
7738 {
7739 int regnum;
7740
7741 if (GET_CODE (op) == REG)
7742 regnum = REGNO (op);
7743
7744 else if (GET_CODE (op) == PLUS
7745 && GET_CODE (XEXP (op, 0)) == REG
7746 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7747 regnum = REGNO (XEXP (op, 0));
7748
7749 else
7750 return false;
7751
7752 return (regnum >= FIRST_VIRTUAL_REGISTER
7753 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7754 }
7755
7756 /* Return true if a MODE-sized memory access to OP plus OFFSET
7757 is known to not straddle a 32k boundary. This function is used
7758 to determine whether -mcmodel=medium code can use TOC pointer
7759 relative addressing for OP. This means the alignment of the TOC
7760 pointer must also be taken into account, and unfortunately that is
7761 only 8 bytes. */
7762
7763 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7764 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7765 #endif
7766
7767 static bool
7768 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7769 machine_mode mode)
7770 {
7771 tree decl;
7772 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7773
7774 if (GET_CODE (op) != SYMBOL_REF)
7775 return false;
7776
7777 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7778 SYMBOL_REF. */
7779 if (mode_supports_dq_form (mode))
7780 return false;
7781
7782 dsize = GET_MODE_SIZE (mode);
7783 decl = SYMBOL_REF_DECL (op);
7784 if (!decl)
7785 {
7786 if (dsize == 0)
7787 return false;
7788
7789 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7790 replacing memory addresses with an anchor plus offset. We
7791 could find the decl by rummaging around in the block->objects
7792 VEC for the given offset but that seems like too much work. */
7793 dalign = BITS_PER_UNIT;
7794 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7795 && SYMBOL_REF_ANCHOR_P (op)
7796 && SYMBOL_REF_BLOCK (op) != NULL)
7797 {
7798 struct object_block *block = SYMBOL_REF_BLOCK (op);
7799
7800 dalign = block->alignment;
7801 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7802 }
7803 else if (CONSTANT_POOL_ADDRESS_P (op))
7804 {
7805 /* It would be nice to have get_pool_align().. */
7806 machine_mode cmode = get_pool_mode (op);
7807
7808 dalign = GET_MODE_ALIGNMENT (cmode);
7809 }
7810 }
7811 else if (DECL_P (decl))
7812 {
7813 dalign = DECL_ALIGN (decl);
7814
7815 if (dsize == 0)
7816 {
7817 /* Allow BLKmode when the entire object is known to not
7818 cross a 32k boundary. */
7819 if (!DECL_SIZE_UNIT (decl))
7820 return false;
7821
7822 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7823 return false;
7824
7825 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7826 if (dsize > 32768)
7827 return false;
7828
7829 dalign /= BITS_PER_UNIT;
7830 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7831 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7832 return dalign >= dsize;
7833 }
7834 }
7835 else
7836 gcc_unreachable ();
7837
7838 /* Find how many bits of the alignment we know for this access. */
7839 dalign /= BITS_PER_UNIT;
7840 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7841 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7842 mask = dalign - 1;
7843 lsb = offset & -offset;
7844 mask &= lsb - 1;
7845 dalign = mask + 1;
7846
7847 return dalign >= dsize;
7848 }
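/* Worked example of the bit twiddling above: with dalign = 8 bytes
   and offset = 12, lsb = 12 & -12 = 4, mask = (8 - 1) & (4 - 1) = 3,
   and dalign becomes 3 + 1 = 4.  The access is thus known to be only
   4-byte aligned, so dsize = 8 fails while dsize <= 4 succeeds.  */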
7849
7850 static bool
7851 constant_pool_expr_p (rtx op)
7852 {
7853 rtx base, offset;
7854
7855 split_const (op, &base, &offset);
7856 return (GET_CODE (base) == SYMBOL_REF
7857 && CONSTANT_POOL_ADDRESS_P (base)
7858 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7859 }
7860
7861 /* These are only used to pass through from print_operand/print_operand_address
7862 to rs6000_output_addr_const_extra over the intervening function
7863 output_addr_const which is not target code. */
7864 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7865
7866 /* Return true if OP is a toc pointer relative address (the output
7867 of create_TOC_reference). If STRICT, do not match non-split
7868 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7869 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7870 TOCREL_OFFSET_RET respectively. */
7871
7872 bool
7873 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7874 const_rtx *tocrel_offset_ret)
7875 {
7876 if (!TARGET_TOC)
7877 return false;
7878
7879 if (TARGET_CMODEL != CMODEL_SMALL)
7880 {
7881 /* When strict, ensure we have everything tidy. */
7882 if (strict
7883 && !(GET_CODE (op) == LO_SUM
7884 && REG_P (XEXP (op, 0))
7885 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7886 return false;
7887
7888 /* When not strict, allow non-split TOC addresses and also allow
7889 (lo_sum (high ..)) TOC addresses created during reload. */
7890 if (GET_CODE (op) == LO_SUM)
7891 op = XEXP (op, 1);
7892 }
7893
7894 const_rtx tocrel_base = op;
7895 const_rtx tocrel_offset = const0_rtx;
7896
7897 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7898 {
7899 tocrel_base = XEXP (op, 0);
7900 tocrel_offset = XEXP (op, 1);
7901 }
7902
7903 if (tocrel_base_ret)
7904 *tocrel_base_ret = tocrel_base;
7905 if (tocrel_offset_ret)
7906 *tocrel_offset_ret = tocrel_offset;
7907
7908 return (GET_CODE (tocrel_base) == UNSPEC
7909 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7910 && REG_P (XVECEXP (tocrel_base, 0, 1))
7911 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7912 }
7913
7914 /* Return true if X is a constant pool address, and also for cmodel=medium
7915 if X is a toc-relative address known to be offsettable within MODE. */
7916
7917 bool
7918 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7919 bool strict)
7920 {
7921 const_rtx tocrel_base, tocrel_offset;
7922 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7923 && (TARGET_CMODEL != CMODEL_MEDIUM
7924 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7925 || mode == QImode
7926 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7927 INTVAL (tocrel_offset), mode)));
7928 }
7929
7930 static bool
7931 legitimate_small_data_p (machine_mode mode, rtx x)
7932 {
7933 return (DEFAULT_ABI == ABI_V4
7934 && !flag_pic && !TARGET_TOC
7935 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7936 && small_data_operand (x, mode));
7937 }
7938
7939 bool
7940 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7941 bool strict, bool worst_case)
7942 {
7943 unsigned HOST_WIDE_INT offset;
7944 unsigned int extra;
7945
7946 if (GET_CODE (x) != PLUS)
7947 return false;
7948 if (!REG_P (XEXP (x, 0)))
7949 return false;
7950 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7951 return false;
7952 if (mode_supports_dq_form (mode))
7953 return quad_address_p (x, mode, strict);
7954 if (!reg_offset_addressing_ok_p (mode))
7955 return virtual_stack_registers_memory_p (x);
7956 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7957 return true;
7958 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
7959 return false;
7960
7961 offset = INTVAL (XEXP (x, 1));
7962 extra = 0;
7963 switch (mode)
7964 {
7965 case E_DFmode:
7966 case E_DDmode:
7967 case E_DImode:
7968 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7969 addressing. */
7970 if (VECTOR_MEM_VSX_P (mode))
7971 return false;
7972
7973 if (!worst_case)
7974 break;
7975 if (!TARGET_POWERPC64)
7976 extra = 4;
7977 else if (offset & 3)
7978 return false;
7979 break;
7980
7981 case E_TFmode:
7982 case E_IFmode:
7983 case E_KFmode:
7984 case E_TDmode:
7985 case E_TImode:
7986 case E_PTImode:
7987 extra = 8;
7988 if (!worst_case)
7989 break;
7990 if (!TARGET_POWERPC64)
7991 extra = 12;
7992 else if (offset & 3)
7993 return false;
7994 break;
7995
7996 default:
7997 break;
7998 }
7999
8000 offset += 0x8000;
8001 return offset < 0x10000 - extra;
8002 }
8003
8004 bool
8005 legitimate_indexed_address_p (rtx x, int strict)
8006 {
8007 rtx op0, op1;
8008
8009 if (GET_CODE (x) != PLUS)
8010 return false;
8011
8012 op0 = XEXP (x, 0);
8013 op1 = XEXP (x, 1);
8014
8015 return (REG_P (op0) && REG_P (op1)
8016 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8017 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8018 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8019 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8020 }
8021
8022 bool
8023 avoiding_indexed_address_p (machine_mode mode)
8024 {
8025 /* Avoid indexed addressing for modes that have non-indexed
8026 load/store instruction forms. */
8027 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8028 }
8029
8030 bool
8031 legitimate_indirect_address_p (rtx x, int strict)
8032 {
8033 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8034 }
8035
8036 bool
8037 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8038 {
8039 if (!TARGET_MACHO || !flag_pic
8040 || mode != SImode || GET_CODE (x) != MEM)
8041 return false;
8042 x = XEXP (x, 0);
8043
8044 if (GET_CODE (x) != LO_SUM)
8045 return false;
8046 if (GET_CODE (XEXP (x, 0)) != REG)
8047 return false;
8048 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8049 return false;
8050 x = XEXP (x, 1);
8051
8052 return CONSTANT_P (x);
8053 }
8054
8055 static bool
8056 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8057 {
8058 if (GET_CODE (x) != LO_SUM)
8059 return false;
8060 if (GET_CODE (XEXP (x, 0)) != REG)
8061 return false;
8062 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8063 return false;
8064 /* quad word addresses are restricted, and we can't use LO_SUM. */
8065 if (mode_supports_dq_form (mode))
8066 return false;
8067 x = XEXP (x, 1);
8068
8069 if (TARGET_ELF || TARGET_MACHO)
8070 {
8071 bool large_toc_ok;
8072
8073 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8074 return false;
8075 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, since that hook
8076 usually calls push_reload from the old reload pass.
8077 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
8078 valid even though this function says the opposite. In most
8079 cases LRA can generate correct code for address reloads through
8080 its own transformations; only some LO_SUM cases are beyond it.
8081 So we need code here, analogous to the LO_SUM handling in
8082 rs6000_legitimize_reload_address, saying some are still valid. */
8083 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8084 && small_toc_ref (x, VOIDmode));
8085 if (TARGET_TOC && ! large_toc_ok)
8086 return false;
8087 if (GET_MODE_NUNITS (mode) != 1)
8088 return false;
8089 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8090 && !(/* ??? Assume floating point reg based on mode? */
8091 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8092 return false;
8093
8094 return CONSTANT_P (x) || large_toc_ok;
8095 }
8096
8097 return false;
8098 }
8099
8100
8101 /* Try machine-dependent ways of modifying an illegitimate address
8102 to be legitimate. If we find one, return the new, valid address.
8103 This is used from only one place: `memory_address' in explow.c.
8104
8105 OLDX is the address as it was before break_out_memory_refs was
8106 called. In some cases it is useful to look at this to decide what
8107 needs to be done.
8108
8109 It is always safe for this function to do nothing. It exists to
8110 recognize opportunities to optimize the output.
8111
8112 On RS/6000, first check for the sum of a register with a constant
8113 integer that is out of range. If so, generate code to add the
8114 constant with the low-order 16 bits masked to the register and force
8115 this result into another register (this can be done with `cau').
8116 Then generate an address of REG+(CONST&0xffff), allowing for the
8117 possibility of bit 16 being a one.
8118
8119 Then check for the sum of a register and something not constant, try to
8120 load the other things into a register and return the sum. */
8121
8122 static rtx
8123 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8124 machine_mode mode)
8125 {
8126 unsigned int extra;
8127
8128 if (!reg_offset_addressing_ok_p (mode)
8129 || mode_supports_dq_form (mode))
8130 {
8131 if (virtual_stack_registers_memory_p (x))
8132 return x;
8133
8134 /* In theory we should not be seeing addresses of the form reg+0,
8135 but just in case it is generated, optimize it away. */
8136 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8137 return force_reg (Pmode, XEXP (x, 0));
8138
8139 /* For TImode with load/store quad, restrict addresses to just a single
8140 pointer, so it works with both GPRs and VSX registers. */
8141 /* Make sure both operands are registers. */
8142 else if (GET_CODE (x) == PLUS
8143 && (mode != TImode || !TARGET_VSX))
8144 return gen_rtx_PLUS (Pmode,
8145 force_reg (Pmode, XEXP (x, 0)),
8146 force_reg (Pmode, XEXP (x, 1)));
8147 else
8148 return force_reg (Pmode, x);
8149 }
8150 if (GET_CODE (x) == SYMBOL_REF)
8151 {
8152 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8153 if (model != 0)
8154 return rs6000_legitimize_tls_address (x, model);
8155 }
8156
8157 extra = 0;
8158 switch (mode)
8159 {
8160 case E_TFmode:
8161 case E_TDmode:
8162 case E_TImode:
8163 case E_PTImode:
8164 case E_IFmode:
8165 case E_KFmode:
8166 /* As in legitimate_offset_address_p we do not assume
8167 worst-case. The mode here is just a hint as to the registers
8168 used. A TImode is usually in gprs, but may actually be in
8169 fprs. Leave worst-case scenario for reload to handle via
8170 insn constraints. PTImode is only GPRs. */
8171 extra = 8;
8172 break;
8173 default:
8174 break;
8175 }
8176
8177 if (GET_CODE (x) == PLUS
8178 && GET_CODE (XEXP (x, 0)) == REG
8179 && GET_CODE (XEXP (x, 1)) == CONST_INT
8180 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8181 >= 0x10000 - extra))
8182 {
8183 HOST_WIDE_INT high_int, low_int;
8184 rtx sum;
8185 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8186 if (low_int >= 0x8000 - extra)
8187 low_int = 0;
8188 high_int = INTVAL (XEXP (x, 1)) - low_int;
8189 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8190 GEN_INT (high_int)), 0);
8191 return plus_constant (Pmode, sum, low_int);
8192 }
8193 else if (GET_CODE (x) == PLUS
8194 && GET_CODE (XEXP (x, 0)) == REG
8195 && GET_CODE (XEXP (x, 1)) != CONST_INT
8196 && GET_MODE_NUNITS (mode) == 1
8197 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8198 || (/* ??? Assume floating point reg based on mode? */
8199 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8200 && !avoiding_indexed_address_p (mode))
8201 {
8202 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8203 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8204 }
8205 else if ((TARGET_ELF
8206 #if TARGET_MACHO
8207 || !MACHO_DYNAMIC_NO_PIC_P
8208 #endif
8209 )
8210 && TARGET_32BIT
8211 && TARGET_NO_TOC
8212 && ! flag_pic
8213 && GET_CODE (x) != CONST_INT
8214 && GET_CODE (x) != CONST_WIDE_INT
8215 && GET_CODE (x) != CONST_DOUBLE
8216 && CONSTANT_P (x)
8217 && GET_MODE_NUNITS (mode) == 1
8218 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8219 || (/* ??? Assume floating point reg based on mode? */
8220 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8221 {
8222 rtx reg = gen_reg_rtx (Pmode);
8223 if (TARGET_ELF)
8224 emit_insn (gen_elf_high (reg, x));
8225 else
8226 emit_insn (gen_macho_high (reg, x));
8227 return gen_rtx_LO_SUM (Pmode, reg, x);
8228 }
8229 else if (TARGET_TOC
8230 && GET_CODE (x) == SYMBOL_REF
8231 && constant_pool_expr_p (x)
8232 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8233 return create_TOC_reference (x, NULL_RTX);
8234 else
8235 return x;
8236 }
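/* Worked example of the high/low split above: for
   (plus (reg 3) (const_int 0x12345)), low_int =
   ((0x2345 ^ 0x8000) - 0x8000) = 0x2345 and high_int = 0x10000, so
   the high part is added into a new register and the final address is
   (plus (reg tmp) (const_int 0x2345)).  With offset 0x18000 the low
   half sign-extends to -0x8000, giving high_int = 0x20000 -- this is
   the "bit 16 being a one" case mentioned above.  */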
8237
8238 /* Debug version of rs6000_legitimize_address. */
8239 static rtx
8240 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8241 {
8242 rtx ret;
8243 rtx_insn *insns;
8244
8245 start_sequence ();
8246 ret = rs6000_legitimize_address (x, oldx, mode);
8247 insns = get_insns ();
8248 end_sequence ();
8249
8250 if (ret != x)
8251 {
8252 fprintf (stderr,
8253 "\nrs6000_legitimize_address: mode %s, old code %s, "
8254 "new code %s, modified\n",
8255 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8256 GET_RTX_NAME (GET_CODE (ret)));
8257
8258 fprintf (stderr, "Original address:\n");
8259 debug_rtx (x);
8260
8261 fprintf (stderr, "oldx:\n");
8262 debug_rtx (oldx);
8263
8264 fprintf (stderr, "New address:\n");
8265 debug_rtx (ret);
8266
8267 if (insns)
8268 {
8269 fprintf (stderr, "Insns added:\n");
8270 debug_rtx_list (insns, 20);
8271 }
8272 }
8273 else
8274 {
8275 fprintf (stderr,
8276 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8277 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8278
8279 debug_rtx (x);
8280 }
8281
8282 if (insns)
8283 emit_insn (insns);
8284
8285 return ret;
8286 }
8287
8288 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8289 We need to emit DTP-relative relocations. */
8290
8291 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8292 static void
8293 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8294 {
8295 switch (size)
8296 {
8297 case 4:
8298 fputs ("\t.long\t", file);
8299 break;
8300 case 8:
8301 fputs (DOUBLE_INT_ASM_OP, file);
8302 break;
8303 default:
8304 gcc_unreachable ();
8305 }
8306 output_addr_const (file, x);
8307 if (TARGET_ELF)
8308 fputs ("@dtprel+0x8000", file);
8309 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8310 {
8311 switch (SYMBOL_REF_TLS_MODEL (x))
8312 {
8313 case 0:
8314 break;
8315 case TLS_MODEL_LOCAL_EXEC:
8316 fputs ("@le", file);
8317 break;
8318 case TLS_MODEL_INITIAL_EXEC:
8319 fputs ("@ie", file);
8320 break;
8321 case TLS_MODEL_GLOBAL_DYNAMIC:
8322 case TLS_MODEL_LOCAL_DYNAMIC:
8323 fputs ("@m", file);
8324 break;
8325 default:
8326 gcc_unreachable ();
8327 }
8328 }
8329 }
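/* E.g. a 4-byte entry for symbol x on an ELF target comes out as

     .long   x@dtprel+0x8000

   while on XCOFF a local-exec symbol instead gets an "@le" suffix.  */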
8330
8331 /* Return true if X is a symbol that refers to real (rather than emulated)
8332 TLS. */
8333
8334 static bool
8335 rs6000_real_tls_symbol_ref_p (rtx x)
8336 {
8337 return (GET_CODE (x) == SYMBOL_REF
8338 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8339 }
8340
8341 /* In the name of slightly smaller debug output, and to cater to
8342 general assembler lossage, recognize various UNSPEC sequences
8343 and turn them back into a direct symbol reference. */
8344
8345 static rtx
8346 rs6000_delegitimize_address (rtx orig_x)
8347 {
8348 rtx x, y, offset;
8349
8350 orig_x = delegitimize_mem_from_attrs (orig_x);
8351 x = orig_x;
8352 if (MEM_P (x))
8353 x = XEXP (x, 0);
8354
8355 y = x;
8356 if (TARGET_CMODEL != CMODEL_SMALL
8357 && GET_CODE (y) == LO_SUM)
8358 y = XEXP (y, 1);
8359
8360 offset = NULL_RTX;
8361 if (GET_CODE (y) == PLUS
8362 && GET_MODE (y) == Pmode
8363 && CONST_INT_P (XEXP (y, 1)))
8364 {
8365 offset = XEXP (y, 1);
8366 y = XEXP (y, 0);
8367 }
8368
8369 if (GET_CODE (y) == UNSPEC
8370 && XINT (y, 1) == UNSPEC_TOCREL)
8371 {
8372 y = XVECEXP (y, 0, 0);
8373
8374 #ifdef HAVE_AS_TLS
8375 /* Do not associate thread-local symbols with the original
8376 constant pool symbol. */
8377 if (TARGET_XCOFF
8378 && GET_CODE (y) == SYMBOL_REF
8379 && CONSTANT_POOL_ADDRESS_P (y)
8380 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8381 return orig_x;
8382 #endif
8383
8384 if (offset != NULL_RTX)
8385 y = gen_rtx_PLUS (Pmode, y, offset);
8386 if (!MEM_P (orig_x))
8387 return y;
8388 else
8389 return replace_equiv_address_nv (orig_x, y);
8390 }
8391
8392 if (TARGET_MACHO
8393 && GET_CODE (orig_x) == LO_SUM
8394 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8395 {
8396 y = XEXP (XEXP (orig_x, 1), 0);
8397 if (GET_CODE (y) == UNSPEC
8398 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8399 return XVECEXP (y, 0, 0);
8400 }
8401
8402 return orig_x;
8403 }
8404
8405 /* Return true if X shouldn't be emitted into the debug info.
8406 The linker doesn't like .toc section references from
8407 .debug_* sections, so reject .toc section symbols. */
8408
8409 static bool
8410 rs6000_const_not_ok_for_debug_p (rtx x)
8411 {
8412 if (GET_CODE (x) == UNSPEC)
8413 return true;
8414 if (GET_CODE (x) == SYMBOL_REF
8415 && CONSTANT_POOL_ADDRESS_P (x))
8416 {
8417 rtx c = get_pool_constant (x);
8418 machine_mode cmode = get_pool_mode (x);
8419 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8420 return true;
8421 }
8422
8423 return false;
8424 }
8425
8426
8427 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8428
8429 static bool
8430 rs6000_legitimate_combined_insn (rtx_insn *insn)
8431 {
8432 int icode = INSN_CODE (insn);
8433
8434 /* Reject creating doloop insns. Combine should not be allowed
8435 to create these for a number of reasons:
8436 1) In a nested loop, if combine creates one of these in an
8437 outer loop and the register allocator happens to allocate ctr
8438 to the outer loop insn, then the inner loop can't use ctr.
8439 Inner loops ought to be more highly optimized.
8440 2) Combine often wants to create one of these from what was
8441 originally a three insn sequence, first combining the three
8442 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8443 allocated ctr, the splitter takes us back to the three insn
8444 sequence. It's better to stop combine at the two insn
8445 sequence.
8446 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8447 insns, the register allocator sometimes uses floating point
8448 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8449 jump insn and output reloads are not implemented for jumps,
8450 the ctrsi/ctrdi splitters need to handle all possible cases.
8451 That's a pain, and it gets to be seriously difficult when a
8452 splitter that runs after reload needs memory to transfer from
8453 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8454 for the difficult case. It's better to not create problems
8455 in the first place. */
8456 if (icode != CODE_FOR_nothing
8457 && (icode == CODE_FOR_bdz_si
8458 || icode == CODE_FOR_bdz_di
8459 || icode == CODE_FOR_bdnz_si
8460 || icode == CODE_FOR_bdnz_di
8461 || icode == CODE_FOR_bdztf_si
8462 || icode == CODE_FOR_bdztf_di
8463 || icode == CODE_FOR_bdnztf_si
8464 || icode == CODE_FOR_bdnztf_di))
8465 return false;
8466
8467 return true;
8468 }
8469
8470 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8471
8472 static GTY(()) rtx rs6000_tls_symbol;
8473 static rtx
8474 rs6000_tls_get_addr (void)
8475 {
8476 if (!rs6000_tls_symbol)
8477 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8478
8479 return rs6000_tls_symbol;
8480 }
8481
8482 /* Construct the SYMBOL_REF for TLS GOT references. */
8483
8484 static GTY(()) rtx rs6000_got_symbol;
8485 static rtx
8486 rs6000_got_sym (void)
8487 {
8488 if (!rs6000_got_symbol)
8489 {
8490 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8491 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8492 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8493 }
8494
8495 return rs6000_got_symbol;
8496 }
8497
8498 /* AIX Thread-Local Address support. */
8499
8500 static rtx
8501 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8502 {
8503 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8504 const char *name;
8505 char *tlsname;
8506
8507 name = XSTR (addr, 0);
8508 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8509 or the symbol will be in the TLS private data section. */
8510 if (name[strlen (name) - 1] != ']'
8511 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8512 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8513 {
8514 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8515 strcpy (tlsname, name);
8516 strcat (tlsname,
8517 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8518 tlsaddr = copy_rtx (addr);
8519 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8520 }
8521 else
8522 tlsaddr = addr;
8523
8524 /* Place addr into TOC constant pool. */
8525 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8526
8527 /* Output the TOC entry and create the MEM referencing the value. */
8528 if (constant_pool_expr_p (XEXP (sym, 0))
8529 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8530 {
8531 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8532 mem = gen_const_mem (Pmode, tocref);
8533 set_mem_alias_set (mem, get_TOC_alias_set ());
8534 }
8535 else
8536 return sym;
8537
8538 /* Use global-dynamic for local-dynamic. */
8539 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8540 || model == TLS_MODEL_LOCAL_DYNAMIC)
8541 {
8542 /* Create new TOC reference for @m symbol. */
8543 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8544 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8545 strcpy (tlsname, "*LCM");
8546 strcat (tlsname, name + 3);
8547 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8548 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8549 tocref = create_TOC_reference (modaddr, NULL_RTX);
8550 rtx modmem = gen_const_mem (Pmode, tocref);
8551 set_mem_alias_set (modmem, get_TOC_alias_set ());
8552
8553 rtx modreg = gen_reg_rtx (Pmode);
8554 emit_insn (gen_rtx_SET (modreg, modmem));
8555
8556 tmpreg = gen_reg_rtx (Pmode);
8557 emit_insn (gen_rtx_SET (tmpreg, mem));
8558
8559 dest = gen_reg_rtx (Pmode);
8560 if (TARGET_32BIT)
8561 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8562 else
8563 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8564 return dest;
8565 }
8566 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8567 else if (TARGET_32BIT)
8568 {
8569 tlsreg = gen_reg_rtx (SImode);
8570 emit_insn (gen_tls_get_tpointer (tlsreg));
8571 }
8572 else
8573 tlsreg = gen_rtx_REG (DImode, 13);
8574
8575 /* Load the TOC value into temporary register. */
8576 tmpreg = gen_reg_rtx (Pmode);
8577 emit_insn (gen_rtx_SET (tmpreg, mem));
8578 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8579 gen_rtx_MINUS (Pmode, addr, tlsreg));
8580
8581 /* Add TOC symbol value to TLS pointer. */
8582 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8583
8584 return dest;
8585 }
8586
8587 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8588 this (thread-local) address. */
8589
8590 static rtx
8591 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8592 {
8593 rtx dest, insn;
8594
8595 if (TARGET_XCOFF)
8596 return rs6000_legitimize_tls_address_aix (addr, model);
8597
8598 dest = gen_reg_rtx (Pmode);
8599 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8600 {
8601 rtx tlsreg;
8602
8603 if (TARGET_64BIT)
8604 {
8605 tlsreg = gen_rtx_REG (Pmode, 13);
8606 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8607 }
8608 else
8609 {
8610 tlsreg = gen_rtx_REG (Pmode, 2);
8611 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8612 }
8613 emit_insn (insn);
8614 }
8615 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8616 {
8617 rtx tlsreg, tmp;
8618
8619 tmp = gen_reg_rtx (Pmode);
8620 if (TARGET_64BIT)
8621 {
8622 tlsreg = gen_rtx_REG (Pmode, 13);
8623 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8624 }
8625 else
8626 {
8627 tlsreg = gen_rtx_REG (Pmode, 2);
8628 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8629 }
8630 emit_insn (insn);
8631 if (TARGET_64BIT)
8632 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8633 else
8634 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8635 emit_insn (insn);
8636 }
8637 else
8638 {
8639 rtx r3, got, tga, tmp1, tmp2, call_insn;
8640
8641 /* We currently use relocations like @got@tlsgd for tls, which
8642 means the linker will handle allocation of tls entries, placing
8643 them in the .got section. So use a pointer to the .got section,
8644 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8645 or to secondary GOT sections used by 32-bit -fPIC. */
8646 if (TARGET_64BIT)
8647 got = gen_rtx_REG (Pmode, 2);
8648 else
8649 {
8650 if (flag_pic == 1)
8651 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8652 else
8653 {
8654 rtx gsym = rs6000_got_sym ();
8655 got = gen_reg_rtx (Pmode);
8656 if (flag_pic == 0)
8657 rs6000_emit_move (got, gsym, Pmode);
8658 else
8659 {
8660 rtx mem, lab;
8661
8662 tmp1 = gen_reg_rtx (Pmode);
8663 tmp2 = gen_reg_rtx (Pmode);
8664 mem = gen_const_mem (Pmode, tmp1);
8665 lab = gen_label_rtx ();
8666 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8667 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8668 if (TARGET_LINK_STACK)
8669 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8670 emit_move_insn (tmp2, mem);
8671 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8672 set_unique_reg_note (last, REG_EQUAL, gsym);
8673 }
8674 }
8675 }
8676
8677 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8678 {
8679 tga = rs6000_tls_get_addr ();
8680 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8681 const0_rtx, Pmode);
8682
8683 r3 = gen_rtx_REG (Pmode, 3);
8684 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8685 {
8686 if (TARGET_64BIT)
8687 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
8688 else
8689 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
8690 }
8691 else if (DEFAULT_ABI == ABI_V4)
8692 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
8693 else
8694 gcc_unreachable ();
8695 call_insn = last_call_insn ();
8696 PATTERN (call_insn) = insn;
8697 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8698 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8699 pic_offset_table_rtx);
8700 }
8701 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8702 {
8703 tga = rs6000_tls_get_addr ();
8704 tmp1 = gen_reg_rtx (Pmode);
8705 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8706 const0_rtx, Pmode);
8707
8708 r3 = gen_rtx_REG (Pmode, 3);
8709 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
8710 {
8711 if (TARGET_64BIT)
8712 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
8713 else
8714 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
8715 }
8716 else if (DEFAULT_ABI == ABI_V4)
8717 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
8718 else
8719 gcc_unreachable ();
8720 call_insn = last_call_insn ();
8721 PATTERN (call_insn) = insn;
8722 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8723 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8724 pic_offset_table_rtx);
8725
8726 if (rs6000_tls_size == 16)
8727 {
8728 if (TARGET_64BIT)
8729 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8730 else
8731 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8732 }
8733 else if (rs6000_tls_size == 32)
8734 {
8735 tmp2 = gen_reg_rtx (Pmode);
8736 if (TARGET_64BIT)
8737 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8738 else
8739 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8740 emit_insn (insn);
8741 if (TARGET_64BIT)
8742 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8743 else
8744 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8745 }
8746 else
8747 {
8748 tmp2 = gen_reg_rtx (Pmode);
8749 if (TARGET_64BIT)
8750 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8751 else
8752 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8753 emit_insn (insn);
8754 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8755 }
8756 emit_insn (insn);
8757 }
8758 else
8759 {
8760 /* IE, or 64-bit offset LE. */
8761 tmp2 = gen_reg_rtx (Pmode);
8762 if (TARGET_64BIT)
8763 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8764 else
8765 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8766 emit_insn (insn);
8767 if (TARGET_64BIT)
8768 insn = gen_tls_tls_64 (dest, tmp2, addr);
8769 else
8770 insn = gen_tls_tls_32 (dest, tmp2, addr);
8771 emit_insn (insn);
8772 }
8773 }
8774
8775 return dest;
8776 }
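
/* For reference, typical ELF code these models expand to (a sketch for
   the 64-bit medium code model; the registers and relocations shown are
   illustrative, not taken from this file):

     global-dynamic:
       addis 3,2,x@got@tlsgd@ha
       addi 3,3,x@got@tlsgd@l
       bl __tls_get_addr(x@tlsgd)
       nop

     initial-exec:
       addis 9,2,x@got@tprel@ha
       ld 9,x@got@tprel@l(9)
       add 9,9,x@tls

   Local-dynamic makes one __tls_get_addr call per module and then adds
   per-symbol @dtprel offsets to its result.  */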
8777
8778 /* Only create the global variable for the stack protect guard if we are using
8779 the global flavor of that guard. */
8780 static tree
8781 rs6000_init_stack_protect_guard (void)
8782 {
8783 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8784 return default_stack_protect_guard ();
8785
8786 return NULL_TREE;
8787 }
8788
8789 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8790
8791 static bool
8792 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8793 {
8794 if (GET_CODE (x) == HIGH
8795 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8796 return true;
8797
8798 /* A TLS symbol in the TOC cannot contain a sum. */
8799 if (GET_CODE (x) == CONST
8800 && GET_CODE (XEXP (x, 0)) == PLUS
8801 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8802 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8803 return true;
8804
8805 /* Do not place an ELF TLS symbol in the constant pool. */
8806 return TARGET_ELF && tls_referenced_p (x);
8807 }
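
/* As a hypothetical illustration, given "__thread int t;" the RTL
   (const (plus (symbol_ref "t") (const_int 4))) carries a TLS model on
   the SYMBOL_REF, so the test above keeps it out of the constant pool
   and it is legitimized by the TLS code instead.  */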
8808
8809 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8810 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8811 can be addressed relative to the toc pointer. */
8812
8813 static bool
8814 use_toc_relative_ref (rtx sym, machine_mode mode)
8815 {
8816 return ((constant_pool_expr_p (sym)
8817 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8818 get_pool_mode (sym)))
8819 || (TARGET_CMODEL == CMODEL_MEDIUM
8820 && SYMBOL_REF_LOCAL_P (sym)
8821 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8822 }
8823
8824 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8825 replace the input X, or the original X if no replacement is called for.
8826 The output parameter *WIN is 1 if the calling macro should goto WIN,
8827 0 if it should not.
8828
8829 For RS/6000, we wish to handle large displacements off a base
8830 register by splitting the addend across an addis and the mem insn.
8831 This cuts the number of extra insns needed from 3 to 1.
8832
8833 On Darwin, we use this to generate code for floating point constants.
8834 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8835 The Darwin code is inside #if TARGET_MACHO because only then are the
8836 machopic_* functions defined. */
8837 static rtx
8838 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8839 int opnum, int type,
8840 int ind_levels ATTRIBUTE_UNUSED, int *win)
8841 {
8842 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8843 bool quad_offset_p = mode_supports_dq_form (mode);
8844
8845 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8846 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8847 if (reg_offset_p
8848 && opnum == 1
8849 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8850 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8851 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8852 && TARGET_P9_VECTOR)
8853 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8854 && TARGET_P9_VECTOR)))
8855 reg_offset_p = false;
8856
8857 /* We must recognize output that we have already generated ourselves. */
8858 if (GET_CODE (x) == PLUS
8859 && GET_CODE (XEXP (x, 0)) == PLUS
8860 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8861 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8862 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8863 {
8864 if (TARGET_DEBUG_ADDR)
8865 {
8866 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8867 debug_rtx (x);
8868 }
8869 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8870 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8871 opnum, (enum reload_type) type);
8872 *win = 1;
8873 return x;
8874 }
8875
8876 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8877 if (GET_CODE (x) == LO_SUM
8878 && GET_CODE (XEXP (x, 0)) == HIGH)
8879 {
8880 if (TARGET_DEBUG_ADDR)
8881 {
8882 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8883 debug_rtx (x);
8884 }
8885 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8886 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8887 opnum, (enum reload_type) type);
8888 *win = 1;
8889 return x;
8890 }
8891
8892 #if TARGET_MACHO
8893 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8894 && GET_CODE (x) == LO_SUM
8895 && GET_CODE (XEXP (x, 0)) == PLUS
8896 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8897 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8898 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8899 && machopic_operand_p (XEXP (x, 1)))
8900 {
8901 /* Result of previous invocation of this function on Darwin
8902 floating point constant. */
8903 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8904 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8905 opnum, (enum reload_type) type);
8906 *win = 1;
8907 return x;
8908 }
8909 #endif
8910
8911 if (TARGET_CMODEL != CMODEL_SMALL
8912 && reg_offset_p
8913 && !quad_offset_p
8914 && small_toc_ref (x, VOIDmode))
8915 {
8916 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8917 x = gen_rtx_LO_SUM (Pmode, hi, x);
8918 if (TARGET_DEBUG_ADDR)
8919 {
8920 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8921 debug_rtx (x);
8922 }
8923 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8924 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8925 opnum, (enum reload_type) type);
8926 *win = 1;
8927 return x;
8928 }
8929
8930 if (GET_CODE (x) == PLUS
8931 && REG_P (XEXP (x, 0))
8932 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
8933 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8934 && CONST_INT_P (XEXP (x, 1))
8935 && reg_offset_p
8936 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8937 {
8938 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8939 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8940 HOST_WIDE_INT high
8941 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8942
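/* A worked example of the split: for val = 0x12345, low becomes
   0x2345 and high becomes 0x10000, so an addis covers 0x10000 while
   the memory insn keeps the 0x2345 displacement.  For val = 0x9000,
   low is sign-extended to -0x7000 and high becomes 0x10000, keeping
   low inside the signed 16-bit range of a d-form offset.  */
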
8943 /* Check for 32-bit overflow or quad addresses with one of the
8944 four least significant bits set. */
8945 if (high + low != val
8946 || (quad_offset_p && (low & 0xf)))
8947 {
8948 *win = 0;
8949 return x;
8950 }
8951
8952 /* Reload the high part into a base reg; leave the low part
8953 in the mem directly. */
8954
8955 x = gen_rtx_PLUS (GET_MODE (x),
8956 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
8957 GEN_INT (high)),
8958 GEN_INT (low));
8959
8960 if (TARGET_DEBUG_ADDR)
8961 {
8962 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
8963 debug_rtx (x);
8964 }
8965 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8966 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8967 opnum, (enum reload_type) type);
8968 *win = 1;
8969 return x;
8970 }
8971
8972 if (GET_CODE (x) == SYMBOL_REF
8973 && reg_offset_p
8974 && !quad_offset_p
8975 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
8976 #if TARGET_MACHO
8977 && DEFAULT_ABI == ABI_DARWIN
8978 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
8979 && machopic_symbol_defined_p (x)
8980 #else
8981 && DEFAULT_ABI == ABI_V4
8982 && !flag_pic
8983 #endif
8984 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
8985 The same goes for DImode without 64-bit gprs and DFmode and DDmode
8986 without fprs.
8987 ??? Assume floating point reg based on mode? This assumption is
8988 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
8989 where reload ends up doing a DFmode load of a constant from
8990 mem using two gprs. Unfortunately, at this point reload
8991 hasn't yet selected regs so poking around in reload data
8992 won't help and even if we could figure out the regs reliably,
8993 we'd still want to allow this transformation when the mem is
8994 naturally aligned. Since we say the address is good here, we
8995 can't disable offsets from LO_SUMs in mem_operand_gpr.
8996 FIXME: Allow offset from lo_sum for other modes too, when
8997 mem is sufficiently aligned.
8998
8999 Also disallow this if the type can go in VMX/Altivec registers, since
9000 those registers do not have d-form (reg+offset) address modes. */
9001 && !reg_addr[mode].scalar_in_vmx_p
9002 && mode != TFmode
9003 && mode != TDmode
9004 && mode != IFmode
9005 && mode != KFmode
9006 && (mode != TImode || !TARGET_VSX)
9007 && mode != PTImode
9008 && (mode != DImode || TARGET_POWERPC64)
9009 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9010 || TARGET_HARD_FLOAT))
9011 {
9012 #if TARGET_MACHO
9013 if (flag_pic)
9014 {
9015 rtx offset = machopic_gen_offset (x);
9016 x = gen_rtx_LO_SUM (GET_MODE (x),
9017 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9018 gen_rtx_HIGH (Pmode, offset)), offset);
9019 }
9020 else
9021 #endif
9022 x = gen_rtx_LO_SUM (GET_MODE (x),
9023 gen_rtx_HIGH (Pmode, x), x);
9024
9025 if (TARGET_DEBUG_ADDR)
9026 {
9027 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9028 debug_rtx (x);
9029 }
9030 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9031 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9032 opnum, (enum reload_type) type);
9033 *win = 1;
9034 return x;
9035 }
9036
9037 /* Reload an offset address wrapped by an AND that represents the
9038 masking of the lower bits. Strip the outer AND and let reload
9039 convert the offset address into an indirect address. For VSX,
9040 force reload to create the address with an AND in a separate
9041 register, because we can't guarantee an altivec register will
9042 be used. */
9043 if (VECTOR_MEM_ALTIVEC_P (mode)
9044 && GET_CODE (x) == AND
9045 && GET_CODE (XEXP (x, 0)) == PLUS
9046 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9047 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9048 && GET_CODE (XEXP (x, 1)) == CONST_INT
9049 && INTVAL (XEXP (x, 1)) == -16)
9050 {
9051 x = XEXP (x, 0);
9052 *win = 1;
9053 return x;
9054 }
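
/* For instance (a sketch), an lvx-style address can reach reload as

     (and:SI (plus:SI (reg:SI 3) (const_int 16)) (const_int -16))

   where the AND models the hardware ignoring the low four address
   bits; stripping it leaves an ordinary offset address behind.  */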
9055
9056 if (TARGET_TOC
9057 && reg_offset_p
9058 && !quad_offset_p
9059 && GET_CODE (x) == SYMBOL_REF
9060 && use_toc_relative_ref (x, mode))
9061 {
9062 x = create_TOC_reference (x, NULL_RTX);
9063 if (TARGET_CMODEL != CMODEL_SMALL)
9064 {
9065 if (TARGET_DEBUG_ADDR)
9066 {
9067 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9068 debug_rtx (x);
9069 }
9070 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9071 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9072 opnum, (enum reload_type) type);
9073 }
9074 *win = 1;
9075 return x;
9076 }
9077 *win = 0;
9078 return x;
9079 }
9080
9081 /* Debug version of rs6000_legitimize_reload_address. */
9082 static rtx
9083 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9084 int opnum, int type,
9085 int ind_levels, int *win)
9086 {
9087 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9088 ind_levels, win);
9089 fprintf (stderr,
9090 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9091 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9092 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9093 debug_rtx (x);
9094
9095 if (x == ret)
9096 fprintf (stderr, "Same address returned\n");
9097 else if (!ret)
9098 fprintf (stderr, "NULL returned\n");
9099 else
9100 {
9101 fprintf (stderr, "New address:\n");
9102 debug_rtx (ret);
9103 }
9104
9105 return ret;
9106 }
9107
9108 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9109 that is a valid memory address for an instruction.
9110 The MODE argument is the machine mode for the MEM expression
9111 that wants to use this address.
9112
9113 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
9114 refers to a constant pool entry of an address (or the sum of it
9115 plus a constant), a short (16-bit signed) constant plus a register,
9116 the sum of two registers, or a register indirect, possibly with an
9117 auto-increment. For DFmode, DDmode and DImode with a constant plus
9118 register, we must ensure that both words are addressable or PowerPC64
9119 with offset word aligned.
9120
9121 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9122 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9123 because adjacent memory cells are accessed by adding word-sized offsets
9124 during assembly output. */
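
/* Illustrative RTL shapes accepted below, assuming suitable modes
   (a sketch, not an exhaustive list):

     (reg:DI 9)                           register indirect
     (plus:DI (reg:DI 9) (const_int 16))  register + 16-bit offset
     (plus:DI (reg:DI 9) (reg:DI 10))     register + register
     (pre_inc:DI (reg:DI 9))              auto-increment with -mupdate

   TOC-relative SYMBOL_REFs are recognized separately through
   legitimate_constant_pool_address_p.  */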
9125 static bool
9126 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9127 {
9128 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9129 bool quad_offset_p = mode_supports_dq_form (mode);
9130
9131 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9132 if (VECTOR_MEM_ALTIVEC_P (mode)
9133 && GET_CODE (x) == AND
9134 && GET_CODE (XEXP (x, 1)) == CONST_INT
9135 && INTVAL (XEXP (x, 1)) == -16)
9136 x = XEXP (x, 0);
9137
9138 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9139 return 0;
9140 if (legitimate_indirect_address_p (x, reg_ok_strict))
9141 return 1;
9142 if (TARGET_UPDATE
9143 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9144 && mode_supports_pre_incdec_p (mode)
9145 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9146 return 1;
9147 /* Handle restricted vector d-form offsets in ISA 3.0. */
9148 if (quad_offset_p)
9149 {
9150 if (quad_address_p (x, mode, reg_ok_strict))
9151 return 1;
9152 }
9153 else if (virtual_stack_registers_memory_p (x))
9154 return 1;
9155
9156 else if (reg_offset_p)
9157 {
9158 if (legitimate_small_data_p (mode, x))
9159 return 1;
9160 if (legitimate_constant_pool_address_p (x, mode,
9161 reg_ok_strict || lra_in_progress))
9162 return 1;
9163 }
9164
9165 /* For TImode, if we have TImode in VSX registers, only allow register
9166 indirect addresses. This will allow the values to go in either GPRs
9167 or VSX registers without reloading. The vector types would tend to
9168 go into VSX registers, so we allow REG+REG, while TImode seems
9169 somewhat split, in that some uses are GPR based, and some VSX based. */
9170 /* FIXME: We could loosen this by changing the following to
9171 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9172 but currently we cannot allow REG+REG addressing for TImode. See
9173 PR72827 for complete details on how this ends up hoodwinking DSE. */
9174 if (mode == TImode && TARGET_VSX)
9175 return 0;
9176 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9177 if (! reg_ok_strict
9178 && reg_offset_p
9179 && GET_CODE (x) == PLUS
9180 && GET_CODE (XEXP (x, 0)) == REG
9181 && (XEXP (x, 0) == virtual_stack_vars_rtx
9182 || XEXP (x, 0) == arg_pointer_rtx)
9183 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9184 return 1;
9185 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9186 return 1;
9187 if (!FLOAT128_2REG_P (mode)
9188 && (TARGET_HARD_FLOAT
9189 || TARGET_POWERPC64
9190 || (mode != DFmode && mode != DDmode))
9191 && (TARGET_POWERPC64 || mode != DImode)
9192 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9193 && mode != PTImode
9194 && !avoiding_indexed_address_p (mode)
9195 && legitimate_indexed_address_p (x, reg_ok_strict))
9196 return 1;
9197 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9198 && mode_supports_pre_modify_p (mode)
9199 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9200 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9201 reg_ok_strict, false)
9202 || (!avoiding_indexed_address_p (mode)
9203 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9204 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9205 return 1;
9206 if (reg_offset_p && !quad_offset_p
9207 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9208 return 1;
9209 return 0;
9210 }
9211
9212 /* Debug version of rs6000_legitimate_address_p. */
9213 static bool
9214 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9215 bool reg_ok_strict)
9216 {
9217 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9218 fprintf (stderr,
9219 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9220 "strict = %d, reload = %s, code = %s\n",
9221 ret ? "true" : "false",
9222 GET_MODE_NAME (mode),
9223 reg_ok_strict,
9224 (reload_completed ? "after" : "before"),
9225 GET_RTX_NAME (GET_CODE (x)));
9226 debug_rtx (x);
9227
9228 return ret;
9229 }
9230
9231 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9232
9233 static bool
9234 rs6000_mode_dependent_address_p (const_rtx addr,
9235 addr_space_t as ATTRIBUTE_UNUSED)
9236 {
9237 return rs6000_mode_dependent_address_ptr (addr);
9238 }
9239
9240 /* Go to LABEL if ADDR (a legitimate address expression)
9241 has an effect that depends on the machine mode it is used for.
9242
9243 On the RS/6000 this is true of all integral offsets (since AltiVec
9244 and VSX modes don't allow them) or is a pre-increment or decrement.
9245
9246 ??? Except that due to conceptual problems in offsettable_address_p
9247 we can't really report the problems of integral offsets. So leave
9248 this assuming that the adjustable offset must be valid for the
9249 sub-words of a TFmode operand, which is what we had before. */
9250
9251 static bool
9252 rs6000_mode_dependent_address (const_rtx addr)
9253 {
9254 switch (GET_CODE (addr))
9255 {
9256 case PLUS:
9257 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9258 is considered a legitimate address before reload, so there
9259 are no offset restrictions in that case. Note that this
9260 condition is safe in strict mode because any address involving
9261 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9262 been rejected as illegitimate. */
9263 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9264 && XEXP (addr, 0) != arg_pointer_rtx
9265 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9266 {
9267 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
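/* For example, a 16-byte access on a 32-bit target touches its last
   word at offset VAL + 12, so the address is mode-dependent unless
   VAL + 12 still fits the signed 16-bit displacement; the bound
   below encodes exactly that (8 on 64-bit, 12 on 32-bit).  */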
9268 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9269 }
9270 break;
9271
9272 case LO_SUM:
9273 /* Anything in the constant pool is sufficiently aligned that
9274 all bytes have the same high part address. */
9275 return !legitimate_constant_pool_address_p (addr, QImode, false);
9276
9277 /* Auto-increment cases are now treated generically in recog.c. */
9278 case PRE_MODIFY:
9279 return TARGET_UPDATE;
9280
9281 /* AND is only allowed in Altivec loads. */
9282 case AND:
9283 return true;
9284
9285 default:
9286 break;
9287 }
9288
9289 return false;
9290 }
9291
9292 /* Debug version of rs6000_mode_dependent_address. */
9293 static bool
9294 rs6000_debug_mode_dependent_address (const_rtx addr)
9295 {
9296 bool ret = rs6000_mode_dependent_address (addr);
9297
9298 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9299 ret ? "true" : "false");
9300 debug_rtx (addr);
9301
9302 return ret;
9303 }
9304
9305 /* Implement FIND_BASE_TERM. */
9306
9307 rtx
9308 rs6000_find_base_term (rtx op)
9309 {
9310 rtx base;
9311
9312 base = op;
9313 if (GET_CODE (base) == CONST)
9314 base = XEXP (base, 0);
9315 if (GET_CODE (base) == PLUS)
9316 base = XEXP (base, 0);
9317 if (GET_CODE (base) == UNSPEC)
9318 switch (XINT (base, 1))
9319 {
9320 case UNSPEC_TOCREL:
9321 case UNSPEC_MACHOPIC_OFFSET:
9322 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9323 for aliasing purposes. */
9324 return XVECEXP (base, 0, 0);
9325 }
9326
9327 return op;
9328 }
9329
9330 /* More elaborate version of recog's offsettable_memref_p predicate
9331 that works around the ??? note of rs6000_mode_dependent_address.
9332 In particular it accepts
9333
9334 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9335
9336 in 32-bit mode, that the recog predicate rejects. */
9337
9338 static bool
9339 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9340 {
9341 bool worst_case;
9342
9343 if (!MEM_P (op))
9344 return false;
9345
9346 /* First mimic offsettable_memref_p. */
9347 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9348 return true;
9349
9350 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9351 the latter predicate knows nothing about the mode of the memory
9352 reference and, therefore, assumes that it is the largest supported
9353 mode (TFmode). As a consequence, legitimate offsettable memory
9354 references are rejected. rs6000_legitimate_offset_address_p contains
9355 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9356 at least with a little bit of help here given that we know the
9357 actual registers used. */
9358 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9359 || GET_MODE_SIZE (reg_mode) == 4);
9360 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9361 strict, worst_case);
9362 }
9363
9364 /* Determine the reassociation width to be used in reassociate_bb.
9365 This takes into account how many parallel operations we
9366 can actually do of a given type, and also the latency.
9367 P8:
9368 int add/sub 6/cycle
9369 mul 2/cycle
9370 vect add/sub/mul 2/cycle
9371 fp add/sub/mul 2/cycle
9372 dfp 1/cycle
9373 */
9374
9375 static int
9376 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9377 machine_mode mode)
9378 {
9379 switch (rs6000_tune)
9380 {
9381 case PROCESSOR_POWER8:
9382 case PROCESSOR_POWER9:
9383 if (DECIMAL_FLOAT_MODE_P (mode))
9384 return 1;
9385 if (VECTOR_MODE_P (mode))
9386 return 4;
9387 if (INTEGRAL_MODE_P (mode))
9388 return 1;
9389 if (FLOAT_MODE_P (mode))
9390 return 4;
9391 break;
9392 default:
9393 break;
9394 }
9395 return 1;
9396 }
9397
9398 /* Change register usage conditional on target flags. */
9399 static void
9400 rs6000_conditional_register_usage (void)
9401 {
9402 int i;
9403
9404 if (TARGET_DEBUG_TARGET)
9405 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9406
9407 /* Set MQ register fixed (already call_used) so that it will not be
9408 allocated. */
9409 fixed_regs[64] = 1;
9410
9411 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9412 if (TARGET_64BIT)
9413 fixed_regs[13] = call_used_regs[13]
9414 = call_really_used_regs[13] = 1;
9415
9416 /* Conditionally disable FPRs. */
9417 if (TARGET_SOFT_FLOAT)
9418 for (i = 32; i < 64; i++)
9419 fixed_regs[i] = call_used_regs[i]
9420 = call_really_used_regs[i] = 1;
9421
9422 /* The TOC register is not killed across calls in a way that is
9423 visible to the compiler. */
9424 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9425 call_really_used_regs[2] = 0;
9426
9427 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9428 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9429
9430 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9431 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9432 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9433 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9434
9435 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9436 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9437 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9438 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9439
9440 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9441 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9442 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9443
9444 if (!TARGET_ALTIVEC && !TARGET_VSX)
9445 {
9446 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9447 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9448 call_really_used_regs[VRSAVE_REGNO] = 1;
9449 }
9450
9451 if (TARGET_ALTIVEC || TARGET_VSX)
9452 global_regs[VSCR_REGNO] = 1;
9453
9454 if (TARGET_ALTIVEC_ABI)
9455 {
9456 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9457 call_used_regs[i] = call_really_used_regs[i] = 1;
9458
9459 /* AIX reserves VR20:31 in non-extended ABI mode. */
9460 if (TARGET_XCOFF)
9461 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9462 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9463 }
9464 }
9465
9466 \f
9467 /* Output insns to set DEST equal to the constant SOURCE as a series of
9468 lis, ori and shl instructions and return TRUE. */
9469
9470 bool
9471 rs6000_emit_set_const (rtx dest, rtx source)
9472 {
9473 machine_mode mode = GET_MODE (dest);
9474 rtx temp, set;
9475 rtx_insn *insn;
9476 HOST_WIDE_INT c;
9477
9478 gcc_checking_assert (CONST_INT_P (source));
9479 c = INTVAL (source);
9480 switch (mode)
9481 {
9482 case E_QImode:
9483 case E_HImode:
9484 emit_insn (gen_rtx_SET (dest, source));
9485 return true;
9486
9487 case E_SImode:
9488 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9489
9490 emit_insn (gen_rtx_SET (copy_rtx (temp),
9491 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9492 emit_insn (gen_rtx_SET (dest,
9493 gen_rtx_IOR (SImode, copy_rtx (temp),
9494 GEN_INT (c & 0xffff))));
9495 break;
9496
9497 case E_DImode:
9498 if (!TARGET_POWERPC64)
9499 {
9500 rtx hi, lo;
9501
9502 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9503 DImode);
9504 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9505 DImode);
9506 emit_move_insn (hi, GEN_INT (c >> 32));
9507 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9508 emit_move_insn (lo, GEN_INT (c));
9509 }
9510 else
9511 rs6000_emit_set_long_const (dest, c);
9512 break;
9513
9514 default:
9515 gcc_unreachable ();
9516 }
9517
9518 insn = get_last_insn ();
9519 set = single_set (insn);
9520 if (! CONSTANT_P (SET_SRC (set)))
9521 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9522
9523 return true;
9524 }
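
/* For instance, SImode c = 0x12345678 maps to (a sketch):

     lis rD,0x1234        # rD = 0x12340000
     ori rD,rD,0x5678     # rD |= 0x5678

   matching the two SETs emitted for E_SImode above.  */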
9525
9526 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9527 Output insns to set DEST equal to the constant C as a series of
9528 lis, ori and shl instructions. */
9529
9530 static void
9531 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9532 {
9533 rtx temp;
9534 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9535
9536 ud1 = c & 0xffff;
9537 c = c >> 16;
9538 ud2 = c & 0xffff;
9539 c = c >> 16;
9540 ud3 = c & 0xffff;
9541 c = c >> 16;
9542 ud4 = c & 0xffff;
9543
9544 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9545 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9546 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9547
9548 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9549 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9550 {
9551 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9552
9553 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9554 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9555 if (ud1 != 0)
9556 emit_move_insn (dest,
9557 gen_rtx_IOR (DImode, copy_rtx (temp),
9558 GEN_INT (ud1)));
9559 }
9560 else if (ud3 == 0 && ud4 == 0)
9561 {
9562 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9563
9564 gcc_assert (ud2 & 0x8000);
9565 emit_move_insn (copy_rtx (temp),
9566 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9567 if (ud1 != 0)
9568 emit_move_insn (copy_rtx (temp),
9569 gen_rtx_IOR (DImode, copy_rtx (temp),
9570 GEN_INT (ud1)));
9571 emit_move_insn (dest,
9572 gen_rtx_ZERO_EXTEND (DImode,
9573 gen_lowpart (SImode,
9574 copy_rtx (temp))));
9575 }
9576 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9577 || (ud4 == 0 && ! (ud3 & 0x8000)))
9578 {
9579 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9580
9581 emit_move_insn (copy_rtx (temp),
9582 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9583 if (ud2 != 0)
9584 emit_move_insn (copy_rtx (temp),
9585 gen_rtx_IOR (DImode, copy_rtx (temp),
9586 GEN_INT (ud2)));
9587 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9588 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9589 GEN_INT (16)));
9590 if (ud1 != 0)
9591 emit_move_insn (dest,
9592 gen_rtx_IOR (DImode, copy_rtx (temp),
9593 GEN_INT (ud1)));
9594 }
9595 else
9596 {
9597 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9598
9599 emit_move_insn (copy_rtx (temp),
9600 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9601 if (ud3 != 0)
9602 emit_move_insn (copy_rtx (temp),
9603 gen_rtx_IOR (DImode, copy_rtx (temp),
9604 GEN_INT (ud3)));
9605
9606 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9607 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9608 GEN_INT (32)));
9609 if (ud2 != 0)
9610 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9611 gen_rtx_IOR (DImode, copy_rtx (temp),
9612 GEN_INT (ud2 << 16)));
9613 if (ud1 != 0)
9614 emit_move_insn (dest,
9615 gen_rtx_IOR (DImode, copy_rtx (temp),
9616 GEN_INT (ud1)));
9617 }
9618 }
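
/* In the worst case, where all four halfwords are needed, the code
   above maps to five insns, e.g. for 0x123456789abcdef0 (a sketch):

     lis rD,0x1234
     ori rD,rD,0x5678
     sldi rD,rD,32
     oris rD,rD,0x9abc
     ori rD,rD,0xdef0  */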
9619
9620 /* Helper for the following. Get rid of [r+r] memory refs
9621 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9622
9623 static void
9624 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9625 {
9626 if (GET_CODE (operands[0]) == MEM
9627 && GET_CODE (XEXP (operands[0], 0)) != REG
9628 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9629 GET_MODE (operands[0]), false))
9630 operands[0]
9631 = replace_equiv_address (operands[0],
9632 copy_addr_to_reg (XEXP (operands[0], 0)));
9633
9634 if (GET_CODE (operands[1]) == MEM
9635 && GET_CODE (XEXP (operands[1], 0)) != REG
9636 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9637 GET_MODE (operands[1]), false))
9638 operands[1]
9639 = replace_equiv_address (operands[1],
9640 copy_addr_to_reg (XEXP (operands[1], 0)));
9641 }
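
/* E.g. a TImode (mem:TI (plus:DI (reg 3) (reg 4))) is rewritten so
   the sum is first computed into a fresh pseudo, leaving a plain
   (mem:TI (reg tmp)) whose address the multi-word move patterns can
   safely offset.  */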
9642
9643 /* Generate a vector of constants to permute MODE for a little-endian
9644 storage operation by swapping the two halves of a vector. */
9645 static rtvec
9646 rs6000_const_vec (machine_mode mode)
9647 {
9648 int i, subparts;
9649 rtvec v;
9650
9651 switch (mode)
9652 {
9653 case E_V1TImode:
9654 subparts = 1;
9655 break;
9656 case E_V2DFmode:
9657 case E_V2DImode:
9658 subparts = 2;
9659 break;
9660 case E_V4SFmode:
9661 case E_V4SImode:
9662 subparts = 4;
9663 break;
9664 case E_V8HImode:
9665 subparts = 8;
9666 break;
9667 case E_V16QImode:
9668 subparts = 16;
9669 break;
9670 default:
9671 gcc_unreachable ();
9672 }
9673
9674 v = rtvec_alloc (subparts);
9675
9676 for (i = 0; i < subparts / 2; ++i)
9677 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9678 for (i = subparts / 2; i < subparts; ++i)
9679 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9680
9681 return v;
9682 }
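
/* For V4SImode the permutation above is {2, 3, 0, 1}; for V2DImode
   it is {1, 0}, i.e. the two doubleword halves are swapped.  */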
9683
9684 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9685 store operation. */
9686 void
9687 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9688 {
9689 /* Scalar permutations are easier to express in integer modes than
9690 floating-point modes, so cast them here. We use V1TImode instead
9691 of TImode to ensure that the values don't go through GPRs. */
9692 if (FLOAT128_VECTOR_P (mode))
9693 {
9694 dest = gen_lowpart (V1TImode, dest);
9695 source = gen_lowpart (V1TImode, source);
9696 mode = V1TImode;
9697 }
9698
9699 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9700 scalar. */
9701 if (mode == TImode || mode == V1TImode)
9702 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9703 GEN_INT (64))));
9704 else
9705 {
9706 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9707 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9708 }
9709 }
9710
9711 /* Emit a little-endian load from vector memory location SOURCE to VSX
9712 register DEST in mode MODE. The load is done with two permuting
9713 insns that represent an lxvd2x and an xxpermdi. */
9714 void
9715 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9716 {
9717 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9718 V1TImode). */
9719 if (mode == TImode || mode == V1TImode)
9720 {
9721 mode = V2DImode;
9722 dest = gen_lowpart (V2DImode, dest);
9723 source = adjust_address (source, V2DImode, 0);
9724 }
9725
9726 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9727 rs6000_emit_le_vsx_permute (tmp, source, mode);
9728 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9729 }
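
/* The two permutes correspond to the usual little-endian pair
   (a sketch of the generated code):

     lxvd2x vsD,0,rA          # load with doublewords swapped
     xxpermdi vsD,vsD,vsD,2   # swap them back into element order  */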
9730
9731 /* Emit a little-endian store to vector memory location DEST from VSX
9732 register SOURCE in mode MODE. The store is done with two permuting
9733 insns that represent an xxpermdi and an stxvd2x. */
9734 void
9735 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9736 {
9737 /* This should never be called during or after LRA, because it does
9738 not re-permute the source register. It is intended only for use
9739 during expand. */
9740 gcc_assert (!lra_in_progress && !reload_completed);
9741
9742 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9743 V1TImode). */
9744 if (mode == TImode || mode == V1TImode)
9745 {
9746 mode = V2DImode;
9747 dest = adjust_address (dest, V2DImode, 0);
9748 source = gen_lowpart (V2DImode, source);
9749 }
9750
9751 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9752 rs6000_emit_le_vsx_permute (tmp, source, mode);
9753 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9754 }
9755
9756 /* Emit a sequence representing a little-endian VSX load or store,
9757 moving data from SOURCE to DEST in mode MODE. This is done
9758 separately from rs6000_emit_move to ensure it is called only
9759 during expand. LE VSX loads and stores introduced later are
9760 handled with a split. The expand-time RTL generation allows
9761 us to optimize away redundant pairs of register-permutes. */
9762 void
9763 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9764 {
9765 gcc_assert (!BYTES_BIG_ENDIAN
9766 && VECTOR_MEM_VSX_P (mode)
9767 && !TARGET_P9_VECTOR
9768 && !gpr_or_gpr_p (dest, source)
9769 && (MEM_P (source) ^ MEM_P (dest)));
9770
9771 if (MEM_P (source))
9772 {
9773 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9774 rs6000_emit_le_vsx_load (dest, source, mode);
9775 }
9776 else
9777 {
9778 if (!REG_P (source))
9779 source = force_reg (mode, source);
9780 rs6000_emit_le_vsx_store (dest, source, mode);
9781 }
9782 }
9783
9784 /* Return whether an SFmode or SImode move can be done without converting
9785 one mode to another. This arises when we have:
9786
9787 (SUBREG:SF (REG:SI ...))
9788 (SUBREG:SI (REG:SF ...))
9789
9790 and one of the values is in a floating point/vector register, where SFmode
9791 scalars are stored in DFmode format. */
9792
9793 bool
9794 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9795 {
9796 if (TARGET_ALLOW_SF_SUBREG)
9797 return true;
9798
9799 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9800 return true;
9801
9802 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9803 return true;
9804
9805 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9806 if (SUBREG_P (dest))
9807 {
9808 rtx dest_subreg = SUBREG_REG (dest);
9809 rtx src_subreg = SUBREG_REG (src);
9810 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9811 }
9812
9813 return false;
9814 }
9815
9816
9817 /* Helper function to change moves with:
9818
9819 (SUBREG:SF (REG:SI)) and
9820 (SUBREG:SI (REG:SF))
9821
9822 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9823 values are stored as DFmode values in the VSX registers. We need to convert
9824 the bits before we can use a direct move or operate on the bits in the
9825 vector register as an integer type.
9826
9827 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9828
9829 static bool
9830 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9831 {
9832 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9833 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9834 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9835 {
9836 rtx inner_source = SUBREG_REG (source);
9837 machine_mode inner_mode = GET_MODE (inner_source);
9838
9839 if (mode == SImode && inner_mode == SFmode)
9840 {
9841 emit_insn (gen_movsi_from_sf (dest, inner_source));
9842 return true;
9843 }
9844
9845 if (mode == SFmode && inner_mode == SImode)
9846 {
9847 emit_insn (gen_movsf_from_si (dest, inner_source));
9848 return true;
9849 }
9850 }
9851
9852 return false;
9853 }
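
/* A hypothetical illustration: a move written as

     (set (reg:SI 3) (subreg:SI (reg:SF 32) 0))

   cannot be a plain copy because the SFmode value is kept in the VSX
   register in DFmode format; gen_movsi_from_sf emits the insn that
   first converts the bits to single-precision layout.  */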
9854
9855 /* Emit a move from SOURCE to DEST in mode MODE. */
9856 void
9857 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9858 {
9859 rtx operands[2];
9860 operands[0] = dest;
9861 operands[1] = source;
9862
9863 if (TARGET_DEBUG_ADDR)
9864 {
9865 fprintf (stderr,
9866 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9867 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9868 GET_MODE_NAME (mode),
9869 lra_in_progress,
9870 reload_completed,
9871 can_create_pseudo_p ());
9872 debug_rtx (dest);
9873 fprintf (stderr, "source:\n");
9874 debug_rtx (source);
9875 }
9876
9877 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
9878 if (CONST_WIDE_INT_P (operands[1])
9879 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9880 {
9881 /* This should be fixed with the introduction of CONST_WIDE_INT. */
9882 gcc_unreachable ();
9883 }
9884
9885 #ifdef HAVE_AS_GNU_ATTRIBUTE
9886 /* If we use a long double type, set the flags in .gnu_attribute that say
9887 what the long double type is. This is to allow the linker's warning
9888 message for the wrong long double to be useful, even if the function does
8889 not do a call (for example, doing a 128-bit add on power9 if the long
8890 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
8891 are used and they are not the default long double type. */
9892 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9893 {
9894 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9895 rs6000_passes_float = rs6000_passes_long_double = true;
9896
9897 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9898 rs6000_passes_float = rs6000_passes_long_double = true;
9899 }
9900 #endif
9901
9902 /* See if we need to special case SImode/SFmode SUBREG moves. */
9903 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9904 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9905 return;
9906
9907 /* Check if GCC is setting up a block move that will end up using FP
9908 registers as temporaries. We must make sure this is acceptable. */
9909 if (GET_CODE (operands[0]) == MEM
9910 && GET_CODE (operands[1]) == MEM
9911 && mode == DImode
9912 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9913 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9914 && ! (rs6000_slow_unaligned_access (SImode,
9915 (MEM_ALIGN (operands[0]) > 32
9916 ? 32 : MEM_ALIGN (operands[0])))
9917 || rs6000_slow_unaligned_access (SImode,
9918 (MEM_ALIGN (operands[1]) > 32
9919 ? 32 : MEM_ALIGN (operands[1]))))
9920 && ! MEM_VOLATILE_P (operands [0])
9921 && ! MEM_VOLATILE_P (operands [1]))
9922 {
9923 emit_move_insn (adjust_address (operands[0], SImode, 0),
9924 adjust_address (operands[1], SImode, 0));
9925 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9926 adjust_address (copy_rtx (operands[1]), SImode, 4));
9927 return;
9928 }
9929
9930 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9931 && !gpc_reg_operand (operands[1], mode))
9932 operands[1] = force_reg (mode, operands[1]);
9933
9934 /* Recognize the case where operand[1] is a reference to thread-local
9935 data and load its address to a register. */
9936 if (tls_referenced_p (operands[1]))
9937 {
9938 enum tls_model model;
9939 rtx tmp = operands[1];
9940 rtx addend = NULL;
9941
9942 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9943 {
9944 addend = XEXP (XEXP (tmp, 0), 1);
9945 tmp = XEXP (XEXP (tmp, 0), 0);
9946 }
9947
9948 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
9949 model = SYMBOL_REF_TLS_MODEL (tmp);
9950 gcc_assert (model != 0);
9951
9952 tmp = rs6000_legitimize_tls_address (tmp, model);
9953 if (addend)
9954 {
9955 tmp = gen_rtx_PLUS (mode, tmp, addend);
9956 tmp = force_operand (tmp, operands[0]);
9957 }
9958 operands[1] = tmp;
9959 }
9960
9961 /* 128-bit constant floating-point values on Darwin should really be loaded
9962 as two parts. However, this premature splitting is a problem when DFmode
9963 values can go into Altivec registers. */
9964 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
9965 && GET_CODE (operands[1]) == CONST_DOUBLE)
9966 {
9967 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9968 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9969 DFmode);
9970 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9971 GET_MODE_SIZE (DFmode)),
9972 simplify_gen_subreg (DFmode, operands[1], mode,
9973 GET_MODE_SIZE (DFmode)),
9974 DFmode);
9975 return;
9976 }
9977
9978 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9979 p1:SD) if p1 is not of floating point class and p0 is spilled as
9980 we can have no analogous movsd_store for this. */
9981 if (lra_in_progress && mode == DDmode
9982 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
9983 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9984 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
9985 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9986 {
9987 enum reg_class cl;
9988 int regno = REGNO (SUBREG_REG (operands[1]));
9989
9990 if (regno >= FIRST_PSEUDO_REGISTER)
9991 {
9992 cl = reg_preferred_class (regno);
9993 regno = reg_renumber[regno];
9994 if (regno < 0)
9995 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9996 }
9997 if (regno >= 0 && ! FP_REGNO_P (regno))
9998 {
9999 mode = SDmode;
10000 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10001 operands[1] = SUBREG_REG (operands[1]);
10002 }
10003 }
10004 if (lra_in_progress
10005 && mode == SDmode
10006 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10007 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10008 && (REG_P (operands[1])
10009 || (GET_CODE (operands[1]) == SUBREG
10010 && REG_P (SUBREG_REG (operands[1])))))
10011 {
10012 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10013 ? SUBREG_REG (operands[1]) : operands[1]);
10014 enum reg_class cl;
10015
10016 if (regno >= FIRST_PSEUDO_REGISTER)
10017 {
10018 cl = reg_preferred_class (regno);
10019 gcc_assert (cl != NO_REGS);
10020 regno = reg_renumber[regno];
10021 if (regno < 0)
10022 regno = ira_class_hard_regs[cl][0];
10023 }
10024 if (FP_REGNO_P (regno))
10025 {
10026 if (GET_MODE (operands[0]) != DDmode)
10027 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10028 emit_insn (gen_movsd_store (operands[0], operands[1]));
10029 }
10030 else if (INT_REGNO_P (regno))
10031 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10032 else
10033 gcc_unreachable ();
10034 return;
10035 }
10036 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10037 p1:DD)) if p0 is not of floating point class and p1 is spilled as
10038 we can have no analogous movsd_load for this. */
10039 if (lra_in_progress && mode == DDmode
10040 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10041 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10042 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10043 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10044 {
10045 enum reg_class cl;
10046 int regno = REGNO (SUBREG_REG (operands[0]));
10047
10048 if (regno >= FIRST_PSEUDO_REGISTER)
10049 {
10050 cl = reg_preferred_class (regno);
10051 regno = reg_renumber[regno];
10052 if (regno < 0)
10053 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10054 }
10055 if (regno >= 0 && ! FP_REGNO_P (regno))
10056 {
10057 mode = SDmode;
10058 operands[0] = SUBREG_REG (operands[0]);
10059 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10060 }
10061 }
10062 if (lra_in_progress
10063 && mode == SDmode
10064 && (REG_P (operands[0])
10065 || (GET_CODE (operands[0]) == SUBREG
10066 && REG_P (SUBREG_REG (operands[0]))))
10067 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10068 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10069 {
10070 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10071 ? SUBREG_REG (operands[0]) : operands[0]);
10072 enum reg_class cl;
10073
10074 if (regno >= FIRST_PSEUDO_REGISTER)
10075 {
10076 cl = reg_preferred_class (regno);
10077 gcc_assert (cl != NO_REGS);
10078 regno = reg_renumber[regno];
10079 if (regno < 0)
10080 regno = ira_class_hard_regs[cl][0];
10081 }
10082 if (FP_REGNO_P (regno))
10083 {
10084 if (GET_MODE (operands[1]) != DDmode)
10085 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10086 emit_insn (gen_movsd_load (operands[0], operands[1]));
10087 }
10088 else if (INT_REGNO_P (regno))
10089 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10090 else
10091 gcc_unreachable ();
10092 return;
10093 }
10094
10095 /* FIXME: In the long term, this switch statement should go away
10096 and be replaced by a sequence of tests based on things like
10097 mode == Pmode. */
10098 switch (mode)
10099 {
10100 case E_HImode:
10101 case E_QImode:
10102 if (CONSTANT_P (operands[1])
10103 && GET_CODE (operands[1]) != CONST_INT)
10104 operands[1] = force_const_mem (mode, operands[1]);
10105 break;
10106
10107 case E_TFmode:
10108 case E_TDmode:
10109 case E_IFmode:
10110 case E_KFmode:
10111 if (FLOAT128_2REG_P (mode))
10112 rs6000_eliminate_indexed_memrefs (operands);
10113 /* fall through */
10114
10115 case E_DFmode:
10116 case E_DDmode:
10117 case E_SFmode:
10118 case E_SDmode:
10119 if (CONSTANT_P (operands[1])
10120 && ! easy_fp_constant (operands[1], mode))
10121 operands[1] = force_const_mem (mode, operands[1]);
10122 break;
10123
10124 case E_V16QImode:
10125 case E_V8HImode:
10126 case E_V4SFmode:
10127 case E_V4SImode:
10128 case E_V2DFmode:
10129 case E_V2DImode:
10130 case E_V1TImode:
10131 if (CONSTANT_P (operands[1])
10132 && !easy_vector_constant (operands[1], mode))
10133 operands[1] = force_const_mem (mode, operands[1]);
10134 break;
10135
10136 case E_SImode:
10137 case E_DImode:
10138 /* Use the default pattern for the address of ELF small data. */
10139 if (TARGET_ELF
10140 && mode == Pmode
10141 && DEFAULT_ABI == ABI_V4
10142 && (GET_CODE (operands[1]) == SYMBOL_REF
10143 || GET_CODE (operands[1]) == CONST)
10144 && small_data_operand (operands[1], mode))
10145 {
10146 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10147 return;
10148 }
10149
10150 if (DEFAULT_ABI == ABI_V4
10151 && mode == Pmode && mode == SImode
10152 && flag_pic == 1 && got_operand (operands[1], mode))
10153 {
10154 emit_insn (gen_movsi_got (operands[0], operands[1]));
10155 return;
10156 }
10157
10158 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10159 && TARGET_NO_TOC
10160 && ! flag_pic
10161 && mode == Pmode
10162 && CONSTANT_P (operands[1])
10163 && GET_CODE (operands[1]) != HIGH
10164 && GET_CODE (operands[1]) != CONST_INT)
10165 {
10166 rtx target = (!can_create_pseudo_p ()
10167 ? operands[0]
10168 : gen_reg_rtx (mode));
10169
10170 /* If this is a function address on -mcall-aixdesc,
10171 convert it to the address of the descriptor. */
10172 if (DEFAULT_ABI == ABI_AIX
10173 && GET_CODE (operands[1]) == SYMBOL_REF
10174 && XSTR (operands[1], 0)[0] == '.')
10175 {
10176 const char *name = XSTR (operands[1], 0);
10177 rtx new_ref;
10178 while (*name == '.')
10179 name++;
10180 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10181 CONSTANT_POOL_ADDRESS_P (new_ref)
10182 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10183 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10184 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10185 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10186 operands[1] = new_ref;
10187 }
10188
10189 if (DEFAULT_ABI == ABI_DARWIN)
10190 {
10191 #if TARGET_MACHO
10192 if (MACHO_DYNAMIC_NO_PIC_P)
10193 {
10194 /* Take care of any required data indirection. */
10195 operands[1] = rs6000_machopic_legitimize_pic_address (
10196 operands[1], mode, operands[0]);
10197 if (operands[0] != operands[1])
10198 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10199 return;
10200 }
10201 #endif
10202 emit_insn (gen_macho_high (target, operands[1]));
10203 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10204 return;
10205 }
10206
10207 emit_insn (gen_elf_high (target, operands[1]));
10208 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10209 return;
10210 }
10211
10212 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10213 and we have put it in the TOC, we just need to make a TOC-relative
10214 reference to it. */
10215 if (TARGET_TOC
10216 && GET_CODE (operands[1]) == SYMBOL_REF
10217 && use_toc_relative_ref (operands[1], mode))
10218 operands[1] = create_TOC_reference (operands[1], operands[0]);
10219 else if (mode == Pmode
10220 && CONSTANT_P (operands[1])
10221 && GET_CODE (operands[1]) != HIGH
10222 && ((GET_CODE (operands[1]) != CONST_INT
10223 && ! easy_fp_constant (operands[1], mode))
10224 || (GET_CODE (operands[1]) == CONST_INT
10225 && (num_insns_constant (operands[1], mode)
10226 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10227 || (GET_CODE (operands[0]) == REG
10228 && FP_REGNO_P (REGNO (operands[0]))))
10229 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10230 && (TARGET_CMODEL == CMODEL_SMALL
10231 || can_create_pseudo_p ()
10232 || (REG_P (operands[0])
10233 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10234 {
10235
10236 #if TARGET_MACHO
10237 /* Darwin uses a special PIC legitimizer. */
10238 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10239 {
10240 operands[1] =
10241 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10242 operands[0]);
10243 if (operands[0] != operands[1])
10244 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10245 return;
10246 }
10247 #endif
10248
10249 /* If we are to limit the number of things we put in the TOC and
10250 this is a symbol plus a constant we can add in one insn,
10251 just put the symbol in the TOC and add the constant. */
10252 if (GET_CODE (operands[1]) == CONST
10253 && TARGET_NO_SUM_IN_TOC
10254 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10255 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10256 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10257 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10258 && ! side_effects_p (operands[0]))
10259 {
10260 rtx sym =
10261 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10262 rtx other = XEXP (XEXP (operands[1], 0), 1);
10263
10264 sym = force_reg (mode, sym);
10265 emit_insn (gen_add3_insn (operands[0], sym, other));
10266 return;
10267 }
10268
10269 operands[1] = force_const_mem (mode, operands[1]);
10270
10271 if (TARGET_TOC
10272 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10273 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10274 {
10275 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10276 operands[0]);
10277 operands[1] = gen_const_mem (mode, tocref);
10278 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10279 }
10280 }
10281 break;
10282
10283 case E_TImode:
10284 if (!VECTOR_MEM_VSX_P (TImode))
10285 rs6000_eliminate_indexed_memrefs (operands);
10286 break;
10287
10288 case E_PTImode:
10289 rs6000_eliminate_indexed_memrefs (operands);
10290 break;
10291
10292 default:
10293 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10294 }
10295
10296 /* Above, we may have called force_const_mem which may have returned
10297 an invalid address. If we can, fix this up; otherwise, reload will
10298 have to deal with it. */
10299 if (GET_CODE (operands[1]) == MEM)
10300 operands[1] = validize_mem (operands[1]);
10301
10302 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10303 }
10304 \f
10305 /* Nonzero if we can use a floating-point register to pass this arg. */
10306 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10307 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10308 && (CUM)->fregno <= FP_ARG_MAX_REG \
10309 && TARGET_HARD_FLOAT)
10310
10311 /* Nonzero if we can use an AltiVec register to pass this arg. */
10312 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10313 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10314 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10315 && TARGET_ALTIVEC_ABI \
10316 && (NAMED))
10317
10318 /* Walk down the type tree of TYPE counting consecutive base elements.
10319 If *MODEP is VOIDmode, then set it to the first valid floating point
10320 or vector type. If a non-floating point or vector type is found, or
10321 if a floating point or vector type that doesn't match a non-VOIDmode
10322 *MODEP is found, then return -1, otherwise return the count in the
10323 sub-tree. */
10324
10325 static int
10326 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10327 {
10328 machine_mode mode;
10329 HOST_WIDE_INT size;
10330
10331 switch (TREE_CODE (type))
10332 {
10333 case REAL_TYPE:
10334 mode = TYPE_MODE (type);
10335 if (!SCALAR_FLOAT_MODE_P (mode))
10336 return -1;
10337
10338 if (*modep == VOIDmode)
10339 *modep = mode;
10340
10341 if (*modep == mode)
10342 return 1;
10343
10344 break;
10345
10346 case COMPLEX_TYPE:
10347 mode = TYPE_MODE (TREE_TYPE (type));
10348 if (!SCALAR_FLOAT_MODE_P (mode))
10349 return -1;
10350
10351 if (*modep == VOIDmode)
10352 *modep = mode;
10353
10354 if (*modep == mode)
10355 return 2;
10356
10357 break;
10358
10359 case VECTOR_TYPE:
10360 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10361 return -1;
10362
10363 /* Use V4SImode as representative of all 128-bit vector types. */
10364 size = int_size_in_bytes (type);
10365 switch (size)
10366 {
10367 case 16:
10368 mode = V4SImode;
10369 break;
10370 default:
10371 return -1;
10372 }
10373
10374 if (*modep == VOIDmode)
10375 *modep = mode;
10376
10377 /* Vector modes are considered to be opaque: two vectors are
10378 equivalent for the purposes of being homogeneous aggregates
10379 if they are the same size. */
10380 if (*modep == mode)
10381 return 1;
10382
10383 break;
10384
10385 case ARRAY_TYPE:
10386 {
10387 int count;
10388 tree index = TYPE_DOMAIN (type);
10389
10390 /* Can't handle incomplete types or sizes that are not
10391 fixed. */
10392 if (!COMPLETE_TYPE_P (type)
10393 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10394 return -1;
10395
10396 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10397 if (count == -1
10398 || !index
10399 || !TYPE_MAX_VALUE (index)
10400 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10401 || !TYPE_MIN_VALUE (index)
10402 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10403 || count < 0)
10404 return -1;
10405
10406 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10407 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10408
10409 /* There must be no padding. */
10410 if (wi::to_wide (TYPE_SIZE (type))
10411 != count * GET_MODE_BITSIZE (*modep))
10412 return -1;
10413
10414 return count;
10415 }
10416
10417 case RECORD_TYPE:
10418 {
10419 int count = 0;
10420 int sub_count;
10421 tree field;
10422
10423 /* Can't handle incomplete types or sizes that are not
10424 fixed. */
10425 if (!COMPLETE_TYPE_P (type)
10426 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10427 return -1;
10428
10429 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10430 {
10431 if (TREE_CODE (field) != FIELD_DECL)
10432 continue;
10433
10434 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10435 if (sub_count < 0)
10436 return -1;
10437 count += sub_count;
10438 }
10439
10440 /* There must be no padding. */
10441 if (wi::to_wide (TYPE_SIZE (type))
10442 != count * GET_MODE_BITSIZE (*modep))
10443 return -1;
10444
10445 return count;
10446 }
10447
10448 case UNION_TYPE:
10449 case QUAL_UNION_TYPE:
10450 {
10451 /* These aren't very interesting except in a degenerate case. */
10452 int count = 0;
10453 int sub_count;
10454 tree field;
10455
10456 /* Can't handle incomplete types or sizes that are not
10457 fixed. */
10458 if (!COMPLETE_TYPE_P (type)
10459 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10460 return -1;
10461
10462 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10463 {
10464 if (TREE_CODE (field) != FIELD_DECL)
10465 continue;
10466
10467 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10468 if (sub_count < 0)
10469 return -1;
10470 count = count > sub_count ? count : sub_count;
10471 }
10472
10473 /* There must be no padding. */
10474 if (wi::to_wide (TYPE_SIZE (type))
10475 != count * GET_MODE_BITSIZE (*modep))
10476 return -1;
10477
10478 return count;
10479 }
10480
10481 default:
10482 break;
10483 }
10484
10485 return -1;
10486 }
10487
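/* Illustrative sketch only; these hypothetical types show how the
   counting above plays out and are not used by the compiler.  */
#if 0
struct s1 { double a; double b[2]; };  /* returns 3, *modep == DFmode */
struct s2 { _Complex float c; };       /* returns 2, *modep == SFmode */
struct s3 { double a; int i; };        /* returns -1: int is not float */
struct s4 { double a; } __attribute__ ((aligned (32)));
                                       /* returns -1: tail padding */
#endif
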
10488 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10489 float or vector aggregate that shall be passed in FP/vector registers
10490 according to the ELFv2 ABI, return the homogeneous element mode in
10491 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10492
10493 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10494
10495 static bool
10496 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10497 machine_mode *elt_mode,
10498 int *n_elts)
10499 {
10500 /* Note that we do not accept complex types at the top level as
10501 homogeneous aggregates; these types are handled via the
10502 targetm.calls.split_complex_arg mechanism. Complex types
10503 can be elements of homogeneous aggregates, however. */
10504 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10505 && AGGREGATE_TYPE_P (type))
10506 {
10507 machine_mode field_mode = VOIDmode;
10508 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10509
10510 if (field_count > 0)
10511 {
10512 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10513 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10514
10515 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10516 up to AGGR_ARG_NUM_REG registers. */
10517 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10518 {
10519 if (elt_mode)
10520 *elt_mode = field_mode;
10521 if (n_elts)
10522 *n_elts = field_count;
10523 return true;
10524 }
10525 }
10526 }
10527
10528 if (elt_mode)
10529 *elt_mode = mode;
10530 if (n_elts)
10531 *n_elts = 1;
10532 return false;
10533 }
10534
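/* For example (hypothetical types, illustration only): under ELFv2

     struct hfa { double x, y, z; };   -- *elt_mode = DFmode, *n_elts = 3
     struct hva { vector int v[2]; };  -- *elt_mode = V4SImode, *n_elts = 2

   both fit in AGGR_ARG_NUM_REG registers and return true, while a struct
   mixing float and integer members fails rs6000_aggregate_candidate and
   falls back to the ordinary GPR/stack rules.  */
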
10535 /* Return a nonzero value to say that the function value should be
10536 returned in memory, just as large structures always are. TYPE will be
10537 the data type of the value, and FNTYPE will be the type of the
10538 function doing the returning, or @code{NULL} for libcalls.
10539
10540 The AIX ABI for the RS/6000 specifies that all structures are
10541 returned in memory. The Darwin ABI does the same.
10542
10543 For the Darwin 64 Bit ABI, a function result can be returned in
10544 registers or in memory, depending on the size of the return data
10545 type. If it is returned in registers, the value occupies the same
10546 registers as it would if it were the first and only function
10547 argument. Otherwise, the function places its result in memory at
10548 the location pointed to by GPR3.
10549
10550 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10551 but a draft put them in memory, and GCC used to implement the draft
10552 instead of the final standard. Therefore, aix_struct_return
10553 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10554 compatibility can change DRAFT_V4_STRUCT_RET to override the
10555 default, and -m switches get the final word. See
10556 rs6000_option_override_internal for more details.
10557
10558 The PPC32 SVR4 ABI uses IEEE 128-bit floating point for long double, if 128-bit
10559 long double support is enabled. These values are returned in memory.
10560
10561 int_size_in_bytes returns -1 for variable size objects, which go in
10562 memory always. The cast to unsigned makes -1 > 8. */
10563
10564 static bool
10565 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10566 {
10567 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10568 if (TARGET_MACHO
10569 && rs6000_darwin64_abi
10570 && TREE_CODE (type) == RECORD_TYPE
10571 && int_size_in_bytes (type) > 0)
10572 {
10573 CUMULATIVE_ARGS valcum;
10574 rtx valret;
10575
10576 valcum.words = 0;
10577 valcum.fregno = FP_ARG_MIN_REG;
10578 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10579 /* Do a trial code generation as if this were going to be passed
10580 as an argument; if any part goes in memory, we return NULL. */
10581 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10582 if (valret)
10583 return false;
10584 /* Otherwise fall through to more conventional ABI rules. */
10585 }
10586
10587 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10588 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10589 NULL, NULL))
10590 return false;
10591
10592 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers. */
10593 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10594 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10595 return false;
10596
10597 if (AGGREGATE_TYPE_P (type)
10598 && (aix_struct_return
10599 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10600 return true;
10601
10602 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10603 modes only exist for GCC vector types if -maltivec. */
10604 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10605 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10606 return false;
10607
10608 /* Return synthetic vectors in memory. */
10609 if (TREE_CODE (type) == VECTOR_TYPE
10610 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10611 {
10612 static bool warned_for_return_big_vectors = false;
10613 if (!warned_for_return_big_vectors)
10614 {
10615 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10616 "non-standard ABI extension with no compatibility "
10617 "guarantee");
10618 warned_for_return_big_vectors = true;
10619 }
10620 return true;
10621 }
10622
10623 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10624 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10625 return true;
10626
10627 return false;
10628 }
10629
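/* Some illustrative consequences of the rules above, with hypothetical
   types: under ELFv2, 'struct { double x, y; }' is a homogeneous
   aggregate and 'struct { char c[16]; }' is no larger than 16 bytes, so
   both are returned in registers; under the AIX rules the same structs
   go to memory because aix_struct_return is set.  */
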
10630 /* Specify whether values returned in registers should be at the most
10631 significant end of a register. We want aggregates returned by
10632 value to match the way aggregates are passed to functions. */
10633
10634 static bool
10635 rs6000_return_in_msb (const_tree valtype)
10636 {
10637 return (DEFAULT_ABI == ABI_ELFv2
10638 && BYTES_BIG_ENDIAN
10639 && AGGREGATE_TYPE_P (valtype)
10640 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10641 == PAD_UPWARD));
10642 }
10643
10644 #ifdef HAVE_AS_GNU_ATTRIBUTE
10645 /* Return TRUE if a call to function FNDECL may be one that
10646 potentially affects the function calling ABI of the object file. */
10647
10648 static bool
10649 call_ABI_of_interest (tree fndecl)
10650 {
10651 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10652 {
10653 struct cgraph_node *c_node;
10654
10655 /* Libcalls are always interesting. */
10656 if (fndecl == NULL_TREE)
10657 return true;
10658
10659 /* Any call to an external function is interesting. */
10660 if (DECL_EXTERNAL (fndecl))
10661 return true;
10662
10663 /* Interesting functions that we are emitting in this object file. */
10664 c_node = cgraph_node::get (fndecl);
10665 c_node = c_node->ultimate_alias_target ();
10666 return !c_node->only_called_directly_p ();
10667 }
10668 return false;
10669 }
10670 #endif
10671
10672 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10673 for a call to a function whose data type is FNTYPE.
10674 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10675
10676 For incoming args we set the number of prototype arguments high
10677 so we never return a PARALLEL. */
10678
10679 void
10680 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10681 rtx libname ATTRIBUTE_UNUSED, int incoming,
10682 int libcall, int n_named_args,
10683 tree fndecl ATTRIBUTE_UNUSED,
10684 machine_mode return_mode ATTRIBUTE_UNUSED)
10685 {
10686 static CUMULATIVE_ARGS zero_cumulative;
10687
10688 *cum = zero_cumulative;
10689 cum->words = 0;
10690 cum->fregno = FP_ARG_MIN_REG;
10691 cum->vregno = ALTIVEC_ARG_MIN_REG;
10692 cum->prototype = (fntype && prototype_p (fntype));
10693 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10694 ? CALL_LIBCALL : CALL_NORMAL);
10695 cum->sysv_gregno = GP_ARG_MIN_REG;
10696 cum->stdarg = stdarg_p (fntype);
10697 cum->libcall = libcall;
10698
10699 cum->nargs_prototype = 0;
10700 if (incoming || cum->prototype)
10701 cum->nargs_prototype = n_named_args;
10702
10703 /* Check for a longcall attribute. */
10704 if ((!fntype && rs6000_default_long_calls)
10705 || (fntype
10706 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10707 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10708 cum->call_cookie |= CALL_LONG;
10709
10710 if (TARGET_DEBUG_ARG)
10711 {
10712 fprintf (stderr, "\ninit_cumulative_args:");
10713 if (fntype)
10714 {
10715 tree ret_type = TREE_TYPE (fntype);
10716 fprintf (stderr, " ret code = %s,",
10717 get_tree_code_name (TREE_CODE (ret_type)));
10718 }
10719
10720 if (cum->call_cookie & CALL_LONG)
10721 fprintf (stderr, " longcall,");
10722
10723 fprintf (stderr, " proto = %d, nargs = %d\n",
10724 cum->prototype, cum->nargs_prototype);
10725 }
10726
10727 #ifdef HAVE_AS_GNU_ATTRIBUTE
10728 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10729 {
10730 cum->escapes = call_ABI_of_interest (fndecl);
10731 if (cum->escapes)
10732 {
10733 tree return_type;
10734
10735 if (fntype)
10736 {
10737 return_type = TREE_TYPE (fntype);
10738 return_mode = TYPE_MODE (return_type);
10739 }
10740 else
10741 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10742
10743 if (return_type != NULL)
10744 {
10745 if (TREE_CODE (return_type) == RECORD_TYPE
10746 && TYPE_TRANSPARENT_AGGR (return_type))
10747 {
10748 return_type = TREE_TYPE (first_field (return_type));
10749 return_mode = TYPE_MODE (return_type);
10750 }
10751 if (AGGREGATE_TYPE_P (return_type)
10752 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10753 <= 8))
10754 rs6000_returns_struct = true;
10755 }
10756 if (SCALAR_FLOAT_MODE_P (return_mode))
10757 {
10758 rs6000_passes_float = true;
10759 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10760 && (FLOAT128_IBM_P (return_mode)
10761 || FLOAT128_IEEE_P (return_mode)
10762 || (return_type != NULL
10763 && (TYPE_MAIN_VARIANT (return_type)
10764 == long_double_type_node))))
10765 rs6000_passes_long_double = true;
10766
10767 /* Note if we pass or return an IEEE 128-bit type. We changed
10768 the mangling for these types, and we may need to make an alias
10769 with the old mangling. */
10770 if (FLOAT128_IEEE_P (return_mode))
10771 rs6000_passes_ieee128 = true;
10772 }
10773 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10774 rs6000_passes_vector = true;
10775 }
10776 }
10777 #endif
10778
10779 if (fntype
10780 && !TARGET_ALTIVEC
10781 && TARGET_ALTIVEC_ABI
10782 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10783 {
10784 error ("cannot return value in vector register because"
10785 " altivec instructions are disabled, use %qs"
10786 " to enable them", "-maltivec");
10787 }
10788 }
10789 \f
10790 /* The mode the ABI uses for a word. This is not the same as word_mode
10791 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10792
10793 static scalar_int_mode
10794 rs6000_abi_word_mode (void)
10795 {
10796 return TARGET_32BIT ? SImode : DImode;
10797 }
10798
10799 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10800 static char *
10801 rs6000_offload_options (void)
10802 {
10803 if (TARGET_64BIT)
10804 return xstrdup ("-foffload-abi=lp64");
10805 else
10806 return xstrdup ("-foffload-abi=ilp32");
10807 }
10808
10809 /* On rs6000, function arguments are promoted, as are function return
10810 values. */
10811
10812 static machine_mode
10813 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10814 machine_mode mode,
10815 int *punsignedp ATTRIBUTE_UNUSED,
10816 const_tree, int)
10817 {
10818 PROMOTE_MODE (mode, *punsignedp, type);
10819
10820 return mode;
10821 }
10822
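/* E.g. a 'short' argument or return value is widened to SImode on a
   32-bit target and DImode on a 64-bit target; see the rs6000
   definition of PROMOTE_MODE for the precise rule.  */
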
10823 /* Return true if TYPE must be passed on the stack and not in registers. */
10824
10825 static bool
10826 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10827 {
10828 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10829 return must_pass_in_stack_var_size (mode, type);
10830 else
10831 return must_pass_in_stack_var_size_or_pad (mode, type);
10832 }
10833
10834 static inline bool
10835 is_complex_IBM_long_double (machine_mode mode)
10836 {
10837 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10838 }
10839
10840 /* Whether ABI_V4 passes MODE args to a function in floating point
10841 registers. */
10842
10843 static bool
10844 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10845 {
10846 if (!TARGET_HARD_FLOAT)
10847 return false;
10848 if (mode == DFmode)
10849 return true;
10850 if (mode == SFmode && named)
10851 return true;
10852 /* ABI_V4 passes complex IBM long double in 8 gprs.
10853 Stupid, but we can't change the ABI now. */
10854 if (is_complex_IBM_long_double (mode))
10855 return false;
10856 if (FLOAT128_2REG_P (mode))
10857 return true;
10858 if (DECIMAL_FLOAT_MODE_P (mode))
10859 return true;
10860 return false;
10861 }
10862
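/* Summary by example (illustrative): with hard float, a 'double' always
   goes in an FPR; a 'float' does only when it is a named argument; IBM
   'long double' (FLOAT128_2REG_P) is passed in FPRs; and complex IBM
   long double deliberately falls through to the GPR path.  */
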
10863 /* Implement TARGET_FUNCTION_ARG_PADDING.
10864
10865 For the AIX ABI structs are always stored left shifted in their
10866 argument slot. */
10867
10868 static pad_direction
10869 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10870 {
10871 #ifndef AGGREGATE_PADDING_FIXED
10872 #define AGGREGATE_PADDING_FIXED 0
10873 #endif
10874 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10875 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10876 #endif
10877
10878 if (!AGGREGATE_PADDING_FIXED)
10879 {
10880 /* GCC used to pass structures of the same size as integer types as
10881 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING;
10882 i.e., structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10883 passed padded downward, except that -mstrict-align further
10884 muddied the water in that multi-component structures of 2 and 4
10885 bytes in size were passed padded upward.
10886
10887 The following arranges for best compatibility with previous
10888 versions of gcc, but removes the -mstrict-align dependency. */
10889 if (BYTES_BIG_ENDIAN)
10890 {
10891 HOST_WIDE_INT size = 0;
10892
10893 if (mode == BLKmode)
10894 {
10895 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10896 size = int_size_in_bytes (type);
10897 }
10898 else
10899 size = GET_MODE_SIZE (mode);
10900
10901 if (size == 1 || size == 2 || size == 4)
10902 return PAD_DOWNWARD;
10903 }
10904 return PAD_UPWARD;
10905 }
10906
10907 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10908 {
10909 if (type != 0 && AGGREGATE_TYPE_P (type))
10910 return PAD_UPWARD;
10911 }
10912
10913 /* Fall back to the default. */
10914 return default_function_arg_padding (mode, type);
10915 }
10916
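/* Example of the effect (illustrative): on a big-endian target a 3-byte
   struct pads upward like other aggregates, while a 2-byte struct pads
   downward, matching how a 'short' would have been passed by older
   versions of gcc.  */
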
10917 /* If defined, a C expression that gives the alignment boundary, in bits,
10918 of an argument with the specified mode and type. If it is not defined,
10919 PARM_BOUNDARY is used for all arguments.
10920
10921 V.4 wants long longs and doubles to be double word aligned. Just
10922 testing the mode size is a boneheaded way to do this as it means
10923 that other types such as complex int are also double word aligned.
10924 However, we're stuck with this because changing the ABI might break
10925 existing library interfaces.
10926
10927 Quadword align Altivec/VSX vectors.
10928 Quadword align large synthetic vector types. */
10929
10930 static unsigned int
10931 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10932 {
10933 machine_mode elt_mode;
10934 int n_elts;
10935
10936 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10937
10938 if (DEFAULT_ABI == ABI_V4
10939 && (GET_MODE_SIZE (mode) == 8
10940 || (TARGET_HARD_FLOAT
10941 && !is_complex_IBM_long_double (mode)
10942 && FLOAT128_2REG_P (mode))))
10943 return 64;
10944 else if (FLOAT128_VECTOR_P (mode))
10945 return 128;
10946 else if (type && TREE_CODE (type) == VECTOR_TYPE
10947 && int_size_in_bytes (type) >= 8
10948 && int_size_in_bytes (type) < 16)
10949 return 64;
10950 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10951 || (type && TREE_CODE (type) == VECTOR_TYPE
10952 && int_size_in_bytes (type) >= 16))
10953 return 128;
10954
10955 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10956 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10957 -mcompat-align-parm is used. */
10958 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10959 || DEFAULT_ABI == ABI_ELFv2)
10960 && type && TYPE_ALIGN (type) > 64)
10961 {
10962 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10963 or homogeneous float/vector aggregates here. We already handled
10964 vector aggregates above, but still need to check for float here. */
10965 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10966 && !SCALAR_FLOAT_MODE_P (elt_mode));
10967
10968 /* We used to check for BLKmode instead of the above aggregate type
10969 check. Warn when this results in any difference to the ABI. */
10970 if (aggregate_p != (mode == BLKmode))
10971 {
10972 static bool warned;
10973 if (!warned && warn_psabi)
10974 {
10975 warned = true;
10976 inform (input_location,
10977 "the ABI of passing aggregates with %d-byte alignment"
10978 " has changed in GCC 5",
10979 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10980 }
10981 }
10982
10983 if (aggregate_p)
10984 return 128;
10985 }
10986
10987 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10988 implement the "aggregate type" check as a BLKmode check here; this
10989 means certain aggregate types are in fact not aligned. */
10990 if (TARGET_MACHO && rs6000_darwin64_abi
10991 && mode == BLKmode
10992 && type && TYPE_ALIGN (type) > 64)
10993 return 128;
10994
10995 return PARM_BOUNDARY;
10996 }
10997
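/* Concrete results of the rules above (illustrative): under ABI_V4 a
   'double' or 'long long' is aligned to 64 bits; Altivec/VSX vectors are
   aligned to 128 bits; and under ELFv2 a struct whose TYPE_ALIGN exceeds
   64 bits is quadword aligned in the parameter save area unless it is a
   homogeneous float aggregate.  */
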
10998 /* The offset in words to the start of the parameter save area. */
10999
11000 static unsigned int
11001 rs6000_parm_offset (void)
11002 {
11003 return (DEFAULT_ABI == ABI_V4 ? 2
11004 : DEFAULT_ABI == ABI_ELFv2 ? 4
11005 : 6);
11006 }
11007
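/* These counts reflect the fixed stack frame header that precedes the
   parameter save area: 2 words (back chain and LR) for SVR4, 4 for
   ELFv2 (adding CR and TOC save slots), and 6 for AIX (which reserves
   two further words for the compiler and linker).  Illustrative
   reading; the authoritative layouts are in the ABI documents.  */
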
11008 /* For a function parm of MODE and TYPE, return the starting word in
11009 the parameter area. NWORDS of the parameter area are already used. */
11010
11011 static unsigned int
11012 rs6000_parm_start (machine_mode mode, const_tree type,
11013 unsigned int nwords)
11014 {
11015 unsigned int align;
11016
11017 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11018 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11019 }
11020
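/* Worked example (illustrative): under ELFv2 (offset 4) with NWORDS == 1
   already used, a 16-byte-aligned argument has
   ALIGN == 128 / PARM_BOUNDARY - 1 == 1 on a 64-bit target, so it
   starts at word 1 + (-(4 + 1) & 1) == 2; the skipped word keeps it on
   a 16-byte boundary within the save area.  */
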
11021 /* Compute the size (in words) of a function argument. */
11022
11023 static unsigned long
11024 rs6000_arg_size (machine_mode mode, const_tree type)
11025 {
11026 unsigned long size;
11027
11028 if (mode != BLKmode)
11029 size = GET_MODE_SIZE (mode);
11030 else
11031 size = int_size_in_bytes (type);
11032
11033 if (TARGET_32BIT)
11034 return (size + 3) >> 2;
11035 else
11036 return (size + 7) >> 3;
11037 }
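
/* E.g. (illustrative) a BLKmode struct of 10 bytes occupies
   (10 + 3) >> 2 == 3 words on a 32-bit target and (10 + 7) >> 3 == 2
   doublewords on a 64-bit target.  */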
11038 \f
11039 /* Use this to flush pending int fields. */
11040
11041 static void
11042 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11043 HOST_WIDE_INT bitpos, int final)
11044 {
11045 unsigned int startbit, endbit;
11046 int intregs, intoffset;
11047
11048 /* Handle the situations where a float is taking up the first half
11049 of the GPR, and the other half is empty (typically due to
11050 alignment restrictions). We can detect this by an 8-byte-aligned
11051 int field, or by seeing that this is the final flush for this
11052 argument. Count the word and continue on. */
11053 if (cum->floats_in_gpr == 1
11054 && (cum->intoffset % 64 == 0
11055 || (cum->intoffset == -1 && final)))
11056 {
11057 cum->words++;
11058 cum->floats_in_gpr = 0;
11059 }
11060
11061 if (cum->intoffset == -1)
11062 return;
11063
11064 intoffset = cum->intoffset;
11065 cum->intoffset = -1;
11066 cum->floats_in_gpr = 0;
11067
11068 if (intoffset % BITS_PER_WORD != 0)
11069 {
11070 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11071 if (!int_mode_for_size (bits, 0).exists ())
11072 {
11073 /* We couldn't find an appropriate mode, which happens,
11074 e.g., in packed structs when there are 3 bytes to load.
11075 Move intoffset back to the beginning of the word in this
11076 case. */
11077 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11078 }
11079 }
11080
11081 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11082 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11083 intregs = (endbit - startbit) / BITS_PER_WORD;
11084 cum->words += intregs;
11085 /* words should be unsigned. */
11086 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11087 {
11088 int pad = (endbit/BITS_PER_WORD) - cum->words;
11089 cum->words += pad;
11090 }
11091 }
11092
11093 /* The darwin64 ABI calls for us to recurse down through structs,
11094 looking for elements passed in registers. Unfortunately, we have
11095 to track int register count here also because of misalignments
11096 in powerpc alignment mode. */
11097
11098 static void
11099 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11100 const_tree type,
11101 HOST_WIDE_INT startbitpos)
11102 {
11103 tree f;
11104
11105 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11106 if (TREE_CODE (f) == FIELD_DECL)
11107 {
11108 HOST_WIDE_INT bitpos = startbitpos;
11109 tree ftype = TREE_TYPE (f);
11110 machine_mode mode;
11111 if (ftype == error_mark_node)
11112 continue;
11113 mode = TYPE_MODE (ftype);
11114
11115 if (DECL_SIZE (f) != 0
11116 && tree_fits_uhwi_p (bit_position (f)))
11117 bitpos += int_bit_position (f);
11118
11119 /* ??? FIXME: else assume zero offset. */
11120
11121 if (TREE_CODE (ftype) == RECORD_TYPE)
11122 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11123 else if (USE_FP_FOR_ARG_P (cum, mode))
11124 {
11125 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11126 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11127 cum->fregno += n_fpregs;
11128 /* Single-precision floats present a special problem for
11129 us, because they are smaller than an 8-byte GPR, and so
11130 the structure-packing rules combined with the standard
11131 varargs behavior mean that we want to pack float/float
11132 and float/int combinations into a single register's
11133 space. This is complicated by the arg advance flushing,
11134 which works on arbitrarily large groups of int-type
11135 fields. */
11136 if (mode == SFmode)
11137 {
11138 if (cum->floats_in_gpr == 1)
11139 {
11140 /* Two floats in a word; count the word and reset
11141 the float count. */
11142 cum->words++;
11143 cum->floats_in_gpr = 0;
11144 }
11145 else if (bitpos % 64 == 0)
11146 {
11147 /* A float at the beginning of an 8-byte word;
11148 count it and put off adjusting cum->words until
11149 we see if an arg advance flush is going to do it
11150 for us. */
11151 cum->floats_in_gpr++;
11152 }
11153 else
11154 {
11155 /* The float is at the end of a word, preceded
11156 by integer fields, so the arg advance flush
11157 just above has already set cum->words and
11158 everything is taken care of. */
11159 }
11160 }
11161 else
11162 cum->words += n_fpregs;
11163 }
11164 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11165 {
11166 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11167 cum->vregno++;
11168 cum->words += 2;
11169 }
11170 else if (cum->intoffset == -1)
11171 cum->intoffset = bitpos;
11172 }
11173 }
11174
11175 /* Check for an item that needs to be considered specially under the darwin 64
11176 bit ABI. These are record types where the mode is BLK or the structure is
11177 8 bytes in size. */
11178 static int
11179 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11180 {
11181 return rs6000_darwin64_abi
11182 && ((mode == BLKmode
11183 && TREE_CODE (type) == RECORD_TYPE
11184 && int_size_in_bytes (type) > 0)
11185 || (type && TREE_CODE (type) == RECORD_TYPE
11186 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11187 }
11188
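/* E.g. (hypothetical types): 'struct { char c[24]; }' (BLKmode) and
   'struct { long x; }' (size exactly 8) both take the special darwin64
   path, while 'struct { int i; }' (SImode, size 4) is handled by the
   ordinary rules.  */
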
11189 /* Update the data in CUM to advance over an argument
11190 of mode MODE and data type TYPE.
11191 (TYPE is null for libcalls where that information may not be available.)
11192
11193 Note that for args passed by reference, function_arg will be called
11194 with MODE and TYPE set to that of the pointer to the arg, not the arg
11195 itself. */
11196
11197 static void
11198 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11199 const_tree type, bool named, int depth)
11200 {
11201 machine_mode elt_mode;
11202 int n_elts;
11203
11204 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11205
11206 /* Only tick off an argument if we're not recursing. */
11207 if (depth == 0)
11208 cum->nargs_prototype--;
11209
11210 #ifdef HAVE_AS_GNU_ATTRIBUTE
11211 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11212 && cum->escapes)
11213 {
11214 if (SCALAR_FLOAT_MODE_P (mode))
11215 {
11216 rs6000_passes_float = true;
11217 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11218 && (FLOAT128_IBM_P (mode)
11219 || FLOAT128_IEEE_P (mode)
11220 || (type != NULL
11221 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11222 rs6000_passes_long_double = true;
11223
11224 /* Note if we pass or return an IEEE 128-bit type. We changed the
11225 mangling for these types, and we may need to make an alias with
11226 the old mangling. */
11227 if (FLOAT128_IEEE_P (mode))
11228 rs6000_passes_ieee128 = true;
11229 }
11230 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11231 rs6000_passes_vector = true;
11232 }
11233 #endif
11234
11235 if (TARGET_ALTIVEC_ABI
11236 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11237 || (type && TREE_CODE (type) == VECTOR_TYPE
11238 && int_size_in_bytes (type) == 16)))
11239 {
11240 bool stack = false;
11241
11242 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11243 {
11244 cum->vregno += n_elts;
11245
11246 if (!TARGET_ALTIVEC)
11247 error ("cannot pass argument in vector register because"
11248 " altivec instructions are disabled, use %qs"
11249 " to enable them", "-maltivec");
11250
11251 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11252 even if it is going to be passed in a vector register.
11253 Darwin does the same for variable-argument functions. */
11254 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11255 && TARGET_64BIT)
11256 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11257 stack = true;
11258 }
11259 else
11260 stack = true;
11261
11262 if (stack)
11263 {
11264 int align;
11265
11266 /* Vector parameters must be 16-byte aligned. In 32-bit
11267 mode this means we need to take into account the offset
11268 to the parameter save area. In 64-bit mode, they just
11269 have to start on an even word, since the parameter save
11270 area is 16-byte aligned. */
11271 if (TARGET_32BIT)
11272 align = -(rs6000_parm_offset () + cum->words) & 3;
11273 else
11274 align = cum->words & 1;
11275 cum->words += align + rs6000_arg_size (mode, type);
11276
11277 if (TARGET_DEBUG_ARG)
11278 {
11279 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11280 cum->words, align);
11281 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11282 cum->nargs_prototype, cum->prototype,
11283 GET_MODE_NAME (mode));
11284 }
11285 }
11286 }
11287 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11288 {
11289 int size = int_size_in_bytes (type);
11290 /* Variable sized types have size == -1 and are
11291 treated as if consisting entirely of ints.
11292 Pad to 16 byte boundary if needed. */
11293 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11294 && (cum->words % 2) != 0)
11295 cum->words++;
11296 /* For varargs, we can just go up by the size of the struct. */
11297 if (!named)
11298 cum->words += (size + 7) / 8;
11299 else
11300 {
11301 /* It is tempting to say int register count just goes up by
11302 sizeof(type)/8, but this is wrong in a case such as
11303 { int; double; int; } [powerpc alignment]. We have to
11304 grovel through the fields for these too. */
11305 cum->intoffset = 0;
11306 cum->floats_in_gpr = 0;
11307 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11308 rs6000_darwin64_record_arg_advance_flush (cum,
11309 size * BITS_PER_UNIT, 1);
11310 }
11311 if (TARGET_DEBUG_ARG)
11312 {
11313 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11314 cum->words, TYPE_ALIGN (type), size);
11315 fprintf (stderr,
11316 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11317 cum->nargs_prototype, cum->prototype,
11318 GET_MODE_NAME (mode));
11319 }
11320 }
11321 else if (DEFAULT_ABI == ABI_V4)
11322 {
11323 if (abi_v4_pass_in_fpr (mode, named))
11324 {
11325 /* _Decimal128 must use an even/odd register pair. This assumes
11326 that the register number is odd when fregno is odd. */
11327 if (mode == TDmode && (cum->fregno % 2) == 1)
11328 cum->fregno++;
11329
11330 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11331 <= FP_ARG_V4_MAX_REG)
11332 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11333 else
11334 {
11335 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11336 if (mode == DFmode || FLOAT128_IBM_P (mode)
11337 || mode == DDmode || mode == TDmode)
11338 cum->words += cum->words & 1;
11339 cum->words += rs6000_arg_size (mode, type);
11340 }
11341 }
11342 else
11343 {
11344 int n_words = rs6000_arg_size (mode, type);
11345 int gregno = cum->sysv_gregno;
11346
11347 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11348 As does any other 2 word item such as complex int due to a
11349 historical mistake. */
11350 if (n_words == 2)
11351 gregno += (1 - gregno) & 1;
11352
11353 /* Multi-reg args are not split between registers and stack. */
11354 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11355 {
11356 /* Long long is aligned on the stack. So are other 2 word
11357 items such as complex int due to a historical mistake. */
11358 if (n_words == 2)
11359 cum->words += cum->words & 1;
11360 cum->words += n_words;
11361 }
11362
11363 /* Note: we continue to accumulate gregno even after we've started
11364 spilling to the stack; this is how expand_builtin_saveregs
11365 learns that spilling has begun. */
11366 cum->sysv_gregno = gregno + n_words;
11367 }
11368
11369 if (TARGET_DEBUG_ARG)
11370 {
11371 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11372 cum->words, cum->fregno);
11373 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11374 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11375 fprintf (stderr, "mode = %4s, named = %d\n",
11376 GET_MODE_NAME (mode), named);
11377 }
11378 }
11379 else
11380 {
11381 int n_words = rs6000_arg_size (mode, type);
11382 int start_words = cum->words;
11383 int align_words = rs6000_parm_start (mode, type, start_words);
11384
11385 cum->words = align_words + n_words;
11386
11387 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11388 {
11389 /* _Decimal128 must be passed in an even/odd float register pair.
11390 This assumes that the register number is odd when fregno is
11391 odd. */
11392 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11393 cum->fregno++;
11394 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11395 }
11396
11397 if (TARGET_DEBUG_ARG)
11398 {
11399 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11400 cum->words, cum->fregno);
11401 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11402 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11403 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11404 named, align_words - start_words, depth);
11405 }
11406 }
11407 }
11408
11409 static void
11410 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11411 const_tree type, bool named)
11412 {
11413 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11414 0);
11415 }
11416
11417 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11418 structure between cum->intoffset and bitpos to integer registers. */
11419
11420 static void
11421 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11422 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11423 {
11424 machine_mode mode;
11425 unsigned int regno;
11426 unsigned int startbit, endbit;
11427 int this_regno, intregs, intoffset;
11428 rtx reg;
11429
11430 if (cum->intoffset == -1)
11431 return;
11432
11433 intoffset = cum->intoffset;
11434 cum->intoffset = -1;
11435
11436 /* If this is the trailing part of a word, try to only load that
11437 much into the register. Otherwise load the whole register. Note
11438 that in the latter case we may pick up unwanted bits. It's not a
11439 problem at the moment, but we may wish to revisit it. */
11440
11441 if (intoffset % BITS_PER_WORD != 0)
11442 {
11443 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11444 if (!int_mode_for_size (bits, 0).exists (&mode))
11445 {
11446 /* We couldn't find an appropriate mode, which happens,
11447 e.g., in packed structs when there are 3 bytes to load.
11448 Move intoffset back to the beginning of the word in this
11449 case. */
11450 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11451 mode = word_mode;
11452 }
11453 }
11454 else
11455 mode = word_mode;
11456
11457 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11458 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11459 intregs = (endbit - startbit) / BITS_PER_WORD;
11460 this_regno = cum->words + intoffset / BITS_PER_WORD;
11461
11462 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11463 cum->use_stack = 1;
11464
11465 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11466 if (intregs <= 0)
11467 return;
11468
11469 intoffset /= BITS_PER_UNIT;
11470 do
11471 {
11472 regno = GP_ARG_MIN_REG + this_regno;
11473 reg = gen_rtx_REG (mode, regno);
11474 rvec[(*k)++] =
11475 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11476
11477 this_regno += 1;
11478 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11479 mode = word_mode;
11480 intregs -= 1;
11481 }
11482 while (intregs > 0);
11483 }
11484
11485 /* Recursive workhorse for the following. */
11486
11487 static void
11488 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11489 HOST_WIDE_INT startbitpos, rtx rvec[],
11490 int *k)
11491 {
11492 tree f;
11493
11494 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11495 if (TREE_CODE (f) == FIELD_DECL)
11496 {
11497 HOST_WIDE_INT bitpos = startbitpos;
11498 tree ftype = TREE_TYPE (f);
11499 machine_mode mode;
11500 if (ftype == error_mark_node)
11501 continue;
11502 mode = TYPE_MODE (ftype);
11503
11504 if (DECL_SIZE (f) != 0
11505 && tree_fits_uhwi_p (bit_position (f)))
11506 bitpos += int_bit_position (f);
11507
11508 /* ??? FIXME: else assume zero offset. */
11509
11510 if (TREE_CODE (ftype) == RECORD_TYPE)
11511 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11512 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11513 {
11514 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11515 #if 0
11516 switch (mode)
11517 {
11518 case E_SCmode: mode = SFmode; break;
11519 case E_DCmode: mode = DFmode; break;
11520 case E_TCmode: mode = TFmode; break;
11521 default: break;
11522 }
11523 #endif
11524 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11525 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11526 {
11527 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11528 && (mode == TFmode || mode == TDmode));
11529 /* Long double or _Decimal128 split over regs and memory. */
11530 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11531 cum->use_stack = 1;
11532 }
11533 rvec[(*k)++]
11534 = gen_rtx_EXPR_LIST (VOIDmode,
11535 gen_rtx_REG (mode, cum->fregno++),
11536 GEN_INT (bitpos / BITS_PER_UNIT));
11537 if (FLOAT128_2REG_P (mode))
11538 cum->fregno++;
11539 }
11540 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11541 {
11542 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11543 rvec[(*k)++]
11544 = gen_rtx_EXPR_LIST (VOIDmode,
11545 gen_rtx_REG (mode, cum->vregno++),
11546 GEN_INT (bitpos / BITS_PER_UNIT));
11547 }
11548 else if (cum->intoffset == -1)
11549 cum->intoffset = bitpos;
11550 }
11551 }
11552
11553 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11554 the register(s) to be used for each field and subfield of a struct
11555 being passed by value, along with the offset of where the
11556 register's value may be found in the block. FP fields go in FP
11557 registers, vector fields go in vector registers, and everything
11558 else goes in int registers, packed as in memory.
11559
11560 This code is also used for function return values. RETVAL indicates
11561 whether this is the case.
11562
11563 Much of this is taken from the SPARC V9 port, which has a similar
11564 calling convention. */
11565
11566 static rtx
11567 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11568 bool named, bool retval)
11569 {
11570 rtx rvec[FIRST_PSEUDO_REGISTER];
11571 int k = 1, kbase = 1;
11572 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11573 /* This is a copy; modifications are not visible to our caller. */
11574 CUMULATIVE_ARGS copy_cum = *orig_cum;
11575 CUMULATIVE_ARGS *cum = &copy_cum;
11576
11577 /* Pad to 16 byte boundary if needed. */
11578 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11579 && (cum->words % 2) != 0)
11580 cum->words++;
11581
11582 cum->intoffset = 0;
11583 cum->use_stack = 0;
11584 cum->named = named;
11585
11586 /* Put entries into rvec[] for individual FP and vector fields, and
11587 for the chunks of memory that go in int regs. Note we start at
11588 element 1; 0 is reserved for an indication of using memory, and
11589 may or may not be filled in below. */
11590 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11591 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11592
11593 /* If any part of the struct went on the stack put all of it there.
11594 This hack is because the generic code for
11595 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11596 parts of the struct are not at the beginning. */
11597 if (cum->use_stack)
11598 {
11599 if (retval)
11600 return NULL_RTX; /* doesn't go in registers at all */
11601 kbase = 0;
11602 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11603 }
11604 if (k > 1 || cum->use_stack)
11605 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11606 else
11607 return NULL_RTX;
11608 }
11609
11610 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11611
11612 static rtx
11613 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11614 int align_words)
11615 {
11616 int n_units;
11617 int i, k;
11618 rtx rvec[GP_ARG_NUM_REG + 1];
11619
11620 if (align_words >= GP_ARG_NUM_REG)
11621 return NULL_RTX;
11622
11623 n_units = rs6000_arg_size (mode, type);
11624
11625 /* Optimize the simple case where the arg fits in one gpr, except in
11626 the case of BLKmode due to assign_parms assuming that registers are
11627 BITS_PER_WORD wide. */
11628 if (n_units == 0
11629 || (n_units == 1 && mode != BLKmode))
11630 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11631
11632 k = 0;
11633 if (align_words + n_units > GP_ARG_NUM_REG)
11634 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11635 using a magic NULL_RTX component.
11636 This is not strictly correct. Only some of the arg belongs in
11637 memory, not all of it. However, the normal scheme using
11638 function_arg_partial_nregs can result in unusual subregs, e.g.
11639 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11640 store the whole arg to memory is often more efficient than code
11641 to store pieces, and we know that space is available in the right
11642 place for the whole arg. */
11643 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11644
11645 i = 0;
11646 do
11647 {
11648 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11649 rtx off = GEN_INT (i++ * 4);
11650 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11651 }
11652 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11653
11654 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11655 }
11656
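/* Sketch of the interesting case (illustrative): a DFmode argument with
   ALIGN_WORDS == 7 has n_units == 2 but only one GPR left, so the
   PARALLEL holds the NULL_RTX memory marker plus (reg:SI 10) at offset
   0, and the second half of the value lives in the parameter save
   area.  */
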
11657 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11658 but must also be copied into the parameter save area starting at
11659 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11660 to the GPRs and/or memory. Return the number of elements used. */
11661
11662 static int
11663 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11664 int align_words, rtx *rvec)
11665 {
11666 int k = 0;
11667
11668 if (align_words < GP_ARG_NUM_REG)
11669 {
11670 int n_words = rs6000_arg_size (mode, type);
11671
11672 if (align_words + n_words > GP_ARG_NUM_REG
11673 || mode == BLKmode
11674 || (TARGET_32BIT && TARGET_POWERPC64))
11675 {
11676 /* If this is partially on the stack, then we only
11677 include the portion actually in registers here. */
11678 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11679 int i = 0;
11680
11681 if (align_words + n_words > GP_ARG_NUM_REG)
11682 {
11683 /* Not all of the arg fits in gprs. Say that it goes in memory
11684 too, using a magic NULL_RTX component. Also see comment in
11685 rs6000_mixed_function_arg for why the normal
11686 function_arg_partial_nregs scheme doesn't work in this case. */
11687 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11688 }
11689
11690 do
11691 {
11692 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11693 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11694 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11695 }
11696 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11697 }
11698 else
11699 {
11700 /* The whole arg fits in gprs. */
11701 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11702 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11703 }
11704 }
11705 else
11706 {
11707 /* It's entirely in memory. */
11708 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11709 }
11710
11711 return k;
11712 }
11713
11714 /* RVEC is a vector of K components of an argument of mode MODE.
11715 Construct the final function_arg return value from it. */
11716
11717 static rtx
11718 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11719 {
11720 gcc_assert (k >= 1);
11721
11722 /* Avoid returning a PARALLEL in the trivial cases. */
11723 if (k == 1)
11724 {
11725 if (XEXP (rvec[0], 0) == NULL_RTX)
11726 return NULL_RTX;
11727
11728 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11729 return XEXP (rvec[0], 0);
11730 }
11731
11732 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11733 }
11734
11735 /* Determine where to put an argument to a function.
11736 Value is zero to push the argument on the stack,
11737 or a hard register in which to store the argument.
11738
11739 MODE is the argument's machine mode.
11740 TYPE is the data type of the argument (as a tree).
11741 This is null for libcalls where that information may
11742 not be available.
11743 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11744 the preceding args and about the function being called. It is
11745 not modified in this routine.
11746 NAMED is nonzero if this argument is a named parameter
11747 (otherwise it is an extra parameter matching an ellipsis).
11748
11749 On RS/6000 the first eight words of non-FP are normally in registers
11750 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11751 Under V.4, the first 8 FP args are in registers.
11752
11753 If this is floating-point and no prototype is specified, we use
11754 both an FP and integer register (or possibly FP reg and stack). Library
11755 functions (when CALL_LIBCALL is set) always have the proper types for args,
11756 so we can pass the FP value just in one register. emit_library_function
11757 doesn't support PARALLEL anyway.
11758
11759 Note that for args passed by reference, function_arg will be called
11760 with MODE and TYPE set to that of the pointer to the arg, not the arg
11761 itself. */
11762
11763 static rtx
11764 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11765 const_tree type, bool named)
11766 {
11767 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11768 enum rs6000_abi abi = DEFAULT_ABI;
11769 machine_mode elt_mode;
11770 int n_elts;
11771
11772 /* Return a marker to indicate whether CR1 needs to be set or cleared
11773 for the bit that V.4 uses to say fp args were passed in registers.
11774 Assume that we don't need the marker for software floating point,
11775 or compiler-generated library calls. */
11776 if (mode == VOIDmode)
11777 {
11778 if (abi == ABI_V4
11779 && (cum->call_cookie & CALL_LIBCALL) == 0
11780 && (cum->stdarg
11781 || (cum->nargs_prototype < 0
11782 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11783 && TARGET_HARD_FLOAT)
11784 return GEN_INT (cum->call_cookie
11785 | ((cum->fregno == FP_ARG_MIN_REG)
11786 ? CALL_V4_SET_FP_ARGS
11787 : CALL_V4_CLEAR_FP_ARGS));
11788
11789 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11790 }
11791
11792 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11793
11794 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11795 {
11796 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11797 if (rslt != NULL_RTX)
11798 return rslt;
11799 /* Else fall through to usual handling. */
11800 }
11801
11802 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11803 {
11804 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11805 rtx r, off;
11806 int i, k = 0;
11807
11808 /* Do we also need to pass this argument in the parameter save area?
11809 Library support functions for IEEE 128-bit are assumed not to need the
11810 value passed both in GPRs and in vector registers. */
11811 if (TARGET_64BIT && !cum->prototype
11812 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11813 {
11814 int align_words = ROUND_UP (cum->words, 2);
11815 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11816 }
11817
11818 /* Describe where this argument goes in the vector registers. */
11819 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11820 {
11821 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11822 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11823 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11824 }
11825
11826 return rs6000_finish_function_arg (mode, rvec, k);
11827 }
11828 else if (TARGET_ALTIVEC_ABI
11829 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11830 || (type && TREE_CODE (type) == VECTOR_TYPE
11831 && int_size_in_bytes (type) == 16)))
11832 {
11833 if (named || abi == ABI_V4)
11834 return NULL_RTX;
11835 else
11836 {
11837 /* Vector parameters to varargs functions under AIX or Darwin
11838 get passed in memory and possibly also in GPRs. */
11839 int align, align_words, n_words;
11840 machine_mode part_mode;
11841
11842 /* Vector parameters must be 16-byte aligned. In 32-bit
11843 mode this means we need to take into account the offset
11844 to the parameter save area. In 64-bit mode, they just
11845 have to start on an even word, since the parameter save
11846 area is 16-byte aligned. */
11847 if (TARGET_32BIT)
11848 align = -(rs6000_parm_offset () + cum->words) & 3;
11849 else
11850 align = cum->words & 1;
11851 align_words = cum->words + align;
11852
11853 /* Out of registers? Memory, then. */
11854 if (align_words >= GP_ARG_NUM_REG)
11855 return NULL_RTX;
11856
11857 if (TARGET_32BIT && TARGET_POWERPC64)
11858 return rs6000_mixed_function_arg (mode, type, align_words);
11859
11860 /* The vector value goes in GPRs. Only the part of the
11861 value in GPRs is reported here. */
11862 part_mode = mode;
11863 n_words = rs6000_arg_size (mode, type);
11864 if (align_words + n_words > GP_ARG_NUM_REG)
11865 /* Fortunately, there are only two possibilities: the value
11866 is either wholly in GPRs or half in GPRs and half not. */
11867 part_mode = DImode;
11868
11869 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11870 }
11871 }
11872
11873 else if (abi == ABI_V4)
11874 {
11875 if (abi_v4_pass_in_fpr (mode, named))
11876 {
11877 /* _Decimal128 must use an even/odd register pair. This assumes
11878 that the register number is odd when fregno is odd. */
11879 if (mode == TDmode && (cum->fregno % 2) == 1)
11880 cum->fregno++;
11881
11882 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11883 <= FP_ARG_V4_MAX_REG)
11884 return gen_rtx_REG (mode, cum->fregno);
11885 else
11886 return NULL_RTX;
11887 }
11888 else
11889 {
11890 int n_words = rs6000_arg_size (mode, type);
11891 int gregno = cum->sysv_gregno;
11892
11893 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11894 As does any other 2 word item such as complex int due to a
11895 historical mistake. */
11896 if (n_words == 2)
11897 gregno += (1 - gregno) & 1;
11898
11899 /* Multi-reg args are not split between registers and stack. */
11900 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11901 return NULL_RTX;
11902
11903 if (TARGET_32BIT && TARGET_POWERPC64)
11904 return rs6000_mixed_function_arg (mode, type,
11905 gregno - GP_ARG_MIN_REG);
11906 return gen_rtx_REG (mode, gregno);
11907 }
11908 }
11909 else
11910 {
11911 int align_words = rs6000_parm_start (mode, type, cum->words);
11912
11913 /* _Decimal128 must be passed in an even/odd float register pair.
11914 This assumes that the register number is odd when fregno is odd. */
11915 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11916 cum->fregno++;
11917
11918 if (USE_FP_FOR_ARG_P (cum, elt_mode))
11919 {
11920 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11921 rtx r, off;
11922 int i, k = 0;
11923 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11924 int fpr_words;
11925
11926 /* Do we also need to pass this argument in the parameter
11927 save area? */
11928 if (type && (cum->nargs_prototype <= 0
11929 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11930 && TARGET_XL_COMPAT
11931 && align_words >= GP_ARG_NUM_REG)))
11932 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11933
11934 /* Describe where this argument goes in the fprs. */
11935 for (i = 0; i < n_elts
11936 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11937 {
11938 /* Check if the argument is split over registers and memory.
11939 This can only ever happen for long double or _Decimal128;
11940 complex types are handled via split_complex_arg. */
11941 machine_mode fmode = elt_mode;
11942 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11943 {
11944 gcc_assert (FLOAT128_2REG_P (fmode));
11945 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11946 }
11947
11948 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11949 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11950 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11951 }
11952
11953 /* If there were not enough FPRs to hold the argument, the rest
11954 usually goes into memory. However, if the current position
11955 is still within the register parameter area, a portion may
11956 actually have to go into GPRs.
11957
11958 Note that it may happen that the portion of the argument
11959 passed in the first "half" of the first GPR was already
11960 passed in the last FPR as well.
11961
11962 For unnamed arguments, we already set up GPRs to cover the
11963 whole argument in rs6000_psave_function_arg, so there is
11964 nothing further to do at this point. */
11965 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11966 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11967 && cum->nargs_prototype > 0)
11968 {
11969 static bool warned;
11970
11971 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11972 int n_words = rs6000_arg_size (mode, type);
11973
11974 align_words += fpr_words;
11975 n_words -= fpr_words;
11976
11977 do
11978 {
11979 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11980 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11981 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11982 }
11983 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11984
11985 if (!warned && warn_psabi)
11986 {
11987 warned = true;
11988 inform (input_location,
11989 "the ABI of passing homogeneous float aggregates"
11990 " has changed in GCC 5");
11991 }
11992 }
11993
11994 return rs6000_finish_function_arg (mode, rvec, k);
11995 }
11996 else if (align_words < GP_ARG_NUM_REG)
11997 {
11998 if (TARGET_32BIT && TARGET_POWERPC64)
11999 return rs6000_mixed_function_arg (mode, type, align_words);
12000
12001 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12002 }
12003 else
12004 return NULL_RTX;
12005 }
12006 }
12007 \f
12008 /* For an arg passed partly in registers and partly in memory, this is
12009 the number of bytes passed in registers. For args passed entirely in
12010 registers or entirely in memory, zero. When an arg is described by a
12011 PARALLEL, perhaps using more than one register type, this function
12012 returns the number of bytes used by the first element of the PARALLEL. */
12013
12014 static int
12015 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12016 tree type, bool named)
12017 {
12018 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12019 bool passed_in_gprs = true;
12020 int ret = 0;
12021 int align_words;
12022 machine_mode elt_mode;
12023 int n_elts;
12024
12025 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12026
12027 if (DEFAULT_ABI == ABI_V4)
12028 return 0;
12029
12030 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12031 {
12032 /* If we are passing this arg in the fixed parameter save area (gprs or
12033 memory) as well as VRs, we do not use the partial bytes mechanism;
12034 instead, rs6000_function_arg will return a PARALLEL including a memory
12035 element as necessary. Library support functions for IEEE 128-bit are
12036 assumed to not need the value passed both in GPRs and in vector
12037 registers. */
12038 if (TARGET_64BIT && !cum->prototype
12039 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12040 return 0;
12041
12042 /* Otherwise, we pass in VRs only. Check for partial copies. */
12043 passed_in_gprs = false;
12044 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12045 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12046 }
12047
12048 /* In this complicated case we just disable the partial_nregs code. */
12049 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12050 return 0;
12051
12052 align_words = rs6000_parm_start (mode, type, cum->words);
12053
12054 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12055 {
12056 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12057
12058 /* If we are passing this arg in the fixed parameter save area
12059 (gprs or memory) as well as FPRs, we do not use the partial
12060 bytes mechanism; instead, rs6000_function_arg will return a
12061 PARALLEL including a memory element as necessary. */
12062 if (type
12063 && (cum->nargs_prototype <= 0
12064 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12065 && TARGET_XL_COMPAT
12066 && align_words >= GP_ARG_NUM_REG)))
12067 return 0;
12068
12069 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12070 passed_in_gprs = false;
12071 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12072 {
12073 /* Compute number of bytes / words passed in FPRs. If there
12074 is still space available in the register parameter area
12075 *after* that amount, a part of the argument will be passed
12076 in GPRs. In that case, the total amount passed in any
12077 registers is equal to the amount that would have been passed
12078 in GPRs if everything were passed there, so we fall back to
12079 the GPR code below to compute the appropriate value. */
12080 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12081 * MIN (8, GET_MODE_SIZE (elt_mode)));
12082 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12083
12084 if (align_words + fpr_words < GP_ARG_NUM_REG)
12085 passed_in_gprs = true;
12086 else
12087 ret = fpr;
12088 }
12089 }
12090
12091 if (passed_in_gprs
12092 && align_words < GP_ARG_NUM_REG
12093 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12094 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12095
12096 if (ret != 0 && TARGET_DEBUG_ARG)
12097 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12098
12099 return ret;
12100 }
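/* Worked example (editor's sketch; assumes the 64-bit AIX/ELFv2
   conventions, where GP_ARG_NUM_REG is 8 and each GPR holds 8 bytes):
   a 40-byte aggregate whose first word lands at align_words == 5 needs
   five doublewords but only three parameter GPRs remain, so

       ret = (GP_ARG_NUM_REG - align_words) * 8 = (8 - 5) * 8 = 24

   bytes travel in r8..r10 and the remaining 16 bytes go to the
   parameter save area on the stack.  */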
12101 \f
12102 /* Return true when an argument must be passed by reference.  If so,
12103 a copy of that argument is made in memory and a pointer to the
12104 copy is passed instead of the argument itself.  The pointer is
12105 passed in whatever way is appropriate for passing a pointer to
12106 that type.
12107
12108 Under V.4, aggregates and long double are passed by reference.
12109
12110 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12111 reference unless the AltiVec vector extension ABI is in force.
12112
12113 As an extension to all ABIs, variable sized types are passed by
12114 reference. */
12115
12116 static bool
12117 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12118 machine_mode mode, const_tree type,
12119 bool named ATTRIBUTE_UNUSED)
12120 {
12121 if (!type)
12122 return 0;
12123
12124 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12125 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12126 {
12127 if (TARGET_DEBUG_ARG)
12128 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12129 return 1;
12130 }
12131
12132 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12133 {
12134 if (TARGET_DEBUG_ARG)
12135 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12136 return 1;
12137 }
12138
12139 if (int_size_in_bytes (type) < 0)
12140 {
12141 if (TARGET_DEBUG_ARG)
12142 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12143 return 1;
12144 }
12145
12146 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12147 modes only exist for GCC vector types if -maltivec. */
12148 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12149 {
12150 if (TARGET_DEBUG_ARG)
12151 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12152 return 1;
12153 }
12154
12155 /* Pass synthetic vectors in memory. */
12156 if (TREE_CODE (type) == VECTOR_TYPE
12157 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12158 {
12159 static bool warned_for_pass_big_vectors = false;
12160 if (TARGET_DEBUG_ARG)
12161 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12162 if (!warned_for_pass_big_vectors)
12163 {
12164 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12165 "non-standard ABI extension with no compatibility "
12166 "guarantee");
12167 warned_for_pass_big_vectors = true;
12168 }
12169 return 1;
12170 }
12171
12172 return 0;
12173 }
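/* Illustrative source-level view (editor's note, not compiled here):
   under the V.4 ABI the call below passes a hidden pointer instead of
   the aggregate itself; variable sized types get the same treatment
   on every ABI.

       struct pair { int a, b; };
       void callee (struct pair);
       void caller (struct pair p) { callee (p); }

   The caller materializes a copy of P in memory and passes the copy's
   address in the next available argument register or stack slot.  */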
12174
12175 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12176 already processed.  Return true if the parameter must be passed
12177 (fully or partially) on the stack. */
12178
12179 static bool
12180 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12181 {
12182 machine_mode mode;
12183 int unsignedp;
12184 rtx entry_parm;
12185
12186 /* Catch errors. */
12187 if (type == NULL || type == error_mark_node)
12188 return true;
12189
12190 /* Handle types with no storage requirement. */
12191 if (TYPE_MODE (type) == VOIDmode)
12192 return false;
12193
12194 /* Handle complex types.  The two identical-looking calls below are
   deliberate: each one checks one component (real, then imaginary)
   and advances ARGS_SO_FAR past it as a side effect.  */
12195 if (TREE_CODE (type) == COMPLEX_TYPE)
12196 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12197 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12198
12199 /* Handle transparent aggregates. */
12200 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12201 && TYPE_TRANSPARENT_AGGR (type))
12202 type = TREE_TYPE (first_field (type));
12203
12204 /* See if this arg was passed by invisible reference. */
12205 if (pass_by_reference (get_cumulative_args (args_so_far),
12206 TYPE_MODE (type), type, true))
12207 type = build_pointer_type (type);
12208
12209 /* Find mode as it is passed by the ABI. */
12210 unsignedp = TYPE_UNSIGNED (type);
12211 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12212
12213 /* If we must pass in stack, we need a stack. */
12214 if (rs6000_must_pass_in_stack (mode, type))
12215 return true;
12216
12217 /* If there is no incoming register, we need a stack. */
12218 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12219 if (entry_parm == NULL)
12220 return true;
12221
12222 /* Likewise if we need to pass both in registers and on the stack. */
12223 if (GET_CODE (entry_parm) == PARALLEL
12224 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12225 return true;
12226
12227 /* Also true if we're partially in registers and partially not. */
12228 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12229 return true;
12230
12231 /* Update info on where next arg arrives in registers. */
12232 rs6000_function_arg_advance (args_so_far, mode, type, true);
12233 return false;
12234 }
12235
12236 /* Return true if FUN has no prototype, has a variable argument
12237 list, or passes any parameter in memory. */
12238
12239 static bool
12240 rs6000_function_parms_need_stack (tree fun, bool incoming)
12241 {
12242 tree fntype, result;
12243 CUMULATIVE_ARGS args_so_far_v;
12244 cumulative_args_t args_so_far;
12245
12246 if (!fun)
12247 /* Must be a libcall, all of which only use reg parms. */
12248 return false;
12249
12250 fntype = fun;
12251 if (!TYPE_P (fun))
12252 fntype = TREE_TYPE (fun);
12253
12254 /* Varargs functions need the parameter save area. */
12255 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12256 return true;
12257
12258 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12259 args_so_far = pack_cumulative_args (&args_so_far_v);
12260
12261 /* When incoming, we will have been passed the function decl.
12262 It is necessary to use the decl to handle K&R style functions,
12263 where TYPE_ARG_TYPES may not be available. */
12264 if (incoming)
12265 {
12266 gcc_assert (DECL_P (fun));
12267 result = DECL_RESULT (fun);
12268 }
12269 else
12270 result = TREE_TYPE (fntype);
12271
12272 if (result && aggregate_value_p (result, fntype))
12273 {
12274 if (!TYPE_P (result))
12275 result = TREE_TYPE (result);
12276 result = build_pointer_type (result);
12277 rs6000_parm_needs_stack (args_so_far, result);
12278 }
12279
12280 if (incoming)
12281 {
12282 tree parm;
12283
12284 for (parm = DECL_ARGUMENTS (fun);
12285 parm && parm != void_list_node;
12286 parm = TREE_CHAIN (parm))
12287 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12288 return true;
12289 }
12290 else
12291 {
12292 function_args_iterator args_iter;
12293 tree arg_type;
12294
12295 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12296 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12297 return true;
12298 }
12299
12300 return false;
12301 }
12302
12303 /* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
12304 usually a constant depending on the ABI.  However, in the ELFv2 ABI
12305 the register parameter area is optional when calling a function that
12306 has a prototype in scope, has no variable argument list, and passes
12307 all parameters in registers.  */
12308
12309 int
12310 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12311 {
12312 int reg_parm_stack_space;
12313
12314 switch (DEFAULT_ABI)
12315 {
12316 default:
12317 reg_parm_stack_space = 0;
12318 break;
12319
12320 case ABI_AIX:
12321 case ABI_DARWIN:
12322 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12323 break;
12324
12325 case ABI_ELFv2:
12326 /* ??? Recomputing this every time is a bit expensive. Is there
12327 a place to cache this information? */
12328 if (rs6000_function_parms_need_stack (fun, incoming))
12329 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12330 else
12331 reg_parm_stack_space = 0;
12332 break;
12333 }
12334
12335 return reg_parm_stack_space;
12336 }
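/* Example (editor's sketch): on 64-bit AIX/Darwin the caller always
   reserves 64 bytes (8 parameter GPRs * 8 bytes).  Under ELFv2, for a
   call such as

       int add (int a, int b);   prototyped, not varargs
       ... add (1, 2) ...

   every parameter fits in registers, rs6000_function_parms_need_stack
   returns false, and the 64-byte area is dropped from the caller's
   frame.  */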
12337
12338 static void
12339 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12340 {
12341 int i;
12342 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12343
12344 if (nregs == 0)
12345 return;
12346
12347 for (i = 0; i < nregs; i++)
12348 {
12349 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12350 if (reload_completed)
12351 {
12352 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12353 tem = NULL_RTX;
12354 else
12355 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12356 i * GET_MODE_SIZE (reg_mode));
12357 }
12358 else
12359 tem = replace_equiv_address (tem, XEXP (tem, 0));
12360
12361 gcc_assert (tem);
12362
12363 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12364 }
12365 }
12366 \f
12367 /* Perform any actions needed for a function that is receiving a
12368 variable number of arguments.
12369
12370 CUM is as above.
12371
12372 MODE and TYPE are the mode and type of the current parameter.
12373
12374 PRETEND_SIZE is a variable that should be set to the amount of stack
12375 that must be pushed by the prolog to pretend that our caller pushed
12376 it.
12377
12378 Normally, this macro will push all remaining incoming registers on the
12379 stack and set PRETEND_SIZE to the length of the registers pushed. */
12380
12381 static void
12382 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12383 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12384 int no_rtl)
12385 {
12386 CUMULATIVE_ARGS next_cum;
12387 int reg_size = TARGET_32BIT ? 4 : 8;
12388 rtx save_area = NULL_RTX, mem;
12389 int first_reg_offset;
12390 alias_set_type set;
12391
12392 /* Skip the last named argument. */
12393 next_cum = *get_cumulative_args (cum);
12394 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12395
12396 if (DEFAULT_ABI == ABI_V4)
12397 {
12398 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12399
12400 if (! no_rtl)
12401 {
12402 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12403 HOST_WIDE_INT offset = 0;
12404
12405 /* Try to optimize the size of the varargs save area.
12406 The ABI requires that ap.reg_save_area is doubleword
12407 aligned, but we don't need to allocate space for all
12408 the bytes, only those to which we actually will save
12409 anything. */
12410 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12411 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12412 if (TARGET_HARD_FLOAT
12413 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12414 && cfun->va_list_fpr_size)
12415 {
12416 if (gpr_reg_num)
12417 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12418 * UNITS_PER_FP_WORD;
12419 if (cfun->va_list_fpr_size
12420 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12421 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12422 else
12423 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12424 * UNITS_PER_FP_WORD;
12425 }
12426 if (gpr_reg_num)
12427 {
12428 offset = -((first_reg_offset * reg_size) & ~7);
12429 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12430 {
12431 gpr_reg_num = cfun->va_list_gpr_size;
12432 if (reg_size == 4 && (first_reg_offset & 1))
12433 gpr_reg_num++;
12434 }
12435 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12436 }
12437 else if (fpr_size)
12438 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12439 * UNITS_PER_FP_WORD
12440 - (int) (GP_ARG_NUM_REG * reg_size);
12441
12442 if (gpr_size + fpr_size)
12443 {
12444 rtx reg_save_area
12445 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12446 gcc_assert (GET_CODE (reg_save_area) == MEM);
12447 reg_save_area = XEXP (reg_save_area, 0);
12448 if (GET_CODE (reg_save_area) == PLUS)
12449 {
12450 gcc_assert (XEXP (reg_save_area, 0)
12451 == virtual_stack_vars_rtx);
12452 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12453 offset += INTVAL (XEXP (reg_save_area, 1));
12454 }
12455 else
12456 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12457 }
12458
12459 cfun->machine->varargs_save_offset = offset;
12460 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12461 }
12462 }
12463 else
12464 {
12465 first_reg_offset = next_cum.words;
12466 save_area = crtl->args.internal_arg_pointer;
12467
12468 if (targetm.calls.must_pass_in_stack (mode, type))
12469 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12470 }
12471
12472 set = get_varargs_alias_set ();
12473 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12474 && cfun->va_list_gpr_size)
12475 {
12476 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12477
12478 if (va_list_gpr_counter_field)
12479 /* V4 va_list_gpr_size counts number of registers needed. */
12480 n_gpr = cfun->va_list_gpr_size;
12481 else
12482 /* char * va_list instead counts number of bytes needed. */
12483 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12484
12485 if (nregs > n_gpr)
12486 nregs = n_gpr;
12487
12488 mem = gen_rtx_MEM (BLKmode,
12489 plus_constant (Pmode, save_area,
12490 first_reg_offset * reg_size));
12491 MEM_NOTRAP_P (mem) = 1;
12492 set_mem_alias_set (mem, set);
12493 set_mem_align (mem, BITS_PER_WORD);
12494
12495 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12496 nregs);
12497 }
12498
12499 /* Save FP registers if needed. */
12500 if (DEFAULT_ABI == ABI_V4
12501 && TARGET_HARD_FLOAT
12502 && ! no_rtl
12503 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12504 && cfun->va_list_fpr_size)
12505 {
12506 int fregno = next_cum.fregno, nregs;
12507 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12508 rtx lab = gen_label_rtx ();
12509 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12510 * UNITS_PER_FP_WORD);
12511
12512 emit_jump_insn
12513 (gen_rtx_SET (pc_rtx,
12514 gen_rtx_IF_THEN_ELSE (VOIDmode,
12515 gen_rtx_NE (VOIDmode, cr1,
12516 const0_rtx),
12517 gen_rtx_LABEL_REF (VOIDmode, lab),
12518 pc_rtx)));
12519
12520 for (nregs = 0;
12521 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12522 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12523 {
12524 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12525 plus_constant (Pmode, save_area, off));
12526 MEM_NOTRAP_P (mem) = 1;
12527 set_mem_alias_set (mem, set);
12528 set_mem_align (mem,
12529 GET_MODE_ALIGNMENT (TARGET_HARD_FLOAT ? DFmode : SFmode));
12530 emit_move_insn (mem,
12531 gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12532 }
12533
12534 emit_label (lab);
12535 }
12536 }
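/* Resulting V.4 register save area layout (editor's sketch), matching
   the OFF computation above:

       offset 0                        : r3 .. r10 (reg_size bytes each)
       offset GP_ARG_NUM_REG * reg_size: f1 .. f8  (UNITS_PER_FP_WORD each)

   Only the registers in which unnamed arguments can still arrive are
   actually spilled, and the FPR stores are conditionally skipped at
   run time via the CR1 test emitted above, driven by the caller's
   "FP args in registers" flag.  */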
12537
12538 /* Create the va_list data type. */
12539
12540 static tree
12541 rs6000_build_builtin_va_list (void)
12542 {
12543 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12544
12545 /* For AIX, prefer 'char *' because that's what the system
12546 header files like. */
12547 if (DEFAULT_ABI != ABI_V4)
12548 return build_pointer_type (char_type_node);
12549
12550 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12551 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12552 get_identifier ("__va_list_tag"), record);
12553
12554 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12555 unsigned_char_type_node);
12556 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12557 unsigned_char_type_node);
12558 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12559 every user file. */
12560 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12561 get_identifier ("reserved"), short_unsigned_type_node);
12562 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12563 get_identifier ("overflow_arg_area"),
12564 ptr_type_node);
12565 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12566 get_identifier ("reg_save_area"),
12567 ptr_type_node);
12568
12569 va_list_gpr_counter_field = f_gpr;
12570 va_list_fpr_counter_field = f_fpr;
12571
12572 DECL_FIELD_CONTEXT (f_gpr) = record;
12573 DECL_FIELD_CONTEXT (f_fpr) = record;
12574 DECL_FIELD_CONTEXT (f_res) = record;
12575 DECL_FIELD_CONTEXT (f_ovf) = record;
12576 DECL_FIELD_CONTEXT (f_sav) = record;
12577
12578 TYPE_STUB_DECL (record) = type_decl;
12579 TYPE_NAME (record) = type_decl;
12580 TYPE_FIELDS (record) = f_gpr;
12581 DECL_CHAIN (f_gpr) = f_fpr;
12582 DECL_CHAIN (f_fpr) = f_res;
12583 DECL_CHAIN (f_res) = f_ovf;
12584 DECL_CHAIN (f_ovf) = f_sav;
12585
12586 layout_type (record);
12587
12588 /* The correct type is an array type of one element. */
12589 return build_array_type (record, build_index_type (size_zero_node));
12590 }
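/* For reference (editor's sketch): the record built above corresponds
   to the user-visible SVR4 declaration

       typedef struct __va_list_tag
       {
         unsigned char gpr;            next GPR argument slot used
         unsigned char fpr;            next FPR argument slot used
         unsigned short reserved;      named padding
         void *overflow_arg_area;      arguments that spilled to memory
         void *reg_save_area;          block saved by the prologue
       } va_list[1];

   The one-element array type means va_list decays to a pointer when
   handed to another function, as the ABI requires.  */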
12591
12592 /* Implement va_start. */
12593
12594 static void
12595 rs6000_va_start (tree valist, rtx nextarg)
12596 {
12597 HOST_WIDE_INT words, n_gpr, n_fpr;
12598 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12599 tree gpr, fpr, ovf, sav, t;
12600
12601 /* Only SVR4 needs something special. */
12602 if (DEFAULT_ABI != ABI_V4)
12603 {
12604 std_expand_builtin_va_start (valist, nextarg);
12605 return;
12606 }
12607
12608 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12609 f_fpr = DECL_CHAIN (f_gpr);
12610 f_res = DECL_CHAIN (f_fpr);
12611 f_ovf = DECL_CHAIN (f_res);
12612 f_sav = DECL_CHAIN (f_ovf);
12613
12614 valist = build_simple_mem_ref (valist);
12615 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12616 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12617 f_fpr, NULL_TREE);
12618 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12619 f_ovf, NULL_TREE);
12620 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12621 f_sav, NULL_TREE);
12622
12623 /* Count number of gp and fp argument registers used. */
12624 words = crtl->args.info.words;
12625 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12626 GP_ARG_NUM_REG);
12627 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12628 FP_ARG_NUM_REG);
12629
12630 if (TARGET_DEBUG_ARG)
12631 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12632 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12633 words, n_gpr, n_fpr);
12634
12635 if (cfun->va_list_gpr_size)
12636 {
12637 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12638 build_int_cst (NULL_TREE, n_gpr));
12639 TREE_SIDE_EFFECTS (t) = 1;
12640 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12641 }
12642
12643 if (cfun->va_list_fpr_size)
12644 {
12645 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12646 build_int_cst (NULL_TREE, n_fpr));
12647 TREE_SIDE_EFFECTS (t) = 1;
12648 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12649
12650 #ifdef HAVE_AS_GNU_ATTRIBUTE
12651 if (call_ABI_of_interest (cfun->decl))
12652 rs6000_passes_float = true;
12653 #endif
12654 }
12655
12656 /* Find the overflow area. */
12657 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12658 if (words != 0)
12659 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12660 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12661 TREE_SIDE_EFFECTS (t) = 1;
12662 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12663
12664 /* If there were no va_arg invocations, don't set up the register
12665 save area. */
12666 if (!cfun->va_list_gpr_size
12667 && !cfun->va_list_fpr_size
12668 && n_gpr < GP_ARG_NUM_REG
12669 && n_fpr < FP_ARG_V4_MAX_REG)
12670 return;
12671
12672 /* Find the register save area. */
12673 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12674 if (cfun->machine->varargs_save_offset)
12675 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12676 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12677 TREE_SIDE_EFFECTS (t) = 1;
12678 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12679 }
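/* Net effect under V.4 (editor's sketch): after va_start, the tag
   fields read

       gpr               = GPRs consumed by the named arguments;
       fpr               = FPRs consumed by the named arguments;
       overflow_arg_area = incoming argument pointer
                           + words * MIN_UNITS_PER_WORD;
       reg_save_area     = the frame block located above;

   which is everything the first va_arg needs in order to choose
   between the register save area and the stack overflow area.  */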
12680
12681 /* Implement va_arg. */
12682
12683 static tree
12684 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12685 gimple_seq *post_p)
12686 {
12687 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12688 tree gpr, fpr, ovf, sav, reg, t, u;
12689 int size, rsize, n_reg, sav_ofs, sav_scale;
12690 tree lab_false, lab_over, addr;
12691 int align;
12692 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12693 int regalign = 0;
12694 gimple *stmt;
12695
12696 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12697 {
12698 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12699 return build_va_arg_indirect_ref (t);
12700 }
12701
12702 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12703 earlier version of gcc, with the property that it always applied alignment
12704 adjustments to the va-args (even for zero-sized types). The cheapest way
12705 to deal with this is to replicate the effect of the part of
12706 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12707 of relevance.
12708 We don't need to check for pass-by-reference because of the test above.
12709 We can return a simplified answer, since we know there's no offset to add.  */
12710
12711 if (((TARGET_MACHO
12712 && rs6000_darwin64_abi)
12713 || DEFAULT_ABI == ABI_ELFv2
12714 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12715 && integer_zerop (TYPE_SIZE (type)))
12716 {
12717 unsigned HOST_WIDE_INT align, boundary;
12718 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12719 align = PARM_BOUNDARY / BITS_PER_UNIT;
12720 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12721 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12722 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12723 boundary /= BITS_PER_UNIT;
12724 if (boundary > align)
12725 {
12726 tree t;
12727 /* This updates arg ptr by the amount that would be necessary
12728 to align the zero-sized (but not zero-alignment) item. */
12729 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12730 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12731 gimplify_and_add (t, pre_p);
12732
12733 t = fold_convert (sizetype, valist_tmp);
12734 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12735 fold_convert (TREE_TYPE (valist),
12736 fold_build2 (BIT_AND_EXPR, sizetype, t,
12737 size_int (-boundary))));
12738 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12739 gimplify_and_add (t, pre_p);
12740 }
12741 /* Since it is zero-sized there's no increment for the item itself. */
12742 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12743 return build_va_arg_indirect_ref (valist_tmp);
12744 }
12745
12746 if (DEFAULT_ABI != ABI_V4)
12747 {
12748 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12749 {
12750 tree elem_type = TREE_TYPE (type);
12751 machine_mode elem_mode = TYPE_MODE (elem_type);
12752 int elem_size = GET_MODE_SIZE (elem_mode);
12753
12754 if (elem_size < UNITS_PER_WORD)
12755 {
12756 tree real_part, imag_part;
12757 gimple_seq post = NULL;
12758
12759 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12760 &post);
12761 /* Copy the value into a temporary, lest the formal temporary
12762 be reused out from under us. */
12763 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12764 gimple_seq_add_seq (pre_p, post);
12765
12766 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12767 post_p);
12768
12769 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12770 }
12771 }
12772
12773 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12774 }
12775
12776 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12777 f_fpr = DECL_CHAIN (f_gpr);
12778 f_res = DECL_CHAIN (f_fpr);
12779 f_ovf = DECL_CHAIN (f_res);
12780 f_sav = DECL_CHAIN (f_ovf);
12781
12782 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12783 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12784 f_fpr, NULL_TREE);
12785 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12786 f_ovf, NULL_TREE);
12787 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12788 f_sav, NULL_TREE);
12789
12790 size = int_size_in_bytes (type);
12791 rsize = (size + 3) / 4;
12792 int pad = 4 * rsize - size;
12793 align = 1;
12794
12795 machine_mode mode = TYPE_MODE (type);
12796 if (abi_v4_pass_in_fpr (mode, false))
12797 {
12798 /* FP args go in FP registers, if present. */
12799 reg = fpr;
12800 n_reg = (size + 7) / 8;
12801 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12802 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12803 if (mode != SFmode && mode != SDmode)
12804 align = 8;
12805 }
12806 else
12807 {
12808 /* Otherwise into GP registers. */
12809 reg = gpr;
12810 n_reg = rsize;
12811 sav_ofs = 0;
12812 sav_scale = 4;
12813 if (n_reg == 2)
12814 align = 8;
12815 }
12816
12817 /* Pull the value out of the saved registers.... */
12818
12819 lab_over = NULL;
12820 addr = create_tmp_var (ptr_type_node, "addr");
12821
12822 /* AltiVec vectors never go in registers when -mabi=altivec. */
12823 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12824 align = 16;
12825 else
12826 {
12827 lab_false = create_artificial_label (input_location);
12828 lab_over = create_artificial_label (input_location);
12829
12830 /* Long long is aligned in the registers.  So is any other two-GPR
12831 item, such as complex int, due to a historical mistake.  */
12832 u = reg;
12833 if (n_reg == 2 && reg == gpr)
12834 {
12835 regalign = 1;
12836 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12837 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12838 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12839 unshare_expr (reg), u);
12840 }
12841 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12842 reg number is 0 for f1, so we want to make it odd. */
12843 else if (reg == fpr && mode == TDmode)
12844 {
12845 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12846 build_int_cst (TREE_TYPE (reg), 1));
12847 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12848 }
12849
12850 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12851 t = build2 (GE_EXPR, boolean_type_node, u, t);
12852 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12853 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12854 gimplify_and_add (t, pre_p);
12855
12856 t = sav;
12857 if (sav_ofs)
12858 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12859
12860 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12861 build_int_cst (TREE_TYPE (reg), n_reg));
12862 u = fold_convert (sizetype, u);
12863 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12864 t = fold_build_pointer_plus (t, u);
12865
12866 /* _Decimal32 varargs are located in the second word of the 64-bit
12867 FP register for 32-bit binaries. */
12868 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12869 t = fold_build_pointer_plus_hwi (t, size);
12870
12871 /* Args are passed right-aligned. */
12872 if (BYTES_BIG_ENDIAN)
12873 t = fold_build_pointer_plus_hwi (t, pad);
12874
12875 gimplify_assign (addr, t, pre_p);
12876
12877 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12878
12879 stmt = gimple_build_label (lab_false);
12880 gimple_seq_add_stmt (pre_p, stmt);
12881
12882 if ((n_reg == 2 && !regalign) || n_reg > 2)
12883 {
12884 /* Ensure that we don't find any more args in regs.
12885 Alignment has already taken care of the special cases.  */
12886 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12887 }
12888 }
12889
12890 /* ... otherwise out of the overflow area. */
12891
12892 /* Care for on-stack alignment if needed. */
12893 t = ovf;
12894 if (align != 1)
12895 {
12896 t = fold_build_pointer_plus_hwi (t, align - 1);
12897 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12898 build_int_cst (TREE_TYPE (t), -align));
12899 }
12900
12901 /* Args are passed right-aligned. */
12902 if (BYTES_BIG_ENDIAN)
12903 t = fold_build_pointer_plus_hwi (t, pad);
12904
12905 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12906
12907 gimplify_assign (unshare_expr (addr), t, pre_p);
12908
12909 t = fold_build_pointer_plus_hwi (t, size);
12910 gimplify_assign (unshare_expr (ovf), t, pre_p);
12911
12912 if (lab_over)
12913 {
12914 stmt = gimple_build_label (lab_over);
12915 gimple_seq_add_stmt (pre_p, stmt);
12916 }
12917
12918 if (STRICT_ALIGNMENT
12919 && (TYPE_ALIGN (type)
12920 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12921 {
12922 /* The value (of type complex double, for example) may not be
12923 aligned in memory in the saved registers, so copy via a
12924 temporary. (This is the same code as used for SPARC.) */
12925 tree tmp = create_tmp_var (type, "va_arg_tmp");
12926 tree dest_addr = build_fold_addr_expr (tmp);
12927
12928 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12929 3, dest_addr, addr, size_int (rsize * 4));
12930 TREE_ADDRESSABLE (tmp) = 1;
12931
12932 gimplify_and_add (copy, pre_p);
12933 addr = dest_addr;
12934 }
12935
12936 addr = fold_convert (ptrtype, addr);
12937 return build_va_arg_indirect_ref (addr);
12938 }
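/* For a scalar that fits one GPR (n_reg == 1, sav_ofs == 0,
   sav_scale == 4) the gimple built above amounts to this pseudo-C
   (editor's sketch):

       if (ap->gpr >= 8)
         goto lab_false;                 registers exhausted
       addr = ap->reg_save_area + ap->gpr * 4;
       ap->gpr += 1;
       goto lab_over;
     lab_false:
       addr = ap->overflow_arg_area;     plus alignment, if any
       ap->overflow_arg_area = addr + size;
     lab_over:
       result = *(type *) addr;

   The surrounding code layers on right-alignment for big-endian,
   the _Decimal128 odd-FPR rule, and the strict-alignment copy
   through a temporary.  */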
12939
12940 /* Builtins. */
12941
12942 static void
12943 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12944 {
12945 tree t;
12946 unsigned classify = rs6000_builtin_info[(int)code].attr;
12947 const char *attr_string = "";
12948
12949 gcc_assert (name != NULL);
12950 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
12951
12952 if (rs6000_builtin_decls[(int)code])
12953 fatal_error (input_location,
12954 "internal error: builtin function %qs already processed",
12955 name);
12956
12957 rs6000_builtin_decls[(int)code] = t =
12958 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12959
12960 /* Set any special attributes. */
12961 if ((classify & RS6000_BTC_CONST) != 0)
12962 {
12963 /* const function, function only depends on the inputs. */
12964 TREE_READONLY (t) = 1;
12965 TREE_NOTHROW (t) = 1;
12966 attr_string = ", const";
12967 }
12968 else if ((classify & RS6000_BTC_PURE) != 0)
12969 {
12970 /* pure function, function can read global memory, but does not set any
12971 external state. */
12972 DECL_PURE_P (t) = 1;
12973 TREE_NOTHROW (t) = 1;
12974 attr_string = ", pure";
12975 }
12976 else if ((classify & RS6000_BTC_FP) != 0)
12977 {
12978 /* Function is a math function. If rounding mode is on, then treat the
12979 function as not reading global memory, but it can have arbitrary side
12980 effects. If it is off, then assume the function is a const function.
12981 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12982 builtin-attribute.def that is used for the math functions. */
12983 TREE_NOTHROW (t) = 1;
12984 if (flag_rounding_math)
12985 {
12986 DECL_PURE_P (t) = 1;
12987 DECL_IS_NOVOPS (t) = 1;
12988 attr_string = ", fp, pure";
12989 }
12990 else
12991 {
12992 TREE_READONLY (t) = 1;
12993 attr_string = ", fp, const";
12994 }
12995 }
12996 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12997 gcc_unreachable ();
12998
12999 if (TARGET_DEBUG_BUILTIN)
13000 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13001 (int)code, name, attr_string);
13002 }
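/* Hypothetical usage (editor's illustration; the actual calls are made
   in loops over the bdesc_* tables defined below, with FTYPE standing
   for a previously built function type node):

       def_builtin ("__builtin_altivec_vspltisb", ftype,
                    ALTIVEC_BUILTIN_VSPLTISB);

   This registers the function with the middle end, stores the decl in
   rs6000_builtin_decls, and marks it const, pure or fp according to
   the attr field of rs6000_builtin_info.  */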
13003
13004 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13005
13006 #undef RS6000_BUILTIN_0
13007 #undef RS6000_BUILTIN_1
13008 #undef RS6000_BUILTIN_2
13009 #undef RS6000_BUILTIN_3
13010 #undef RS6000_BUILTIN_A
13011 #undef RS6000_BUILTIN_D
13012 #undef RS6000_BUILTIN_H
13013 #undef RS6000_BUILTIN_P
13014 #undef RS6000_BUILTIN_X
13015
13016 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13017 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13018 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13019 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13020 { MASK, ICODE, NAME, ENUM },
13021
13022 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13023 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13024 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13025 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13026 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13027
13028 static const struct builtin_description bdesc_3arg[] =
13029 {
13030 #include "rs6000-builtin.def"
13031 };
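/* Editor's note: the table above uses the X-macro idiom.
   rs6000-builtin.def lists every builtin exactly once, e.g.

       RS6000_BUILTIN_3 (ENUM, NAME, MASK, ATTR, ICODE)

   and each bdesc_* array in this file redefines just one of the
   RS6000_BUILTIN_* macros to emit `{ MASK, ICODE, NAME, ENUM },'
   while the rest expand to nothing, so a single .def file populates
   several disjoint tables.  */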
13032
13033 /* DST operations: void foo (void *, const int, const char). */
13034
13035 #undef RS6000_BUILTIN_0
13036 #undef RS6000_BUILTIN_1
13037 #undef RS6000_BUILTIN_2
13038 #undef RS6000_BUILTIN_3
13039 #undef RS6000_BUILTIN_A
13040 #undef RS6000_BUILTIN_D
13041 #undef RS6000_BUILTIN_H
13042 #undef RS6000_BUILTIN_P
13043 #undef RS6000_BUILTIN_X
13044
13045 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13046 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13047 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13048 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13049 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13050 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13051 { MASK, ICODE, NAME, ENUM },
13052
13053 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13054 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13055 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13056
13057 static const struct builtin_description bdesc_dst[] =
13058 {
13059 #include "rs6000-builtin.def"
13060 };
13061
13062 /* Simple binary operations: VECc = foo (VECa, VECb). */
13063
13064 #undef RS6000_BUILTIN_0
13065 #undef RS6000_BUILTIN_1
13066 #undef RS6000_BUILTIN_2
13067 #undef RS6000_BUILTIN_3
13068 #undef RS6000_BUILTIN_A
13069 #undef RS6000_BUILTIN_D
13070 #undef RS6000_BUILTIN_H
13071 #undef RS6000_BUILTIN_P
13072 #undef RS6000_BUILTIN_X
13073
13074 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13075 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13076 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13077 { MASK, ICODE, NAME, ENUM },
13078
13079 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13080 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13081 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13082 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13083 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13084 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13085
13086 static const struct builtin_description bdesc_2arg[] =
13087 {
13088 #include "rs6000-builtin.def"
13089 };
13090
13091 #undef RS6000_BUILTIN_0
13092 #undef RS6000_BUILTIN_1
13093 #undef RS6000_BUILTIN_2
13094 #undef RS6000_BUILTIN_3
13095 #undef RS6000_BUILTIN_A
13096 #undef RS6000_BUILTIN_D
13097 #undef RS6000_BUILTIN_H
13098 #undef RS6000_BUILTIN_P
13099 #undef RS6000_BUILTIN_X
13100
13101 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13102 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13103 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13104 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13107 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13108 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13109 { MASK, ICODE, NAME, ENUM },
13110
13111 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13112
13113 /* AltiVec predicates. */
13114
13115 static const struct builtin_description bdesc_altivec_preds[] =
13116 {
13117 #include "rs6000-builtin.def"
13118 };
13119
13120 /* ABS* operations. */
13121
13122 #undef RS6000_BUILTIN_0
13123 #undef RS6000_BUILTIN_1
13124 #undef RS6000_BUILTIN_2
13125 #undef RS6000_BUILTIN_3
13126 #undef RS6000_BUILTIN_A
13127 #undef RS6000_BUILTIN_D
13128 #undef RS6000_BUILTIN_H
13129 #undef RS6000_BUILTIN_P
13130 #undef RS6000_BUILTIN_X
13131
13132 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13133 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13136 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13137 { MASK, ICODE, NAME, ENUM },
13138
13139 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13140 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13141 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13142 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13143
13144 static const struct builtin_description bdesc_abs[] =
13145 {
13146 #include "rs6000-builtin.def"
13147 };
13148
13149 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13150 foo (VECa). */
13151
13152 #undef RS6000_BUILTIN_0
13153 #undef RS6000_BUILTIN_1
13154 #undef RS6000_BUILTIN_2
13155 #undef RS6000_BUILTIN_3
13156 #undef RS6000_BUILTIN_A
13157 #undef RS6000_BUILTIN_D
13158 #undef RS6000_BUILTIN_H
13159 #undef RS6000_BUILTIN_P
13160 #undef RS6000_BUILTIN_X
13161
13162 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13163 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13164 { MASK, ICODE, NAME, ENUM },
13165
13166 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13167 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13170 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13171 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13172 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13173
13174 static const struct builtin_description bdesc_1arg[] =
13175 {
13176 #include "rs6000-builtin.def"
13177 };
13178
13179 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
13180
13181 #undef RS6000_BUILTIN_0
13182 #undef RS6000_BUILTIN_1
13183 #undef RS6000_BUILTIN_2
13184 #undef RS6000_BUILTIN_3
13185 #undef RS6000_BUILTIN_A
13186 #undef RS6000_BUILTIN_D
13187 #undef RS6000_BUILTIN_H
13188 #undef RS6000_BUILTIN_P
13189 #undef RS6000_BUILTIN_X
13190
13191 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13192 { MASK, ICODE, NAME, ENUM },
13193
13194 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13195 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13196 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13197 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13198 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13199 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13200 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13201 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13202
13203 static const struct builtin_description bdesc_0arg[] =
13204 {
13205 #include "rs6000-builtin.def"
13206 };
13207
13208 /* HTM builtins. */
13209 #undef RS6000_BUILTIN_0
13210 #undef RS6000_BUILTIN_1
13211 #undef RS6000_BUILTIN_2
13212 #undef RS6000_BUILTIN_3
13213 #undef RS6000_BUILTIN_A
13214 #undef RS6000_BUILTIN_D
13215 #undef RS6000_BUILTIN_H
13216 #undef RS6000_BUILTIN_P
13217 #undef RS6000_BUILTIN_X
13218
13219 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13220 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13221 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13222 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13223 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13224 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13225 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13226 { MASK, ICODE, NAME, ENUM },
13227
13228 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13229 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13230
13231 static const struct builtin_description bdesc_htm[] =
13232 {
13233 #include "rs6000-builtin.def"
13234 };
13235
13236 #undef RS6000_BUILTIN_0
13237 #undef RS6000_BUILTIN_1
13238 #undef RS6000_BUILTIN_2
13239 #undef RS6000_BUILTIN_3
13240 #undef RS6000_BUILTIN_A
13241 #undef RS6000_BUILTIN_D
13242 #undef RS6000_BUILTIN_H
13243 #undef RS6000_BUILTIN_P
13244
13245 /* Return true if a builtin function is overloaded. */
13246 bool
13247 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13248 {
13249 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13250 }
13251
13252 const char *
13253 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13254 {
13255 return rs6000_builtin_info[(int)fncode].name;
13256 }
13257
13258 /* Expand a call to a builtin taking no arguments; the result goes in TARGET.  */
13259 static rtx
13260 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13261 {
13262 rtx pat;
13263 machine_mode tmode = insn_data[icode].operand[0].mode;
13264
13265 if (icode == CODE_FOR_nothing)
13266 /* Builtin not supported on this processor. */
13267 return 0;
13268
13269 if (icode == CODE_FOR_rs6000_mffsl
13270 && rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13271 {
13272 error ("__builtin_mffsl() not supported with -msoft-float");
13273 return const0_rtx;
13274 }
13275
13276 if (target == 0
13277 || GET_MODE (target) != tmode
13278 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13279 target = gen_reg_rtx (tmode);
13280
13281 pat = GEN_FCN (icode) (target);
13282 if (! pat)
13283 return 0;
13284 emit_insn (pat);
13285
13286 return target;
13287 }
13288
13289
13290 static rtx
13291 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13292 {
13293 rtx pat;
13294 tree arg0 = CALL_EXPR_ARG (exp, 0);
13295 tree arg1 = CALL_EXPR_ARG (exp, 1);
13296 rtx op0 = expand_normal (arg0);
13297 rtx op1 = expand_normal (arg1);
13298 machine_mode mode0 = insn_data[icode].operand[0].mode;
13299 machine_mode mode1 = insn_data[icode].operand[1].mode;
13300
13301 if (icode == CODE_FOR_nothing)
13302 /* Builtin not supported on this processor. */
13303 return 0;
13304
13305 /* If we got invalid arguments bail out before generating bad rtl. */
13306 if (arg0 == error_mark_node || arg1 == error_mark_node)
13307 return const0_rtx;
13308
13309 if (GET_CODE (op0) != CONST_INT
13310 || INTVAL (op0) > 255
13311 || INTVAL (op0) < 0)
13312 {
13313 error ("argument 1 must be an 8-bit field value");
13314 return const0_rtx;
13315 }
13316
13317 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13318 op0 = copy_to_mode_reg (mode0, op0);
13319
13320 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13321 op1 = copy_to_mode_reg (mode1, op1);
13322
13323 pat = GEN_FCN (icode) (op0, op1);
13324 if (!pat)
13325 return const0_rtx;
13326 emit_insn (pat);
13327
13328 return NULL_RTX;
13329 }
13330
13331 static rtx
13332 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13333 {
13334 rtx pat;
13335 tree arg0 = CALL_EXPR_ARG (exp, 0);
13336 rtx op0 = expand_normal (arg0);
13337
13338 if (icode == CODE_FOR_nothing)
13339 /* Builtin not supported on this processor. */
13340 return 0;
13341
13342 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13343 {
13344 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13345 return const0_rtx;
13346 }
13347
13348 /* If we got invalid arguments bail out before generating bad rtl. */
13349 if (arg0 == error_mark_node)
13350 return const0_rtx;
13351
13352 /* Only allow bit numbers 0 to 31. */
13353 if (!u5bit_cint_operand (op0, VOIDmode))
13354 {
13355 error ("Argument must be a constant between 0 and 31.");
13356 return const0_rtx;
13357 }
13358
13359 pat = GEN_FCN (icode) (op0);
13360 if (!pat)
13361 return const0_rtx;
13362 emit_insn (pat);
13363
13364 return NULL_RTX;
13365 }
13366
13367 static rtx
13368 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13369 {
13370 rtx pat;
13371 tree arg0 = CALL_EXPR_ARG (exp, 0);
13372 rtx op0 = expand_normal (arg0);
13373 machine_mode mode0 = insn_data[icode].operand[0].mode;
13374
13375 if (icode == CODE_FOR_nothing)
13376 /* Builtin not supported on this processor. */
13377 return 0;
13378
13379 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13380 {
13381 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13382 return const0_rtx;
13383 }
13384
13385 /* If we got invalid arguments bail out before generating bad rtl. */
13386 if (arg0 == error_mark_node)
13387 return const0_rtx;
13388
13389 /* If the argument is a constant, check the range.  The argument can
13390 only be a 2-bit value.  Unfortunately, we can't check the range of
13391 the value at compile time if the argument is a variable.  The least
13392 significant two bits of the argument, regardless of type, are used
13393 to set the rounding mode.  All other bits are ignored.  */
13394 if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand (op0, VOIDmode))
13395 {
13396 error ("argument must be a value between 0 and 3");
13397 return const0_rtx;
13398 }
13399
13400 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13401 op0 = copy_to_mode_reg (mode0, op0);
13402
13403 pat = GEN_FCN (icode) (op0);
13404 if (!pat)
13405 return const0_rtx;
13406 emit_insn (pat);
13407
13408 return NULL_RTX;
13409 }

13410 static rtx
13411 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13412 {
13413 rtx pat;
13414 tree arg0 = CALL_EXPR_ARG (exp, 0);
13415 rtx op0 = expand_normal (arg0);
13416 machine_mode mode0 = insn_data[icode].operand[0].mode;
13417
13418 if (TARGET_32BIT)
13419 /* Builtin not supported in 32-bit mode. */
13420 fatal_error (input_location,
13421 "__builtin_set_fpscr_drn is not supported in 32-bit mode.");
13422
13423 if (rs6000_isa_flags_explicit & OPTION_MASK_SOFT_FLOAT)
13424 {
13425 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13426 return const0_rtx;
13427 }
13428
13429 if (icode == CODE_FOR_nothing)
13430 /* Builtin not supported on this processor. */
13431 return 0;
13432
13433 /* If we got invalid arguments bail out before generating bad rtl. */
13434 if (arg0 == error_mark_node)
13435 return const0_rtx;
13436
13437 /* If the argument is a constant, check the range.  The argument can
13438 only be a 3-bit value.  Unfortunately, we can't check the range of
13439 the value at compile time if the argument is a variable.  The least
13440 significant three bits of the argument, regardless of type, are used
13441 to set the decimal rounding mode.  All other bits are ignored.  */
13442 if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand (op0, VOIDmode))
13443 {
13444 error ("argument must be a value between 0 and 7");
13445 return const0_rtx;
13446 }
13447
13448 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13449 op0 = copy_to_mode_reg (mode0, op0);
13450
13451 pat = GEN_FCN (icode) (op0);
13452 if (! pat)
13453 return const0_rtx;
13454 emit_insn (pat);
13455
13456 return NULL_RTX;
13457 }
13458
13459 static rtx
13460 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13461 {
13462 rtx pat;
13463 tree arg0 = CALL_EXPR_ARG (exp, 0);
13464 rtx op0 = expand_normal (arg0);
13465 machine_mode tmode = insn_data[icode].operand[0].mode;
13466 machine_mode mode0 = insn_data[icode].operand[1].mode;
13467
13468 if (icode == CODE_FOR_nothing)
13469 /* Builtin not supported on this processor. */
13470 return 0;
13471
13472 /* If we got invalid arguments bail out before generating bad rtl. */
13473 if (arg0 == error_mark_node)
13474 return const0_rtx;
13475
13476 if (icode == CODE_FOR_altivec_vspltisb
13477 || icode == CODE_FOR_altivec_vspltish
13478 || icode == CODE_FOR_altivec_vspltisw)
13479 {
13480 /* Only allow 5-bit *signed* literals. */
13481 if (GET_CODE (op0) != CONST_INT
13482 || INTVAL (op0) > 15
13483 || INTVAL (op0) < -16)
13484 {
13485 error ("argument 1 must be a 5-bit signed literal");
13486 return CONST0_RTX (tmode);
13487 }
13488 }
13489
13490 if (target == 0
13491 || GET_MODE (target) != tmode
13492 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13493 target = gen_reg_rtx (tmode);
13494
13495 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13496 op0 = copy_to_mode_reg (mode0, op0);
13497
13498 pat = GEN_FCN (icode) (target, op0);
13499 if (! pat)
13500 return 0;
13501 emit_insn (pat);
13502
13503 return target;
13504 }
13505
13506 static rtx
13507 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13508 {
13509 rtx pat, scratch1, scratch2;
13510 tree arg0 = CALL_EXPR_ARG (exp, 0);
13511 rtx op0 = expand_normal (arg0);
13512 machine_mode tmode = insn_data[icode].operand[0].mode;
13513 machine_mode mode0 = insn_data[icode].operand[1].mode;
13514
13515 /* If we have invalid arguments, bail out before generating bad rtl. */
13516 if (arg0 == error_mark_node)
13517 return const0_rtx;
13518
13519 if (target == 0
13520 || GET_MODE (target) != tmode
13521 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13522 target = gen_reg_rtx (tmode);
13523
13524 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13525 op0 = copy_to_mode_reg (mode0, op0);
13526
13527 scratch1 = gen_reg_rtx (mode0);
13528 scratch2 = gen_reg_rtx (mode0);
13529
13530 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13531 if (! pat)
13532 return 0;
13533 emit_insn (pat);
13534
13535 return target;
13536 }
13537
13538 static rtx
13539 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13540 {
13541 rtx pat;
13542 tree arg0 = CALL_EXPR_ARG (exp, 0);
13543 tree arg1 = CALL_EXPR_ARG (exp, 1);
13544 rtx op0 = expand_normal (arg0);
13545 rtx op1 = expand_normal (arg1);
13546 machine_mode tmode = insn_data[icode].operand[0].mode;
13547 machine_mode mode0 = insn_data[icode].operand[1].mode;
13548 machine_mode mode1 = insn_data[icode].operand[2].mode;
13549
13550 if (icode == CODE_FOR_nothing)
13551 /* Builtin not supported on this processor. */
13552 return 0;
13553
13554 /* If we got invalid arguments bail out before generating bad rtl. */
13555 if (arg0 == error_mark_node || arg1 == error_mark_node)
13556 return const0_rtx;
13557
13558 if (icode == CODE_FOR_unpackv1ti
13559 || icode == CODE_FOR_unpackkf
13560 || icode == CODE_FOR_unpacktf
13561 || icode == CODE_FOR_unpackif
13562 || icode == CODE_FOR_unpacktd)
13563 {
13564 /* Only allow 1-bit unsigned literals. */
13565 STRIP_NOPS (arg1);
13566 if (TREE_CODE (arg1) != INTEGER_CST
13567 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13568 {
13569 error ("argument 2 must be a 1-bit unsigned literal");
13570 return CONST0_RTX (tmode);
13571 }
13572 }
13573 else if (icode == CODE_FOR_altivec_vspltw)
13574 {
13575 /* Only allow 2-bit unsigned literals. */
13576 STRIP_NOPS (arg1);
13577 if (TREE_CODE (arg1) != INTEGER_CST
13578 || TREE_INT_CST_LOW (arg1) & ~3)
13579 {
13580 error ("argument 2 must be a 2-bit unsigned literal");
13581 return CONST0_RTX (tmode);
13582 }
13583 }
13584 else if (icode == CODE_FOR_altivec_vsplth)
13585 {
13586 /* Only allow 3-bit unsigned literals. */
13587 STRIP_NOPS (arg1);
13588 if (TREE_CODE (arg1) != INTEGER_CST
13589 || TREE_INT_CST_LOW (arg1) & ~7)
13590 {
13591 error ("argument 2 must be a 3-bit unsigned literal");
13592 return CONST0_RTX (tmode);
13593 }
13594 }
13595 else if (icode == CODE_FOR_altivec_vspltb)
13596 {
13597 /* Only allow 4-bit unsigned literals. */
13598 STRIP_NOPS (arg1);
13599 if (TREE_CODE (arg1) != INTEGER_CST
13600 || TREE_INT_CST_LOW (arg1) & ~15)
13601 {
13602 error ("argument 2 must be a 4-bit unsigned literal");
13603 return CONST0_RTX (tmode);
13604 }
13605 }
13606 else if (icode == CODE_FOR_altivec_vcfux
13607 || icode == CODE_FOR_altivec_vcfsx
13608 || icode == CODE_FOR_altivec_vctsxs
13609 || icode == CODE_FOR_altivec_vctuxs)
13610 {
13611 /* Only allow 5-bit unsigned literals. */
13612 STRIP_NOPS (arg1);
13613 if (TREE_CODE (arg1) != INTEGER_CST
13614 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13615 {
13616 error ("argument 2 must be a 5-bit unsigned literal");
13617 return CONST0_RTX (tmode);
13618 }
13619 }
13620 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13621 || icode == CODE_FOR_dfptstsfi_lt_dd
13622 || icode == CODE_FOR_dfptstsfi_gt_dd
13623 || icode == CODE_FOR_dfptstsfi_unordered_dd
13624 || icode == CODE_FOR_dfptstsfi_eq_td
13625 || icode == CODE_FOR_dfptstsfi_lt_td
13626 || icode == CODE_FOR_dfptstsfi_gt_td
13627 || icode == CODE_FOR_dfptstsfi_unordered_td)
13628 {
13629 /* Only allow 6-bit unsigned literals. */
13630 STRIP_NOPS (arg0);
13631 if (TREE_CODE (arg0) != INTEGER_CST
13632 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13633 {
13634 error ("argument 1 must be a 6-bit unsigned literal");
13635 return CONST0_RTX (tmode);
13636 }
13637 }
13638 else if (icode == CODE_FOR_xststdcqp_kf
13639 || icode == CODE_FOR_xststdcqp_tf
13640 || icode == CODE_FOR_xststdcdp
13641 || icode == CODE_FOR_xststdcsp
13642 || icode == CODE_FOR_xvtstdcdp
13643 || icode == CODE_FOR_xvtstdcsp)
13644 {
13645 /* Only allow 7-bit unsigned literals. */
13646 STRIP_NOPS (arg1);
13647 if (TREE_CODE (arg1) != INTEGER_CST
13648 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13649 {
13650 error ("argument 2 must be a 7-bit unsigned literal");
13651 return CONST0_RTX (tmode);
13652 }
13653 }
13654
13655 if (target == 0
13656 || GET_MODE (target) != tmode
13657 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13658 target = gen_reg_rtx (tmode);
13659
13660 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13661 op0 = copy_to_mode_reg (mode0, op0);
13662 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13663 op1 = copy_to_mode_reg (mode1, op1);
13664
13665 pat = GEN_FCN (icode) (target, op0, op1);
13666 if (! pat)
13667 return 0;
13668 emit_insn (pat);
13669
13670 return target;
13671 }
13672
13673 static rtx
13674 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13675 {
13676 rtx pat, scratch;
13677 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13678 tree arg0 = CALL_EXPR_ARG (exp, 1);
13679 tree arg1 = CALL_EXPR_ARG (exp, 2);
13680 rtx op0 = expand_normal (arg0);
13681 rtx op1 = expand_normal (arg1);
13682 machine_mode tmode = SImode;
13683 machine_mode mode0 = insn_data[icode].operand[1].mode;
13684 machine_mode mode1 = insn_data[icode].operand[2].mode;
13685 int cr6_form_int;
13686
13687 if (TREE_CODE (cr6_form) != INTEGER_CST)
13688 {
13689 error ("argument 1 of %qs must be a constant",
13690 "__builtin_altivec_predicate");
13691 return const0_rtx;
13692 }
13693 else
13694 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13695
13696 gcc_assert (mode0 == mode1);
13697
13698 /* If we have invalid arguments, bail out before generating bad rtl. */
13699 if (arg0 == error_mark_node || arg1 == error_mark_node)
13700 return const0_rtx;
13701
13702 if (target == 0
13703 || GET_MODE (target) != tmode
13704 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13705 target = gen_reg_rtx (tmode);
13706
13707 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13708 op0 = copy_to_mode_reg (mode0, op0);
13709 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13710 op1 = copy_to_mode_reg (mode1, op1);
13711
13712 /* Note that for many of the relevant operations (e.g. cmpne or
13713 cmpeq) with float or double operands, it makes more sense for the
13714 mode of the allocated scratch register to select a vector of
13715 integer. But the choice to copy the mode of operand 0 was made
13716 long ago and there are no plans to change it. */
13717 scratch = gen_reg_rtx (mode0);
13718
13719 pat = GEN_FCN (icode) (scratch, op0, op1);
13720 if (! pat)
13721 return 0;
13722 emit_insn (pat);
13723
13724 /* The vec_any* and vec_all* predicates use the same opcodes for two
13725 different operations, but the bits in CR6 will be different
13726 depending on what information we want. So we have to play tricks
13727 with CR6 to get the right bits out.
13728
13729 If you think this is disgusting, look at the specs for the
13730 AltiVec predicates. */
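
/* Illustrative mapping (a sketch based on the __CR6_* macros in
altivec.h, not an exhaustive list): vec_all_eq (a, b) reaches this
code with cr6_form == __CR6_LT (2), testing CR6's "all elements
compared true" bit, while vec_any_eq (a, b) uses __CR6_EQ_REV (1),
testing the complement of the "no elements compared true" bit. */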
13731
13732 switch (cr6_form_int)
13733 {
13734 case 0:
13735 emit_insn (gen_cr6_test_for_zero (target));
13736 break;
13737 case 1:
13738 emit_insn (gen_cr6_test_for_zero_reverse (target));
13739 break;
13740 case 2:
13741 emit_insn (gen_cr6_test_for_lt (target));
13742 break;
13743 case 3:
13744 emit_insn (gen_cr6_test_for_lt_reverse (target));
13745 break;
13746 default:
13747 error ("argument 1 of %qs is out of range",
13748 "__builtin_altivec_predicate");
13749 break;
13750 }
13751
13752 return target;
13753 }
13754
13755 rtx
13756 swap_endian_selector_for_mode (machine_mode mode)
13757 {
13758 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13759 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13760 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13761 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
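
/* For example, applying swap4 as a vperm selector to a V4SI value
reverses the four bytes within each 32-bit element, converting each
element between big- and little-endian byte order; the other arrays
do the same for 128-, 64- and 16-bit elements. */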
13762
13763 unsigned int *swaparray, i;
13764 rtx perm[16];
13765
13766 switch (mode)
13767 {
13768 case E_V1TImode:
13769 swaparray = swap1;
13770 break;
13771 case E_V2DFmode:
13772 case E_V2DImode:
13773 swaparray = swap2;
13774 break;
13775 case E_V4SFmode:
13776 case E_V4SImode:
13777 swaparray = swap4;
13778 break;
13779 case E_V8HImode:
13780 swaparray = swap8;
13781 break;
13782 default:
13783 gcc_unreachable ();
13784 }
13785
13786 for (i = 0; i < 16; ++i)
13787 perm[i] = GEN_INT (swaparray[i]);
13788
13789 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13790 gen_rtvec_v (16, perm)));
13791 }
13792
13793 static rtx
13794 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13795 {
13796 rtx pat, addr;
13797 tree arg0 = CALL_EXPR_ARG (exp, 0);
13798 tree arg1 = CALL_EXPR_ARG (exp, 1);
13799 machine_mode tmode = insn_data[icode].operand[0].mode;
13800 machine_mode mode0 = Pmode;
13801 machine_mode mode1 = Pmode;
13802 rtx op0 = expand_normal (arg0);
13803 rtx op1 = expand_normal (arg1);
13804
13805 if (icode == CODE_FOR_nothing)
13806 /* Builtin not supported on this processor. */
13807 return 0;
13808
13809 /* If we got invalid arguments bail out before generating bad rtl. */
13810 if (arg0 == error_mark_node || arg1 == error_mark_node)
13811 return const0_rtx;
13812
13813 if (target == 0
13814 || GET_MODE (target) != tmode
13815 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13816 target = gen_reg_rtx (tmode);
13817
13818 op1 = copy_to_mode_reg (mode1, op1);
13819
13820 /* For LVX, express the RTL accurately by ANDing the address with -16.
13821 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13822 so the raw address is fine. */
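
/* A sketch of the RTL generated below on a 64-bit target (modes
abbreviated): (set (reg:V4SI target)
(mem:V4SI (and:DI (plus:DI op1 op0) (const_int -16)))),
which makes the 16-byte address truncation performed by lvx explicit
to the RTL optimizers. */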
13823 if (icode == CODE_FOR_altivec_lvx_v1ti
13824 || icode == CODE_FOR_altivec_lvx_v2df
13825 || icode == CODE_FOR_altivec_lvx_v2di
13826 || icode == CODE_FOR_altivec_lvx_v4sf
13827 || icode == CODE_FOR_altivec_lvx_v4si
13828 || icode == CODE_FOR_altivec_lvx_v8hi
13829 || icode == CODE_FOR_altivec_lvx_v16qi)
13830 {
13831 rtx rawaddr;
13832 if (op0 == const0_rtx)
13833 rawaddr = op1;
13834 else
13835 {
13836 op0 = copy_to_mode_reg (mode0, op0);
13837 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13838 }
13839 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13840 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13841
13842 emit_insn (gen_rtx_SET (target, addr));
13843 }
13844 else
13845 {
13846 if (op0 == const0_rtx)
13847 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13848 else
13849 {
13850 op0 = copy_to_mode_reg (mode0, op0);
13851 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13852 gen_rtx_PLUS (Pmode, op1, op0));
13853 }
13854
13855 pat = GEN_FCN (icode) (target, addr);
13856 if (! pat)
13857 return 0;
13858 emit_insn (pat);
13859 }
13860
13861 return target;
13862 }
13863
13864 static rtx
13865 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13866 {
13867 rtx pat;
13868 tree arg0 = CALL_EXPR_ARG (exp, 0);
13869 tree arg1 = CALL_EXPR_ARG (exp, 1);
13870 tree arg2 = CALL_EXPR_ARG (exp, 2);
13871 rtx op0 = expand_normal (arg0);
13872 rtx op1 = expand_normal (arg1);
13873 rtx op2 = expand_normal (arg2);
13874 machine_mode mode0 = insn_data[icode].operand[0].mode;
13875 machine_mode mode1 = insn_data[icode].operand[1].mode;
13876 machine_mode mode2 = insn_data[icode].operand[2].mode;
13877
13878 if (icode == CODE_FOR_nothing)
13879 /* Builtin not supported on this processor. */
13880 return NULL_RTX;
13881
13882 /* If we got invalid arguments bail out before generating bad rtl. */
13883 if (arg0 == error_mark_node
13884 || arg1 == error_mark_node
13885 || arg2 == error_mark_node)
13886 return NULL_RTX;
13887
13888 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13889 op0 = copy_to_mode_reg (mode0, op0);
13890 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13891 op1 = copy_to_mode_reg (mode1, op1);
13892 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13893 op2 = copy_to_mode_reg (mode2, op2);
13894
13895 pat = GEN_FCN (icode) (op0, op1, op2);
13896 if (pat)
13897 emit_insn (pat);
13898
13899 return NULL_RTX;
13900 }
13901
13902 static rtx
13903 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13904 {
13905 tree arg0 = CALL_EXPR_ARG (exp, 0);
13906 tree arg1 = CALL_EXPR_ARG (exp, 1);
13907 tree arg2 = CALL_EXPR_ARG (exp, 2);
13908 rtx op0 = expand_normal (arg0);
13909 rtx op1 = expand_normal (arg1);
13910 rtx op2 = expand_normal (arg2);
13911 rtx pat, addr, rawaddr;
13912 machine_mode tmode = insn_data[icode].operand[0].mode;
13913 machine_mode smode = insn_data[icode].operand[1].mode;
13914 machine_mode mode1 = Pmode;
13915 machine_mode mode2 = Pmode;
13916
13917 /* If we got invalid arguments, bail out before generating bad rtl. */
13918 if (arg0 == error_mark_node
13919 || arg1 == error_mark_node
13920 || arg2 == error_mark_node)
13921 return const0_rtx;
13922
13923 op2 = copy_to_mode_reg (mode2, op2);
13924
13925 /* For STVX, express the RTL accurately by ANDing the address with -16.
13926 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13927 so the raw address is fine. */
13928 if (icode == CODE_FOR_altivec_stvx_v2df
13929 || icode == CODE_FOR_altivec_stvx_v2di
13930 || icode == CODE_FOR_altivec_stvx_v4sf
13931 || icode == CODE_FOR_altivec_stvx_v4si
13932 || icode == CODE_FOR_altivec_stvx_v8hi
13933 || icode == CODE_FOR_altivec_stvx_v16qi)
13934 {
13935 if (op1 == const0_rtx)
13936 rawaddr = op2;
13937 else
13938 {
13939 op1 = copy_to_mode_reg (mode1, op1);
13940 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13941 }
13942
13943 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13944 addr = gen_rtx_MEM (tmode, addr);
13945
13946 op0 = copy_to_mode_reg (tmode, op0);
13947
13948 emit_insn (gen_rtx_SET (addr, op0));
13949 }
13950 else
13951 {
13952 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13953 op0 = copy_to_mode_reg (smode, op0);
13954
13955 if (op1 == const0_rtx)
13956 addr = gen_rtx_MEM (tmode, op2);
13957 else
13958 {
13959 op1 = copy_to_mode_reg (mode1, op1);
13960 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13961 }
13962
13963 pat = GEN_FCN (icode) (addr, op0);
13964 if (pat)
13965 emit_insn (pat);
13966 }
13967
13968 return NULL_RTX;
13969 }
13970
13971 /* Return the appropriate SPR number associated with the given builtin. */
13972 static inline HOST_WIDE_INT
13973 htm_spr_num (enum rs6000_builtins code)
13974 {
13975 if (code == HTM_BUILTIN_GET_TFHAR
13976 || code == HTM_BUILTIN_SET_TFHAR)
13977 return TFHAR_SPR;
13978 else if (code == HTM_BUILTIN_GET_TFIAR
13979 || code == HTM_BUILTIN_SET_TFIAR)
13980 return TFIAR_SPR;
13981 else if (code == HTM_BUILTIN_GET_TEXASR
13982 || code == HTM_BUILTIN_SET_TEXASR)
13983 return TEXASR_SPR;
13984 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13985 || code == HTM_BUILTIN_SET_TEXASRU);
13986 return TEXASRU_SPR;
13987 }
13988
13989 /* Return the appropriate SPR regno associated with the given builtin. */
13990 static inline HOST_WIDE_INT
13991 htm_spr_regno (enum rs6000_builtins code)
13992 {
13993 if (code == HTM_BUILTIN_GET_TFHAR
13994 || code == HTM_BUILTIN_SET_TFHAR)
13995 return TFHAR_REGNO;
13996 else if (code == HTM_BUILTIN_GET_TFIAR
13997 || code == HTM_BUILTIN_SET_TFIAR)
13998 return TFIAR_REGNO;
13999 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14000 || code == HTM_BUILTIN_SET_TEXASR
14001 || code == HTM_BUILTIN_GET_TEXASRU
14002 || code == HTM_BUILTIN_SET_TEXASRU);
14003 return TEXASR_REGNO;
14004 }
14005
14006 /* Return the correct ICODE value depending on whether we are
14007 setting or reading the HTM SPRs. */
14008 static inline enum insn_code
14009 rs6000_htm_spr_icode (bool nonvoid)
14010 {
14011 if (nonvoid)
14012 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14013 else
14014 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14015 }
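
/* For example, __builtin_get_texasr () is nonvoid and expands via
htm_mfspr_{si,di}, while the void __builtin_set_texasr (x) expands
via htm_mtspr_{si,di}. */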
14016
14017 /* Expand the HTM builtin in EXP and store the result in TARGET.
14018 Store true in *EXPANDEDP if we found a builtin to expand. */
14019 static rtx
14020 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14021 {
14022 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14023 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14024 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14025 const struct builtin_description *d;
14026 size_t i;
14027
14028 *expandedp = true;
14029
14030 if (!TARGET_POWERPC64
14031 && (fcode == HTM_BUILTIN_TABORTDC
14032 || fcode == HTM_BUILTIN_TABORTDCI))
14033 {
14034 size_t uns_fcode = (size_t)fcode;
14035 const char *name = rs6000_builtin_info[uns_fcode].name;
14036 error ("builtin %qs is only valid in 64-bit mode", name);
14037 return const0_rtx;
14038 }
14039
14040 /* Expand the HTM builtins. */
14041 d = bdesc_htm;
14042 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14043 if (d->code == fcode)
14044 {
14045 rtx op[MAX_HTM_OPERANDS], pat;
14046 int nopnds = 0;
14047 tree arg;
14048 call_expr_arg_iterator iter;
14049 unsigned attr = rs6000_builtin_info[fcode].attr;
14050 enum insn_code icode = d->icode;
14051 const struct insn_operand_data *insn_op;
14052 bool uses_spr = (attr & RS6000_BTC_SPR);
14053 rtx cr = NULL_RTX;
14054
14055 if (uses_spr)
14056 icode = rs6000_htm_spr_icode (nonvoid);
14057 insn_op = &insn_data[icode].operand[0];
14058
14059 if (nonvoid)
14060 {
14061 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14062 if (!target
14063 || GET_MODE (target) != tmode
14064 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14065 target = gen_reg_rtx (tmode);
14066 if (uses_spr)
14067 op[nopnds++] = target;
14068 }
14069
14070 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14071 {
14072 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14073 return const0_rtx;
14074
14075 insn_op = &insn_data[icode].operand[nopnds];
14076
14077 op[nopnds] = expand_normal (arg);
14078
14079 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14080 {
14081 if (!strcmp (insn_op->constraint, "n"))
14082 {
14083 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14084 if (!CONST_INT_P (op[nopnds]))
14085 error ("argument %d must be an unsigned literal", arg_num);
14086 else
14087 error ("argument %d is an unsigned literal that is "
14088 "out of range", arg_num);
14089 return const0_rtx;
14090 }
14091 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14092 }
14093
14094 nopnds++;
14095 }
14096
14097 /* Handle the builtins for extended mnemonics. These accept
14098 no arguments, but map to builtins that take arguments. */
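
/* E.g. __builtin_tendall () is expanded exactly as __builtin_tend (1),
and __builtin_tresume () / __builtin_tsuspend () as
__builtin_tsr (1) / __builtin_tsr (0). */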
14099 switch (fcode)
14100 {
14101 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14102 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14103 op[nopnds++] = GEN_INT (1);
14104 if (flag_checking)
14105 attr |= RS6000_BTC_UNARY;
14106 break;
14107 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14108 op[nopnds++] = GEN_INT (0);
14109 if (flag_checking)
14110 attr |= RS6000_BTC_UNARY;
14111 break;
14112 default:
14113 break;
14114 }
14115
14116 /* If this builtin accesses SPRs, then pass in the appropriate
14117 SPR number and SPR regno as the last two operands. */
14118 if (uses_spr)
14119 {
14120 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14121 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14122 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14123 }
14124 /* If this builtin accesses a CR, then pass in a scratch
14125 CR as the last operand. */
14126 else if (attr & RS6000_BTC_CR)
14127 {
14128 cr = gen_reg_rtx (CCmode);
14129 op[nopnds++] = cr;
}
14130
14131 if (flag_checking)
14132 {
14133 int expected_nopnds = 0;
14134 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14135 expected_nopnds = 1;
14136 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14137 expected_nopnds = 2;
14138 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14139 expected_nopnds = 3;
14140 if (!(attr & RS6000_BTC_VOID))
14141 expected_nopnds += 1;
14142 if (uses_spr)
14143 expected_nopnds += 2;
14144
14145 gcc_assert (nopnds == expected_nopnds
14146 && nopnds <= MAX_HTM_OPERANDS);
14147 }
14148
14149 switch (nopnds)
14150 {
14151 case 1:
14152 pat = GEN_FCN (icode) (op[0]);
14153 break;
14154 case 2:
14155 pat = GEN_FCN (icode) (op[0], op[1]);
14156 break;
14157 case 3:
14158 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14159 break;
14160 case 4:
14161 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14162 break;
14163 default:
14164 gcc_unreachable ();
14165 }
14166 if (!pat)
14167 return NULL_RTX;
14168 emit_insn (pat);
14169
14170 if (attr & RS6000_BTC_CR)
14171 {
14172 if (fcode == HTM_BUILTIN_TBEGIN)
14173 {
14174 /* Emit code to set TARGET to true or false depending on
14175 whether the tbegin. instruction succeeded or failed
14176 to start a transaction. We do this by placing the 1's
14177 complement of CR's EQ bit into TARGET. */
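
/* A user-level sketch of the semantics this implements:

if (__builtin_tbegin (0))
{
... transactional code ...
__builtin_tend (0);
}

i.e. TARGET is 1 exactly when the transaction was started. */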
14178 rtx scratch = gen_reg_rtx (SImode);
14179 emit_insn (gen_rtx_SET (scratch,
14180 gen_rtx_EQ (SImode, cr,
14181 const0_rtx)));
14182 emit_insn (gen_rtx_SET (target,
14183 gen_rtx_XOR (SImode, scratch,
14184 GEN_INT (1))));
14185 }
14186 else
14187 {
14188 /* Emit code to copy the 4-bit condition register field
14189 CR into the least significant end of register TARGET. */
14190 rtx scratch1 = gen_reg_rtx (SImode);
14191 rtx scratch2 = gen_reg_rtx (SImode);
14192 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14193 emit_insn (gen_movcc (subreg, cr));
14194 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14195 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14196 }
14197 }
14198
14199 if (nonvoid)
14200 return target;
14201 return const0_rtx;
14202 }
14203
14204 *expandedp = false;
14205 return NULL_RTX;
14206 }
14207
14208 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14209
14210 static rtx
14211 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14212 rtx target)
14213 {
14214 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14215 if (fcode == RS6000_BUILTIN_CPU_INIT)
14216 return const0_rtx;
14217
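/* Usage sketch (illustrative; use_power9_path and use_power8_path are
hypothetical user functions):

__builtin_cpu_init ();
if (__builtin_cpu_is ("power9"))
use_power9_path ();
else if (__builtin_cpu_supports ("arch_2_07"))
use_power8_path ();

The string arguments must be literals so that the STRING_CST checks
below can see them. */
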
14218 if (target == 0 || GET_MODE (target) != SImode)
14219 target = gen_reg_rtx (SImode);
14220
14221 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14222 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14223 /* The target-clones pass creates an ARRAY_REF instead of a STRING_CST;
14224 convert it back to a STRING_CST. */
14225 if (TREE_CODE (arg) == ARRAY_REF
14226 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14227 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14228 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14229 arg = TREE_OPERAND (arg, 0);
14230
14231 if (TREE_CODE (arg) != STRING_CST)
14232 {
14233 error ("builtin %qs only accepts a string argument",
14234 rs6000_builtin_info[(size_t) fcode].name);
14235 return const0_rtx;
14236 }
14237
14238 if (fcode == RS6000_BUILTIN_CPU_IS)
14239 {
14240 const char *cpu = TREE_STRING_POINTER (arg);
14241 rtx cpuid = NULL_RTX;
14242 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14243 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14244 {
14245 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14246 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14247 break;
14248 }
14249 if (cpuid == NULL_RTX)
14250 {
14251 /* Invalid CPU argument. */
14252 error ("cpu %qs is an invalid argument to builtin %qs",
14253 cpu, rs6000_builtin_info[(size_t) fcode].name);
14254 return const0_rtx;
14255 }
14256
14257 rtx platform = gen_reg_rtx (SImode);
14258 rtx tcbmem = gen_const_mem (SImode,
14259 gen_rtx_PLUS (Pmode,
14260 gen_rtx_REG (Pmode, TLS_REGNUM),
14261 GEN_INT (TCB_PLATFORM_OFFSET)));
14262 emit_move_insn (platform, tcbmem);
14263 emit_insn (gen_eqsi3 (target, platform, cpuid));
14264 }
14265 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14266 {
14267 const char *hwcap = TREE_STRING_POINTER (arg);
14268 rtx mask = NULL_RTX;
14269 int hwcap_offset;
14270 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14271 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14272 {
14273 mask = GEN_INT (cpu_supports_info[i].mask);
14274 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14275 break;
14276 }
14277 if (mask == NULL_RTX)
14278 {
14279 /* Invalid HWCAP argument. */
14280 error ("%s %qs is an invalid argument to builtin %qs",
14281 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14282 return const0_rtx;
14283 }
14284
14285 rtx tcb_hwcap = gen_reg_rtx (SImode);
14286 rtx tcbmem = gen_const_mem (SImode,
14287 gen_rtx_PLUS (Pmode,
14288 gen_rtx_REG (Pmode, TLS_REGNUM),
14289 GEN_INT (hwcap_offset)));
14290 emit_move_insn (tcb_hwcap, tcbmem);
14291 rtx scratch1 = gen_reg_rtx (SImode);
14292 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14293 rtx scratch2 = gen_reg_rtx (SImode);
14294 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14295 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14296 }
14297 else
14298 gcc_unreachable ();
14299
14300 /* Record that we have expanded a CPU builtin, so that we can later
14301 emit a reference to the special symbol exported by LIBC to ensure we
14302 do not link against an old LIBC that doesn't support this feature. */
14303 cpu_builtin_p = true;
14304
14305 #else
14306 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14307 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14308
14309 /* For old LIBCs, always return FALSE. */
14310 emit_move_insn (target, GEN_INT (0));
14311 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14312
14313 return target;
14314 }
14315
14316 static rtx
14317 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14318 {
14319 rtx pat;
14320 tree arg0 = CALL_EXPR_ARG (exp, 0);
14321 tree arg1 = CALL_EXPR_ARG (exp, 1);
14322 tree arg2 = CALL_EXPR_ARG (exp, 2);
14323 rtx op0 = expand_normal (arg0);
14324 rtx op1 = expand_normal (arg1);
14325 rtx op2 = expand_normal (arg2);
14326 machine_mode tmode = insn_data[icode].operand[0].mode;
14327 machine_mode mode0 = insn_data[icode].operand[1].mode;
14328 machine_mode mode1 = insn_data[icode].operand[2].mode;
14329 machine_mode mode2 = insn_data[icode].operand[3].mode;
14330
14331 if (icode == CODE_FOR_nothing)
14332 /* Builtin not supported on this processor. */
14333 return 0;
14334
14335 /* If we got invalid arguments bail out before generating bad rtl. */
14336 if (arg0 == error_mark_node
14337 || arg1 == error_mark_node
14338 || arg2 == error_mark_node)
14339 return const0_rtx;
14340
14341 /* Check and prepare the arguments depending on the instruction code.
14342
14343 Note that a switch statement instead of the sequence of tests
14344 would be incorrect as many of the CODE_FOR values could be
14345 CODE_FOR_nothing and that would yield multiple alternatives
14346 with identical values. We'd never reach here at runtime in
14347 this case. */
14348 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14349 || icode == CODE_FOR_altivec_vsldoi_v2df
14350 || icode == CODE_FOR_altivec_vsldoi_v4si
14351 || icode == CODE_FOR_altivec_vsldoi_v8hi
14352 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14353 {
14354 /* Only allow 4-bit unsigned literals. */
14355 STRIP_NOPS (arg2);
14356 if (TREE_CODE (arg2) != INTEGER_CST
14357 || TREE_INT_CST_LOW (arg2) & ~0xf)
14358 {
14359 error ("argument 3 must be a 4-bit unsigned literal");
14360 return CONST0_RTX (tmode);
14361 }
14362 }
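/* E.g. vec_sld (a, b, 3) passes this check, while vec_sld (a, b, 16)
is rejected with the error above. */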
14363 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14364 || icode == CODE_FOR_vsx_xxpermdi_v2di
14365 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14366 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14367 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14368 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14369 || icode == CODE_FOR_vsx_xxpermdi_v4si
14370 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14371 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14372 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14373 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14374 || icode == CODE_FOR_vsx_xxsldwi_v4si
14375 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14376 || icode == CODE_FOR_vsx_xxsldwi_v2di
14377 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14378 {
14379 /* Only allow 2-bit unsigned literals. */
14380 STRIP_NOPS (arg2);
14381 if (TREE_CODE (arg2) != INTEGER_CST
14382 || TREE_INT_CST_LOW (arg2) & ~0x3)
14383 {
14384 error ("argument 3 must be a 2-bit unsigned literal");
14385 return CONST0_RTX (tmode);
14386 }
14387 }
14388 else if (icode == CODE_FOR_vsx_set_v2df
14389 || icode == CODE_FOR_vsx_set_v2di
14390 || icode == CODE_FOR_bcdadd
14391 || icode == CODE_FOR_bcdadd_lt
14392 || icode == CODE_FOR_bcdadd_eq
14393 || icode == CODE_FOR_bcdadd_gt
14394 || icode == CODE_FOR_bcdsub
14395 || icode == CODE_FOR_bcdsub_lt
14396 || icode == CODE_FOR_bcdsub_eq
14397 || icode == CODE_FOR_bcdsub_gt)
14398 {
14399 /* Only allow 1-bit unsigned literals. */
14400 STRIP_NOPS (arg2);
14401 if (TREE_CODE (arg2) != INTEGER_CST
14402 || TREE_INT_CST_LOW (arg2) & ~0x1)
14403 {
14404 error ("argument 3 must be a 1-bit unsigned literal");
14405 return CONST0_RTX (tmode);
14406 }
14407 }
14408 else if (icode == CODE_FOR_dfp_ddedpd_dd
14409 || icode == CODE_FOR_dfp_ddedpd_td)
14410 {
14411 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14412 STRIP_NOPS (arg0);
14413 if (TREE_CODE (arg0) != INTEGER_CST
14414 || TREE_INT_CST_LOW (arg0) & ~0x3)
14415 {
14416 error ("argument 1 must be 0 or 2");
14417 return CONST0_RTX (tmode);
14418 }
14419 }
14420 else if (icode == CODE_FOR_dfp_denbcd_dd
14421 || icode == CODE_FOR_dfp_denbcd_td)
14422 {
14423 /* Only allow 1-bit unsigned literals. */
14424 STRIP_NOPS (arg0);
14425 if (TREE_CODE (arg0) != INTEGER_CST
14426 || TREE_INT_CST_LOW (arg0) & ~0x1)
14427 {
14428 error ("argument 1 must be a 1-bit unsigned literal");
14429 return CONST0_RTX (tmode);
14430 }
14431 }
14432 else if (icode == CODE_FOR_dfp_dscli_dd
14433 || icode == CODE_FOR_dfp_dscli_td
14434 || icode == CODE_FOR_dfp_dscri_dd
14435 || icode == CODE_FOR_dfp_dscri_td)
14436 {
14437 /* Only allow 6-bit unsigned literals. */
14438 STRIP_NOPS (arg1);
14439 if (TREE_CODE (arg1) != INTEGER_CST
14440 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14441 {
14442 error ("argument 2 must be a 6-bit unsigned literal");
14443 return CONST0_RTX (tmode);
14444 }
14445 }
14446 else if (icode == CODE_FOR_crypto_vshasigmaw
14447 || icode == CODE_FOR_crypto_vshasigmad)
14448 {
14449 /* Check whether the 2nd and 3rd arguments are integer constants and in
14450 range and prepare arguments. */
14451 STRIP_NOPS (arg1);
14452 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14453 {
14454 error ("argument 2 must be 0 or 1");
14455 return CONST0_RTX (tmode);
14456 }
14457
14458 STRIP_NOPS (arg2);
14459 if (TREE_CODE (arg2) != INTEGER_CST
14460 || wi::geu_p (wi::to_wide (arg2), 16))
14461 {
14462 error ("argument 3 must be in the range 0..15");
14463 return CONST0_RTX (tmode);
14464 }
14465 }
14466
14467 if (target == 0
14468 || GET_MODE (target) != tmode
14469 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14470 target = gen_reg_rtx (tmode);
14471
14472 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14473 op0 = copy_to_mode_reg (mode0, op0);
14474 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14475 op1 = copy_to_mode_reg (mode1, op1);
14476 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14477 op2 = copy_to_mode_reg (mode2, op2);
14478
14479 pat = GEN_FCN (icode) (target, op0, op1, op2);
14480 if (! pat)
14481 return 0;
14482 emit_insn (pat);
14483
14484 return target;
14485 }
14486
14487
14488 /* Expand the dst builtins. */
14489 static rtx
14490 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14491 bool *expandedp)
14492 {
14493 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14494 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14495 tree arg0, arg1, arg2;
14496 machine_mode mode0, mode1;
14497 rtx pat, op0, op1, op2;
14498 const struct builtin_description *d;
14499 size_t i;
14500
14501 *expandedp = false;
14502
14503 /* Handle DST variants. */
14504 d = bdesc_dst;
14505 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14506 if (d->code == fcode)
14507 {
14508 arg0 = CALL_EXPR_ARG (exp, 0);
14509 arg1 = CALL_EXPR_ARG (exp, 1);
14510 arg2 = CALL_EXPR_ARG (exp, 2);
14511 op0 = expand_normal (arg0);
14512 op1 = expand_normal (arg1);
14513 op2 = expand_normal (arg2);
14514 mode0 = insn_data[d->icode].operand[0].mode;
14515 mode1 = insn_data[d->icode].operand[1].mode;
14516
14517 /* Invalid arguments, bail out before generating bad rtl. */
14518 if (arg0 == error_mark_node
14519 || arg1 == error_mark_node
14520 || arg2 == error_mark_node)
14521 return const0_rtx;
14522
14523 *expandedp = true;
14524 STRIP_NOPS (arg2);
14525 if (TREE_CODE (arg2) != INTEGER_CST
14526 || TREE_INT_CST_LOW (arg2) & ~0x3)
14527 {
14528 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14529 return const0_rtx;
14530 }
14531
14532 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14533 op0 = copy_to_mode_reg (Pmode, op0);
14534 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14535 op1 = copy_to_mode_reg (mode1, op1);
14536
14537 pat = GEN_FCN (d->icode) (op0, op1, op2);
14538 if (pat != 0)
14539 emit_insn (pat);
14540
14541 return NULL_RTX;
14542 }
14543
14544 return NULL_RTX;
14545 }
14546
14547 /* Expand vec_init builtin. */
14548 static rtx
14549 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14550 {
14551 machine_mode tmode = TYPE_MODE (type);
14552 machine_mode inner_mode = GET_MODE_INNER (tmode);
14553 int i, n_elt = GET_MODE_NUNITS (tmode);
14554
14555 gcc_assert (VECTOR_MODE_P (tmode));
14556 gcc_assert (n_elt == call_expr_nargs (exp));
14557
14558 if (!target || !register_operand (target, tmode))
14559 target = gen_reg_rtx (tmode);
14560
14561 /* If we have a vector comprised of a single element, such as V1TImode, do
14562 the initialization directly. */
14563 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14564 {
14565 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14566 emit_move_insn (target, gen_lowpart (tmode, x));
14567 }
14568 else
14569 {
14570 rtvec v = rtvec_alloc (n_elt);
14571
14572 for (i = 0; i < n_elt; ++i)
14573 {
14574 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14575 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14576 }
14577
14578 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14579 }
14580
14581 return target;
14582 }
14583
14584 /* Return the integer constant in ARG. Constrain it to be in the range
14585 of the subparts of VEC_TYPE; issue an error if not. */
14586
14587 static int
14588 get_element_number (tree vec_type, tree arg)
14589 {
14590 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14591
14592 if (!tree_fits_uhwi_p (arg)
14593 || (elt = tree_to_uhwi (arg), elt > max))
14594 {
14595 error ("selector must be an integer constant in the range 0..%wi", max);
14596 return 0;
14597 }
14598
14599 return elt;
14600 }
14601
14602 /* Expand vec_set builtin. */
14603 static rtx
14604 altivec_expand_vec_set_builtin (tree exp)
14605 {
14606 machine_mode tmode, mode1;
14607 tree arg0, arg1, arg2;
14608 int elt;
14609 rtx op0, op1;
14610
14611 arg0 = CALL_EXPR_ARG (exp, 0);
14612 arg1 = CALL_EXPR_ARG (exp, 1);
14613 arg2 = CALL_EXPR_ARG (exp, 2);
14614
14615 tmode = TYPE_MODE (TREE_TYPE (arg0));
14616 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14617 gcc_assert (VECTOR_MODE_P (tmode));
14618
14619 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14620 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14621 elt = get_element_number (TREE_TYPE (arg0), arg2);
14622
14623 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14624 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14625
14626 op0 = force_reg (tmode, op0);
14627 op1 = force_reg (mode1, op1);
14628
14629 rs6000_expand_vector_set (op0, op1, elt);
14630
14631 return op0;
14632 }
14633
14634 /* Expand vec_ext builtin. */
14635 static rtx
14636 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14637 {
14638 machine_mode tmode, mode0;
14639 tree arg0, arg1;
14640 rtx op0;
14641 rtx op1;
14642
14643 arg0 = CALL_EXPR_ARG (exp, 0);
14644 arg1 = CALL_EXPR_ARG (exp, 1);
14645
14646 op0 = expand_normal (arg0);
14647 op1 = expand_normal (arg1);
14648
14649 /* Call get_element_number to validate arg1 if it is a constant. */
14650 if (TREE_CODE (arg1) == INTEGER_CST)
14651 (void) get_element_number (TREE_TYPE (arg0), arg1);
14652
14653 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14654 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14655 gcc_assert (VECTOR_MODE_P (mode0));
14656
14657 op0 = force_reg (mode0, op0);
14658
14659 if (optimize || !target || !register_operand (target, tmode))
14660 target = gen_reg_rtx (tmode);
14661
14662 rs6000_expand_vector_extract (target, op0, op1);
14663
14664 return target;
14665 }
14666
14667 /* Expand the builtin in EXP and store the result in TARGET. Store
14668 true in *EXPANDEDP if we found a builtin to expand. */
14669 static rtx
14670 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14671 {
14672 const struct builtin_description *d;
14673 size_t i;
14674 enum insn_code icode;
14675 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14676 tree arg0, arg1, arg2;
14677 rtx op0, pat;
14678 machine_mode tmode, mode0;
14679 enum rs6000_builtins fcode
14680 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14681
14682 if (rs6000_overloaded_builtin_p (fcode))
14683 {
14684 *expandedp = true;
14685 error ("unresolved overload for Altivec builtin %qF", fndecl);
14686
14687 /* Given it is invalid, just generate a normal call. */
14688 return expand_call (exp, target, false);
14689 }
14690
14691 target = altivec_expand_dst_builtin (exp, target, expandedp);
14692 if (*expandedp)
14693 return target;
14694
14695 *expandedp = true;
14696
14697 switch (fcode)
14698 {
14699 case ALTIVEC_BUILTIN_STVX_V2DF:
14700 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14701 case ALTIVEC_BUILTIN_STVX_V2DI:
14702 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14703 case ALTIVEC_BUILTIN_STVX_V4SF:
14704 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14705 case ALTIVEC_BUILTIN_STVX:
14706 case ALTIVEC_BUILTIN_STVX_V4SI:
14707 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14708 case ALTIVEC_BUILTIN_STVX_V8HI:
14709 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14710 case ALTIVEC_BUILTIN_STVX_V16QI:
14711 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14712 case ALTIVEC_BUILTIN_STVEBX:
14713 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14714 case ALTIVEC_BUILTIN_STVEHX:
14715 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14716 case ALTIVEC_BUILTIN_STVEWX:
14717 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14718 case ALTIVEC_BUILTIN_STVXL_V2DF:
14719 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14720 case ALTIVEC_BUILTIN_STVXL_V2DI:
14721 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14722 case ALTIVEC_BUILTIN_STVXL_V4SF:
14723 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14724 case ALTIVEC_BUILTIN_STVXL:
14725 case ALTIVEC_BUILTIN_STVXL_V4SI:
14726 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14727 case ALTIVEC_BUILTIN_STVXL_V8HI:
14728 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14729 case ALTIVEC_BUILTIN_STVXL_V16QI:
14730 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14731
14732 case ALTIVEC_BUILTIN_STVLX:
14733 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14734 case ALTIVEC_BUILTIN_STVLXL:
14735 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14736 case ALTIVEC_BUILTIN_STVRX:
14737 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14738 case ALTIVEC_BUILTIN_STVRXL:
14739 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14740
14741 case P9V_BUILTIN_STXVL:
14742 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14743
14744 case P9V_BUILTIN_XST_LEN_R:
14745 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14746
14747 case VSX_BUILTIN_STXVD2X_V1TI:
14748 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14749 case VSX_BUILTIN_STXVD2X_V2DF:
14750 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14751 case VSX_BUILTIN_STXVD2X_V2DI:
14752 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14753 case VSX_BUILTIN_STXVW4X_V4SF:
14754 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14755 case VSX_BUILTIN_STXVW4X_V4SI:
14756 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14757 case VSX_BUILTIN_STXVW4X_V8HI:
14758 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14759 case VSX_BUILTIN_STXVW4X_V16QI:
14760 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14761
14762 /* For the following on big endian, it's ok to use any appropriate
14763 unaligned-supporting store, so use a generic expander. For
14764 little-endian, the exact element-reversing instruction must
14765 be used. */
14766 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14767 {
14768 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14769 : CODE_FOR_vsx_st_elemrev_v1ti);
14770 return altivec_expand_stv_builtin (code, exp);
14771 }
14772 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14773 {
14774 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14775 : CODE_FOR_vsx_st_elemrev_v2df);
14776 return altivec_expand_stv_builtin (code, exp);
14777 }
14778 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14779 {
14780 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14781 : CODE_FOR_vsx_st_elemrev_v2di);
14782 return altivec_expand_stv_builtin (code, exp);
14783 }
14784 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14785 {
14786 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14787 : CODE_FOR_vsx_st_elemrev_v4sf);
14788 return altivec_expand_stv_builtin (code, exp);
14789 }
14790 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14791 {
14792 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14793 : CODE_FOR_vsx_st_elemrev_v4si);
14794 return altivec_expand_stv_builtin (code, exp);
14795 }
14796 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14797 {
14798 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14799 : CODE_FOR_vsx_st_elemrev_v8hi);
14800 return altivec_expand_stv_builtin (code, exp);
14801 }
14802 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14803 {
14804 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14805 : CODE_FOR_vsx_st_elemrev_v16qi);
14806 return altivec_expand_stv_builtin (code, exp);
14807 }
14808
14809 case ALTIVEC_BUILTIN_MFVSCR:
14810 icode = CODE_FOR_altivec_mfvscr;
14811 tmode = insn_data[icode].operand[0].mode;
14812
14813 if (target == 0
14814 || GET_MODE (target) != tmode
14815 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14816 target = gen_reg_rtx (tmode);
14817
14818 pat = GEN_FCN (icode) (target);
14819 if (! pat)
14820 return 0;
14821 emit_insn (pat);
14822 return target;
14823
14824 case ALTIVEC_BUILTIN_MTVSCR:
14825 icode = CODE_FOR_altivec_mtvscr;
14826 arg0 = CALL_EXPR_ARG (exp, 0);
14827 op0 = expand_normal (arg0);
14828 mode0 = insn_data[icode].operand[0].mode;
14829
14830 /* If we got invalid arguments bail out before generating bad rtl. */
14831 if (arg0 == error_mark_node)
14832 return const0_rtx;
14833
14834 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14835 op0 = copy_to_mode_reg (mode0, op0);
14836
14837 pat = GEN_FCN (icode) (op0);
14838 if (pat)
14839 emit_insn (pat);
14840 return NULL_RTX;
14841
14842 case ALTIVEC_BUILTIN_DSSALL:
14843 emit_insn (gen_altivec_dssall ());
14844 return NULL_RTX;
14845
14846 case ALTIVEC_BUILTIN_DSS:
14847 icode = CODE_FOR_altivec_dss;
14848 arg0 = CALL_EXPR_ARG (exp, 0);
14849 STRIP_NOPS (arg0);
14850 op0 = expand_normal (arg0);
14851 mode0 = insn_data[icode].operand[0].mode;
14852
14853 /* If we got invalid arguments bail out before generating bad rtl. */
14854 if (arg0 == error_mark_node)
14855 return const0_rtx;
14856
14857 if (TREE_CODE (arg0) != INTEGER_CST
14858 || TREE_INT_CST_LOW (arg0) & ~0x3)
14859 {
14860 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14861 return const0_rtx;
14862 }
14863
14864 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14865 op0 = copy_to_mode_reg (mode0, op0);
14866
14867 emit_insn (gen_altivec_dss (op0));
14868 return NULL_RTX;
14869
14870 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14871 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14872 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14873 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14874 case VSX_BUILTIN_VEC_INIT_V2DF:
14875 case VSX_BUILTIN_VEC_INIT_V2DI:
14876 case VSX_BUILTIN_VEC_INIT_V1TI:
14877 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14878
14879 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14880 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14881 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14882 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14883 case VSX_BUILTIN_VEC_SET_V2DF:
14884 case VSX_BUILTIN_VEC_SET_V2DI:
14885 case VSX_BUILTIN_VEC_SET_V1TI:
14886 return altivec_expand_vec_set_builtin (exp);
14887
14888 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14889 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14890 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14891 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14892 case VSX_BUILTIN_VEC_EXT_V2DF:
14893 case VSX_BUILTIN_VEC_EXT_V2DI:
14894 case VSX_BUILTIN_VEC_EXT_V1TI:
14895 return altivec_expand_vec_ext_builtin (exp, target);
14896
14897 case P9V_BUILTIN_VEC_EXTRACT4B:
14898 arg1 = CALL_EXPR_ARG (exp, 1);
14899 STRIP_NOPS (arg1);
14900
14901 /* If the argument is invalid, generate a normal call. */
14902 if (arg1 == error_mark_node)
14903 return expand_call (exp, target, false);
14904
14905 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14906 {
14907 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14908 return expand_call (exp, target, false);
14909 }
14910 break;
14911
14912 case P9V_BUILTIN_VEC_INSERT4B:
14913 arg2 = CALL_EXPR_ARG (exp, 2);
14914 STRIP_NOPS (arg2);
14915
14916 /* If the argument is invalid, generate a normal call. */
14917 if (arg2 == error_mark_node)
14918 return expand_call (exp, target, false);
14919
14920 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14921 {
14922 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14923 return expand_call (exp, target, false);
14924 }
14925 break;
14926
14927 default:
14928 /* Fall through to the expanders below. */
14929 break;
14930 }
14931
14932 /* Expand abs* operations. */
14933 d = bdesc_abs;
14934 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14935 if (d->code == fcode)
14936 return altivec_expand_abs_builtin (d->icode, exp, target);
14937
14938 /* Expand the AltiVec predicates. */
14939 d = bdesc_altivec_preds;
14940 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14941 if (d->code == fcode)
14942 return altivec_expand_predicate_builtin (d->icode, exp, target);
14943
14944 /* LV* are funky. They are initialized differently. */
14945 switch (fcode)
14946 {
14947 case ALTIVEC_BUILTIN_LVSL:
14948 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14949 exp, target, false);
14950 case ALTIVEC_BUILTIN_LVSR:
14951 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14952 exp, target, false);
14953 case ALTIVEC_BUILTIN_LVEBX:
14954 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14955 exp, target, false);
14956 case ALTIVEC_BUILTIN_LVEHX:
14957 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14958 exp, target, false);
14959 case ALTIVEC_BUILTIN_LVEWX:
14960 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14961 exp, target, false);
14962 case ALTIVEC_BUILTIN_LVXL_V2DF:
14963 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14964 exp, target, false);
14965 case ALTIVEC_BUILTIN_LVXL_V2DI:
14966 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14967 exp, target, false);
14968 case ALTIVEC_BUILTIN_LVXL_V4SF:
14969 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14970 exp, target, false);
14971 case ALTIVEC_BUILTIN_LVXL:
14972 case ALTIVEC_BUILTIN_LVXL_V4SI:
14973 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14974 exp, target, false);
14975 case ALTIVEC_BUILTIN_LVXL_V8HI:
14976 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14977 exp, target, false);
14978 case ALTIVEC_BUILTIN_LVXL_V16QI:
14979 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14980 exp, target, false);
14981 case ALTIVEC_BUILTIN_LVX_V1TI:
14982 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14983 exp, target, false);
14984 case ALTIVEC_BUILTIN_LVX_V2DF:
14985 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14986 exp, target, false);
14987 case ALTIVEC_BUILTIN_LVX_V2DI:
14988 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14989 exp, target, false);
14990 case ALTIVEC_BUILTIN_LVX_V4SF:
14991 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14992 exp, target, false);
14993 case ALTIVEC_BUILTIN_LVX:
14994 case ALTIVEC_BUILTIN_LVX_V4SI:
14995 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14996 exp, target, false);
14997 case ALTIVEC_BUILTIN_LVX_V8HI:
14998 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14999 exp, target, false);
15000 case ALTIVEC_BUILTIN_LVX_V16QI:
15001 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15002 exp, target, false);
15003 case ALTIVEC_BUILTIN_LVLX:
15004 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15005 exp, target, true);
15006 case ALTIVEC_BUILTIN_LVLXL:
15007 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15008 exp, target, true);
15009 case ALTIVEC_BUILTIN_LVRX:
15010 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15011 exp, target, true);
15012 case ALTIVEC_BUILTIN_LVRXL:
15013 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15014 exp, target, true);
15015 case VSX_BUILTIN_LXVD2X_V1TI:
15016 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15017 exp, target, false);
15018 case VSX_BUILTIN_LXVD2X_V2DF:
15019 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15020 exp, target, false);
15021 case VSX_BUILTIN_LXVD2X_V2DI:
15022 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15023 exp, target, false);
15024 case VSX_BUILTIN_LXVW4X_V4SF:
15025 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15026 exp, target, false);
15027 case VSX_BUILTIN_LXVW4X_V4SI:
15028 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15029 exp, target, false);
15030 case VSX_BUILTIN_LXVW4X_V8HI:
15031 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15032 exp, target, false);
15033 case VSX_BUILTIN_LXVW4X_V16QI:
15034 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15035 exp, target, false);
15036 /* For the following on big endian, it's ok to use any appropriate
15037 unaligned-supporting load, so use a generic expander. For
15038 little-endian, the exact element-reversing instruction must
15039 be used. */
15040 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15041 {
15042 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15043 : CODE_FOR_vsx_ld_elemrev_v2df);
15044 return altivec_expand_lv_builtin (code, exp, target, false);
15045 }
15046 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15047 {
15048 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15049 : CODE_FOR_vsx_ld_elemrev_v1ti);
15050 return altivec_expand_lv_builtin (code, exp, target, false);
15051 }
15052 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15053 {
15054 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15055 : CODE_FOR_vsx_ld_elemrev_v2di);
15056 return altivec_expand_lv_builtin (code, exp, target, false);
15057 }
15058 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15059 {
15060 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15061 : CODE_FOR_vsx_ld_elemrev_v4sf);
15062 return altivec_expand_lv_builtin (code, exp, target, false);
15063 }
15064 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15065 {
15066 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15067 : CODE_FOR_vsx_ld_elemrev_v4si);
15068 return altivec_expand_lv_builtin (code, exp, target, false);
15069 }
15070 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15071 {
15072 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15073 : CODE_FOR_vsx_ld_elemrev_v8hi);
15074 return altivec_expand_lv_builtin (code, exp, target, false);
15075 }
15076 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15077 {
15078 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15079 : CODE_FOR_vsx_ld_elemrev_v16qi);
15080 return altivec_expand_lv_builtin (code, exp, target, false);
15081 }
15082 default:
15083 /* Fall through to the code below the switch. */
15084 break;
15086 }
15087
15088 *expandedp = false;
15089 return NULL_RTX;
15090 }
15091
15092 /* Check whether a builtin function is supported in this target
15093 configuration. */
15094 bool
15095 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15096 {
15097 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15098 return (fnmask & rs6000_builtin_mask) == fnmask;
15099 }
15103
15104 /* Raise an error message for a builtin function that is called without the
15105 appropriate target options being set. */
15106
15107 static void
15108 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15109 {
15110 size_t uns_fncode = (size_t) fncode;
15111 const char *name = rs6000_builtin_info[uns_fncode].name;
15112 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15113
15114 gcc_assert (name != NULL);
15115 if ((fnmask & RS6000_BTM_CELL) != 0)
15116 error ("builtin function %qs is only valid for the cell processor", name);
15117 else if ((fnmask & RS6000_BTM_VSX) != 0)
15118 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15119 else if ((fnmask & RS6000_BTM_HTM) != 0)
15120 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15121 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15122 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15123 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15124 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15125 error ("builtin function %qs requires the %qs and %qs options",
15126 name, "-mhard-dfp", "-mpower8-vector");
15127 else if ((fnmask & RS6000_BTM_DFP) != 0)
15128 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15129 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15130 error ("builtin function %qs requires the %qs option", name,
15131 "-mpower8-vector");
15132 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15133 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15134 error ("builtin function %qs requires the %qs and %qs options",
15135 name, "-mcpu=power9", "-m64");
15136 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15137 error ("builtin function %qs requires the %qs option", name,
15138 "-mcpu=power9");
15139 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15140 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15141 error ("builtin function %qs requires the %qs and %qs options",
15142 name, "-mcpu=power9", "-m64");
15143 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15144 error ("builtin function %qs requires the %qs option", name,
15145 "-mcpu=power9");
15146 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15147 {
15148 if (!TARGET_HARD_FLOAT)
15149 error ("builtin function %qs requires the %qs option", name,
15150 "-mhard-float");
15151 else
15152 error ("builtin function %qs requires the %qs option", name,
15153 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15154 }
15155 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15156 error ("builtin function %qs requires the %qs option", name,
15157 "-mhard-float");
15158 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15159 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15160 name);
15161 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15162 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15163 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15164 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15165 error ("builtin function %qs requires the %qs (or newer), and "
15166 "%qs or %qs options",
15167 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15168 else
15169 error ("builtin function %qs is not supported with the current options",
15170 name);
15171 }
15172
15173 /* Target hook for early folding of built-ins, shamelessly stolen
15174 from ia64.c. */
15175
15176 static tree
15177 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15178 int n_args ATTRIBUTE_UNUSED,
15179 tree *args ATTRIBUTE_UNUSED,
15180 bool ignore ATTRIBUTE_UNUSED)
15181 {
15182 #ifdef SUBTARGET_FOLD_BUILTIN
15183 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15184 #else
15185 return NULL_TREE;
15186 #endif
15187 }
15188
15189 /* Helper function to sort out which built-ins may be valid without having
15190 a LHS. */
15191 static bool
15192 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15193 {
15194 switch (fn_code)
15195 {
15196 case ALTIVEC_BUILTIN_STVX_V16QI:
15197 case ALTIVEC_BUILTIN_STVX_V8HI:
15198 case ALTIVEC_BUILTIN_STVX_V4SI:
15199 case ALTIVEC_BUILTIN_STVX_V4SF:
15200 case ALTIVEC_BUILTIN_STVX_V2DI:
15201 case ALTIVEC_BUILTIN_STVX_V2DF:
15202 case VSX_BUILTIN_STXVW4X_V16QI:
15203 case VSX_BUILTIN_STXVW4X_V8HI:
15204 case VSX_BUILTIN_STXVW4X_V4SF:
15205 case VSX_BUILTIN_STXVW4X_V4SI:
15206 case VSX_BUILTIN_STXVD2X_V2DF:
15207 case VSX_BUILTIN_STXVD2X_V2DI:
15208 return true;
15209 default:
15210 return false;
15211 }
15212 }
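
/* E.g. vec_st (v, 0, p) expands to one of the STVX builtins above and
produces no assignment, yet is still a candidate for the gimple
folding below. */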
15213
15214 /* Helper function to handle the gimple folding of a vector compare
15215 operation. This sets up true/false vectors, and uses the
15216 VEC_COND_EXPR operation.
15217 CODE indicates which comparison is to be made. (EQ, GT, ...).
15218 TYPE indicates the type of the result. */
15219 static tree
15220 fold_build_vec_cmp (tree_code code, tree type,
15221 tree arg0, tree arg1)
15222 {
15223 tree cmp_type = build_same_sized_truth_vector_type (type);
15224 tree zero_vec = build_zero_cst (type);
15225 tree minus_one_vec = build_minus_one_cst (type);
15226 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15227 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15228 }
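
/* E.g. for CODE == EQ_EXPR and a V4SI TYPE, the folded form is
roughly (a GIMPLE sketch):
cmp = arg0 == arg1;
result = VEC_COND_EXPR <cmp, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>; */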
15229
15230 /* Helper function to handle the in-between steps for the
15231 vector compare built-ins. */
15232 static void
15233 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15234 {
15235 tree arg0 = gimple_call_arg (stmt, 0);
15236 tree arg1 = gimple_call_arg (stmt, 1);
15237 tree lhs = gimple_call_lhs (stmt);
15238 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15239 gimple *g = gimple_build_assign (lhs, cmp);
15240 gimple_set_location (g, gimple_location (stmt));
15241 gsi_replace (gsi, g, true);
15242 }
15243
15244 /* Helper function to map V2DF and V4SF types to their
15245 integral equivalents (V2DI and V4SI). */
15246 tree
map_to_integral_tree_type (tree input_tree_type)
15247 {
15248 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15249 return input_tree_type;
15250 else
15251 {
15252 if (types_compatible_p (TREE_TYPE (input_tree_type),
15253 TREE_TYPE (V2DF_type_node)))
15254 return V2DI_type_node;
15255 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15256 TREE_TYPE (V4SF_type_node)))
15257 return V4SI_type_node;
15258 else
15259 gcc_unreachable ();
15260 }
15261 }
15262
15263 /* Helper function to handle the vector merge[hl] built-ins. The
15264 implementation difference between the h and l versions lies in the
15265 values used when building the permute vector for the high-word versus
15266 low-word merge; the variance is keyed off the use_high parameter. */
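/* For example, with a 4-element vector this builds the selector
{ 0, 4, 1, 5 } when use_high == 0 and { 2, 6, 3, 7 } when
use_high == 1, interleaving one half of each input vector. */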
15267 static void
15268 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15269 {
15270 tree arg0 = gimple_call_arg (stmt, 0);
15271 tree arg1 = gimple_call_arg (stmt, 1);
15272 tree lhs = gimple_call_lhs (stmt);
15273 tree lhs_type = TREE_TYPE (lhs);
15274 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15275 int midpoint = n_elts / 2;
15276 int offset = 0;
15277
15278 if (use_high == 1)
15279 offset = midpoint;
15280
15281 /* The permute_type will match the lhs for integral types. For double and
15282 float types, the permute type needs to map to the V2 or V4 type that
15283 matches size. */
15284 tree permute_type;
15285 permute_type = map_to_integral_tree_type (lhs_type);
15286 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15287
15288 for (int i = 0; i < midpoint; i++)
15289 {
15290 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15291 offset + i));
15292 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15293 offset + n_elts + i));
15294 }
15295
15296 tree permute = elts.build ();
15297
15298 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15299 gimple_set_location (g, gimple_location (stmt));
15300 gsi_replace (gsi, g, true);
15301 }
15302
15303 /* Helper function to handle the vector merge[eo] built-ins. */
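/* For a 4-element vector this builds the selector { 0, 4, 2, 6 } when
use_odd == 0 (even elements) and { 1, 5, 3, 7 } when use_odd == 1. */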
15304 static void
15305 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15306 {
15307 tree arg0 = gimple_call_arg (stmt, 0);
15308 tree arg1 = gimple_call_arg (stmt, 1);
15309 tree lhs = gimple_call_lhs (stmt);
15310 tree lhs_type = TREE_TYPE (lhs);
15311 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15312
15313 /* The permute_type will match the lhs for integral types. For double and
15314 float types, the permute type needs to map to the V2 or V4 type that
15315 matches size. */
15316 tree permute_type;
15317 permute_type = map_to_integral_tree_type (lhs_type);
15318
15319 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15320
15321 /* Build the permute vector. */
15322 for (int i = 0; i < n_elts / 2; i++)
15323 {
15324 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15325 2*i + use_odd));
15326 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15327 2*i + use_odd + n_elts));
15328 }
15329
15330 tree permute = elts.build ();
15331
15332 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15333 gimple_set_location (g, gimple_location (stmt));
15334 gsi_replace (gsi, g, true);
15335 }
15336
15337 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15338 a constant, use rs6000_fold_builtin.) */
15339
15340 bool
15341 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15342 {
15343 gimple *stmt = gsi_stmt (*gsi);
15344 tree fndecl = gimple_call_fndecl (stmt);
15345 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15346 enum rs6000_builtins fn_code
15347 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15348 tree arg0, arg1, lhs, temp;
15349 gimple *g;
15350
15351 size_t uns_fncode = (size_t) fn_code;
15352 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15353 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15354 const char *fn_name2 = (icode != CODE_FOR_nothing)
15355 ? get_insn_name ((int) icode)
15356 : "nothing";
15357
15358 if (TARGET_DEBUG_BUILTIN)
15359 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15360 fn_code, fn_name1, fn_name2);
15361
15362 if (!rs6000_fold_gimple)
15363 return false;
15364
15365 /* Prevent gimple folding for code that does not have an LHS, unless it is
15366 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15367 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15368 return false;
15369
15370 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15371 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15372 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15373 if (!func_valid_p)
15374 return false;
15375
15376 switch (fn_code)
15377 {
15378 /* Flavors of vec_add. We deliberately don't expand
15379 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15380 TImode, resulting in much poorer code generation. */
15381 case ALTIVEC_BUILTIN_VADDUBM:
15382 case ALTIVEC_BUILTIN_VADDUHM:
15383 case ALTIVEC_BUILTIN_VADDUWM:
15384 case P8V_BUILTIN_VADDUDM:
15385 case ALTIVEC_BUILTIN_VADDFP:
15386 case VSX_BUILTIN_XVADDDP:
15387 arg0 = gimple_call_arg (stmt, 0);
15388 arg1 = gimple_call_arg (stmt, 1);
15389 lhs = gimple_call_lhs (stmt);
15390 g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
15391 gimple_set_location (g, gimple_location (stmt));
15392 gsi_replace (gsi, g, true);
15393 return true;
15394 /* Flavors of vec_sub. We deliberately don't expand
15395 P8V_BUILTIN_VSUBUQM. */
15396 case ALTIVEC_BUILTIN_VSUBUBM:
15397 case ALTIVEC_BUILTIN_VSUBUHM:
15398 case ALTIVEC_BUILTIN_VSUBUWM:
15399 case P8V_BUILTIN_VSUBUDM:
15400 case ALTIVEC_BUILTIN_VSUBFP:
15401 case VSX_BUILTIN_XVSUBDP:
15402 arg0 = gimple_call_arg (stmt, 0);
15403 arg1 = gimple_call_arg (stmt, 1);
15404 lhs = gimple_call_lhs (stmt);
15405 g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
15406 gimple_set_location (g, gimple_location (stmt));
15407 gsi_replace (gsi, g, true);
15408 return true;
15409 case VSX_BUILTIN_XVMULSP:
15410 case VSX_BUILTIN_XVMULDP:
15411 arg0 = gimple_call_arg (stmt, 0);
15412 arg1 = gimple_call_arg (stmt, 1);
15413 lhs = gimple_call_lhs (stmt);
15414 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15415 gimple_set_location (g, gimple_location (stmt));
15416 gsi_replace (gsi, g, true);
15417 return true;
15418 /* Even element flavors of vec_mul (signed). */
15419 case ALTIVEC_BUILTIN_VMULESB:
15420 case ALTIVEC_BUILTIN_VMULESH:
15421 case P8V_BUILTIN_VMULESW:
15422 /* Even element flavors of vec_mul (unsigned). */
15423 case ALTIVEC_BUILTIN_VMULEUB:
15424 case ALTIVEC_BUILTIN_VMULEUH:
15425 case P8V_BUILTIN_VMULEUW:
15426 arg0 = gimple_call_arg (stmt, 0);
15427 arg1 = gimple_call_arg (stmt, 1);
15428 lhs = gimple_call_lhs (stmt);
15429 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15430 gimple_set_location (g, gimple_location (stmt));
15431 gsi_replace (gsi, g, true);
15432 return true;
15433 /* Odd element flavors of vec_mul (signed). */
15434 case ALTIVEC_BUILTIN_VMULOSB:
15435 case ALTIVEC_BUILTIN_VMULOSH:
15436 case P8V_BUILTIN_VMULOSW:
15437 /* Odd element flavors of vec_mul (unsigned). */
15438 case ALTIVEC_BUILTIN_VMULOUB:
15439 case ALTIVEC_BUILTIN_VMULOUH:
15440 case P8V_BUILTIN_VMULOUW:
15441 arg0 = gimple_call_arg (stmt, 0);
15442 arg1 = gimple_call_arg (stmt, 1);
15443 lhs = gimple_call_lhs (stmt);
15444 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15445 gimple_set_location (g, gimple_location (stmt));
15446 gsi_replace (gsi, g, true);
15447 return true;
15448 /* Flavors of vec_div (integer). */
15449 case VSX_BUILTIN_DIV_V2DI:
15450 case VSX_BUILTIN_UDIV_V2DI:
15451 arg0 = gimple_call_arg (stmt, 0);
15452 arg1 = gimple_call_arg (stmt, 1);
15453 lhs = gimple_call_lhs (stmt);
15454 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15455 gimple_set_location (g, gimple_location (stmt));
15456 gsi_replace (gsi, g, true);
15457 return true;
15458 /* Flavors of vec_div (float). */
15459 case VSX_BUILTIN_XVDIVSP:
15460 case VSX_BUILTIN_XVDIVDP:
15461 arg0 = gimple_call_arg (stmt, 0);
15462 arg1 = gimple_call_arg (stmt, 1);
15463 lhs = gimple_call_lhs (stmt);
15464 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15465 gimple_set_location (g, gimple_location (stmt));
15466 gsi_replace (gsi, g, true);
15467 return true;
15468 /* Flavors of vec_and. */
15469 case ALTIVEC_BUILTIN_VAND:
15470 arg0 = gimple_call_arg (stmt, 0);
15471 arg1 = gimple_call_arg (stmt, 1);
15472 lhs = gimple_call_lhs (stmt);
15473 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15474 gimple_set_location (g, gimple_location (stmt));
15475 gsi_replace (gsi, g, true);
15476 return true;
15477 /* Flavors of vec_andc. */
15478 case ALTIVEC_BUILTIN_VANDC:
15479 arg0 = gimple_call_arg (stmt, 0);
15480 arg1 = gimple_call_arg (stmt, 1);
15481 lhs = gimple_call_lhs (stmt);
15482 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15483 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15484 gimple_set_location (g, gimple_location (stmt));
15485 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15486 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15487 gimple_set_location (g, gimple_location (stmt));
15488 gsi_replace (gsi, g, true);
15489 return true;
15490 /* Flavors of vec_nand. */
15491 case P8V_BUILTIN_VEC_NAND:
15492 case P8V_BUILTIN_NAND_V16QI:
15493 case P8V_BUILTIN_NAND_V8HI:
15494 case P8V_BUILTIN_NAND_V4SI:
15495 case P8V_BUILTIN_NAND_V4SF:
15496 case P8V_BUILTIN_NAND_V2DF:
15497 case P8V_BUILTIN_NAND_V2DI:
15498 arg0 = gimple_call_arg (stmt, 0);
15499 arg1 = gimple_call_arg (stmt, 1);
15500 lhs = gimple_call_lhs (stmt);
15501 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15502 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15503 gimple_set_location (g, gimple_location (stmt));
15504 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15505 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15506 gimple_set_location (g, gimple_location (stmt));
15507 gsi_replace (gsi, g, true);
15508 return true;
15509 /* Flavors of vec_or. */
15510 case ALTIVEC_BUILTIN_VOR:
15511 arg0 = gimple_call_arg (stmt, 0);
15512 arg1 = gimple_call_arg (stmt, 1);
15513 lhs = gimple_call_lhs (stmt);
15514 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15515 gimple_set_location (g, gimple_location (stmt));
15516 gsi_replace (gsi, g, true);
15517 return true;
15518 /* Flavors of vec_orc. */
15519 case P8V_BUILTIN_ORC_V16QI:
15520 case P8V_BUILTIN_ORC_V8HI:
15521 case P8V_BUILTIN_ORC_V4SI:
15522 case P8V_BUILTIN_ORC_V4SF:
15523 case P8V_BUILTIN_ORC_V2DF:
15524 case P8V_BUILTIN_ORC_V2DI:
15525 arg0 = gimple_call_arg (stmt, 0);
15526 arg1 = gimple_call_arg (stmt, 1);
15527 lhs = gimple_call_lhs (stmt);
15528 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15529 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15530 gimple_set_location (g, gimple_location (stmt));
15531 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15532 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15533 gimple_set_location (g, gimple_location (stmt));
15534 gsi_replace (gsi, g, true);
15535 return true;
15536 /* Flavors of vec_xor. */
15537 case ALTIVEC_BUILTIN_VXOR:
15538 arg0 = gimple_call_arg (stmt, 0);
15539 arg1 = gimple_call_arg (stmt, 1);
15540 lhs = gimple_call_lhs (stmt);
15541 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15542 gimple_set_location (g, gimple_location (stmt));
15543 gsi_replace (gsi, g, true);
15544 return true;
15545 /* Flavors of vec_nor. */
15546 case ALTIVEC_BUILTIN_VNOR:
15547 arg0 = gimple_call_arg (stmt, 0);
15548 arg1 = gimple_call_arg (stmt, 1);
15549 lhs = gimple_call_lhs (stmt);
15550 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15551 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15552 gimple_set_location (g, gimple_location (stmt));
15553 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15554 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15555 gimple_set_location (g, gimple_location (stmt));
15556 gsi_replace (gsi, g, true);
15557 return true;
15558 /* Flavors of vec_abs. */
15559 case ALTIVEC_BUILTIN_ABS_V16QI:
15560 case ALTIVEC_BUILTIN_ABS_V8HI:
15561 case ALTIVEC_BUILTIN_ABS_V4SI:
15562 case ALTIVEC_BUILTIN_ABS_V4SF:
15563 case P8V_BUILTIN_ABS_V2DI:
15564 case VSX_BUILTIN_XVABSDP:
15565 arg0 = gimple_call_arg (stmt, 0);
15566 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15567 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15568 return false;
15569 lhs = gimple_call_lhs (stmt);
15570 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15571 gimple_set_location (g, gimple_location (stmt));
15572 gsi_replace (gsi, g, true);
15573 return true;
15574 /* Flavors of vec_min. */
15575 case VSX_BUILTIN_XVMINDP:
15576 case P8V_BUILTIN_VMINSD:
15577 case P8V_BUILTIN_VMINUD:
15578 case ALTIVEC_BUILTIN_VMINSB:
15579 case ALTIVEC_BUILTIN_VMINSH:
15580 case ALTIVEC_BUILTIN_VMINSW:
15581 case ALTIVEC_BUILTIN_VMINUB:
15582 case ALTIVEC_BUILTIN_VMINUH:
15583 case ALTIVEC_BUILTIN_VMINUW:
15584 case ALTIVEC_BUILTIN_VMINFP:
15585 arg0 = gimple_call_arg (stmt, 0);
15586 arg1 = gimple_call_arg (stmt, 1);
15587 lhs = gimple_call_lhs (stmt);
15588 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15589 gimple_set_location (g, gimple_location (stmt));
15590 gsi_replace (gsi, g, true);
15591 return true;
15592 /* Flavors of vec_max. */
15593 case VSX_BUILTIN_XVMAXDP:
15594 case P8V_BUILTIN_VMAXSD:
15595 case P8V_BUILTIN_VMAXUD:
15596 case ALTIVEC_BUILTIN_VMAXSB:
15597 case ALTIVEC_BUILTIN_VMAXSH:
15598 case ALTIVEC_BUILTIN_VMAXSW:
15599 case ALTIVEC_BUILTIN_VMAXUB:
15600 case ALTIVEC_BUILTIN_VMAXUH:
15601 case ALTIVEC_BUILTIN_VMAXUW:
15602 case ALTIVEC_BUILTIN_VMAXFP:
15603 arg0 = gimple_call_arg (stmt, 0);
15604 arg1 = gimple_call_arg (stmt, 1);
15605 lhs = gimple_call_lhs (stmt);
15606 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15607 gimple_set_location (g, gimple_location (stmt));
15608 gsi_replace (gsi, g, true);
15609 return true;
15610 /* Flavors of vec_eqv. */
15611 case P8V_BUILTIN_EQV_V16QI:
15612 case P8V_BUILTIN_EQV_V8HI:
15613 case P8V_BUILTIN_EQV_V4SI:
15614 case P8V_BUILTIN_EQV_V4SF:
15615 case P8V_BUILTIN_EQV_V2DF:
15616 case P8V_BUILTIN_EQV_V2DI:
15617 arg0 = gimple_call_arg (stmt, 0);
15618 arg1 = gimple_call_arg (stmt, 1);
15619 lhs = gimple_call_lhs (stmt);
15620 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15621 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15622 gimple_set_location (g, gimple_location (stmt));
15623 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15624 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15625 gimple_set_location (g, gimple_location (stmt));
15626 gsi_replace (gsi, g, true);
15627 return true;
15628 /* Flavors of vec_rotate_left. */
15629 case ALTIVEC_BUILTIN_VRLB:
15630 case ALTIVEC_BUILTIN_VRLH:
15631 case ALTIVEC_BUILTIN_VRLW:
15632 case P8V_BUILTIN_VRLD:
15633 arg0 = gimple_call_arg (stmt, 0);
15634 arg1 = gimple_call_arg (stmt, 1);
15635 lhs = gimple_call_lhs (stmt);
15636 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15637 gimple_set_location (g, gimple_location (stmt));
15638 gsi_replace (gsi, g, true);
15639 return true;
15640 /* Flavors of vector shift right algebraic.
15641 vec_sra{b,h,w} -> vsra{b,h,w}. */
15642 case ALTIVEC_BUILTIN_VSRAB:
15643 case ALTIVEC_BUILTIN_VSRAH:
15644 case ALTIVEC_BUILTIN_VSRAW:
15645 case P8V_BUILTIN_VSRAD:
15646 arg0 = gimple_call_arg (stmt, 0);
15647 arg1 = gimple_call_arg (stmt, 1);
15648 lhs = gimple_call_lhs (stmt);
15649 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15650 gimple_set_location (g, gimple_location (stmt));
15651 gsi_replace (gsi, g, true);
15652 return true;
15653 /* Flavors of vector shift left.
15654 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15655 case ALTIVEC_BUILTIN_VSLB:
15656 case ALTIVEC_BUILTIN_VSLH:
15657 case ALTIVEC_BUILTIN_VSLW:
15658 case P8V_BUILTIN_VSLD:
15659 {
15660 location_t loc;
15661 gimple_seq stmts = NULL;
15662 arg0 = gimple_call_arg (stmt, 0);
15663 tree arg0_type = TREE_TYPE (arg0);
15664 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15665 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15666 return false;
15667 arg1 = gimple_call_arg (stmt, 1);
15668 tree arg1_type = TREE_TYPE (arg1);
15669 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15670 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15671 loc = gimple_location (stmt);
15672 lhs = gimple_call_lhs (stmt);
15673 /* Force arg1 into the valid range for the arg0 type. */
15674 /* Build a vector whose elements are the per-element size in bits. */
15675 int n_elts = VECTOR_CST_NELTS (arg1);
15676 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15677 * BITS_PER_UNIT;
15678 tree element_size = build_int_cst (unsigned_element_type,
15679 tree_size_in_bits / n_elts);
15680 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15681 for (int i = 0; i < n_elts; i++)
15682 elts.safe_push (element_size);
15683 tree modulo_tree = elts.build ();
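/* Illustrative example: for a V4SI shift each element of modulo_tree
   is 32, so a per-element shift count of 35 is reduced to 3 by the
   TRUNC_MOD_EXPR below; this matches the vsl* hardware behavior of
   using only the low-order bits of the shift count.  */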
15684 /* Modulo the provided shift value against that vector. */
15685 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15686 unsigned_arg1_type, arg1);
15687 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15688 unsigned_arg1_type, unsigned_arg1,
15689 modulo_tree);
15690 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15691 /* And finally, do the shift. */
15692 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15693 gimple_set_location (g, gimple_location (stmt));
15694 gsi_replace (gsi, g, true);
15695 return true;
15696 }
15697 /* Flavors of vector shift right. */
15698 case ALTIVEC_BUILTIN_VSRB:
15699 case ALTIVEC_BUILTIN_VSRH:
15700 case ALTIVEC_BUILTIN_VSRW:
15701 case P8V_BUILTIN_VSRD:
15702 {
15703 arg0 = gimple_call_arg (stmt, 0);
15704 arg1 = gimple_call_arg (stmt, 1);
15705 lhs = gimple_call_lhs (stmt);
15706 gimple_seq stmts = NULL;
15707 /* Convert arg0 to unsigned. */
15708 tree arg0_unsigned
15709 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15710 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15711 tree res
15712 = gimple_build (&stmts, RSHIFT_EXPR,
15713 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15714 /* Convert result back to the lhs type. */
15715 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15716 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15717 update_call_from_tree (gsi, res);
15718 return true;
15719 }
15720 /* Vector loads. */
15721 case ALTIVEC_BUILTIN_LVX_V16QI:
15722 case ALTIVEC_BUILTIN_LVX_V8HI:
15723 case ALTIVEC_BUILTIN_LVX_V4SI:
15724 case ALTIVEC_BUILTIN_LVX_V4SF:
15725 case ALTIVEC_BUILTIN_LVX_V2DI:
15726 case ALTIVEC_BUILTIN_LVX_V2DF:
15727 case ALTIVEC_BUILTIN_LVX_V1TI:
15728 {
15729 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
15730 arg1 = gimple_call_arg (stmt, 1); /* Address. */
15731 lhs = gimple_call_lhs (stmt);
15732 location_t loc = gimple_location (stmt);
15733 /* Since arg1 may be cast to a different type, just use ptr_type_node
15734 here instead of trying to enforce TBAA on pointer types. */
15735 tree arg1_type = ptr_type_node;
15736 tree lhs_type = TREE_TYPE (lhs);
15737 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15738 the tree using the value from arg0. The resulting type will match
15739 the type of arg1. */
15740 gimple_seq stmts = NULL;
15741 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15742 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15743 arg1_type, arg1, temp_offset);
15744 /* Mask off any lower bits from the address. */
15745 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15746 arg1_type, temp_addr,
15747 build_int_cst (arg1_type, -16));
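/* The lvx instruction ignores the low four bits of the effective
   address; the AND with -16 (~0xF) makes that 16-byte truncation
   explicit in GIMPLE, e.g. an address of 0x1007 becomes 0x1000.  */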
15748 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15749 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15750 take an offset, but since we've already incorporated the offset
15751 above, here we just pass in a zero. */
15752 gimple *g
15753 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15754 build_int_cst (arg1_type, 0)));
15755 gimple_set_location (g, loc);
15756 gsi_replace (gsi, g, true);
15757 return true;
15758 }
15759 /* Vector stores. */
15760 case ALTIVEC_BUILTIN_STVX_V16QI:
15761 case ALTIVEC_BUILTIN_STVX_V8HI:
15762 case ALTIVEC_BUILTIN_STVX_V4SI:
15763 case ALTIVEC_BUILTIN_STVX_V4SF:
15764 case ALTIVEC_BUILTIN_STVX_V2DI:
15765 case ALTIVEC_BUILTIN_STVX_V2DF:
15766 {
15767 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15768 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15769 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15770 location_t loc = gimple_location (stmt);
15771 tree arg0_type = TREE_TYPE (arg0);
15772 /* Use ptr_type_node (no TBAA) for the arg2_type.
15773 FIXME: (Richard) "A proper fix would be to transition this type as
15774 seen from the frontend to GIMPLE, for example in a similar way we
15775 do for MEM_REFs by piggy-backing that on an extra argument, a
15776 constant zero pointer of the alias pointer type to use (which would
15777 also serve as a type indicator of the store itself). I'd use a
15778 target specific internal function for this (not sure if we can have
15779 those target specific, but I guess if it's folded away then that's
15780 fine) and get away with the overload set." */
15781 tree arg2_type = ptr_type_node;
15782 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15783 the tree using the value from arg0. The resulting type will match
15784 the type of arg2. */
15785 gimple_seq stmts = NULL;
15786 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15787 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15788 arg2_type, arg2, temp_offset);
15789 /* Mask off any lower bits from the address. */
15790 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15791 arg2_type, temp_addr,
15792 build_int_cst (arg2_type, -16));
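/* As in the lvx case above, stvx truncates the effective address to a
   16-byte boundary, which the AND with -16 expresses explicitly.  */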
15793 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15794 /* The desired gimple result should be similar to:
15795 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15796 gimple *g
15797 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15798 build_int_cst (arg2_type, 0)), arg0);
15799 gimple_set_location (g, loc);
15800 gsi_replace (gsi, g, true);
15801 return true;
15802 }
15803
15804 /* Unaligned vector loads. */
15805 case VSX_BUILTIN_LXVW4X_V16QI:
15806 case VSX_BUILTIN_LXVW4X_V8HI:
15807 case VSX_BUILTIN_LXVW4X_V4SF:
15808 case VSX_BUILTIN_LXVW4X_V4SI:
15809 case VSX_BUILTIN_LXVD2X_V2DF:
15810 case VSX_BUILTIN_LXVD2X_V2DI:
15811 {
15812 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
15813 arg1 = gimple_call_arg (stmt, 1); /* Address. */
15814 lhs = gimple_call_lhs (stmt);
15815 location_t loc = gimple_location (stmt);
15816 /* Since arg1 may be cast to a different type, just use ptr_type_node
15817 here instead of trying to enforce TBAA on pointer types. */
15818 tree arg1_type = ptr_type_node;
15819 tree lhs_type = TREE_TYPE (lhs);
15820 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15821 required alignment on Power is 4 bytes regardless of the data type. */
15822 tree align_ltype = build_aligned_type (lhs_type, 4);
15823 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15824 the tree using the value from arg0. The resulting type will match
15825 the type of arg1. */
15826 gimple_seq stmts = NULL;
15827 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15828 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15829 arg1_type, arg1, temp_offset);
15830 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15831 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15832 take an offset, but since we've already incorporated the offset
15833 above, here we just pass in a zero. */
15834 gimple *g;
15835 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15836 build_int_cst (arg1_type, 0)));
15837 gimple_set_location (g, loc);
15838 gsi_replace (gsi, g, true);
15839 return true;
15840 }
15841
15842 /* Unaligned vector stores. */
15843 case VSX_BUILTIN_STXVW4X_V16QI:
15844 case VSX_BUILTIN_STXVW4X_V8HI:
15845 case VSX_BUILTIN_STXVW4X_V4SF:
15846 case VSX_BUILTIN_STXVW4X_V4SI:
15847 case VSX_BUILTIN_STXVD2X_V2DF:
15848 case VSX_BUILTIN_STXVD2X_V2DI:
15849 {
15850 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15851 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15852 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15853 location_t loc = gimple_location (stmt);
15854 tree arg0_type = TREE_TYPE (arg0);
15855 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15856 tree arg2_type = ptr_type_node;
15857 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15858 required alignment on Power is 4 bytes regardless of the data type. */
15859 tree align_stype = build_aligned_type (arg0_type, 4);
15860 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15861 the tree using the value from arg1. */
15862 gimple_seq stmts = NULL;
15863 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15864 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15865 arg2_type, arg2, temp_offset);
15866 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15867 gimple *g;
15868 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15869 build_int_cst (arg2_type, 0)), arg0);
15870 gimple_set_location (g, loc);
15871 gsi_replace (gsi, g, true);
15872 return true;
15873 }
15874
15875 /* Vector fused multiply-add (fma). */
15876 case ALTIVEC_BUILTIN_VMADDFP:
15877 case VSX_BUILTIN_XVMADDDP:
15878 case ALTIVEC_BUILTIN_VMLADDUHM:
15879 {
15880 arg0 = gimple_call_arg (stmt, 0);
15881 arg1 = gimple_call_arg (stmt, 1);
15882 tree arg2 = gimple_call_arg (stmt, 2);
15883 lhs = gimple_call_lhs (stmt);
15884 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15885 gimple_call_set_lhs (g, lhs);
15886 gimple_call_set_nothrow (g, true);
15887 gimple_set_location (g, gimple_location (stmt));
15888 gsi_replace (gsi, g, true);
15889 return true;
15890 }
15891
15892 /* Vector compares: EQ, NE, GE, GT, LE. */
15893 case ALTIVEC_BUILTIN_VCMPEQUB:
15894 case ALTIVEC_BUILTIN_VCMPEQUH:
15895 case ALTIVEC_BUILTIN_VCMPEQUW:
15896 case P8V_BUILTIN_VCMPEQUD:
15897 fold_compare_helper (gsi, EQ_EXPR, stmt);
15898 return true;
15899
15900 case P9V_BUILTIN_CMPNEB:
15901 case P9V_BUILTIN_CMPNEH:
15902 case P9V_BUILTIN_CMPNEW:
15903 fold_compare_helper (gsi, NE_EXPR, stmt);
15904 return true;
15905
15906 case VSX_BUILTIN_CMPGE_16QI:
15907 case VSX_BUILTIN_CMPGE_U16QI:
15908 case VSX_BUILTIN_CMPGE_8HI:
15909 case VSX_BUILTIN_CMPGE_U8HI:
15910 case VSX_BUILTIN_CMPGE_4SI:
15911 case VSX_BUILTIN_CMPGE_U4SI:
15912 case VSX_BUILTIN_CMPGE_2DI:
15913 case VSX_BUILTIN_CMPGE_U2DI:
15914 fold_compare_helper (gsi, GE_EXPR, stmt);
15915 return true;
15916
15917 case ALTIVEC_BUILTIN_VCMPGTSB:
15918 case ALTIVEC_BUILTIN_VCMPGTUB:
15919 case ALTIVEC_BUILTIN_VCMPGTSH:
15920 case ALTIVEC_BUILTIN_VCMPGTUH:
15921 case ALTIVEC_BUILTIN_VCMPGTSW:
15922 case ALTIVEC_BUILTIN_VCMPGTUW:
15923 case P8V_BUILTIN_VCMPGTUD:
15924 case P8V_BUILTIN_VCMPGTSD:
15925 fold_compare_helper (gsi, GT_EXPR, stmt);
15926 return true;
15927
15928 case VSX_BUILTIN_CMPLE_16QI:
15929 case VSX_BUILTIN_CMPLE_U16QI:
15930 case VSX_BUILTIN_CMPLE_8HI:
15931 case VSX_BUILTIN_CMPLE_U8HI:
15932 case VSX_BUILTIN_CMPLE_4SI:
15933 case VSX_BUILTIN_CMPLE_U4SI:
15934 case VSX_BUILTIN_CMPLE_2DI:
15935 case VSX_BUILTIN_CMPLE_U2DI:
15936 fold_compare_helper (gsi, LE_EXPR, stmt);
15937 return true;
15938
15939 /* Flavors of vec_splat_[us]{8,16,32}. */
15940 case ALTIVEC_BUILTIN_VSPLTISB:
15941 case ALTIVEC_BUILTIN_VSPLTISH:
15942 case ALTIVEC_BUILTIN_VSPLTISW:
15943 {
15944 int size;
15945 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
15946 size = 8;
15947 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
15948 size = 16;
15949 else
15950 size = 32;
15951
15952 arg0 = gimple_call_arg (stmt, 0);
15953 lhs = gimple_call_lhs (stmt);
15954
15955 /* Only fold the vec_splat_*() if the low bits of arg0 form a
15956 5-bit signed constant in the range -16 to +15. */
15957 if (TREE_CODE (arg0) != INTEGER_CST
15958 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
15959 -16, 15))
15960 return false;
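/* Illustrative example: for vec_splat_s8 (size == 8) an argument of
   0xff sign-extends to -1, which is in range and folds to a vector of
   all -1 elements, while 0x90 sign-extends to -112 and is rejected by
   the check above.  */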
15961 gimple_seq stmts = NULL;
15962 location_t loc = gimple_location (stmt);
15963 tree splat_value = gimple_convert (&stmts, loc,
15964 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15965 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15966 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15967 g = gimple_build_assign (lhs, splat_tree);
15968 gimple_set_location (g, gimple_location (stmt));
15969 gsi_replace (gsi, g, true);
15970 return true;
15971 }
15972
15973 /* Flavors of vec_splat. */
15974 /* a = vec_splat (b, 0x3) becomes a = { b[3], b[3], b[3], ... }; */
15975 case ALTIVEC_BUILTIN_VSPLTB:
15976 case ALTIVEC_BUILTIN_VSPLTH:
15977 case ALTIVEC_BUILTIN_VSPLTW:
15978 case VSX_BUILTIN_XXSPLTD_V2DI:
15979 case VSX_BUILTIN_XXSPLTD_V2DF:
15980 {
15981 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15982 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15983 /* Only fold vec_splat () if arg1 is both a constant value and
15984 a valid index into the arg0 vector. */
15985 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15986 if (TREE_CODE (arg1) != INTEGER_CST
15987 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15988 return false;
15989 lhs = gimple_call_lhs (stmt);
15990 tree lhs_type = TREE_TYPE (lhs);
15991 tree arg0_type = TREE_TYPE (arg0);
15992 tree splat;
15993 if (TREE_CODE (arg0) == VECTOR_CST)
15994 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15995 else
15996 {
15997 /* Determine (in bits) the length and start location of the
15998 splat value for a call to the tree_vec_extract helper. */
15999 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16000 * BITS_PER_UNIT / n_elts;
16001 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16002 tree len = build_int_cst (bitsizetype, splat_elem_size);
16003 tree start = build_int_cst (bitsizetype, splat_start_bit);
16004 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16005 len, start);
16006 }
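/* Illustrative example: vec_splat (v, 3) on a V4SI argument computes
   splat_elem_size == 32 and splat_start_bit == 96, so the fourth
   element is extracted and then replicated below.  */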
16007 /* And finally, build the new vector. */
16008 tree splat_tree = build_vector_from_val (lhs_type, splat);
16009 g = gimple_build_assign (lhs, splat_tree);
16010 gimple_set_location (g, gimple_location (stmt));
16011 gsi_replace (gsi, g, true);
16012 return true;
16013 }
16014
16015 /* Flavors of vec_mergel. */
16016 case ALTIVEC_BUILTIN_VMRGLH:
16017 case ALTIVEC_BUILTIN_VMRGLW:
16018 case VSX_BUILTIN_XXMRGLW_4SI:
16019 case ALTIVEC_BUILTIN_VMRGLB:
16020 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16021 case VSX_BUILTIN_XXMRGLW_4SF:
16022 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16023 fold_mergehl_helper (gsi, stmt, 1);
16024 return true;
16025 /* Flavors of vec_mergeh. */
16026 case ALTIVEC_BUILTIN_VMRGHH:
16027 case ALTIVEC_BUILTIN_VMRGHW:
16028 case VSX_BUILTIN_XXMRGHW_4SI:
16029 case ALTIVEC_BUILTIN_VMRGHB:
16030 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16031 case VSX_BUILTIN_XXMRGHW_4SF:
16032 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16033 fold_mergehl_helper (gsi, stmt, 0);
16034 return true;
16035
16036 /* Flavors of vec_mergee. */
16037 case P8V_BUILTIN_VMRGEW_V4SI:
16038 case P8V_BUILTIN_VMRGEW_V2DI:
16039 case P8V_BUILTIN_VMRGEW_V4SF:
16040 case P8V_BUILTIN_VMRGEW_V2DF:
16041 fold_mergeeo_helper (gsi, stmt, 0);
16042 return true;
16043 /* Flavors of vec_mergeo. */
16044 case P8V_BUILTIN_VMRGOW_V4SI:
16045 case P8V_BUILTIN_VMRGOW_V2DI:
16046 case P8V_BUILTIN_VMRGOW_V4SF:
16047 case P8V_BUILTIN_VMRGOW_V2DF:
16048 fold_mergeeo_helper (gsi, stmt, 1);
16049 return true;
16050
16051 /* d = vec_pack (a, b) */
16052 case P8V_BUILTIN_VPKUDUM:
16053 case ALTIVEC_BUILTIN_VPKUHUM:
16054 case ALTIVEC_BUILTIN_VPKUWUM:
16055 {
16056 arg0 = gimple_call_arg (stmt, 0);
16057 arg1 = gimple_call_arg (stmt, 1);
16058 lhs = gimple_call_lhs (stmt);
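/* VEC_PACK_TRUNC_EXPR narrows and concatenates its operands; e.g.
   vpkudum packs two V2DI operands into a single V4SI result by
   truncating each doubleword element to a word.  */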
16059 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16060 gimple_set_location (g, gimple_location (stmt));
16061 gsi_replace (gsi, g, true);
16062 return true;
16063 }
16064
16065 /* d = vec_unpackh (a) */
16066 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16067 in this code is sensitive to endianness and needs to be inverted to
16068 handle both LE and BE targets. */
16069 case ALTIVEC_BUILTIN_VUPKHSB:
16070 case ALTIVEC_BUILTIN_VUPKHSH:
16071 case P8V_BUILTIN_VUPKHSW:
16072 {
16073 arg0 = gimple_call_arg (stmt, 0);
16074 lhs = gimple_call_lhs (stmt);
16075 if (BYTES_BIG_ENDIAN)
16076 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16077 else
16078 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16079 gimple_set_location (g, gimple_location (stmt));
16080 gsi_replace (gsi, g, true);
16081 return true;
16082 }
16083 /* d = vec_unpackl (a) */
16084 case ALTIVEC_BUILTIN_VUPKLSB:
16085 case ALTIVEC_BUILTIN_VUPKLSH:
16086 case P8V_BUILTIN_VUPKLSW:
16087 {
16088 arg0 = gimple_call_arg (stmt, 0);
16089 lhs = gimple_call_lhs (stmt);
16090 if (BYTES_BIG_ENDIAN)
16091 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16092 else
16093 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16094 gimple_set_location (g, gimple_location (stmt));
16095 gsi_replace (gsi, g, true);
16096 return true;
16097 }
16098 /* There is no gimple type corresponding to pixel, so just return. */
16099 case ALTIVEC_BUILTIN_VUPKHPX:
16100 case ALTIVEC_BUILTIN_VUPKLPX:
16101 return false;
16102
16103 /* vec_perm. */
16104 case ALTIVEC_BUILTIN_VPERM_16QI:
16105 case ALTIVEC_BUILTIN_VPERM_8HI:
16106 case ALTIVEC_BUILTIN_VPERM_4SI:
16107 case ALTIVEC_BUILTIN_VPERM_2DI:
16108 case ALTIVEC_BUILTIN_VPERM_4SF:
16109 case ALTIVEC_BUILTIN_VPERM_2DF:
16110 {
16111 arg0 = gimple_call_arg (stmt, 0);
16112 arg1 = gimple_call_arg (stmt, 1);
16113 tree permute = gimple_call_arg (stmt, 2);
16114 lhs = gimple_call_lhs (stmt);
16115 location_t loc = gimple_location (stmt);
16116 gimple_seq stmts = NULL;
16117 /* Convert arg0 and arg1 to match the type of the permute
16118 for the VEC_PERM_EXPR operation. */
16119 tree permute_type = (TREE_TYPE (permute));
16120 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16121 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16122 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16123 permute_type, arg0_ptype, arg1_ptype,
16124 permute);
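/* Note: for these built-ins the permute operand is the V16QI mask, so
   the inputs are converted to the mask's type for the VEC_PERM_EXPR and
   the result is converted back to the lhs type below.  */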
16125 /* Convert the result back to the desired lhs type upon completion. */
16126 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16127 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16128 g = gimple_build_assign (lhs, temp);
16129 gimple_set_location (g, loc);
16130 gsi_replace (gsi, g, true);
16131 return true;
16132 }
16133
16134 default:
16135 if (TARGET_DEBUG_BUILTIN)
16136 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16137 fn_code, fn_name1, fn_name2);
16138 break;
16139 }
16140
16141 return false;
16142 }
16143
16144 /* Expand an expression EXP that calls a built-in function,
16145 with result going to TARGET if that's convenient
16146 (and in mode MODE if that's convenient).
16147 SUBTARGET may be used as the target for computing one of EXP's operands.
16148 IGNORE is nonzero if the value is to be ignored. */
16149
16150 static rtx
16151 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16152 machine_mode mode ATTRIBUTE_UNUSED,
16153 int ignore ATTRIBUTE_UNUSED)
16154 {
16155 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16156 enum rs6000_builtins fcode
16157 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16158 size_t uns_fcode = (size_t)fcode;
16159 const struct builtin_description *d;
16160 size_t i;
16161 rtx ret;
16162 bool success;
16163 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16164 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16165 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16166
16167 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16168 floating point type, depending on whether long double is the IBM extended
16169 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16170 we only define one variant of the built-in function, and switch the code
16171 when defining it, rather than defining two built-ins and using the
16172 overload table in rs6000-c.c to switch between the two. If we don't have
16173 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16174 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16175 if (FLOAT128_IEEE_P (TFmode))
16176 switch (icode)
16177 {
16178 default:
16179 break;
16180
16181 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16182 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16183 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16184 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16185 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16186 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16187 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16188 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16189 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16190 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16191 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16192 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16193 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16194 }
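/* For instance, the sqrt round-to-odd built-in is registered with
   CODE_FOR_sqrtkf2_odd and is redirected here to CODE_FOR_sqrttf2_odd
   when long double is IEEE 128-bit.  */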
16195
16196 if (TARGET_DEBUG_BUILTIN)
16197 {
16198 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16199 const char *name2 = (icode != CODE_FOR_nothing)
16200 ? get_insn_name ((int) icode)
16201 : "nothing";
16202 const char *name3;
16203
16204 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16205 {
16206 default: name3 = "unknown"; break;
16207 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16208 case RS6000_BTC_UNARY: name3 = "unary"; break;
16209 case RS6000_BTC_BINARY: name3 = "binary"; break;
16210 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16211 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16212 case RS6000_BTC_ABS: name3 = "abs"; break;
16213 case RS6000_BTC_DST: name3 = "dst"; break;
16214 }
16215
16216
16217 fprintf (stderr,
16218 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16219 (name1) ? name1 : "---", fcode,
16220 (name2) ? name2 : "---", (int) icode,
16221 name3,
16222 func_valid_p ? "" : ", not valid");
16223 }
16224
16225 if (!func_valid_p)
16226 {
16227 rs6000_invalid_builtin (fcode);
16228
16229 /* Given it is invalid, just generate a normal call. */
16230 return expand_call (exp, target, ignore);
16231 }
16232
16233 switch (fcode)
16234 {
16235 case RS6000_BUILTIN_RECIP:
16236 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16237
16238 case RS6000_BUILTIN_RECIPF:
16239 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16240
16241 case RS6000_BUILTIN_RSQRTF:
16242 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16243
16244 case RS6000_BUILTIN_RSQRT:
16245 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16246
16247 case POWER7_BUILTIN_BPERMD:
16248 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16249 ? CODE_FOR_bpermd_di
16250 : CODE_FOR_bpermd_si), exp, target);
16251
16252 case RS6000_BUILTIN_GET_TB:
16253 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16254 target);
16255
16256 case RS6000_BUILTIN_MFTB:
16257 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16258 ? CODE_FOR_rs6000_mftb_di
16259 : CODE_FOR_rs6000_mftb_si),
16260 target);
16261
16262 case RS6000_BUILTIN_MFFS:
16263 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16264
16265 case RS6000_BUILTIN_MTFSB0:
16266 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16267
16268 case RS6000_BUILTIN_MTFSB1:
16269 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16270
16271 case RS6000_BUILTIN_SET_FPSCR_RN:
16272 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16273 exp);
16274
16275 case RS6000_BUILTIN_SET_FPSCR_DRN:
16276 return
16277 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16278 exp);
16279
16280 case RS6000_BUILTIN_MFFSL:
16281 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16282
16283 case RS6000_BUILTIN_MTFSF:
16284 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16285
16286 case RS6000_BUILTIN_CPU_INIT:
16287 case RS6000_BUILTIN_CPU_IS:
16288 case RS6000_BUILTIN_CPU_SUPPORTS:
16289 return cpu_expand_builtin (fcode, exp, target);
16290
16291 case MISC_BUILTIN_SPEC_BARRIER:
16292 {
16293 emit_insn (gen_speculation_barrier ());
16294 return NULL_RTX;
16295 }
16296
16297 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16298 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16299 {
16300 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16301 : (int) CODE_FOR_altivec_lvsl_direct);
16302 machine_mode tmode = insn_data[icode2].operand[0].mode;
16303 machine_mode mode = insn_data[icode2].operand[1].mode;
16304 tree arg;
16305 rtx op, addr, pat;
16306
16307 gcc_assert (TARGET_ALTIVEC);
16308
16309 arg = CALL_EXPR_ARG (exp, 0);
16310 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16311 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16312 addr = memory_address (mode, op);
16313 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16314 op = addr;
16315 else
16316 {
16317 /* For the load case we need to negate the address. */
16318 op = gen_reg_rtx (GET_MODE (addr));
16319 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16320 }
16321 op = gen_rtx_MEM (mode, op);
16322
16323 if (target == 0
16324 || GET_MODE (target) != tmode
16325 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16326 target = gen_reg_rtx (tmode);
16327
16328 pat = GEN_FCN (icode2) (target, op);
16329 if (!pat)
16330 return 0;
16331 emit_insn (pat);
16332
16333 return target;
16334 }
16335
16336 case ALTIVEC_BUILTIN_VCFUX:
16337 case ALTIVEC_BUILTIN_VCFSX:
16338 case ALTIVEC_BUILTIN_VCTUXS:
16339 case ALTIVEC_BUILTIN_VCTSXS:
16340 /* FIXME: There's got to be a nicer way to handle this case than
16341 constructing a new CALL_EXPR. */
16342 if (call_expr_nargs (exp) == 1)
16343 {
16344 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16345 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16346 }
16347 break;
16348
16349 /* For the pack and unpack int128 routines, fix up the builtin so it
16350 uses the correct IBM128 type. */
16351 case MISC_BUILTIN_PACK_IF:
16352 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16353 {
16354 icode = CODE_FOR_packtf;
16355 fcode = MISC_BUILTIN_PACK_TF;
16356 uns_fcode = (size_t)fcode;
16357 }
16358 break;
16359
16360 case MISC_BUILTIN_UNPACK_IF:
16361 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16362 {
16363 icode = CODE_FOR_unpacktf;
16364 fcode = MISC_BUILTIN_UNPACK_TF;
16365 uns_fcode = (size_t)fcode;
16366 }
16367 break;
16368
16369 default:
16370 break;
16371 }
16372
16373 if (TARGET_ALTIVEC)
16374 {
16375 ret = altivec_expand_builtin (exp, target, &success);
16376
16377 if (success)
16378 return ret;
16379 }
16380 if (TARGET_HTM)
16381 {
16382 ret = htm_expand_builtin (exp, target, &success);
16383
16384 if (success)
16385 return ret;
16386 }
16387
16388 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16389 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16390 gcc_assert (attr == RS6000_BTC_UNARY
16391 || attr == RS6000_BTC_BINARY
16392 || attr == RS6000_BTC_TERNARY
16393 || attr == RS6000_BTC_SPECIAL);
16394
16395 /* Handle simple unary operations. */
16396 d = bdesc_1arg;
16397 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16398 if (d->code == fcode)
16399 return rs6000_expand_unop_builtin (icode, exp, target);
16400
16401 /* Handle simple binary operations. */
16402 d = bdesc_2arg;
16403 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16404 if (d->code == fcode)
16405 return rs6000_expand_binop_builtin (icode, exp, target);
16406
16407 /* Handle simple ternary operations. */
16408 d = bdesc_3arg;
16409 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16410 if (d->code == fcode)
16411 return rs6000_expand_ternop_builtin (icode, exp, target);
16412
16413 /* Handle simple no-argument operations. */
16414 d = bdesc_0arg;
16415 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16416 if (d->code == fcode)
16417 return rs6000_expand_zeroop_builtin (icode, target);
16418
16419 gcc_unreachable ();
16420 }
16421
16422 /* Create a builtin vector type with a name, taking care not to give
16423 the canonical type a name. */
16424
16425 static tree
16426 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16427 {
16428 tree result = build_vector_type (elt_type, num_elts);
16429
16430 /* Copy so we don't give the canonical type a name. */
16431 result = build_variant_type_copy (result);
16432
16433 add_builtin_type (name, result);
16434
16435 return result;
16436 }
16437
16438 static void
16439 rs6000_init_builtins (void)
16440 {
16441 tree tdecl;
16442 tree ftype;
16443 machine_mode mode;
16444
16445 if (TARGET_DEBUG_BUILTIN)
16446 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16447 (TARGET_ALTIVEC) ? ", altivec" : "",
16448 (TARGET_VSX) ? ", vsx" : "");
16449
16450 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16451 : "__vector long long",
16452 intDI_type_node, 2);
16453 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16454 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16455 intSI_type_node, 4);
16456 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16457 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16458 intHI_type_node, 8);
16459 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16460 intQI_type_node, 16);
16461
16462 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16463 unsigned_intQI_type_node, 16);
16464 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16465 unsigned_intHI_type_node, 8);
16466 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16467 unsigned_intSI_type_node, 4);
16468 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16469 ? "__vector unsigned long"
16470 : "__vector unsigned long long",
16471 unsigned_intDI_type_node, 2);
16472
16473 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16474
16475 const_str_type_node
16476 = build_pointer_type (build_qualified_type (char_type_node,
16477 TYPE_QUAL_CONST));
16478
16479 /* We use V1TI mode as a special container to hold __int128_t items that
16480 must live in VSX registers. */
16481 if (intTI_type_node)
16482 {
16483 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16484 intTI_type_node, 1);
16485 unsigned_V1TI_type_node
16486 = rs6000_vector_type ("__vector unsigned __int128",
16487 unsigned_intTI_type_node, 1);
16488 }
16489
16490 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16491 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16492 'vector unsigned short'. */
16493
16494 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16495 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16496 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16497 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16498 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16499
16500 long_integer_type_internal_node = long_integer_type_node;
16501 long_unsigned_type_internal_node = long_unsigned_type_node;
16502 long_long_integer_type_internal_node = long_long_integer_type_node;
16503 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16504 intQI_type_internal_node = intQI_type_node;
16505 uintQI_type_internal_node = unsigned_intQI_type_node;
16506 intHI_type_internal_node = intHI_type_node;
16507 uintHI_type_internal_node = unsigned_intHI_type_node;
16508 intSI_type_internal_node = intSI_type_node;
16509 uintSI_type_internal_node = unsigned_intSI_type_node;
16510 intDI_type_internal_node = intDI_type_node;
16511 uintDI_type_internal_node = unsigned_intDI_type_node;
16512 intTI_type_internal_node = intTI_type_node;
16513 uintTI_type_internal_node = unsigned_intTI_type_node;
16514 float_type_internal_node = float_type_node;
16515 double_type_internal_node = double_type_node;
16516 long_double_type_internal_node = long_double_type_node;
16517 dfloat64_type_internal_node = dfloat64_type_node;
16518 dfloat128_type_internal_node = dfloat128_type_node;
16519 void_type_internal_node = void_type_node;
16520
16521 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16522 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16523 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16524 format that uses a pair of doubles, depending on the switches and
16525 defaults.
16526
16527 If we don't have support for either 128-bit IBM double-double or IEEE
16528 128-bit floating point, we need to make sure the type is non-zero, or
16529 else the self-test fails during bootstrap.
16530
16531 Always create __ibm128 as a separate type, even if the current long double
16532 format is IBM extended double.
16533
16534 For IEEE 128-bit floating point, always create the type __ieee128. If the
16535 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16536 __ieee128. */
16537 if (TARGET_FLOAT128_TYPE)
16538 {
16539 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16540 ibm128_float_type_node = long_double_type_node;
16541 else
16542 {
16543 ibm128_float_type_node = make_node (REAL_TYPE);
16544 TYPE_PRECISION (ibm128_float_type_node) = 128;
16545 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16546 layout_type (ibm128_float_type_node);
16547 }
16548
16549 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16550 "__ibm128");
16551
16552 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16553 ieee128_float_type_node = long_double_type_node;
16554 else
16555 ieee128_float_type_node = float128_type_node;
16556
16557 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16558 "__ieee128");
16559 }
16560
16561 else
16562 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16563
16564 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16565 tree type node. */
16566 builtin_mode_to_type[QImode][0] = integer_type_node;
16567 builtin_mode_to_type[HImode][0] = integer_type_node;
16568 builtin_mode_to_type[SImode][0] = intSI_type_node;
16569 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16570 builtin_mode_to_type[DImode][0] = intDI_type_node;
16571 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16572 builtin_mode_to_type[TImode][0] = intTI_type_node;
16573 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16574 builtin_mode_to_type[SFmode][0] = float_type_node;
16575 builtin_mode_to_type[DFmode][0] = double_type_node;
16576 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16577 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16578 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16579 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16580 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16581 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16582 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16583 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16584 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16585 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16586 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16587 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16588 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16589 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16590 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16591 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16592 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16593
16594 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16595 TYPE_NAME (bool_char_type_node) = tdecl;
16596
16597 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16598 TYPE_NAME (bool_short_type_node) = tdecl;
16599
16600 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16601 TYPE_NAME (bool_int_type_node) = tdecl;
16602
16603 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16604 TYPE_NAME (pixel_type_node) = tdecl;
16605
16606 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16607 bool_char_type_node, 16);
16608 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16609 bool_short_type_node, 8);
16610 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16611 bool_int_type_node, 4);
16612 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16613 ? "__vector __bool long"
16614 : "__vector __bool long long",
16615 bool_long_long_type_node, 2);
16616 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16617 pixel_type_node, 8);
16618
16619 /* Create Altivec and VSX builtins on machines with at least the
16620 general purpose extensions (970 and newer) to allow the use of
16621 the target attribute. */
16622 if (TARGET_EXTRA_BUILTINS)
16623 altivec_init_builtins ();
16624 if (TARGET_HTM)
16625 htm_init_builtins ();
16626
16627 if (TARGET_EXTRA_BUILTINS)
16628 rs6000_common_init_builtins ();
16629
16630 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16631 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16632 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16633
16634 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16635 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16636 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16637
16638 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16639 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16640 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16641
16642 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16643 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16644 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16645
16646 mode = (TARGET_64BIT) ? DImode : SImode;
16647 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16648 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16649 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16650
16651 ftype = build_function_type_list (unsigned_intDI_type_node,
16652 NULL_TREE);
16653 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16654
16655 if (TARGET_64BIT)
16656 ftype = build_function_type_list (unsigned_intDI_type_node,
16657 NULL_TREE);
16658 else
16659 ftype = build_function_type_list (unsigned_intSI_type_node,
16660 NULL_TREE);
16661 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16662
16663 ftype = build_function_type_list (double_type_node, NULL_TREE);
16664 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16665
16666 ftype = build_function_type_list (double_type_node, NULL_TREE);
16667 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16668
16669 ftype = build_function_type_list (void_type_node,
16670 intSI_type_node,
16671 NULL_TREE);
16672 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16673
16674 ftype = build_function_type_list (void_type_node,
16675 intSI_type_node,
16676 NULL_TREE);
16677 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16678
16679 ftype = build_function_type_list (void_type_node,
16680 intDI_type_node,
16681 NULL_TREE);
16682 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16683
16684 ftype = build_function_type_list (void_type_node,
16685 intDI_type_node,
16686 NULL_TREE);
16687 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16688
16689 ftype = build_function_type_list (void_type_node,
16690 intSI_type_node, double_type_node,
16691 NULL_TREE);
16692 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16693
16694 ftype = build_function_type_list (void_type_node, NULL_TREE);
16695 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16696 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16697 MISC_BUILTIN_SPEC_BARRIER);
16698
16699 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16700 NULL_TREE);
16701 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16702 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16703
16704 /* AIX libm provides clog as __clog. */
16705 if (TARGET_XCOFF
16706 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16707 set_user_assembler_name (tdecl, "__clog");
16708
16709 #ifdef SUBTARGET_INIT_BUILTINS
16710 SUBTARGET_INIT_BUILTINS;
16711 #endif
16712 }
16713
16714 /* Returns the rs6000 builtin decl for CODE. */
16715
16716 static tree
16717 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16718 {
16719 HOST_WIDE_INT fnmask;
16720
16721 if (code >= RS6000_BUILTIN_COUNT)
16722 return error_mark_node;
16723
16724 fnmask = rs6000_builtin_info[code].mask;
16725 if ((fnmask & rs6000_builtin_mask) != fnmask)
16726 {
16727 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16728 return error_mark_node;
16729 }
16730
16731 return rs6000_builtin_decls[code];
16732 }
16733
16734 static void
16735 altivec_init_builtins (void)
16736 {
16737 const struct builtin_description *d;
16738 size_t i;
16739 tree ftype;
16740 tree decl;
16741 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16742
16743 tree pvoid_type_node = build_pointer_type (void_type_node);
16744
16745 tree pcvoid_type_node
16746 = build_pointer_type (build_qualified_type (void_type_node,
16747 TYPE_QUAL_CONST));
16748
16749 tree int_ftype_opaque
16750 = build_function_type_list (integer_type_node,
16751 opaque_V4SI_type_node, NULL_TREE);
16752 tree opaque_ftype_opaque
16753 = build_function_type_list (integer_type_node, NULL_TREE);
16754 tree opaque_ftype_opaque_int
16755 = build_function_type_list (opaque_V4SI_type_node,
16756 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16757 tree opaque_ftype_opaque_opaque_int
16758 = build_function_type_list (opaque_V4SI_type_node,
16759 opaque_V4SI_type_node, opaque_V4SI_type_node,
16760 integer_type_node, NULL_TREE);
16761 tree opaque_ftype_opaque_opaque_opaque
16762 = build_function_type_list (opaque_V4SI_type_node,
16763 opaque_V4SI_type_node, opaque_V4SI_type_node,
16764 opaque_V4SI_type_node, NULL_TREE);
16765 tree opaque_ftype_opaque_opaque
16766 = build_function_type_list (opaque_V4SI_type_node,
16767 opaque_V4SI_type_node, opaque_V4SI_type_node,
16768 NULL_TREE);
16769 tree int_ftype_int_opaque_opaque
16770 = build_function_type_list (integer_type_node,
16771 integer_type_node, opaque_V4SI_type_node,
16772 opaque_V4SI_type_node, NULL_TREE);
16773 tree int_ftype_int_v4si_v4si
16774 = build_function_type_list (integer_type_node,
16775 integer_type_node, V4SI_type_node,
16776 V4SI_type_node, NULL_TREE);
16777 tree int_ftype_int_v2di_v2di
16778 = build_function_type_list (integer_type_node,
16779 integer_type_node, V2DI_type_node,
16780 V2DI_type_node, NULL_TREE);
16781 tree void_ftype_v4si
16782 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16783 tree v8hi_ftype_void
16784 = build_function_type_list (V8HI_type_node, NULL_TREE);
16785 tree void_ftype_void
16786 = build_function_type_list (void_type_node, NULL_TREE);
16787 tree void_ftype_int
16788 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16789
16790 tree opaque_ftype_long_pcvoid
16791 = build_function_type_list (opaque_V4SI_type_node,
16792 long_integer_type_node, pcvoid_type_node,
16793 NULL_TREE);
16794 tree v16qi_ftype_long_pcvoid
16795 = build_function_type_list (V16QI_type_node,
16796 long_integer_type_node, pcvoid_type_node,
16797 NULL_TREE);
16798 tree v8hi_ftype_long_pcvoid
16799 = build_function_type_list (V8HI_type_node,
16800 long_integer_type_node, pcvoid_type_node,
16801 NULL_TREE);
16802 tree v4si_ftype_long_pcvoid
16803 = build_function_type_list (V4SI_type_node,
16804 long_integer_type_node, pcvoid_type_node,
16805 NULL_TREE);
16806 tree v4sf_ftype_long_pcvoid
16807 = build_function_type_list (V4SF_type_node,
16808 long_integer_type_node, pcvoid_type_node,
16809 NULL_TREE);
16810 tree v2df_ftype_long_pcvoid
16811 = build_function_type_list (V2DF_type_node,
16812 long_integer_type_node, pcvoid_type_node,
16813 NULL_TREE);
16814 tree v2di_ftype_long_pcvoid
16815 = build_function_type_list (V2DI_type_node,
16816 long_integer_type_node, pcvoid_type_node,
16817 NULL_TREE);
16818 tree v1ti_ftype_long_pcvoid
16819 = build_function_type_list (V1TI_type_node,
16820 long_integer_type_node, pcvoid_type_node,
16821 NULL_TREE);
16822
16823 tree void_ftype_opaque_long_pvoid
16824 = build_function_type_list (void_type_node,
16825 opaque_V4SI_type_node, long_integer_type_node,
16826 pvoid_type_node, NULL_TREE);
16827 tree void_ftype_v4si_long_pvoid
16828 = build_function_type_list (void_type_node,
16829 V4SI_type_node, long_integer_type_node,
16830 pvoid_type_node, NULL_TREE);
16831 tree void_ftype_v16qi_long_pvoid
16832 = build_function_type_list (void_type_node,
16833 V16QI_type_node, long_integer_type_node,
16834 pvoid_type_node, NULL_TREE);
16835
16836 tree void_ftype_v16qi_pvoid_long
16837 = build_function_type_list (void_type_node,
16838 V16QI_type_node, pvoid_type_node,
16839 long_integer_type_node, NULL_TREE);
16840
16841 tree void_ftype_v8hi_long_pvoid
16842 = build_function_type_list (void_type_node,
16843 V8HI_type_node, long_integer_type_node,
16844 pvoid_type_node, NULL_TREE);
16845 tree void_ftype_v4sf_long_pvoid
16846 = build_function_type_list (void_type_node,
16847 V4SF_type_node, long_integer_type_node,
16848 pvoid_type_node, NULL_TREE);
16849 tree void_ftype_v2df_long_pvoid
16850 = build_function_type_list (void_type_node,
16851 V2DF_type_node, long_integer_type_node,
16852 pvoid_type_node, NULL_TREE);
16853 tree void_ftype_v1ti_long_pvoid
16854 = build_function_type_list (void_type_node,
16855 V1TI_type_node, long_integer_type_node,
16856 pvoid_type_node, NULL_TREE);
16857 tree void_ftype_v2di_long_pvoid
16858 = build_function_type_list (void_type_node,
16859 V2DI_type_node, long_integer_type_node,
16860 pvoid_type_node, NULL_TREE);
16861 tree int_ftype_int_v8hi_v8hi
16862 = build_function_type_list (integer_type_node,
16863 integer_type_node, V8HI_type_node,
16864 V8HI_type_node, NULL_TREE);
16865 tree int_ftype_int_v16qi_v16qi
16866 = build_function_type_list (integer_type_node,
16867 integer_type_node, V16QI_type_node,
16868 V16QI_type_node, NULL_TREE);
16869 tree int_ftype_int_v4sf_v4sf
16870 = build_function_type_list (integer_type_node,
16871 integer_type_node, V4SF_type_node,
16872 V4SF_type_node, NULL_TREE);
16873 tree int_ftype_int_v2df_v2df
16874 = build_function_type_list (integer_type_node,
16875 integer_type_node, V2DF_type_node,
16876 V2DF_type_node, NULL_TREE);
16877 tree v2di_ftype_v2di
16878 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16879 tree v4si_ftype_v4si
16880 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16881 tree v8hi_ftype_v8hi
16882 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16883 tree v16qi_ftype_v16qi
16884 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16885 tree v4sf_ftype_v4sf
16886 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16887 tree v2df_ftype_v2df
16888 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16889 tree void_ftype_pcvoid_int_int
16890 = build_function_type_list (void_type_node,
16891 pcvoid_type_node, integer_type_node,
16892 integer_type_node, NULL_TREE);
16893
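/* A reading of the naming convention used above, not a new definition:
   v4si_ftype_long_pcvoid stands for

     vector int f (long, const void *);

   and void_ftype_v4si_long_pvoid stands for

     void f (vector int, long, void *);

   i.e. the return type comes first, and "pc"/"p" mark const and
   non-const pointers respectively.  */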
16894 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16895 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16896 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16897 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16898 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16899 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16900 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16901 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16902 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16903 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16904 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16905 ALTIVEC_BUILTIN_LVXL_V2DF);
16906 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16907 ALTIVEC_BUILTIN_LVXL_V2DI);
16908 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16909 ALTIVEC_BUILTIN_LVXL_V4SF);
16910 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16911 ALTIVEC_BUILTIN_LVXL_V4SI);
16912 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16913 ALTIVEC_BUILTIN_LVXL_V8HI);
16914 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16915 ALTIVEC_BUILTIN_LVXL_V16QI);
16916 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16917 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16918 ALTIVEC_BUILTIN_LVX_V1TI);
16919 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16920 ALTIVEC_BUILTIN_LVX_V2DF);
16921 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16922 ALTIVEC_BUILTIN_LVX_V2DI);
16923 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16924 ALTIVEC_BUILTIN_LVX_V4SF);
16925 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16926 ALTIVEC_BUILTIN_LVX_V4SI);
16927 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16928 ALTIVEC_BUILTIN_LVX_V8HI);
16929 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16930 ALTIVEC_BUILTIN_LVX_V16QI);
16931 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16932 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16933 ALTIVEC_BUILTIN_STVX_V2DF);
16934 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16935 ALTIVEC_BUILTIN_STVX_V2DI);
16936 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16937 ALTIVEC_BUILTIN_STVX_V4SF);
16938 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16939 ALTIVEC_BUILTIN_STVX_V4SI);
16940 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16941 ALTIVEC_BUILTIN_STVX_V8HI);
16942 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16943 ALTIVEC_BUILTIN_STVX_V16QI);
16944 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16945 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16946 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16947 ALTIVEC_BUILTIN_STVXL_V2DF);
16948 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16949 ALTIVEC_BUILTIN_STVXL_V2DI);
16950 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16951 ALTIVEC_BUILTIN_STVXL_V4SF);
16952 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16953 ALTIVEC_BUILTIN_STVXL_V4SI);
16954 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16955 ALTIVEC_BUILTIN_STVXL_V8HI);
16956 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16957 ALTIVEC_BUILTIN_STVXL_V16QI);
16958 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16959 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16960 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16961 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16962 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16963 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16964 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16965 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16966 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16967 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16968 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16969 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16970 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16971 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16972 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16973 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16974
16975 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16976 VSX_BUILTIN_LXVD2X_V2DF);
16977 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16978 VSX_BUILTIN_LXVD2X_V2DI);
16979 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16980 VSX_BUILTIN_LXVW4X_V4SF);
16981 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16982 VSX_BUILTIN_LXVW4X_V4SI);
16983 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16984 VSX_BUILTIN_LXVW4X_V8HI);
16985 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16986 VSX_BUILTIN_LXVW4X_V16QI);
16987 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16988 VSX_BUILTIN_STXVD2X_V2DF);
16989 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16990 VSX_BUILTIN_STXVD2X_V2DI);
16991 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16992 VSX_BUILTIN_STXVW4X_V4SF);
16993 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16994 VSX_BUILTIN_STXVW4X_V4SI);
16995 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16996 VSX_BUILTIN_STXVW4X_V8HI);
16997 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16998 VSX_BUILTIN_STXVW4X_V16QI);
16999
17000 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17001 VSX_BUILTIN_LD_ELEMREV_V2DF);
17002 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17003 VSX_BUILTIN_LD_ELEMREV_V2DI);
17004 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17005 VSX_BUILTIN_LD_ELEMREV_V4SF);
17006 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17007 VSX_BUILTIN_LD_ELEMREV_V4SI);
17008 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17009 VSX_BUILTIN_LD_ELEMREV_V8HI);
17010 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17011 VSX_BUILTIN_LD_ELEMREV_V16QI);
17012 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17013 VSX_BUILTIN_ST_ELEMREV_V2DF);
17014 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17015 VSX_BUILTIN_ST_ELEMREV_V1TI);
17016 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17017 VSX_BUILTIN_ST_ELEMREV_V2DI);
17018 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17019 VSX_BUILTIN_ST_ELEMREV_V4SF);
17020 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17021 VSX_BUILTIN_ST_ELEMREV_V4SI);
17022 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17023 VSX_BUILTIN_ST_ELEMREV_V8HI);
17024 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17025 VSX_BUILTIN_ST_ELEMREV_V16QI);
17026
17027 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17028 VSX_BUILTIN_VEC_LD);
17029 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17030 VSX_BUILTIN_VEC_ST);
17031 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17032 VSX_BUILTIN_VEC_XL);
17033 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17034 VSX_BUILTIN_VEC_XL_BE);
17035 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17036 VSX_BUILTIN_VEC_XST);
17037 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17038 VSX_BUILTIN_VEC_XST_BE);
17039
17040 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17041 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17042 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17043
17044 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17045 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17046 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17047 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17048 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17049 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17050 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17051 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17052 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17053 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17054 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17055 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17056
17057 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17058 ALTIVEC_BUILTIN_VEC_ADDE);
17059 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17060 ALTIVEC_BUILTIN_VEC_ADDEC);
17061 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17062 ALTIVEC_BUILTIN_VEC_CMPNE);
17063 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17064 ALTIVEC_BUILTIN_VEC_MUL);
17065 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17066 ALTIVEC_BUILTIN_VEC_SUBE);
17067 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17068 ALTIVEC_BUILTIN_VEC_SUBEC);
17069
17070 /* Cell builtins. */
17071 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17072 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17073 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17074 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17075
17076 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17077 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17078 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17079 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17080
17081 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17082 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17083 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17084 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17085
17086 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17087 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17088 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17089 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17090
17091 if (TARGET_P9_VECTOR)
17092 {
17093 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17094 P9V_BUILTIN_STXVL);
17095 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17096 P9V_BUILTIN_XST_LEN_R);
17097 }
17098
17099 /* Add the DST variants. */
17100 d = bdesc_dst;
17101 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17102 {
17103 HOST_WIDE_INT mask = d->mask;
17104
17105 /* It is expected that these dst built-in functions may have
17106 d->icode equal to CODE_FOR_nothing. */
17107 if ((mask & builtin_mask) != mask)
17108 {
17109 if (TARGET_DEBUG_BUILTIN)
17110 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17111 d->name);
17112 continue;
17113 }
17114 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17115 }
17116
17117 /* Initialize the predicates. */
17118 d = bdesc_altivec_preds;
17119 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17120 {
17121 machine_mode mode1;
17122 tree type;
17123 HOST_WIDE_INT mask = d->mask;
17124
17125 if ((mask & builtin_mask) != mask)
17126 {
17127 if (TARGET_DEBUG_BUILTIN)
17128 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17129 d->name);
17130 continue;
17131 }
17132
17133 if (rs6000_overloaded_builtin_p (d->code))
17134 mode1 = VOIDmode;
17135 else
17136 {
17137 /* Cannot define builtin if the instruction is disabled. */
17138 gcc_assert (d->icode != CODE_FOR_nothing);
17139 mode1 = insn_data[d->icode].operand[1].mode;
17140 }
17141
17142 switch (mode1)
17143 {
17144 case E_VOIDmode:
17145 type = int_ftype_int_opaque_opaque;
17146 break;
17147 case E_V2DImode:
17148 type = int_ftype_int_v2di_v2di;
17149 break;
17150 case E_V4SImode:
17151 type = int_ftype_int_v4si_v4si;
17152 break;
17153 case E_V8HImode:
17154 type = int_ftype_int_v8hi_v8hi;
17155 break;
17156 case E_V16QImode:
17157 type = int_ftype_int_v16qi_v16qi;
17158 break;
17159 case E_V4SFmode:
17160 type = int_ftype_int_v4sf_v4sf;
17161 break;
17162 case E_V2DFmode:
17163 type = int_ftype_int_v2df_v2df;
17164 break;
17165 default:
17166 gcc_unreachable ();
17167 }
17168
17169 def_builtin (d->name, type, d->code);
17170 }
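/* For instance, a V4SI comparison predicate defined by the loop above
   receives the type int f (int, vector int, vector int); the leading
   int argument is the CR6 selector (all vs. any) of the AltiVec PIM
   predicate convention, an interpretation assumed here rather than
   stated in this file.  */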
17171
17172 /* Initialize the abs* operators. */
17173 d = bdesc_abs;
17174 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17175 {
17176 machine_mode mode0;
17177 tree type;
17178 HOST_WIDE_INT mask = d->mask;
17179
17180 if ((mask & builtin_mask) != mask)
17181 {
17182 if (TARGET_DEBUG_BUILTIN)
17183 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17184 d->name);
17185 continue;
17186 }
17187
17188 /* Cannot define builtin if the instruction is disabled. */
17189 gcc_assert (d->icode != CODE_FOR_nothing);
17190 mode0 = insn_data[d->icode].operand[0].mode;
17191
17192 switch (mode0)
17193 {
17194 case E_V2DImode:
17195 type = v2di_ftype_v2di;
17196 break;
17197 case E_V4SImode:
17198 type = v4si_ftype_v4si;
17199 break;
17200 case E_V8HImode:
17201 type = v8hi_ftype_v8hi;
17202 break;
17203 case E_V16QImode:
17204 type = v16qi_ftype_v16qi;
17205 break;
17206 case E_V4SFmode:
17207 type = v4sf_ftype_v4sf;
17208 break;
17209 case E_V2DFmode:
17210 type = v2df_ftype_v2df;
17211 break;
17212 default:
17213 gcc_unreachable ();
17214 }
17215
17216 def_builtin (d->name, type, d->code);
17217 }
17218
17219 /* Initialize target builtin that implements
17220 targetm.vectorize.builtin_mask_for_load. */
17221
17222 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17223 v16qi_ftype_long_pcvoid,
17224 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17225 BUILT_IN_MD, NULL, NULL_TREE);
17226 TREE_READONLY (decl) = 1;
17227 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17228 altivec_builtin_mask_for_load = decl;
17229
17230 /* Access to the vec_init patterns. */
17231 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17232 integer_type_node, integer_type_node,
17233 integer_type_node, NULL_TREE);
17234 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17235
17236 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17237 short_integer_type_node,
17238 short_integer_type_node,
17239 short_integer_type_node,
17240 short_integer_type_node,
17241 short_integer_type_node,
17242 short_integer_type_node,
17243 short_integer_type_node, NULL_TREE);
17244 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17245
17246 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17247 char_type_node, char_type_node,
17248 char_type_node, char_type_node,
17249 char_type_node, char_type_node,
17250 char_type_node, char_type_node,
17251 char_type_node, char_type_node,
17252 char_type_node, char_type_node,
17253 char_type_node, char_type_node,
17254 char_type_node, NULL_TREE);
17255 def_builtin ("__builtin_vec_init_v16qi", ftype,
17256 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17257
17258 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17259 float_type_node, float_type_node,
17260 float_type_node, NULL_TREE);
17261 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17262
17263 /* VSX builtins. */
17264 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17265 double_type_node, NULL_TREE);
17266 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17267
17268 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17269 intDI_type_node, NULL_TREE);
17270 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17271
17272 /* Access to the vec_set patterns. */
17273 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17274 intSI_type_node,
17275 integer_type_node, NULL_TREE);
17276 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17277
17278 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17279 intHI_type_node,
17280 integer_type_node, NULL_TREE);
17281 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17282
17283 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17284 intQI_type_node,
17285 integer_type_node, NULL_TREE);
17286 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17287
17288 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17289 float_type_node,
17290 integer_type_node, NULL_TREE);
17291 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17292
17293 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17294 double_type_node,
17295 integer_type_node, NULL_TREE);
17296 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17297
17298 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17299 intDI_type_node,
17300 integer_type_node, NULL_TREE);
17301 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17302
17303 /* Access to the vec_extract patterns. */
17304 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17305 integer_type_node, NULL_TREE);
17306 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17307
17308 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17309 integer_type_node, NULL_TREE);
17310 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17311
17312 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17313 integer_type_node, NULL_TREE);
17314 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17315
17316 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17317 integer_type_node, NULL_TREE);
17318 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17319
17320 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17321 integer_type_node, NULL_TREE);
17322 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17323
17324 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17325 integer_type_node, NULL_TREE);
17326 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17327
17328
17329 if (V1TI_type_node)
17330 {
17331 tree v1ti_ftype_long_pcvoid
17332 = build_function_type_list (V1TI_type_node,
17333 long_integer_type_node, pcvoid_type_node,
17334 NULL_TREE);
17335 tree void_ftype_v1ti_long_pvoid
17336 = build_function_type_list (void_type_node,
17337 V1TI_type_node, long_integer_type_node,
17338 pvoid_type_node, NULL_TREE);
17339 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17340 VSX_BUILTIN_LD_ELEMREV_V1TI);
17341 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17342 VSX_BUILTIN_LXVD2X_V1TI);
17343 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17344 VSX_BUILTIN_STXVD2X_V1TI);
17345 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17346 NULL_TREE);
17347 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17348 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17349 intTI_type_node,
17350 integer_type_node, NULL_TREE);
17351 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17352 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17353 integer_type_node, NULL_TREE);
17354 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17355 }
17356
17357 }
17358
17359 static void
17360 htm_init_builtins (void)
17361 {
17362 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17363 const struct builtin_description *d;
17364 size_t i;
17365
17366 d = bdesc_htm;
17367 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17368 {
17369 tree op[MAX_HTM_OPERANDS], type;
17370 HOST_WIDE_INT mask = d->mask;
17371 unsigned attr = rs6000_builtin_info[d->code].attr;
17372 bool void_func = (attr & RS6000_BTC_VOID);
17373 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17374 int nopnds = 0;
17375 tree gpr_type_node;
17376 tree rettype;
17377 tree argtype;
17378
17379 /* It is expected that these htm built-in functions may have
17380 d->icode equal to CODE_FOR_nothing. */
17381
17382 if (TARGET_32BIT && TARGET_POWERPC64)
17383 gpr_type_node = long_long_unsigned_type_node;
17384 else
17385 gpr_type_node = long_unsigned_type_node;
17386
17387 if (attr & RS6000_BTC_SPR)
17388 {
17389 rettype = gpr_type_node;
17390 argtype = gpr_type_node;
17391 }
17392 else if (d->code == HTM_BUILTIN_TABORTDC
17393 || d->code == HTM_BUILTIN_TABORTDCI)
17394 {
17395 rettype = unsigned_type_node;
17396 argtype = gpr_type_node;
17397 }
17398 else
17399 {
17400 rettype = unsigned_type_node;
17401 argtype = unsigned_type_node;
17402 }
17403
17404 if ((mask & builtin_mask) != mask)
17405 {
17406 if (TARGET_DEBUG_BUILTIN)
17407 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17408 continue;
17409 }
17410
17411 if (d->name == 0)
17412 {
17413 if (TARGET_DEBUG_BUILTIN)
17414 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17415 (long unsigned) i);
17416 continue;
17417 }
17418
17419 op[nopnds++] = (void_func) ? void_type_node : rettype;
17420
17421 if (attr_args == RS6000_BTC_UNARY)
17422 op[nopnds++] = argtype;
17423 else if (attr_args == RS6000_BTC_BINARY)
17424 {
17425 op[nopnds++] = argtype;
17426 op[nopnds++] = argtype;
17427 }
17428 else if (attr_args == RS6000_BTC_TERNARY)
17429 {
17430 op[nopnds++] = argtype;
17431 op[nopnds++] = argtype;
17432 op[nopnds++] = argtype;
17433 }
17434
17435 switch (nopnds)
17436 {
17437 case 1:
17438 type = build_function_type_list (op[0], NULL_TREE);
17439 break;
17440 case 2:
17441 type = build_function_type_list (op[0], op[1], NULL_TREE);
17442 break;
17443 case 3:
17444 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17445 break;
17446 case 4:
17447 type = build_function_type_list (op[0], op[1], op[2], op[3],
17448 NULL_TREE);
17449 break;
17450 default:
17451 gcc_unreachable ();
17452 }
17453
17454 def_builtin (d->name, type, d->code);
17455 }
17456 }
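/* Worked example for the loop above (hypothetical entry): a non-void,
   non-SPR builtin tagged RS6000_BTC_BINARY collects
   op[0] = unsigned_type_node for the return value plus two argument
   copies of unsigned_type_node, so nopnds == 3 and the signature built
   is unsigned f (unsigned, unsigned).  */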
17457
17458 /* Hash function for builtin functions with up to 3 arguments and a return
17459 type. */
17460 hashval_t
17461 builtin_hasher::hash (builtin_hash_struct *bh)
17462 {
17463 unsigned ret = 0;
17464 int i;
17465
17466 for (i = 0; i < 4; i++)
17467 {
17468 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17469 ret = (ret * 2) + bh->uns_p[i];
17470 }
17471
17472 return ret;
17473 }
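/* The loop above folds the four (mode, uns_p) pairs into one value in
   mixed radix: each iteration multiplies by MAX_MACHINE_MODE and adds
   the mode, then multiplies by 2 and adds the signedness bit, so two
   signatures collide only if the unsigned arithmetic wraps around.  */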
17474
17475 /* Compare builtin hash entries H1 and H2 for equivalence. */
17476 bool
17477 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17478 {
17479 return ((p1->mode[0] == p2->mode[0])
17480 && (p1->mode[1] == p2->mode[1])
17481 && (p1->mode[2] == p2->mode[2])
17482 && (p1->mode[3] == p2->mode[3])
17483 && (p1->uns_p[0] == p2->uns_p[0])
17484 && (p1->uns_p[1] == p2->uns_p[1])
17485 && (p1->uns_p[2] == p2->uns_p[2])
17486 && (p1->uns_p[3] == p2->uns_p[3]));
17487 }
17488
17489 /* Map types for builtin functions with an explicit return type and up to 3
17490 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
17491 of each unused argument. */
17492 static tree
17493 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17494 machine_mode mode_arg1, machine_mode mode_arg2,
17495 enum rs6000_builtins builtin, const char *name)
17496 {
17497 struct builtin_hash_struct h;
17498 struct builtin_hash_struct *h2;
17499 int num_args = 3;
17500 int i;
17501 tree ret_type = NULL_TREE;
17502 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17503
17504 /* Create builtin_hash_table. */
17505 if (builtin_hash_table == NULL)
17506 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17507
17508 h.type = NULL_TREE;
17509 h.mode[0] = mode_ret;
17510 h.mode[1] = mode_arg0;
17511 h.mode[2] = mode_arg1;
17512 h.mode[3] = mode_arg2;
17513 h.uns_p[0] = 0;
17514 h.uns_p[1] = 0;
17515 h.uns_p[2] = 0;
17516 h.uns_p[3] = 0;
17517
17518 /* If the builtin produces unsigned results or takes unsigned
17519 arguments, and it is returned as a decl for the vectorizer (such as
17520 widening multiplies, permute), make sure the arguments and return value
17521 have the correct signedness. */
17522 switch (builtin)
17523 {
17524 /* unsigned 1 argument functions. */
17525 case CRYPTO_BUILTIN_VSBOX:
17526 case P8V_BUILTIN_VGBBD:
17527 case MISC_BUILTIN_CDTBCD:
17528 case MISC_BUILTIN_CBCDTD:
17529 h.uns_p[0] = 1;
17530 h.uns_p[1] = 1;
17531 break;
17532
17533 /* unsigned 2 argument functions. */
17534 case ALTIVEC_BUILTIN_VMULEUB:
17535 case ALTIVEC_BUILTIN_VMULEUH:
17536 case P8V_BUILTIN_VMULEUW:
17537 case ALTIVEC_BUILTIN_VMULOUB:
17538 case ALTIVEC_BUILTIN_VMULOUH:
17539 case P8V_BUILTIN_VMULOUW:
17540 case CRYPTO_BUILTIN_VCIPHER:
17541 case CRYPTO_BUILTIN_VCIPHERLAST:
17542 case CRYPTO_BUILTIN_VNCIPHER:
17543 case CRYPTO_BUILTIN_VNCIPHERLAST:
17544 case CRYPTO_BUILTIN_VPMSUMB:
17545 case CRYPTO_BUILTIN_VPMSUMH:
17546 case CRYPTO_BUILTIN_VPMSUMW:
17547 case CRYPTO_BUILTIN_VPMSUMD:
17548 case CRYPTO_BUILTIN_VPMSUM:
17549 case MISC_BUILTIN_ADDG6S:
17550 case MISC_BUILTIN_DIVWEU:
17551 case MISC_BUILTIN_DIVDEU:
17552 case VSX_BUILTIN_UDIV_V2DI:
17553 case ALTIVEC_BUILTIN_VMAXUB:
17554 case ALTIVEC_BUILTIN_VMINUB:
17555 case ALTIVEC_BUILTIN_VMAXUH:
17556 case ALTIVEC_BUILTIN_VMINUH:
17557 case ALTIVEC_BUILTIN_VMAXUW:
17558 case ALTIVEC_BUILTIN_VMINUW:
17559 case P8V_BUILTIN_VMAXUD:
17560 case P8V_BUILTIN_VMINUD:
17561 h.uns_p[0] = 1;
17562 h.uns_p[1] = 1;
17563 h.uns_p[2] = 1;
17564 break;
17565
17566 /* unsigned 3 argument functions. */
17567 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17568 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17569 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17570 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17571 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17572 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17573 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17574 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17575 case VSX_BUILTIN_VPERM_16QI_UNS:
17576 case VSX_BUILTIN_VPERM_8HI_UNS:
17577 case VSX_BUILTIN_VPERM_4SI_UNS:
17578 case VSX_BUILTIN_VPERM_2DI_UNS:
17579 case VSX_BUILTIN_XXSEL_16QI_UNS:
17580 case VSX_BUILTIN_XXSEL_8HI_UNS:
17581 case VSX_BUILTIN_XXSEL_4SI_UNS:
17582 case VSX_BUILTIN_XXSEL_2DI_UNS:
17583 case CRYPTO_BUILTIN_VPERMXOR:
17584 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17585 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17586 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17587 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17588 case CRYPTO_BUILTIN_VSHASIGMAW:
17589 case CRYPTO_BUILTIN_VSHASIGMAD:
17590 case CRYPTO_BUILTIN_VSHASIGMA:
17591 h.uns_p[0] = 1;
17592 h.uns_p[1] = 1;
17593 h.uns_p[2] = 1;
17594 h.uns_p[3] = 1;
17595 break;
17596
17597 /* signed permute functions with unsigned char mask. */
17598 case ALTIVEC_BUILTIN_VPERM_16QI:
17599 case ALTIVEC_BUILTIN_VPERM_8HI:
17600 case ALTIVEC_BUILTIN_VPERM_4SI:
17601 case ALTIVEC_BUILTIN_VPERM_4SF:
17602 case ALTIVEC_BUILTIN_VPERM_2DI:
17603 case ALTIVEC_BUILTIN_VPERM_2DF:
17604 case VSX_BUILTIN_VPERM_16QI:
17605 case VSX_BUILTIN_VPERM_8HI:
17606 case VSX_BUILTIN_VPERM_4SI:
17607 case VSX_BUILTIN_VPERM_4SF:
17608 case VSX_BUILTIN_VPERM_2DI:
17609 case VSX_BUILTIN_VPERM_2DF:
17610 h.uns_p[3] = 1;
17611 break;
17612
17613 /* unsigned args, signed return. */
17614 case VSX_BUILTIN_XVCVUXDSP:
17615 case VSX_BUILTIN_XVCVUXDDP_UNS:
17616 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17617 h.uns_p[1] = 1;
17618 break;
17619
17620 /* signed args, unsigned return. */
17621 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17622 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17623 case MISC_BUILTIN_UNPACK_TD:
17624 case MISC_BUILTIN_UNPACK_V1TI:
17625 h.uns_p[0] = 1;
17626 break;
17627
17628 /* unsigned arguments, bool return (compares). */
17629 case ALTIVEC_BUILTIN_VCMPEQUB:
17630 case ALTIVEC_BUILTIN_VCMPEQUH:
17631 case ALTIVEC_BUILTIN_VCMPEQUW:
17632 case P8V_BUILTIN_VCMPEQUD:
17633 case VSX_BUILTIN_CMPGE_U16QI:
17634 case VSX_BUILTIN_CMPGE_U8HI:
17635 case VSX_BUILTIN_CMPGE_U4SI:
17636 case VSX_BUILTIN_CMPGE_U2DI:
17637 case ALTIVEC_BUILTIN_VCMPGTUB:
17638 case ALTIVEC_BUILTIN_VCMPGTUH:
17639 case ALTIVEC_BUILTIN_VCMPGTUW:
17640 case P8V_BUILTIN_VCMPGTUD:
17641 h.uns_p[1] = 1;
17642 h.uns_p[2] = 1;
17643 break;
17644
17645 /* unsigned arguments for 128-bit pack instructions. */
17646 case MISC_BUILTIN_PACK_TD:
17647 case MISC_BUILTIN_PACK_V1TI:
17648 h.uns_p[1] = 1;
17649 h.uns_p[2] = 1;
17650 break;
17651
17652 /* unsigned second arguments (vector shift right). */
17653 case ALTIVEC_BUILTIN_VSRB:
17654 case ALTIVEC_BUILTIN_VSRH:
17655 case ALTIVEC_BUILTIN_VSRW:
17656 case P8V_BUILTIN_VSRD:
17657 h.uns_p[2] = 1;
17658 break;
17659
17660 default:
17661 break;
17662 }
17663
17664 /* Figure out how many args are present. */
17665 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17666 num_args--;
17667
17668 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17669 if (!ret_type && h.uns_p[0])
17670 ret_type = builtin_mode_to_type[h.mode[0]][0];
17671
17672 if (!ret_type)
17673 fatal_error (input_location,
17674 "internal error: builtin function %qs had an unexpected "
17675 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17676
17677 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17678 arg_type[i] = NULL_TREE;
17679
17680 for (i = 0; i < num_args; i++)
17681 {
17682 int m = (int) h.mode[i+1];
17683 int uns_p = h.uns_p[i+1];
17684
17685 arg_type[i] = builtin_mode_to_type[m][uns_p];
17686 if (!arg_type[i] && uns_p)
17687 arg_type[i] = builtin_mode_to_type[m][0];
17688
17689 if (!arg_type[i])
17690 fatal_error (input_location,
17691 "internal error: builtin function %qs, argument %d "
17692 "had unexpected argument type %qs", name, i,
17693 GET_MODE_NAME (m));
17694 }
17695
17696 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17697 if (*found == NULL)
17698 {
17699 h2 = ggc_alloc<builtin_hash_struct> ();
17700 *h2 = h;
17701 *found = h2;
17702
17703 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17704 arg_type[2], NULL_TREE);
17705 }
17706
17707 return (*found)->type;
17708 }
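/* A minimal usage sketch (builtin and modes chosen for illustration):

     tree t = builtin_function_type (V4SImode, V4SImode, V4SImode,
                                     VOIDmode, ALTIVEC_BUILTIN_VADDUWM,
                                     "__builtin_altivec_vadduwm");

   yields vector int f (vector int, vector int), and repeated calls
   with the same modes and signedness bits return the cached type.  */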
17709
17710 static void
17711 rs6000_common_init_builtins (void)
17712 {
17713 const struct builtin_description *d;
17714 size_t i;
17715
17716 tree opaque_ftype_opaque = NULL_TREE;
17717 tree opaque_ftype_opaque_opaque = NULL_TREE;
17718 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17719 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17720
17721 /* Create Altivec and VSX builtins on machines with at least the
17722 general purpose extensions (970 and newer) to allow the use of
17723 the target attribute. */
17724
17725 if (TARGET_EXTRA_BUILTINS)
17726 builtin_mask |= RS6000_BTM_COMMON;
17727
17728 /* Add the ternary operators. */
17729 d = bdesc_3arg;
17730 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17731 {
17732 tree type;
17733 HOST_WIDE_INT mask = d->mask;
17734
17735 if ((mask & builtin_mask) != mask)
17736 {
17737 if (TARGET_DEBUG_BUILTIN)
17738 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17739 continue;
17740 }
17741
17742 if (rs6000_overloaded_builtin_p (d->code))
17743 {
17744 if (! (type = opaque_ftype_opaque_opaque_opaque))
17745 type = opaque_ftype_opaque_opaque_opaque
17746 = build_function_type_list (opaque_V4SI_type_node,
17747 opaque_V4SI_type_node,
17748 opaque_V4SI_type_node,
17749 opaque_V4SI_type_node,
17750 NULL_TREE);
17751 }
17752 else
17753 {
17754 enum insn_code icode = d->icode;
17755 if (d->name == 0)
17756 {
17757 if (TARGET_DEBUG_BUILTIN)
17758 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17759 (long unsigned)i);
17760
17761 continue;
17762 }
17763
17764 if (icode == CODE_FOR_nothing)
17765 {
17766 if (TARGET_DEBUG_BUILTIN)
17767 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17768 d->name);
17769
17770 continue;
17771 }
17772
17773 type = builtin_function_type (insn_data[icode].operand[0].mode,
17774 insn_data[icode].operand[1].mode,
17775 insn_data[icode].operand[2].mode,
17776 insn_data[icode].operand[3].mode,
17777 d->code, d->name);
17778 }
17779
17780 def_builtin (d->name, type, d->code);
17781 }
17782
17783 /* Add the binary operators. */
17784 d = bdesc_2arg;
17785 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17786 {
17787 machine_mode mode0, mode1, mode2;
17788 tree type;
17789 HOST_WIDE_INT mask = d->mask;
17790
17791 if ((mask & builtin_mask) != mask)
17792 {
17793 if (TARGET_DEBUG_BUILTIN)
17794 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17795 continue;
17796 }
17797
17798 if (rs6000_overloaded_builtin_p (d->code))
17799 {
17800 if (! (type = opaque_ftype_opaque_opaque))
17801 type = opaque_ftype_opaque_opaque
17802 = build_function_type_list (opaque_V4SI_type_node,
17803 opaque_V4SI_type_node,
17804 opaque_V4SI_type_node,
17805 NULL_TREE);
17806 }
17807 else
17808 {
17809 enum insn_code icode = d->icode;
17810 if (d->name == 0)
17811 {
17812 if (TARGET_DEBUG_BUILTIN)
17813 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17814 (long unsigned)i);
17815
17816 continue;
17817 }
17818
17819 if (icode == CODE_FOR_nothing)
17820 {
17821 if (TARGET_DEBUG_BUILTIN)
17822 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17823 d->name);
17824
17825 continue;
17826 }
17827
17828 mode0 = insn_data[icode].operand[0].mode;
17829 mode1 = insn_data[icode].operand[1].mode;
17830 mode2 = insn_data[icode].operand[2].mode;
17831
17832 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17833 d->code, d->name);
17834 }
17835
17836 def_builtin (d->name, type, d->code);
17837 }
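/* Reading the loop above with a concrete (illustrative) entry: a
   vector add whose insn pattern uses V4SImode for all three operands
   gives mode0 = mode1 = mode2 = V4SImode, so builtin_function_type
   builds vector int f (vector int, vector int).  */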
17838
17839 /* Add the simple unary operators. */
17840 d = bdesc_1arg;
17841 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17842 {
17843 machine_mode mode0, mode1;
17844 tree type;
17845 HOST_WIDE_INT mask = d->mask;
17846
17847 if ((mask & builtin_mask) != mask)
17848 {
17849 if (TARGET_DEBUG_BUILTIN)
17850 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17851 continue;
17852 }
17853
17854 if (rs6000_overloaded_builtin_p (d->code))
17855 {
17856 if (! (type = opaque_ftype_opaque))
17857 type = opaque_ftype_opaque
17858 = build_function_type_list (opaque_V4SI_type_node,
17859 opaque_V4SI_type_node,
17860 NULL_TREE);
17861 }
17862 else
17863 {
17864 enum insn_code icode = d->icode;
17865 if (d->name == 0)
17866 {
17867 if (TARGET_DEBUG_BUILTIN)
17868 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17869 (long unsigned)i);
17870
17871 continue;
17872 }
17873
17874 if (icode == CODE_FOR_nothing)
17875 {
17876 if (TARGET_DEBUG_BUILTIN)
17877 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17878 d->name);
17879
17880 continue;
17881 }
17882
17883 mode0 = insn_data[icode].operand[0].mode;
17884 mode1 = insn_data[icode].operand[1].mode;
17885
17886 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17887 d->code, d->name);
17888 }
17889
17890 def_builtin (d->name, type, d->code);
17891 }
17892
17893 /* Add the simple no-argument operators. */
17894 d = bdesc_0arg;
17895 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17896 {
17897 machine_mode mode0;
17898 tree type;
17899 HOST_WIDE_INT mask = d->mask;
17900
17901 if ((mask & builtin_mask) != mask)
17902 {
17903 if (TARGET_DEBUG_BUILTIN)
17904 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17905 continue;
17906 }
17907 if (rs6000_overloaded_builtin_p (d->code))
17908 {
17909 if (!opaque_ftype_opaque)
17910 opaque_ftype_opaque
17911 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17912 type = opaque_ftype_opaque;
17913 }
17914 else
17915 {
17916 enum insn_code icode = d->icode;
17917 if (d->name == 0)
17918 {
17919 if (TARGET_DEBUG_BUILTIN)
17920 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17921 (long unsigned) i);
17922 continue;
17923 }
17924 if (icode == CODE_FOR_nothing)
17925 {
17926 if (TARGET_DEBUG_BUILTIN)
17927 fprintf (stderr,
17928 "rs6000_builtin, skip no-argument %s (no code)\n",
17929 d->name);
17930 continue;
17931 }
17932 mode0 = insn_data[icode].operand[0].mode;
17933 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17934 d->code, d->name);
17935 }
17936 def_builtin (d->name, type, d->code);
17937 }
17938 }
17939
17940 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17941 static void
17942 init_float128_ibm (machine_mode mode)
17943 {
17944 if (!TARGET_XL_COMPAT)
17945 {
17946 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17947 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17948 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17949 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17950
17951 if (!TARGET_HARD_FLOAT)
17952 {
17953 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17954 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17955 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17956 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17957 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17958 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17959 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17960 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17961
17962 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17963 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17964 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17965 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17966 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17967 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17968 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17969 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17970 }
17971 }
17972 else
17973 {
17974 set_optab_libfunc (add_optab, mode, "_xlqadd");
17975 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17976 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17977 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17978 }
17979
17980 /* Add various conversions for IFmode to use the traditional TFmode
17981 names. */
17982 if (mode == IFmode)
17983 {
17984 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17985 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17986 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17987 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17988 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17989 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17990
17991 if (TARGET_POWERPC64)
17992 {
17993 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17994 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17995 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17996 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17997 }
17998 }
17999 }
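/* Net effect, e.g.: after init_float128_ibm (IFmode), an IFmode
   addition is expanded as a call to __gcc_qadd, or to _xlqadd under
   -mxl-compat.  */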
18000
18001 /* Create a decl for either complex long double multiply or complex long double
18002 divide when long double is IEEE 128-bit floating point. We can't use
18003 __multc3 and __divtc3 because those names were already taken by the original
18004 long double format using IBM extended double. The complex multiply/divide functions are encoded
18005 as builtin functions with a complex result and 4 scalar inputs. */
18006
18007 static void
18008 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18009 {
18010 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18011 name, NULL_TREE);
18012
18013 set_builtin_decl (fncode, fndecl, true);
18014
18015 if (TARGET_DEBUG_BUILTIN)
18016 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18017
18018 return;
18019 }
18020
18021 /* Set up IEEE 128-bit floating point routines. Use different names if the
18022 arguments can be passed in a vector register. The historical PowerPC
18023 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18024 continue to use that if we aren't using vector registers to pass IEEE
18025 128-bit floating point. */
18026
18027 static void
18028 init_float128_ieee (machine_mode mode)
18029 {
18030 if (FLOAT128_VECTOR_P (mode))
18031 {
18032 static bool complex_muldiv_init_p = false;
18033
18034 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18035 we have clone or target attributes, this will be called a second
18036 time. We want to create the built-in function only once. */
18037 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18038 {
18039 complex_muldiv_init_p = true;
18040 built_in_function fncode_mul =
18041 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18042 - MIN_MODE_COMPLEX_FLOAT);
18043 built_in_function fncode_div =
18044 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18045 - MIN_MODE_COMPLEX_FLOAT);
18046
18047 tree fntype = build_function_type_list (complex_long_double_type_node,
18048 long_double_type_node,
18049 long_double_type_node,
18050 long_double_type_node,
18051 long_double_type_node,
18052 NULL_TREE);
18053
18054 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18055 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18056 }
18057
18058 set_optab_libfunc (add_optab, mode, "__addkf3");
18059 set_optab_libfunc (sub_optab, mode, "__subkf3");
18060 set_optab_libfunc (neg_optab, mode, "__negkf2");
18061 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18062 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18063 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18064 set_optab_libfunc (abs_optab, mode, "__abskf2");
18065 set_optab_libfunc (powi_optab, mode, "__powikf2");
18066
18067 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18068 set_optab_libfunc (ne_optab, mode, "__nekf2");
18069 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18070 set_optab_libfunc (ge_optab, mode, "__gekf2");
18071 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18072 set_optab_libfunc (le_optab, mode, "__lekf2");
18073 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18074
18075 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18076 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18077 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18078 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18079
18080 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18081 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18082 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18083
18084 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18085 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18086 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18087
18088 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18089 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18090 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18091 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18092 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18093 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18094
18095 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18096 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18097 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18098 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18099
18100 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18101 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18102 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18103 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18104
18105 if (TARGET_POWERPC64)
18106 {
18107 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18108 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18109 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18110 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18111 }
18112 }
18113
18114 else
18115 {
18116 set_optab_libfunc (add_optab, mode, "_q_add");
18117 set_optab_libfunc (sub_optab, mode, "_q_sub");
18118 set_optab_libfunc (neg_optab, mode, "_q_neg");
18119 set_optab_libfunc (smul_optab, mode, "_q_mul");
18120 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18121 if (TARGET_PPC_GPOPT)
18122 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18123
18124 set_optab_libfunc (eq_optab, mode, "_q_feq");
18125 set_optab_libfunc (ne_optab, mode, "_q_fne");
18126 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18127 set_optab_libfunc (ge_optab, mode, "_q_fge");
18128 set_optab_libfunc (lt_optab, mode, "_q_flt");
18129 set_optab_libfunc (le_optab, mode, "_q_fle");
18130
18131 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18132 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18133 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18134 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18135 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18136 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18137 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18138 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18139 }
18140 }
18141
18142 static void
18143 rs6000_init_libfuncs (void)
18144 {
18145 /* __float128 support. */
18146 if (TARGET_FLOAT128_TYPE)
18147 {
18148 init_float128_ibm (IFmode);
18149 init_float128_ieee (KFmode);
18150 }
18151
18152 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18153 if (TARGET_LONG_DOUBLE_128)
18154 {
18155 if (!TARGET_IEEEQUAD)
18156 init_float128_ibm (TFmode);
18157
18158 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18159 else
18160 init_float128_ieee (TFmode);
18161 }
18162 }
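/* Put together, a sketch of the dispatch above: with 128-bit long
   double, -mabi=ibmlongdouble sends TFmode through the __gcc_q* (or
   _xlq*) names from init_float128_ibm, while -mabi=ieeelongdouble
   sends it through the __*kf* names from init_float128_ieee.  */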
18163
18164 /* Emit a potentially record-form instruction, setting DST from SRC.
18165 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18166 signed comparison of DST with zero. If DOT is 1, the generated RTL
18167 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18168 is CR0, do a single dot insn (as a PARALLEL); otherwise, do a SET and
18169 a separate COMPARE. */
18170
18171 void
18172 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18173 {
18174 if (dot == 0)
18175 {
18176 emit_move_insn (dst, src);
18177 return;
18178 }
18179
18180 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18181 {
18182 emit_move_insn (dst, src);
18183 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18184 return;
18185 }
18186
18187 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18188 if (dot == 1)
18189 {
18190 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18191 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18192 }
18193 else
18194 {
18195 rtx set = gen_rtx_SET (dst, src);
18196 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18197 }
18198 }
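/* Illustration of the RTL shapes above (registers invented): for
   dot == 1 the emitted insn is

     (parallel [(set (reg:CC CR0_REGNO)
                     (compare:CC (reg:DI src) (const_int 0)))
                (clobber (reg:DI dst))])

   while dot == 2 replaces the clobber with (set (reg:DI dst)
   (reg:DI src)), keeping the DST result live.  */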
18199
18200 \f
18201 /* A validation routine: say whether CODE, a condition code, and MODE
18202 match. The other alternatives either don't make sense or should
18203 never be generated. */
18204
18205 void
18206 validate_condition_mode (enum rtx_code code, machine_mode mode)
18207 {
18208 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18209 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18210 && GET_MODE_CLASS (mode) == MODE_CC);
18211
18212 /* These don't make sense. */
18213 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18214 || mode != CCUNSmode);
18215
18216 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18217 || mode == CCUNSmode);
18218
18219 gcc_assert (mode == CCFPmode
18220 || (code != ORDERED && code != UNORDERED
18221 && code != UNEQ && code != LTGT
18222 && code != UNGT && code != UNLT
18223 && code != UNGE && code != UNLE));
18224
18225 /* These should never be generated except for
18226 flag_finite_math_only. */
18227 gcc_assert (mode != CCFPmode
18228 || flag_finite_math_only
18229 || (code != LE && code != GE
18230 && code != UNEQ && code != LTGT
18231 && code != UNGT && code != UNLT));
18232
18233 /* These are invalid; the information is not there. */
18234 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18235 }
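/* E.g. (gtu ...) is only meaningful in CCUNSmode and (gt ...) must not
   use CCUNSmode; the first two assertions encode exactly that
   pairing.  */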
18236
18237 \f
18238 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18239 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
18240 non-null, store there the bit offset (counted from the right) where
18241 the single stretch of 1 bits begins; similarly for B, the bit
18242 offset where it ends. */
18243
18244 bool
18245 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18246 {
18247 unsigned HOST_WIDE_INT val = INTVAL (mask);
18248 unsigned HOST_WIDE_INT bit;
18249 int nb, ne;
18250 int n = GET_MODE_PRECISION (mode);
18251
18252 if (mode != DImode && mode != SImode)
18253 return false;
18254
18255 if (INTVAL (mask) >= 0)
18256 {
18257 bit = val & -val;
18258 ne = exact_log2 (bit);
18259 nb = exact_log2 (val + bit);
18260 }
18261 else if (val + 1 == 0)
18262 {
18263 nb = n;
18264 ne = 0;
18265 }
18266 else if (val & 1)
18267 {
18268 val = ~val;
18269 bit = val & -val;
18270 nb = exact_log2 (bit);
18271 ne = exact_log2 (val + bit);
18272 }
18273 else
18274 {
18275 bit = val & -val;
18276 ne = exact_log2 (bit);
18277 if (val + bit == 0)
18278 nb = n;
18279 else
18280 nb = 0;
18281 }
18282
18283 nb--;
18284
18285 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18286 return false;
18287
18288 if (b)
18289 *b = nb;
18290 if (e)
18291 *e = ne;
18292
18293 return true;
18294 }
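/* Worked example (illustrative only): for MASK = 0x00fff000 in SImode,
   bit = 0x1000 so ne = 12, and val + bit = 0x01000000 gives nb = 24,
   decremented to 23; the run of ones spans bits 12..23 and the
   function returns true with *e = 12 and *b = 23.  */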
18295
18296 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18297 or rldicr instruction, to implement an AND with it in mode MODE. */
18298
18299 bool
18300 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18301 {
18302 int nb, ne;
18303
18304 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18305 return false;
18306
18307 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18308 does not wrap. */
18309 if (mode == DImode)
18310 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18311
18312 /* For SImode, rlwinm can do everything. */
18313 if (mode == SImode)
18314 return (nb < 32 && ne < 32);
18315
18316 return false;
18317 }
18318
18319 /* Return the instruction template for an AND with mask in mode MODE, with
18320 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18321
18322 const char *
18323 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18324 {
18325 int nb, ne;
18326
18327 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18328 gcc_unreachable ();
18329
18330 if (mode == DImode && ne == 0)
18331 {
18332 operands[3] = GEN_INT (63 - nb);
18333 if (dot)
18334 return "rldicl. %0,%1,0,%3";
18335 return "rldicl %0,%1,0,%3";
18336 }
18337
18338 if (mode == DImode && nb == 63)
18339 {
18340 operands[3] = GEN_INT (63 - ne);
18341 if (dot)
18342 return "rldicr. %0,%1,0,%3";
18343 return "rldicr %0,%1,0,%3";
18344 }
18345
18346 if (nb < 32 && ne < 32)
18347 {
18348 operands[3] = GEN_INT (31 - nb);
18349 operands[4] = GEN_INT (31 - ne);
18350 if (dot)
18351 return "rlwinm. %0,%1,0,%3,%4";
18352 return "rlwinm %0,%1,0,%3,%4";
18353 }
18354
18355 gcc_unreachable ();
18356 }
18357
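/* For illustration (not compiled): a DImode AND with 0xff has NE = 0 and
   NB = 7, so the first case above fires and the template is
   "rldicl %0,%1,0,56", clearing all but the low 8 bits.  An SImode AND
   with 0xff0 (NB = 11, NE = 4) falls through to "rlwinm %0,%1,0,20,27";
   the operands are 31 - NB and 31 - NE because rlwinm numbers mask bits
   from the most significant end.  */
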
18358 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18359 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18360 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18361
18362 bool
18363 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18364 {
18365 int nb, ne;
18366
18367 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18368 return false;
18369
18370 int n = GET_MODE_PRECISION (mode);
18371 int sh = -1;
18372
18373 if (CONST_INT_P (XEXP (shift, 1)))
18374 {
18375 sh = INTVAL (XEXP (shift, 1));
18376 if (sh < 0 || sh >= n)
18377 return false;
18378 }
18379
18380 rtx_code code = GET_CODE (shift);
18381
  /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18383 if (sh == 0)
18384 code = ROTATE;
18385
18386 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18387 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18388 code = ASHIFT;
18389 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18390 {
18391 code = LSHIFTRT;
18392 sh = n - sh;
18393 }
18394
18395 /* DImode rotates need rld*. */
18396 if (mode == DImode && code == ROTATE)
18397 return (nb == 63 || ne == 0 || ne == sh);
18398
18399 /* SImode rotates need rlw*. */
18400 if (mode == SImode && code == ROTATE)
18401 return (nb < 32 && ne < 32 && sh < 32);
18402
18403 /* Wrap-around masks are only okay for rotates. */
18404 if (ne > nb)
18405 return false;
18406
18407 /* Variable shifts are only okay for rotates. */
18408 if (sh < 0)
18409 return false;
18410
18411 /* Don't allow ASHIFT if the mask is wrong for that. */
18412 if (code == ASHIFT && ne < sh)
18413 return false;
18414
18415 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18416 if the mask is wrong for that. */
18417 if (nb < 32 && ne < 32 && sh < 32
18418 && !(code == LSHIFTRT && nb >= 32 - sh))
18419 return true;
18420
18421 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18422 if the mask is wrong for that. */
18423 if (code == LSHIFTRT)
18424 sh = 64 - sh;
18425 if (nb == 63 || ne == 0 || ne == sh)
18426 return !(code == LSHIFTRT && nb >= sh);
18427
18428 return false;
18429 }
18430
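/* For illustration (not compiled): in SImode, (ashift X 4) under the mask
   0xfff0 (NB = 15, NE = 4) is accepted, since every mask bit below the
   shift count is zero and a single rlwinm can express it.  The same shift
   under the mask 0xff is rejected by the "code == ASHIFT && ne < sh" test:
   rlwinm cannot supply the zero bits an ashift leaves below the shift
   count.  */
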
18431 /* Return the instruction template for a shift with mask in mode MODE, with
18432 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18433
18434 const char *
18435 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18436 {
18437 int nb, ne;
18438
18439 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18440 gcc_unreachable ();
18441
18442 if (mode == DImode && ne == 0)
18443 {
18444 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18445 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18446 operands[3] = GEN_INT (63 - nb);
18447 if (dot)
18448 return "rld%I2cl. %0,%1,%2,%3";
18449 return "rld%I2cl %0,%1,%2,%3";
18450 }
18451
18452 if (mode == DImode && nb == 63)
18453 {
18454 operands[3] = GEN_INT (63 - ne);
18455 if (dot)
18456 return "rld%I2cr. %0,%1,%2,%3";
18457 return "rld%I2cr %0,%1,%2,%3";
18458 }
18459
18460 if (mode == DImode
18461 && GET_CODE (operands[4]) != LSHIFTRT
18462 && CONST_INT_P (operands[2])
18463 && ne == INTVAL (operands[2]))
18464 {
18465 operands[3] = GEN_INT (63 - nb);
18466 if (dot)
18467 return "rld%I2c. %0,%1,%2,%3";
18468 return "rld%I2c %0,%1,%2,%3";
18469 }
18470
18471 if (nb < 32 && ne < 32)
18472 {
18473 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18474 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18475 operands[3] = GEN_INT (31 - nb);
18476 operands[4] = GEN_INT (31 - ne);
18477 /* This insn can also be a 64-bit rotate with mask that really makes
18478 it just a shift right (with mask); the %h below are to adjust for
18479 that situation (shift count is >= 32 in that case). */
18480 if (dot)
18481 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18482 return "rlw%I2nm %0,%1,%h2,%3,%4";
18483 }
18484
18485 gcc_unreachable ();
18486 }
18487
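/* For illustration (not compiled): a DImode LSHIFTRT by 8 under the mask
   0x00ffffffffffffff (NB = 55, NE = 0) takes the first case above; the
   shift count is rewritten as a left-rotate count of 64 - 8 = 56 and the
   template becomes "rldicl %0,%1,56,8", which is the canonical encoding
   of srdi %0,%1,8.  */
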
18488 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18489 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18490 ASHIFT, or LSHIFTRT) in mode MODE. */
18491
18492 bool
18493 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18494 {
18495 int nb, ne;
18496
18497 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18498 return false;
18499
18500 int n = GET_MODE_PRECISION (mode);
18501
18502 int sh = INTVAL (XEXP (shift, 1));
18503 if (sh < 0 || sh >= n)
18504 return false;
18505
18506 rtx_code code = GET_CODE (shift);
18507
  /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18509 if (sh == 0)
18510 code = ROTATE;
18511
18512 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18513 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18514 code = ASHIFT;
18515 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18516 {
18517 code = LSHIFTRT;
18518 sh = n - sh;
18519 }
18520
18521 /* DImode rotates need rldimi. */
18522 if (mode == DImode && code == ROTATE)
18523 return (ne == sh);
18524
18525 /* SImode rotates need rlwimi. */
18526 if (mode == SImode && code == ROTATE)
18527 return (nb < 32 && ne < 32 && sh < 32);
18528
18529 /* Wrap-around masks are only okay for rotates. */
18530 if (ne > nb)
18531 return false;
18532
18533 /* Don't allow ASHIFT if the mask is wrong for that. */
18534 if (code == ASHIFT && ne < sh)
18535 return false;
18536
18537 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18538 if the mask is wrong for that. */
18539 if (nb < 32 && ne < 32 && sh < 32
18540 && !(code == LSHIFTRT && nb >= 32 - sh))
18541 return true;
18542
18543 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18544 if the mask is wrong for that. */
18545 if (code == LSHIFTRT)
18546 sh = 64 - sh;
18547 if (ne == sh)
18548 return !(code == LSHIFTRT && nb >= sh);
18549
18550 return false;
18551 }
18552
18553 /* Return the instruction template for an insert with mask in mode MODE, with
18554 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18555
18556 const char *
18557 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18558 {
18559 int nb, ne;
18560
18561 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18562 gcc_unreachable ();
18563
18564 /* Prefer rldimi because rlwimi is cracked. */
18565 if (TARGET_POWERPC64
18566 && (!dot || mode == DImode)
18567 && GET_CODE (operands[4]) != LSHIFTRT
18568 && ne == INTVAL (operands[2]))
18569 {
18570 operands[3] = GEN_INT (63 - nb);
18571 if (dot)
18572 return "rldimi. %0,%1,%2,%3";
18573 return "rldimi %0,%1,%2,%3";
18574 }
18575
18576 if (nb < 32 && ne < 32)
18577 {
18578 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18579 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18580 operands[3] = GEN_INT (31 - nb);
18581 operands[4] = GEN_INT (31 - ne);
18582 if (dot)
18583 return "rlwimi. %0,%1,%2,%3,%4";
18584 return "rlwimi %0,%1,%2,%3,%4";
18585 }
18586
18587 gcc_unreachable ();
18588 }
18589
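/* For illustration (not compiled): gluing the low word of %1 into the
   high word of %0 in DImode uses a shift of 32 with the mask
   0xffffffff00000000 (NB = 63, NE = 32); the rldimi case above fires and
   gives "rldimi %0,%1,32,0".  The analogous SImode halfword insert (shift
   16, mask 0xffff0000) on a 32-bit target emits "rlwimi %0,%1,16,0,15".  */
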
18590 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18591 using two machine instructions. */
18592
18593 bool
18594 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18595 {
18596 /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insns;
18598 2) ori[s];xori[s].
18599
18600 We do not handle that last case yet. */
18601
18602 /* If there is just one stretch of ones, we can do it. */
18603 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18604 return true;
18605
18606 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18607 one insn, we can do the whole thing with two. */
18608 unsigned HOST_WIDE_INT val = INTVAL (c);
18609 unsigned HOST_WIDE_INT bit1 = val & -val;
18610 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18611 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18612 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18613 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18614 }
18615
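/* Worked example (the values follow from the code above): VAL = 0xff0ff
   has two runs of ones separated by a hole at bits 8..11.  Then
   BIT1 = 0x1, BIT2 = 0x100 (bottom of the hole), VAL1 = 0xff000,
   BIT3 = 0x1000 (top of the hole), and VAL + BIT3 - BIT2 = 0xfffff, a
   single run of ones, so the AND can be done in two instructions: one
   AND with ~0xf00 to cut out the hole, and one with 0xfffff.  */
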
18616 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18617 If EXPAND is true, split rotate-and-mask instructions we generate to
18618 their constituent parts as well (this is used during expand); if DOT
18619 is 1, make the last insn a record-form instruction clobbering the
18620 destination GPR and setting the CC reg (from operands[3]); if 2, set
18621 that GPR as well as the CC reg. */
18622
18623 void
18624 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18625 {
18626 gcc_assert (!(expand && dot));
18627
18628 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18629
18630 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18631 shift right. This generates better code than doing the masks without
18632 shifts, or shifting first right and then left. */
18633 int nb, ne;
18634 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18635 {
18636 gcc_assert (mode == DImode);
18637
18638 int shift = 63 - nb;
18639 if (expand)
18640 {
18641 rtx tmp1 = gen_reg_rtx (DImode);
18642 rtx tmp2 = gen_reg_rtx (DImode);
18643 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18644 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18645 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18646 }
18647 else
18648 {
18649 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18650 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18651 emit_move_insn (operands[0], tmp);
18652 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18653 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18654 }
18655 return;
18656 }
18657
18658 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18659 that does the rest. */
18660 unsigned HOST_WIDE_INT bit1 = val & -val;
18661 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18662 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18663 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18664
18665 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18666 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18667
18668 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18669
18670 /* Two "no-rotate"-and-mask instructions, for SImode. */
18671 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18672 {
18673 gcc_assert (mode == SImode);
18674
18675 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18676 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18677 emit_move_insn (reg, tmp);
18678 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18679 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18680 return;
18681 }
18682
18683 gcc_assert (mode == DImode);
18684
18685 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18686 insns; we have to do the first in SImode, because it wraps. */
18687 if (mask2 <= 0xffffffff
18688 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18689 {
18690 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18691 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18692 GEN_INT (mask1));
18693 rtx reg_low = gen_lowpart (SImode, reg);
18694 emit_move_insn (reg_low, tmp);
18695 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18696 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18697 return;
18698 }
18699
18700 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18701 at the top end), rotate back and clear the other hole. */
18702 int right = exact_log2 (bit3);
18703 int left = 64 - right;
18704
18705 /* Rotate the mask too. */
18706 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18707
18708 if (expand)
18709 {
18710 rtx tmp1 = gen_reg_rtx (DImode);
18711 rtx tmp2 = gen_reg_rtx (DImode);
18712 rtx tmp3 = gen_reg_rtx (DImode);
18713 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18714 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18715 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18716 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18717 }
18718 else
18719 {
18720 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18721 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18722 emit_move_insn (operands[0], tmp);
18723 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18724 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18725 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18726 }
18727 }
18728 \f
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18730 for lfq and stfq insns iff the registers are hard registers. */
18731
18732 int
18733 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18734 {
18735 /* We might have been passed a SUBREG. */
18736 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18737 return 0;
18738
  /* We might have been passed non-floating-point registers.  */
18740 if (!FP_REGNO_P (REGNO (reg1))
18741 || !FP_REGNO_P (REGNO (reg2)))
18742 return 0;
18743
18744 return (REGNO (reg1) == REGNO (reg2) - 1);
18745 }
18746
/* Return 1 if addr1 and addr2 are suitable for an lfq or stfq insn.
18748 addr1 and addr2 must be in consecutive memory locations
18749 (addr2 == addr1 + 8). */
18750
18751 int
18752 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18753 {
18754 rtx addr1, addr2;
18755 unsigned int reg1, reg2;
18756 int offset1, offset2;
18757
18758 /* The mems cannot be volatile. */
18759 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18760 return 0;
18761
18762 addr1 = XEXP (mem1, 0);
18763 addr2 = XEXP (mem2, 0);
18764
18765 /* Extract an offset (if used) from the first addr. */
18766 if (GET_CODE (addr1) == PLUS)
18767 {
18768 /* If not a REG, return zero. */
18769 if (GET_CODE (XEXP (addr1, 0)) != REG)
18770 return 0;
18771 else
18772 {
18773 reg1 = REGNO (XEXP (addr1, 0));
18774 /* The offset must be constant! */
18775 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18776 return 0;
18777 offset1 = INTVAL (XEXP (addr1, 1));
18778 }
18779 }
18780 else if (GET_CODE (addr1) != REG)
18781 return 0;
18782 else
18783 {
18784 reg1 = REGNO (addr1);
18785 /* This was a simple (mem (reg)) expression. Offset is 0. */
18786 offset1 = 0;
18787 }
18788
18789 /* And now for the second addr. */
18790 if (GET_CODE (addr2) == PLUS)
18791 {
18792 /* If not a REG, return zero. */
18793 if (GET_CODE (XEXP (addr2, 0)) != REG)
18794 return 0;
18795 else
18796 {
18797 reg2 = REGNO (XEXP (addr2, 0));
18798 /* The offset must be constant. */
18799 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18800 return 0;
18801 offset2 = INTVAL (XEXP (addr2, 1));
18802 }
18803 }
18804 else if (GET_CODE (addr2) != REG)
18805 return 0;
18806 else
18807 {
18808 reg2 = REGNO (addr2);
18809 /* This was a simple (mem (reg)) expression. Offset is 0. */
18810 offset2 = 0;
18811 }
18812
18813 /* Both of these must have the same base register. */
18814 if (reg1 != reg2)
18815 return 0;
18816
18817 /* The offset for the second addr must be 8 more than the first addr. */
18818 if (offset2 != offset1 + 8)
18819 return 0;
18820
18821 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18822 instructions. */
18823 return 1;
18824 }
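
/* For illustration: together these two predicates let a peephole combine,
   say, "lfd 10,8(3)" followed by "lfd 11,16(3)" into a single lfq, since
   fr10/fr11 are consecutive and the second address is exactly 8 bytes
   past the first.  */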
18825 \f
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
   need to use DDmode; in all other cases we can use the same mode.  */
18828 static machine_mode
18829 rs6000_secondary_memory_needed_mode (machine_mode mode)
18830 {
18831 if (lra_in_progress && mode == SDmode)
18832 return DDmode;
18833 return mode;
18834 }
18835
18836 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18837 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18838 only work on the traditional altivec registers, note if an altivec register
18839 was chosen. */
18840
18841 static enum rs6000_reg_type
18842 register_to_reg_type (rtx reg, bool *is_altivec)
18843 {
18844 HOST_WIDE_INT regno;
18845 enum reg_class rclass;
18846
18847 if (GET_CODE (reg) == SUBREG)
18848 reg = SUBREG_REG (reg);
18849
18850 if (!REG_P (reg))
18851 return NO_REG_TYPE;
18852
18853 regno = REGNO (reg);
18854 if (regno >= FIRST_PSEUDO_REGISTER)
18855 {
18856 if (!lra_in_progress && !reload_completed)
18857 return PSEUDO_REG_TYPE;
18858
18859 regno = true_regnum (reg);
18860 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18861 return PSEUDO_REG_TYPE;
18862 }
18863
18864 gcc_assert (regno >= 0);
18865
18866 if (is_altivec && ALTIVEC_REGNO_P (regno))
18867 *is_altivec = true;
18868
18869 rclass = rs6000_regno_regclass[regno];
18870 return reg_class_to_reg_type[(int)rclass];
18871 }
18872
18873 /* Helper function to return the cost of adding a TOC entry address. */
18874
18875 static inline int
18876 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18877 {
18878 int ret;
18879
18880 if (TARGET_CMODEL != CMODEL_SMALL)
18881 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18882
18883 else
18884 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18885
18886 return ret;
18887 }
18888
18889 /* Helper function for rs6000_secondary_reload to determine whether the memory
18890 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
   needs reloading.  Return negative if the memory is not handled by the
   memory helper functions (so that a different reload method should be
   tried), 0 if no additional instructions are needed, and positive to give
   the extra cost of the memory access.  */
18895
18896 static int
18897 rs6000_secondary_reload_memory (rtx addr,
18898 enum reg_class rclass,
18899 machine_mode mode)
18900 {
18901 int extra_cost = 0;
18902 rtx reg, and_arg, plus_arg0, plus_arg1;
18903 addr_mask_type addr_mask;
18904 const char *type = NULL;
18905 const char *fail_msg = NULL;
18906
18907 if (GPR_REG_CLASS_P (rclass))
18908 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18909
18910 else if (rclass == FLOAT_REGS)
18911 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18912
18913 else if (rclass == ALTIVEC_REGS)
18914 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18915
18916 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18917 else if (rclass == VSX_REGS)
18918 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18919 & ~RELOAD_REG_AND_M16);
18920
18921 /* If the register allocator hasn't made up its mind yet on the register
18922 class to use, settle on defaults to use. */
18923 else if (rclass == NO_REGS)
18924 {
18925 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18926 & ~RELOAD_REG_AND_M16);
18927
18928 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18929 addr_mask &= ~(RELOAD_REG_INDEXED
18930 | RELOAD_REG_PRE_INCDEC
18931 | RELOAD_REG_PRE_MODIFY);
18932 }
18933
18934 else
18935 addr_mask = 0;
18936
18937 /* If the register isn't valid in this register class, just return now. */
18938 if ((addr_mask & RELOAD_REG_VALID) == 0)
18939 {
18940 if (TARGET_DEBUG_ADDR)
18941 {
18942 fprintf (stderr,
18943 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18944 "not valid in class\n",
18945 GET_MODE_NAME (mode), reg_class_names[rclass]);
18946 debug_rtx (addr);
18947 }
18948
18949 return -1;
18950 }
18951
18952 switch (GET_CODE (addr))
18953 {
    /* Does the register class support auto update forms for this mode?  We
18955 don't need a scratch register, since the powerpc only supports
18956 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18957 case PRE_INC:
18958 case PRE_DEC:
18959 reg = XEXP (addr, 0);
      if (!base_reg_operand (reg, GET_MODE (reg)))
18961 {
18962 fail_msg = "no base register #1";
18963 extra_cost = -1;
18964 }
18965
18966 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18967 {
18968 extra_cost = 1;
18969 type = "update";
18970 }
18971 break;
18972
18973 case PRE_MODIFY:
18974 reg = XEXP (addr, 0);
18975 plus_arg1 = XEXP (addr, 1);
18976 if (!base_reg_operand (reg, GET_MODE (reg))
18977 || GET_CODE (plus_arg1) != PLUS
18978 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18979 {
18980 fail_msg = "bad PRE_MODIFY";
18981 extra_cost = -1;
18982 }
18983
18984 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18985 {
18986 extra_cost = 1;
18987 type = "update";
18988 }
18989 break;
18990
18991 /* Do we need to simulate AND -16 to clear the bottom address bits used
18992 in VMX load/stores? Only allow the AND for vector sizes. */
18993 case AND:
18994 and_arg = XEXP (addr, 0);
18995 if (GET_MODE_SIZE (mode) != 16
18996 || GET_CODE (XEXP (addr, 1)) != CONST_INT
18997 || INTVAL (XEXP (addr, 1)) != -16)
18998 {
18999 fail_msg = "bad Altivec AND #1";
19000 extra_cost = -1;
19001 }
19002
19003 if (rclass != ALTIVEC_REGS)
19004 {
19005 if (legitimate_indirect_address_p (and_arg, false))
19006 extra_cost = 1;
19007
19008 else if (legitimate_indexed_address_p (and_arg, false))
19009 extra_cost = 2;
19010
19011 else
19012 {
19013 fail_msg = "bad Altivec AND #2";
19014 extra_cost = -1;
19015 }
19016
19017 type = "and";
19018 }
19019 break;
19020
19021 /* If this is an indirect address, make sure it is a base register. */
19022 case REG:
19023 case SUBREG:
19024 if (!legitimate_indirect_address_p (addr, false))
19025 {
19026 extra_cost = 1;
19027 type = "move";
19028 }
19029 break;
19030
19031 /* If this is an indexed address, make sure the register class can handle
19032 indexed addresses for this mode. */
19033 case PLUS:
19034 plus_arg0 = XEXP (addr, 0);
19035 plus_arg1 = XEXP (addr, 1);
19036
19037 /* (plus (plus (reg) (constant)) (constant)) is generated during
19038 push_reload processing, so handle it now. */
19039 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19040 {
19041 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19042 {
19043 extra_cost = 1;
19044 type = "offset";
19045 }
19046 }
19047
19048 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19049 push_reload processing, so handle it now. */
19050 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19051 {
19052 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19053 {
19054 extra_cost = 1;
19055 type = "indexed #2";
19056 }
19057 }
19058
19059 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19060 {
19061 fail_msg = "no base register #2";
19062 extra_cost = -1;
19063 }
19064
19065 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19066 {
19067 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19068 || !legitimate_indexed_address_p (addr, false))
19069 {
19070 extra_cost = 1;
19071 type = "indexed";
19072 }
19073 }
19074
19075 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19076 && CONST_INT_P (plus_arg1))
19077 {
19078 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19079 {
19080 extra_cost = 1;
19081 type = "vector d-form offset";
19082 }
19083 }
19084
19085 /* Make sure the register class can handle offset addresses. */
19086 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19087 {
19088 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19089 {
19090 extra_cost = 1;
19091 type = "offset #2";
19092 }
19093 }
19094
19095 else
19096 {
19097 fail_msg = "bad PLUS";
19098 extra_cost = -1;
19099 }
19100
19101 break;
19102
19103 case LO_SUM:
19104 /* Quad offsets are restricted and can't handle normal addresses. */
19105 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19106 {
19107 extra_cost = -1;
19108 type = "vector d-form lo_sum";
19109 }
19110
19111 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19112 {
19113 fail_msg = "bad LO_SUM";
19114 extra_cost = -1;
19115 }
19116
19117 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19118 {
19119 extra_cost = 1;
19120 type = "lo_sum";
19121 }
19122 break;
19123
19124 /* Static addresses need to create a TOC entry. */
19125 case CONST:
19126 case SYMBOL_REF:
19127 case LABEL_REF:
19128 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19129 {
19130 extra_cost = -1;
19131 type = "vector d-form lo_sum #2";
19132 }
19133
19134 else
19135 {
19136 type = "address";
19137 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19138 }
19139 break;
19140
    /* TOC references look like offsettable memory.  */
19142 case UNSPEC:
19143 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19144 {
19145 fail_msg = "bad UNSPEC";
19146 extra_cost = -1;
19147 }
19148
19149 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19150 {
19151 extra_cost = -1;
19152 type = "vector d-form lo_sum #3";
19153 }
19154
19155 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19156 {
19157 extra_cost = 1;
19158 type = "toc reference";
19159 }
19160 break;
19161
19162 default:
19163 {
19164 fail_msg = "bad address";
19165 extra_cost = -1;
19166 }
19167 }
19168
19169 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19170 {
19171 if (extra_cost < 0)
19172 fprintf (stderr,
19173 "rs6000_secondary_reload_memory error: mode = %s, "
19174 "class = %s, addr_mask = '%s', %s\n",
19175 GET_MODE_NAME (mode),
19176 reg_class_names[rclass],
19177 rs6000_debug_addr_mask (addr_mask, false),
19178 (fail_msg != NULL) ? fail_msg : "<bad address>");
19179
19180 else
19181 fprintf (stderr,
19182 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19183 "addr_mask = '%s', extra cost = %d, %s\n",
19184 GET_MODE_NAME (mode),
19185 reg_class_names[rclass],
19186 rs6000_debug_addr_mask (addr_mask, false),
19187 extra_cost,
19188 (type) ? type : "<none>");
19189
19190 debug_rtx (addr);
19191 }
19192
19193 return extra_cost;
19194 }
19195
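/* For illustration: a 16-byte access through (and (reg) (const_int -16))
   reports no extra cost for ALTIVEC_REGS, because lvx/stvx ignore the low
   address bits themselves; the same address for VSX_REGS reports
   extra_cost 1 with type "and" (the masking must be done explicitly into
   the scratch register), or 2 if the base is itself reg+reg.  A negative
   return means reload must fall back to another strategy.  */
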
19196 /* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */
19198
19199 static bool
19200 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19201 enum rs6000_reg_type from_type,
19202 machine_mode mode)
19203 {
19204 int size = GET_MODE_SIZE (mode);
19205
19206 /* Add support for various direct moves available. In this function, we only
19207 look at cases where we don't need any extra registers, and one or more
     simple move insns are issued.  Historically, small integers were not
     allowed in FPR/VSX registers.  Single-precision binary floating point
     is not a simple move because we need to convert to the single
     precision memory layout.
19211 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19212 need special direct move handling, which we do not support yet. */
19213 if (TARGET_DIRECT_MOVE
19214 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19215 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19216 {
19217 if (TARGET_POWERPC64)
19218 {
	  /* ISA 2.07: MTVSRD or MFVSRD.  */
19220 if (size == 8)
19221 return true;
19222
19223 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19224 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19225 return true;
19226 }
19227
19228 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19229 if (TARGET_P8_VECTOR)
19230 {
19231 if (mode == SImode)
19232 return true;
19233
19234 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19235 return true;
19236 }
19237
19238 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19239 if (mode == SDmode)
19240 return true;
19241 }
19242
19243 /* Power6+: MFTGPR or MFFGPR. */
19244 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19245 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19246 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19247 return true;
19248
19249 /* Move to/from SPR. */
19250 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19251 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19252 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19253 return true;
19254
19255 return false;
19256 }
19257
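/* For illustration: on a 64-bit ISA 2.07 (power8) target, a DImode move
   between a GPR and a VSX register is "simple" (a single mtvsrd or
   mfvsrd), and an SImode move is as well (mtvsrwz/mfvsrwz).  An SFmode
   move is not simple even though it fits in a register, because the value
   must first be converted between the scalar and memory formats.  */
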
/* Direct move helper function for rs6000_secondary_reload.  Handle all of
   the special direct moves that involve allocating an extra register.
   Return true if there is such a helper for this move, recording its insn
   code and extra cost in SRI; return false if not.  */
19262
19263 static bool
19264 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19265 enum rs6000_reg_type from_type,
19266 machine_mode mode,
19267 secondary_reload_info *sri,
19268 bool altivec_p)
19269 {
19270 bool ret = false;
19271 enum insn_code icode = CODE_FOR_nothing;
19272 int cost = 0;
19273 int size = GET_MODE_SIZE (mode);
19274
19275 if (TARGET_POWERPC64 && size == 16)
19276 {
      /* Handle moving 128-bit values from GPRs to VSX registers on
19278 ISA 2.07 (power8, power9) when running in 64-bit mode using
19279 XXPERMDI to glue the two 64-bit values back together. */
19280 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19281 {
19282 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19283 icode = reg_addr[mode].reload_vsx_gpr;
19284 }
19285
      /* Handle moving 128-bit values from VSX registers to GPRs on
19287 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19288 bottom 64-bit value. */
19289 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19290 {
19291 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19292 icode = reg_addr[mode].reload_gpr_vsx;
19293 }
19294 }
19295
19296 else if (TARGET_POWERPC64 && mode == SFmode)
19297 {
19298 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19299 {
19300 cost = 3; /* xscvdpspn, mfvsrd, and. */
19301 icode = reg_addr[mode].reload_gpr_vsx;
19302 }
19303
19304 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19305 {
19306 cost = 2; /* mtvsrz, xscvspdpn. */
19307 icode = reg_addr[mode].reload_vsx_gpr;
19308 }
19309 }
19310
19311 else if (!TARGET_POWERPC64 && size == 8)
19312 {
19313 /* Handle moving 64-bit values from GPRs to floating point registers on
19314 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19315 32-bit values back together. Altivec register classes must be handled
19316 specially since a different instruction is used, and the secondary
19317 reload support requires a single instruction class in the scratch
19318 register constraint. However, right now TFmode is not allowed in
19319 Altivec registers, so the pattern will never match. */
19320 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19321 {
19322 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19323 icode = reg_addr[mode].reload_fpr_gpr;
19324 }
19325 }
19326
19327 if (icode != CODE_FOR_nothing)
19328 {
19329 ret = true;
19330 if (sri)
19331 {
19332 sri->icode = icode;
19333 sri->extra_cost = cost;
19334 }
19335 }
19336
19337 return ret;
19338 }
19339
19340 /* Return whether a move between two register classes can be done either
19341 directly (simple move) or via a pattern that uses a single extra temporary
   (using ISA 2.07's direct move in this case).  */
19343
19344 static bool
19345 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19346 enum rs6000_reg_type from_type,
19347 machine_mode mode,
19348 secondary_reload_info *sri,
19349 bool altivec_p)
19350 {
19351 /* Fall back to load/store reloads if either type is not a register. */
19352 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19353 return false;
19354
19355 /* If we haven't allocated registers yet, assume the move can be done for the
19356 standard register types. */
19357 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19358 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19359 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19360 return true;
19361
  /* A move within the same set of registers is a simple move for
     non-specialized registers.  */
19364 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19365 return true;
19366
19367 /* Check whether a simple move can be done directly. */
19368 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19369 {
19370 if (sri)
19371 {
19372 sri->icode = CODE_FOR_nothing;
19373 sri->extra_cost = 0;
19374 }
19375 return true;
19376 }
19377
19378 /* Now check if we can do it in a few steps. */
19379 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19380 altivec_p);
19381 }
19382
19383 /* Inform reload about cases where moving X with a mode MODE to a register in
19384 RCLASS requires an extra scratch or immediate register. Return the class
19385 needed for the immediate register.
19386
19387 For VSX and Altivec, we may need a register to convert sp+offset into
19388 reg+sp.
19389
19390 For misaligned 64-bit gpr loads and stores we need a register to
19391 convert an offset address to indirect. */
19392
19393 static reg_class_t
19394 rs6000_secondary_reload (bool in_p,
19395 rtx x,
19396 reg_class_t rclass_i,
19397 machine_mode mode,
19398 secondary_reload_info *sri)
19399 {
19400 enum reg_class rclass = (enum reg_class) rclass_i;
19401 reg_class_t ret = ALL_REGS;
19402 enum insn_code icode;
19403 bool default_p = false;
19404 bool done_p = false;
19405
19406 /* Allow subreg of memory before/during reload. */
19407 bool memory_p = (MEM_P (x)
19408 || (!reload_completed && GET_CODE (x) == SUBREG
19409 && MEM_P (SUBREG_REG (x))));
19410
19411 sri->icode = CODE_FOR_nothing;
19412 sri->t_icode = CODE_FOR_nothing;
19413 sri->extra_cost = 0;
19414 icode = ((in_p)
19415 ? reg_addr[mode].reload_load
19416 : reg_addr[mode].reload_store);
19417
19418 if (REG_P (x) || register_operand (x, mode))
19419 {
19420 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19421 bool altivec_p = (rclass == ALTIVEC_REGS);
19422 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19423
19424 if (!in_p)
19425 std::swap (to_type, from_type);
19426
19427 /* Can we do a direct move of some sort? */
19428 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19429 altivec_p))
19430 {
19431 icode = (enum insn_code)sri->icode;
19432 default_p = false;
19433 done_p = true;
19434 ret = NO_REGS;
19435 }
19436 }
19437
19438 /* Make sure 0.0 is not reloaded or forced into memory. */
19439 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19440 {
19441 ret = NO_REGS;
19442 default_p = false;
19443 done_p = true;
19444 }
19445
  /* If this is a scalar floating point value and we want to load it into the
     traditional Altivec registers, do it via a traditional floating point
     register, unless we have D-form addressing.  Also make sure that
     non-zero constants use an FPR.  */
19450 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19451 && !mode_supports_vmx_dform (mode)
19452 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19453 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19454 {
19455 ret = FLOAT_REGS;
19456 default_p = false;
19457 done_p = true;
19458 }
19459
19460 /* Handle reload of load/stores if we have reload helper functions. */
19461 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19462 {
19463 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19464 mode);
19465
19466 if (extra_cost >= 0)
19467 {
19468 done_p = true;
19469 ret = NO_REGS;
19470 if (extra_cost > 0)
19471 {
19472 sri->extra_cost = extra_cost;
19473 sri->icode = icode;
19474 }
19475 }
19476 }
19477
19478 /* Handle unaligned loads and stores of integer registers. */
19479 if (!done_p && TARGET_POWERPC64
19480 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19481 && memory_p
19482 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19483 {
19484 rtx addr = XEXP (x, 0);
19485 rtx off = address_offset (addr);
19486
19487 if (off != NULL_RTX)
19488 {
19489 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19490 unsigned HOST_WIDE_INT offset = INTVAL (off);
19491
19492 /* We need a secondary reload when our legitimate_address_p
19493 says the address is good (as otherwise the entire address
19494 will be reloaded), and the offset is not a multiple of
19495 four or we have an address wrap. Address wrap will only
19496 occur for LO_SUMs since legitimate_offset_address_p
19497 rejects addresses for 16-byte mems that will wrap. */
19498 if (GET_CODE (addr) == LO_SUM
19499 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19500 && ((offset & 3) != 0
19501 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19502 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19503 && (offset & 3) != 0))
19504 {
19505 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19506 if (in_p)
19507 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19508 : CODE_FOR_reload_di_load);
19509 else
19510 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19511 : CODE_FOR_reload_di_store);
19512 sri->extra_cost = 2;
19513 ret = NO_REGS;
19514 done_p = true;
19515 }
19516 else
19517 default_p = true;
19518 }
19519 else
19520 default_p = true;
19521 }
19522
19523 if (!done_p && !TARGET_POWERPC64
19524 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19525 && memory_p
19526 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19527 {
19528 rtx addr = XEXP (x, 0);
19529 rtx off = address_offset (addr);
19530
19531 if (off != NULL_RTX)
19532 {
19533 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19534 unsigned HOST_WIDE_INT offset = INTVAL (off);
19535
19536 /* We need a secondary reload when our legitimate_address_p
19537 says the address is good (as otherwise the entire address
19538 will be reloaded), and we have a wrap.
19539
19540 legitimate_lo_sum_address_p allows LO_SUM addresses to
19541 have any offset so test for wrap in the low 16 bits.
19542
19543 legitimate_offset_address_p checks for the range
19544 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19545 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19546 [0x7ff4,0x7fff] respectively, so test for the
19547 intersection of these ranges, [0x7ffc,0x7fff] and
19548 [0x7ff4,0x7ff7] respectively.
19549
19550 Note that the address we see here may have been
19551 manipulated by legitimize_reload_address. */
19552 if (GET_CODE (addr) == LO_SUM
19553 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19554 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19555 {
19556 if (in_p)
19557 sri->icode = CODE_FOR_reload_si_load;
19558 else
19559 sri->icode = CODE_FOR_reload_si_store;
19560 sri->extra_cost = 2;
19561 ret = NO_REGS;
19562 done_p = true;
19563 }
19564 else
19565 default_p = true;
19566 }
19567 else
19568 default_p = true;
19569 }
19570
19571 if (!done_p)
19572 default_p = true;
19573
19574 if (default_p)
19575 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19576
19577 gcc_assert (ret != ALL_REGS);
19578
19579 if (TARGET_DEBUG_ADDR)
19580 {
19581 fprintf (stderr,
19582 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19583 "mode = %s",
19584 reg_class_names[ret],
19585 in_p ? "true" : "false",
19586 reg_class_names[rclass],
19587 GET_MODE_NAME (mode));
19588
19589 if (reload_completed)
19590 fputs (", after reload", stderr);
19591
19592 if (!done_p)
19593 fputs (", done_p not set", stderr);
19594
19595 if (default_p)
19596 fputs (", default secondary reload", stderr);
19597
19598 if (sri->icode != CODE_FOR_nothing)
19599 fprintf (stderr, ", reload func = %s, extra cost = %d",
19600 insn_data[sri->icode].name, sri->extra_cost);
19601
19602 else if (sri->extra_cost > 0)
19603 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19604
19605 fputs ("\n", stderr);
19606 debug_rtx (x);
19607 }
19608
19609 return ret;
19610 }
19611
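/* For illustration: on a 64-bit target, a DImode GPR load from the address
   (plus (reg) (const_int 5)) is caught by the misaligned-offset case
   above: ld needs a DS-form offset (a multiple of 4), so the hook returns
   NO_REGS but records CODE_FOR_reload_di_load with extra_cost 2, and the
   scratch register is used to form the address instead.  */
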
19612 /* Better tracing for rs6000_secondary_reload_inner. */
19613
19614 static void
19615 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19616 bool store_p)
19617 {
19618 rtx set, clobber;
19619
19620 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19621
19622 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19623 store_p ? "store" : "load");
19624
19625 if (store_p)
19626 set = gen_rtx_SET (mem, reg);
19627 else
19628 set = gen_rtx_SET (reg, mem);
19629
19630 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19631 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19632 }
19633
19634 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19635 ATTRIBUTE_NORETURN;
19636
19637 static void
19638 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19639 bool store_p)
19640 {
19641 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19642 gcc_unreachable ();
19643 }
19644
19645 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19646 reload helper functions. These were identified in
19647 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19648 reload, it calls the insns:
19649 reload_<RELOAD:mode>_<P:mptrsize>_store
19650 reload_<RELOAD:mode>_<P:mptrsize>_load
19651
19652 which in turn calls this function, to do whatever is necessary to create
19653 valid addresses. */
19654
19655 void
19656 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19657 {
19658 int regno = true_regnum (reg);
19659 machine_mode mode = GET_MODE (reg);
19660 addr_mask_type addr_mask;
19661 rtx addr;
19662 rtx new_addr;
19663 rtx op_reg, op0, op1;
19664 rtx and_op;
19665 rtx cc_clobber;
19666 rtvec rv;
19667
19668 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19669 || !base_reg_operand (scratch, GET_MODE (scratch)))
19670 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19671
19672 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19673 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19674
19675 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19676 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19677
19678 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19679 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19680
19681 else
19682 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19683
19684 /* Make sure the mode is valid in this register class. */
19685 if ((addr_mask & RELOAD_REG_VALID) == 0)
19686 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19687
19688 if (TARGET_DEBUG_ADDR)
19689 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19690
19691 new_addr = addr = XEXP (mem, 0);
19692 switch (GET_CODE (addr))
19693 {
19694 /* Does the register class support auto update forms for this mode? If
19695 not, do the update now. We don't need a scratch register, since the
19696 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19697 case PRE_INC:
19698 case PRE_DEC:
19699 op_reg = XEXP (addr, 0);
19700 if (!base_reg_operand (op_reg, Pmode))
19701 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19702
19703 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19704 {
19705 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
19706 new_addr = op_reg;
19707 }
19708 break;
19709
19710 case PRE_MODIFY:
19711 op0 = XEXP (addr, 0);
19712 op1 = XEXP (addr, 1);
19713 if (!base_reg_operand (op0, Pmode)
19714 || GET_CODE (op1) != PLUS
19715 || !rtx_equal_p (op0, XEXP (op1, 0)))
19716 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19717
19718 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19719 {
19720 emit_insn (gen_rtx_SET (op0, op1));
19721 new_addr = reg;
19722 }
19723 break;
19724
19725 /* Do we need to simulate AND -16 to clear the bottom address bits used
19726 in VMX load/stores? */
19727 case AND:
19728 op0 = XEXP (addr, 0);
19729 op1 = XEXP (addr, 1);
19730 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19731 {
19732 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19733 op_reg = op0;
19734
	  else if (GET_CODE (op0) == PLUS)
	    {
	      emit_insn (gen_rtx_SET (scratch, op0));
19738 op_reg = scratch;
19739 }
19740
19741 else
19742 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19743
19744 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19745 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19746 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19747 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19748 new_addr = scratch;
19749 }
19750 break;
19751
19752 /* If this is an indirect address, make sure it is a base register. */
19753 case REG:
19754 case SUBREG:
19755 if (!base_reg_operand (addr, GET_MODE (addr)))
19756 {
19757 emit_insn (gen_rtx_SET (scratch, addr));
19758 new_addr = scratch;
19759 }
19760 break;
19761
19762 /* If this is an indexed address, make sure the register class can handle
19763 indexed addresses for this mode. */
19764 case PLUS:
19765 op0 = XEXP (addr, 0);
19766 op1 = XEXP (addr, 1);
19767 if (!base_reg_operand (op0, Pmode))
19768 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19769
19770 else if (int_reg_operand (op1, Pmode))
19771 {
19772 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19773 {
19774 emit_insn (gen_rtx_SET (scratch, addr));
19775 new_addr = scratch;
19776 }
19777 }
19778
19779 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19780 {
19781 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19782 || !quad_address_p (addr, mode, false))
19783 {
19784 emit_insn (gen_rtx_SET (scratch, addr));
19785 new_addr = scratch;
19786 }
19787 }
19788
19789 /* Make sure the register class can handle offset addresses. */
19790 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19791 {
19792 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19793 {
19794 emit_insn (gen_rtx_SET (scratch, addr));
19795 new_addr = scratch;
19796 }
19797 }
19798
19799 else
19800 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19801
19802 break;
19803
19804 case LO_SUM:
19805 op0 = XEXP (addr, 0);
19806 op1 = XEXP (addr, 1);
19807 if (!base_reg_operand (op0, Pmode))
19808 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19809
19810 else if (int_reg_operand (op1, Pmode))
19811 {
19812 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19813 {
19814 emit_insn (gen_rtx_SET (scratch, addr));
19815 new_addr = scratch;
19816 }
19817 }
19818
19819 /* Quad offsets are restricted and can't handle normal addresses. */
19820 else if (mode_supports_dq_form (mode))
19821 {
19822 emit_insn (gen_rtx_SET (scratch, addr));
19823 new_addr = scratch;
19824 }
19825
19826 /* Make sure the register class can handle offset addresses. */
19827 else if (legitimate_lo_sum_address_p (mode, addr, false))
19828 {
19829 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19830 {
19831 emit_insn (gen_rtx_SET (scratch, addr));
19832 new_addr = scratch;
19833 }
19834 }
19835
19836 else
19837 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19838
19839 break;
19840
19841 case SYMBOL_REF:
19842 case CONST:
19843 case LABEL_REF:
19844 rs6000_emit_move (scratch, addr, Pmode);
19845 new_addr = scratch;
19846 break;
19847
19848 default:
19849 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19850 }
19851
19852 /* Adjust the address if it changed. */
19853 if (addr != new_addr)
19854 {
19855 mem = replace_equiv_address_nv (mem, new_addr);
19856 if (TARGET_DEBUG_ADDR)
19857 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19858 }
19859
19860 /* Now create the move. */
19861 if (store_p)
19862 emit_insn (gen_rtx_SET (mem, reg));
19863 else
19864 emit_insn (gen_rtx_SET (reg, mem));
19865
19866 return;
19867 }
19868
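/* For illustration: if the address is a bare register that cannot act as
   a base register (r0 in the base position reads as the literal 0), the
   REG case above copies it into the scratch register and the memory is
   addressed through the scratch instead.  */
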
19869 /* Convert reloads involving 64-bit gprs and misaligned offset
19870 addressing, or multiple 32-bit gprs and offsets that are too large,
19871 to use indirect addressing. */
19872
19873 void
19874 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19875 {
19876 int regno = true_regnum (reg);
19877 enum reg_class rclass;
19878 rtx addr;
19879 rtx scratch_or_premodify = scratch;
19880
19881 if (TARGET_DEBUG_ADDR)
19882 {
19883 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19884 store_p ? "store" : "load");
19885 fprintf (stderr, "reg:\n");
19886 debug_rtx (reg);
19887 fprintf (stderr, "mem:\n");
19888 debug_rtx (mem);
19889 fprintf (stderr, "scratch:\n");
19890 debug_rtx (scratch);
19891 }
19892
19893 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
19894 gcc_assert (GET_CODE (mem) == MEM);
19895 rclass = REGNO_REG_CLASS (regno);
19896 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19897 addr = XEXP (mem, 0);
19898
19899 if (GET_CODE (addr) == PRE_MODIFY)
19900 {
19901 gcc_assert (REG_P (XEXP (addr, 0))
19902 && GET_CODE (XEXP (addr, 1)) == PLUS
19903 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19904 scratch_or_premodify = XEXP (addr, 0);
19905 if (!HARD_REGISTER_P (scratch_or_premodify))
19906 /* If we have a pseudo here then reload will have arranged
19907 to have it replaced, but only in the original insn.
19908 Use the replacement here too. */
19909 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
19910
19911 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
19912 expressions from the original insn, without unsharing them.
19913 Any RTL that points into the original insn will of course
19914 have register replacements applied. That is why we don't
19915 need to look for replacements under the PLUS. */
19916 addr = XEXP (addr, 1);
19917 }
19918 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19919
19920 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19921
19922 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19923
19924 /* Now create the move. */
19925 if (store_p)
19926 emit_insn (gen_rtx_SET (mem, reg));
19927 else
19928 emit_insn (gen_rtx_SET (reg, mem));
19929
19930 return;
19931 }
19932
19933 /* Given an rtx X being reloaded into a reg required to be
19934 in class CLASS, return the class of reg to actually use.
19935 In general this is just CLASS; but on some machines
19936 in some cases it is preferable to use a more restrictive class.
19937
19938 On the RS/6000, we have to return NO_REGS when we want to reload a
19939 floating-point CONST_DOUBLE to force it to be copied to memory.
19940
19941 We also don't want to reload integer values into floating-point
19942 registers if we can at all help it. In fact, this can
19943 cause reload to die, if it tries to generate a reload of CTR
19944 into a FP register and discovers it doesn't have the memory location
19945 required.
19946
19947 ??? Would it be a good idea to have reload do the converse, that is
19948 try to reload floating modes into FP registers if possible?
19949 */
19950
19951 static enum reg_class
19952 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19953 {
19954 machine_mode mode = GET_MODE (x);
19955 bool is_constant = CONSTANT_P (x);
19956
19957 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19958 reload class for it. */
19959 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19960 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19961 return NO_REGS;
19962
19963 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19964 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19965 return NO_REGS;
19966
19967 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19968 the reloading of address expressions using PLUS into floating point
19969 registers. */
19970 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19971 {
19972 if (is_constant)
19973 {
19974 /* Zero is always allowed in all VSX registers. */
19975 if (x == CONST0_RTX (mode))
19976 return rclass;
19977
19978 /* If this is a vector constant that can be formed with a few Altivec
19979 instructions, we want altivec registers. */
19980 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19981 return ALTIVEC_REGS;
19982
19983 /* If this is an integer constant that can easily be loaded into
19984 vector registers, allow it. */
19985 if (CONST_INT_P (x))
19986 {
19987 HOST_WIDE_INT value = INTVAL (x);
19988
19989 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19990 2.06 can generate it in the Altivec registers with
19991 VSPLTI<x>. */
19992 if (value == -1)
19993 {
19994 if (TARGET_P8_VECTOR)
19995 return rclass;
19996 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19997 return ALTIVEC_REGS;
19998 else
19999 return NO_REGS;
20000 }
20001
20002 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20003 a sign extend in the Altivec registers. */
20004 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20005 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20006 return ALTIVEC_REGS;
20007 }
20008
20009 /* Force constant to memory. */
20010 return NO_REGS;
20011 }
20012
20013 /* D-form addressing can easily reload the value. */
20014 if (mode_supports_vmx_dform (mode)
20015 || mode_supports_dq_form (mode))
20016 return rclass;
20017
20018 /* If this is a scalar floating point value and we don't have D-form
20019 addressing, prefer the traditional floating point registers so that we
20020 can use D-form (register+offset) addressing. */
20021 if (rclass == VSX_REGS
20022 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20023 return FLOAT_REGS;
20024
20025 /* Prefer the Altivec registers if Altivec is handling the vector
20026 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20027 loads. */
20028 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20029 || mode == V1TImode)
20030 return ALTIVEC_REGS;
20031
20032 return rclass;
20033 }
20034
20035 if (is_constant || GET_CODE (x) == PLUS)
20036 {
20037 if (reg_class_subset_p (GENERAL_REGS, rclass))
20038 return GENERAL_REGS;
20039 if (reg_class_subset_p (BASE_REGS, rclass))
20040 return BASE_REGS;
20041 return NO_REGS;
20042 }
20043
20044 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20045 return GENERAL_REGS;
20046
20047 return rclass;
20048 }
20049
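/* For illustration: an easy V4SImode vector constant (one that a vspltis*
   instruction can form) asked for VSX_REGS is narrowed to ALTIVEC_REGS
   above; a DFmode value without D-form VSX addressing is narrowed from
   VSX_REGS to FLOAT_REGS so that reg+offset (lfd/stfd) addressing stays
   usable; a hard constant matching none of the cases gets NO_REGS, which
   forces it to memory.  */
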
20050 /* Debug version of rs6000_preferred_reload_class. */
20051 static enum reg_class
20052 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20053 {
20054 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20055
20056 fprintf (stderr,
20057 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20058 "mode = %s, x:\n",
20059 reg_class_names[ret], reg_class_names[rclass],
20060 GET_MODE_NAME (GET_MODE (x)));
20061 debug_rtx (x);
20062
20063 return ret;
20064 }
20065
20066 /* If we are copying between FP or AltiVec registers and anything else, we need
20067 a memory location. The exception is when we are targeting ppc64 and the
   instructions to move directly between FPRs and GPRs are available.  Also,
   under VSX, you can copy vector registers from the FP register set to the
   Altivec register set and vice versa.  */
20071
20072 static bool
20073 rs6000_secondary_memory_needed (machine_mode mode,
20074 reg_class_t from_class,
20075 reg_class_t to_class)
20076 {
20077 enum rs6000_reg_type from_type, to_type;
20078 bool altivec_p = ((from_class == ALTIVEC_REGS)
20079 || (to_class == ALTIVEC_REGS));
20080
  /* If a simple/direct move is available, we don't need secondary memory.  */
20082 from_type = reg_class_to_reg_type[(int)from_class];
20083 to_type = reg_class_to_reg_type[(int)to_class];
20084
20085 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20086 (secondary_reload_info *)0, altivec_p))
20087 return false;
20088
20089 /* If we have a floating point or vector register class, we need to use
20090 memory to transfer the data. */
20091 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20092 return true;
20093
20094 return false;
20095 }
20096
20097 /* Debug version of rs6000_secondary_memory_needed. */
20098 static bool
20099 rs6000_debug_secondary_memory_needed (machine_mode mode,
20100 reg_class_t from_class,
20101 reg_class_t to_class)
20102 {
20103 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20104
20105 fprintf (stderr,
20106 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20107 "to_class = %s, mode = %s\n",
20108 ret ? "true" : "false",
20109 reg_class_names[from_class],
20110 reg_class_names[to_class],
20111 GET_MODE_NAME (mode));
20112
20113 return ret;
20114 }
20115
20116 /* Return the register class of a scratch register needed to copy IN into
20117 or out of a register in RCLASS in MODE. If it can be done directly,
20118 NO_REGS is returned. */
20119
20120 static enum reg_class
20121 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20122 rtx in)
20123 {
20124 int regno;
20125
20126 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20127 #if TARGET_MACHO
20128 && MACHOPIC_INDIRECT
20129 #endif
20130 ))
20131 {
20132 /* We cannot copy a symbolic operand directly into anything
20133 other than BASE_REGS for TARGET_ELF. So indicate that a
20134 register from BASE_REGS is needed as an intermediate
20135 register.
20136
20137 On Darwin, pic addresses require a load from memory, which
20138 needs a base register. */
20139 if (rclass != BASE_REGS
20140 && (GET_CODE (in) == SYMBOL_REF
20141 || GET_CODE (in) == HIGH
20142 || GET_CODE (in) == LABEL_REF
20143 || GET_CODE (in) == CONST))
20144 return BASE_REGS;
20145 }
20146
20147 if (GET_CODE (in) == REG)
20148 {
20149 regno = REGNO (in);
20150 if (regno >= FIRST_PSEUDO_REGISTER)
20151 {
20152 regno = true_regnum (in);
20153 if (regno >= FIRST_PSEUDO_REGISTER)
20154 regno = -1;
20155 }
20156 }
20157 else if (GET_CODE (in) == SUBREG)
20158 {
20159 regno = true_regnum (in);
20160 if (regno >= FIRST_PSEUDO_REGISTER)
20161 regno = -1;
20162 }
20163 else
20164 regno = -1;
20165
20166 /* If we have VSX register moves, prefer moving scalar values between
20167 Altivec registers and GPR by going via an FPR (and then via memory)
20168 instead of reloading the secondary memory address for Altivec moves. */
20169 if (TARGET_VSX
20170 && GET_MODE_SIZE (mode) < 16
20171 && !mode_supports_vmx_dform (mode)
20172 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20173 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20174 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20175 && (regno >= 0 && INT_REGNO_P (regno)))))
20176 return FLOAT_REGS;
20177
20178 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20179 into anything. */
20180 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20181 || (regno >= 0 && INT_REGNO_P (regno)))
20182 return NO_REGS;
20183
20184 /* Constants, memory, and VSX registers can go into VSX registers (both the
20185 traditional floating point and the altivec registers). */
20186 if (rclass == VSX_REGS
20187 && (regno == -1 || VSX_REGNO_P (regno)))
20188 return NO_REGS;
20189
20190 /* Constants, memory, and FP registers can go into FP registers. */
20191 if ((regno == -1 || FP_REGNO_P (regno))
20192 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20193 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20194
20195 /* Memory, and AltiVec registers can go into AltiVec registers. */
20196 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20197 && rclass == ALTIVEC_REGS)
20198 return NO_REGS;
20199
20200 /* We can copy among the CR registers. */
20201 if ((rclass == CR_REGS || rclass == CR0_REGS)
20202 && regno >= 0 && CR_REGNO_P (regno))
20203 return NO_REGS;
20204
20205 /* Otherwise, we need GENERAL_REGS. */
20206 return GENERAL_REGS;
20207 }
20208
20209 /* Debug version of rs6000_secondary_reload_class. */
20210 static enum reg_class
20211 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20212 machine_mode mode, rtx in)
20213 {
20214 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20215 fprintf (stderr,
20216 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20217 "mode = %s, input rtx:\n",
20218 reg_class_names[ret], reg_class_names[rclass],
20219 GET_MODE_NAME (mode));
20220 debug_rtx (in);
20221
20222 return ret;
20223 }
20224
20225 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20226
20227 static bool
20228 rs6000_can_change_mode_class (machine_mode from,
20229 machine_mode to,
20230 reg_class_t rclass)
20231 {
20232 unsigned from_size = GET_MODE_SIZE (from);
20233 unsigned to_size = GET_MODE_SIZE (to);
20234
20235 if (from_size != to_size)
20236 {
20237 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20238
20239 if (reg_classes_intersect_p (xclass, rclass))
20240 {
20241 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20242 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20243 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20244 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20245
20246 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20247 single register under VSX because the scalar part of the register
20248 is in the upper 64-bits, and not the lower 64-bits. Types like
20249 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20250 IEEE floating point can't overlap, and neither can small
20251 values. */
20252
20253 if (to_float128_vector_p && from_float128_vector_p)
20254 return true;
20255
20256 else if (to_float128_vector_p || from_float128_vector_p)
20257 return false;
20258
20259 /* TDmode in floating-mode registers must always go into a register
20260 pair with the most significant word in the even-numbered register
20261 to match ISA requirements. In little-endian mode, this does not
20262 match subreg numbering, so we cannot allow subregs. */
20263 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20264 return false;
20265
20266 if (from_size < 8 || to_size < 8)
20267 return false;
20268
20269 if (from_size == 8 && (8 * to_nregs) != to_size)
20270 return false;
20271
20272 if (to_size == 8 && (8 * from_nregs) != from_size)
20273 return false;
20274
20275 return true;
20276 }
20277 else
20278 return true;
20279 }
20280
20281 /* Since the VSX register set includes traditional floating point registers
20282 and altivec registers, just check for the size being different instead of
20283 trying to check whether the modes are vector modes. Otherwise it won't
20284 allow say DF and DI to change classes. For types like TFmode and TDmode
20285 that take 2 64-bit registers, rather than a single 128-bit register, don't
20286 allow subregs of those types to other 128-bit types. */
20287 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20288 {
20289 unsigned num_regs = (from_size + 15) / 16;
20290 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20291 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20292 return false;
20293
20294 return (from_size == 8 || from_size == 16);
20295 }
20296
20297 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20298 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20299 return false;
20300
20301 return true;
20302 }
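/* Illustrative sketch (not part of the build) of the VSX arithmetic above:
   a mode change is allowed only between modes that occupy one 64-bit or one
   128-bit register, and the nregs check keeps 2-register types such as
   TFmode/TDmode from aliasing single-register 128-bit types.  */
#if 0
#include <stdbool.h>

static bool
sketch_vsx_mode_change_ok (unsigned from_size, unsigned from_nregs,
			   unsigned to_nregs)
{
  unsigned num_regs = (from_size + 15) / 16;
  if (to_nregs > num_regs || from_nregs > num_regs)
    return false;
  return from_size == 8 || from_size == 16;
}
#endif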
20303
20304 /* Debug version of rs6000_can_change_mode_class. */
20305 static bool
20306 rs6000_debug_can_change_mode_class (machine_mode from,
20307 machine_mode to,
20308 reg_class_t rclass)
20309 {
20310 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20311
20312 fprintf (stderr,
20313 "rs6000_can_change_mode_class, return %s, from = %s, "
20314 "to = %s, rclass = %s\n",
20315 ret ? "true" : "false",
20316 GET_MODE_NAME (from), GET_MODE_NAME (to),
20317 reg_class_names[rclass]);
20318
20319 return ret;
20320 }
20321 \f
20322 /* Return a string to do a move operation of 128 bits of data. */
20323
20324 const char *
20325 rs6000_output_move_128bit (rtx operands[])
20326 {
20327 rtx dest = operands[0];
20328 rtx src = operands[1];
20329 machine_mode mode = GET_MODE (dest);
20330 int dest_regno;
20331 int src_regno;
20332 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20333 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20334
20335 if (REG_P (dest))
20336 {
20337 dest_regno = REGNO (dest);
20338 dest_gpr_p = INT_REGNO_P (dest_regno);
20339 dest_fp_p = FP_REGNO_P (dest_regno);
20340 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20341 dest_vsx_p = dest_fp_p | dest_vmx_p;
20342 }
20343 else
20344 {
20345 dest_regno = -1;
20346 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20347 }
20348
20349 if (REG_P (src))
20350 {
20351 src_regno = REGNO (src);
20352 src_gpr_p = INT_REGNO_P (src_regno);
20353 src_fp_p = FP_REGNO_P (src_regno);
20354 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20355 src_vsx_p = src_fp_p | src_vmx_p;
20356 }
20357 else
20358 {
20359 src_regno = -1;
20360 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20361 }
20362
20363 /* Register moves. */
20364 if (dest_regno >= 0 && src_regno >= 0)
20365 {
20366 if (dest_gpr_p)
20367 {
20368 if (src_gpr_p)
20369 return "#";
20370
20371 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20372 return (WORDS_BIG_ENDIAN
20373 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20374 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20375
20376 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20377 return "#";
20378 }
20379
20380 else if (TARGET_VSX && dest_vsx_p)
20381 {
20382 if (src_vsx_p)
20383 return "xxlor %x0,%x1,%x1";
20384
20385 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20386 return (WORDS_BIG_ENDIAN
20387 ? "mtvsrdd %x0,%1,%L1"
20388 : "mtvsrdd %x0,%L1,%1");
20389
20390 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20391 return "#";
20392 }
20393
20394 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20395 return "vor %0,%1,%1";
20396
20397 else if (dest_fp_p && src_fp_p)
20398 return "#";
20399 }
20400
20401 /* Loads. */
20402 else if (dest_regno >= 0 && MEM_P (src))
20403 {
20404 if (dest_gpr_p)
20405 {
20406 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20407 return "lq %0,%1";
20408 else
20409 return "#";
20410 }
20411
20412 else if (TARGET_ALTIVEC && dest_vmx_p
20413 && altivec_indexed_or_indirect_operand (src, mode))
20414 return "lvx %0,%y1";
20415
20416 else if (TARGET_VSX && dest_vsx_p)
20417 {
20418 if (mode_supports_dq_form (mode)
20419 && quad_address_p (XEXP (src, 0), mode, true))
20420 return "lxv %x0,%1";
20421
20422 else if (TARGET_P9_VECTOR)
20423 return "lxvx %x0,%y1";
20424
20425 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20426 return "lxvw4x %x0,%y1";
20427
20428 else
20429 return "lxvd2x %x0,%y1";
20430 }
20431
20432 else if (TARGET_ALTIVEC && dest_vmx_p)
20433 return "lvx %0,%y1";
20434
20435 else if (dest_fp_p)
20436 return "#";
20437 }
20438
20439 /* Stores. */
20440 else if (src_regno >= 0 && MEM_P (dest))
20441 {
20442 if (src_gpr_p)
20443 {
20444 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20445 return "stq %1,%0";
20446 else
20447 return "#";
20448 }
20449
20450 else if (TARGET_ALTIVEC && src_vmx_p
20451 && altivec_indexed_or_indirect_operand (dest, mode))
20452 return "stvx %1,%y0";
20453
20454 else if (TARGET_VSX && src_vsx_p)
20455 {
20456 if (mode_supports_dq_form (mode)
20457 && quad_address_p (XEXP (dest, 0), mode, true))
20458 return "stxv %x1,%0";
20459
20460 else if (TARGET_P9_VECTOR)
20461 return "stxvx %x1,%y0";
20462
20463 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20464 return "stxvw4x %x1,%y0";
20465
20466 else
20467 return "stxvd2x %x1,%y0";
20468 }
20469
20470 else if (TARGET_ALTIVEC && src_vmx_p)
20471 return "stvx %1,%y0";
20472
20473 else if (src_fp_p)
20474 return "#";
20475 }
20476
20477 /* Constants. */
20478 else if (dest_regno >= 0
20479 && (GET_CODE (src) == CONST_INT
20480 || GET_CODE (src) == CONST_WIDE_INT
20481 || GET_CODE (src) == CONST_DOUBLE
20482 || GET_CODE (src) == CONST_VECTOR))
20483 {
20484 if (dest_gpr_p)
20485 return "#";
20486
20487 else if ((dest_vmx_p && TARGET_ALTIVEC)
20488 || (dest_vsx_p && TARGET_VSX))
20489 return output_vec_const_move (operands);
20490 }
20491
20492 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20493 }
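/* Illustrative sketch (not part of the build) of the VSX load selection
   above, flattened into a plain decision function; the booleans are
   hypothetical stand-ins for the dq-form, ISA 3.0 and element-size tests.
   The store side picks the matching stxv/stxvx/stxvw4x/stxvd2x templates
   the same way.  */
#if 0
#include <stdbool.h>

static const char *
sketch_vsx_load_template (bool dq_form_ok, bool p9_vector, bool w4_mode)
{
  if (dq_form_ok)
    return "lxv %x0,%1";	/* ISA 3.0 DQ-form load.  */
  if (p9_vector)
    return "lxvx %x0,%y1";	/* ISA 3.0 indexed load.  */
  if (w4_mode)
    return "lxvw4x %x0,%y1";	/* Word elements (V16QI/V8HI/V4SI).  */
  return "lxvd2x %x0,%y1";	/* Doubleword elements.  */
}
#endif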
20494
20495 /* Validate a 128-bit move. */
20496 bool
20497 rs6000_move_128bit_ok_p (rtx operands[])
20498 {
20499 machine_mode mode = GET_MODE (operands[0]);
20500 return (gpc_reg_operand (operands[0], mode)
20501 || gpc_reg_operand (operands[1], mode));
20502 }
20503
20504 /* Return true if a 128-bit move needs to be split. */
20505 bool
20506 rs6000_split_128bit_ok_p (rtx operands[])
20507 {
20508 if (!reload_completed)
20509 return false;
20510
20511 if (!gpr_or_gpr_p (operands[0], operands[1]))
20512 return false;
20513
20514 if (quad_load_store_p (operands[0], operands[1]))
20515 return false;
20516
20517 return true;
20518 }
20519
20520 \f
20521 /* Given a comparison operation, return the bit number in CCR to test. We
20522 know this is a valid comparison.
20523
20524 SCC_P is 1 if this is for an scc. That means that %D will have been
20525 used instead of %C, so the bits will be in different places.
20526
20527 Return -1 if OP isn't a valid comparison for some reason. */
20528
20529 int
20530 ccr_bit (rtx op, int scc_p)
20531 {
20532 enum rtx_code code = GET_CODE (op);
20533 machine_mode cc_mode;
20534 int cc_regnum;
20535 int base_bit;
20536 rtx reg;
20537
20538 if (!COMPARISON_P (op))
20539 return -1;
20540
20541 reg = XEXP (op, 0);
20542
20543 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
20544
20545 cc_mode = GET_MODE (reg);
20546 cc_regnum = REGNO (reg);
20547 base_bit = 4 * (cc_regnum - CR0_REGNO);
20548
20549 validate_condition_mode (code, cc_mode);
20550
20551 /* When generating a sCOND operation, only positive conditions are
20552 allowed. */
20553 gcc_assert (!scc_p
20554 || code == EQ || code == GT || code == LT || code == UNORDERED
20555 || code == GTU || code == LTU);
20556
20557 switch (code)
20558 {
20559 case NE:
20560 return scc_p ? base_bit + 3 : base_bit + 2;
20561 case EQ:
20562 return base_bit + 2;
20563 case GT: case GTU: case UNLE:
20564 return base_bit + 1;
20565 case LT: case LTU: case UNGE:
20566 return base_bit;
20567 case ORDERED: case UNORDERED:
20568 return base_bit + 3;
20569
20570 case GE: case GEU:
20571 /* If scc, we will have done a cror to put the bit in the
20572 unordered position. So test that bit. For integer, this is ! LT
20573 unless this is an scc insn. */
20574 return scc_p ? base_bit + 3 : base_bit;
20575
20576 case LE: case LEU:
20577 return scc_p ? base_bit + 3 : base_bit + 1;
20578
20579 default:
20580 gcc_unreachable ();
20581 }
20582 }
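/* Illustrative sketch (not part of the build): each CR field CRn holds four
   bits at 4*n .. 4*n+3, ordered LT, GT, EQ, SO/UN, which is the base_bit
   arithmetic above.  E.g. the EQ bit of CR2 is 4*2 + 2 = 10.  */
#if 0
#include <stdio.h>

int
main (void)
{
  static const char *const bit_names[4] = { "lt", "gt", "eq", "so/un" };
  for (int field = 0; field < 8; field++)
    for (int bit = 0; bit < 4; bit++)
      printf ("cr%d.%-5s -> CCR bit %d\n",
	      field, bit_names[bit], 4 * field + bit);
  return 0;
}
#endif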
20583 \f
20584 /* Return the GOT register. */
20585
20586 rtx
20587 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20588 {
20589 /* The second flow pass currently (June 1999) can't update
20590 regs_ever_live without disturbing other parts of the compiler, so
20591 update it here to make the prolog/epilogue code happy. */
20592 if (!can_create_pseudo_p ()
20593 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20594 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20595
20596 crtl->uses_pic_offset_table = 1;
20597
20598 return pic_offset_table_rtx;
20599 }
20600 \f
20601 static rs6000_stack_t stack_info;
20602
20603 /* Function to init struct machine_function.
20604 This will be called, via a pointer variable,
20605 from push_function_context. */
20606
20607 static struct machine_function *
20608 rs6000_init_machine_status (void)
20609 {
20610 stack_info.reload_completed = 0;
20611 return ggc_cleared_alloc<machine_function> ();
20612 }
20613 \f
20614 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20615
20616 /* Write out a function code label. */
20617
20618 void
20619 rs6000_output_function_entry (FILE *file, const char *fname)
20620 {
20621 if (fname[0] != '.')
20622 {
20623 switch (DEFAULT_ABI)
20624 {
20625 default:
20626 gcc_unreachable ();
20627
20628 case ABI_AIX:
20629 if (DOT_SYMBOLS)
20630 putc ('.', file);
20631 else
20632 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20633 break;
20634
20635 case ABI_ELFv2:
20636 case ABI_V4:
20637 case ABI_DARWIN:
20638 break;
20639 }
20640 }
20641
20642 RS6000_OUTPUT_BASENAME (file, fname);
20643 }
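/* Illustrative sketch (not part of the build): under the AIX ABI with
   dot-symbols, the code entry point of "foo" is emitted as ".foo" (the
   undotted "foo" names the function descriptor); ELFv2, V4 and Darwin use
   the plain name.  */
#if 0
#include <stdio.h>

static void
sketch_output_entry (FILE *file, const char *fname, int aix_dot_symbols)
{
  if (fname[0] != '.' && aix_dot_symbols)
    putc ('.', file);
  fputs (fname, file);
}
#endif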
20644
20645 /* Print an operand. Recognize special options, documented below. */
20646
20647 #if TARGET_ELF
20648 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20649 only introduced by the linker, when applying the sda21
20650 relocation. */
20651 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20652 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20653 #else
20654 #define SMALL_DATA_RELOC "sda21"
20655 #define SMALL_DATA_REG 0
20656 #endif
20657
20658 void
20659 print_operand (FILE *file, rtx x, int code)
20660 {
20661 int i;
20662 unsigned HOST_WIDE_INT uval;
20663
20664 switch (code)
20665 {
20666 /* %a is output_address. */
20667
20668 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20669 output_operand. */
20670
20671 case 'D':
20672 /* Like 'J' but get to the GT bit only. */
20673 gcc_assert (REG_P (x));
20674
20675 /* Bit 1 is GT bit. */
20676 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20677
20678 /* Add one for shift count in rlinm for scc. */
20679 fprintf (file, "%d", i + 1);
20680 return;
20681
20682 case 'e':
20683 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20684 if (! INT_P (x))
20685 {
20686 output_operand_lossage ("invalid %%e value");
20687 return;
20688 }
20689
20690 uval = INTVAL (x);
20691 if ((uval & 0xffff) == 0 && uval != 0)
20692 putc ('s', file);
20693 return;
20694
20695 case 'E':
20696 /* X is a CR register. Print the number of the EQ bit of the CR. */
20697 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20698 output_operand_lossage ("invalid %%E value");
20699 else
20700 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20701 return;
20702
20703 case 'f':
20704 /* X is a CR register. Print the shift count needed to move it
20705 to the high-order four bits. */
20706 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20707 output_operand_lossage ("invalid %%f value");
20708 else
20709 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20710 return;
20711
20712 case 'F':
20713 /* Similar, but print the count for the rotate in the opposite
20714 direction. */
20715 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20716 output_operand_lossage ("invalid %%F value");
20717 else
20718 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20719 return;
20720
20721 case 'G':
20722 /* X is a constant integer. If it is negative, print "m",
20723 otherwise print "z". This is to make an aze or ame insn. */
20724 if (GET_CODE (x) != CONST_INT)
20725 output_operand_lossage ("invalid %%G value");
20726 else if (INTVAL (x) >= 0)
20727 putc ('z', file);
20728 else
20729 putc ('m', file);
20730 return;
20731
20732 case 'h':
20733 /* If constant, output low-order five bits. Otherwise, write
20734 normally. */
20735 if (INT_P (x))
20736 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20737 else
20738 print_operand (file, x, 0);
20739 return;
20740
20741 case 'H':
20742 /* If constant, output low-order six bits. Otherwise, write
20743 normally. */
20744 if (INT_P (x))
20745 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20746 else
20747 print_operand (file, x, 0);
20748 return;
20749
20750 case 'I':
20751 /* Print `i' if this is a constant, else nothing. */
20752 if (INT_P (x))
20753 putc ('i', file);
20754 return;
20755
20756 case 'j':
20757 /* Write the bit number in CCR for jump. */
20758 i = ccr_bit (x, 0);
20759 if (i == -1)
20760 output_operand_lossage ("invalid %%j code");
20761 else
20762 fprintf (file, "%d", i);
20763 return;
20764
20765 case 'J':
20766 /* Similar, but add one for shift count in rlinm for scc and pass
20767 scc flag to `ccr_bit'. */
20768 i = ccr_bit (x, 1);
20769 if (i == -1)
20770 output_operand_lossage ("invalid %%J code");
20771 else
20772 /* If we want bit 31, write a shift count of zero, not 32. */
20773 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20774 return;
20775
20776 case 'k':
20777 /* X must be a constant. Write the 1's complement of the
20778 constant. */
20779 if (! INT_P (x))
20780 output_operand_lossage ("invalid %%k value");
20781 else
20782 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20783 return;
20784
20785 case 'K':
20786 /* X must be a symbolic constant on ELF. Write an
20787 expression suitable for an 'addi' that adds in the low 16
20788 bits of the MEM. */
20789 if (GET_CODE (x) == CONST)
20790 {
20791 if (GET_CODE (XEXP (x, 0)) != PLUS
20792 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20793 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20794 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20795 output_operand_lossage ("invalid %%K value");
20796 }
20797 print_operand_address (file, x);
20798 fputs ("@l", file);
20799 return;
20800
20801 /* %l is output_asm_label. */
20802
20803 case 'L':
20804 /* Write second word of DImode or DFmode reference. Works on register
20805 or non-indexed memory only. */
20806 if (REG_P (x))
20807 fputs (reg_names[REGNO (x) + 1], file);
20808 else if (MEM_P (x))
20809 {
20810 machine_mode mode = GET_MODE (x);
20811 /* Handle possible auto-increment. Since it is pre-increment and
20812 we have already done it, we can just use an offset of one word. */
20813 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20814 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20815 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20816 UNITS_PER_WORD));
20817 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20818 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20819 UNITS_PER_WORD));
20820 else
20821 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20822 UNITS_PER_WORD),
20823 0));
20824
20825 if (small_data_operand (x, GET_MODE (x)))
20826 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20827 reg_names[SMALL_DATA_REG]);
20828 }
20829 return;
20830
20831 case 'N': /* Unused */
20832 /* Write the number of elements in the vector times 4. */
20833 if (GET_CODE (x) != PARALLEL)
20834 output_operand_lossage ("invalid %%N value");
20835 else
20836 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20837 return;
20838
20839 case 'O': /* Unused */
20840 /* Similar, but subtract 1 first. */
20841 if (GET_CODE (x) != PARALLEL)
20842 output_operand_lossage ("invalid %%O value");
20843 else
20844 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20845 return;
20846
20847 case 'p':
20848 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20849 if (! INT_P (x)
20850 || INTVAL (x) < 0
20851 || (i = exact_log2 (INTVAL (x))) < 0)
20852 output_operand_lossage ("invalid %%p value");
20853 else
20854 fprintf (file, "%d", i);
20855 return;
20856
20857 case 'P':
20858 /* The operand must be an indirect memory reference. The result
20859 is the register name. */
20860 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20861 || REGNO (XEXP (x, 0)) >= 32)
20862 output_operand_lossage ("invalid %%P value");
20863 else
20864 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20865 return;
20866
20867 case 'q':
20868 /* This outputs the logical code corresponding to a boolean
20869 expression. The expression may have one or both operands
20870 negated (if one, only the first one). For condition register
20871 logical operations, it will also treat the negated
20872 CR codes as NOTs, but not handle NOTs of them. */
20873 {
20874 const char *const *t = 0;
20875 const char *s;
20876 enum rtx_code code = GET_CODE (x);
20877 static const char * const tbl[3][3] = {
20878 { "and", "andc", "nor" },
20879 { "or", "orc", "nand" },
20880 { "xor", "eqv", "xor" } };
20881
20882 if (code == AND)
20883 t = tbl[0];
20884 else if (code == IOR)
20885 t = tbl[1];
20886 else if (code == XOR)
20887 t = tbl[2];
20888 else
20889 output_operand_lossage ("invalid %%q value");
20890
20891 if (GET_CODE (XEXP (x, 0)) != NOT)
20892 s = t[0];
20893 else
20894 {
20895 if (GET_CODE (XEXP (x, 1)) == NOT)
20896 s = t[2];
20897 else
20898 s = t[1];
20899 }
20900
20901 fputs (s, file);
20902 }
20903 return;
20904
20905 case 'Q':
20906 if (! TARGET_MFCRF)
20907 return;
20908 fputc (',', file);
20909 /* FALLTHRU */
20910
20911 case 'R':
20912 /* X is a CR register. Print the mask for `mtcrf'. */
20913 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
20914 output_operand_lossage ("invalid %%R value");
20915 else
20916 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20917 return;
20918
20919 case 's':
20920 /* Low 5 bits of 32 - value */
20921 if (! INT_P (x))
20922 output_operand_lossage ("invalid %%s value");
20923 else
20924 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20925 return;
20926
20927 case 't':
20928 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20929 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
20930
20931 /* Bit 3 is OV bit. */
20932 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20933
20934 /* If we want bit 31, write a shift count of zero, not 32. */
20935 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20936 return;
20937
20938 case 'T':
20939 /* Print the symbolic name of a branch target register. */
20940 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
20941 && REGNO (x) != CTR_REGNO))
20942 output_operand_lossage ("invalid %%T value");
20943 else if (REGNO (x) == LR_REGNO)
20944 fputs ("lr", file);
20945 else
20946 fputs ("ctr", file);
20947 return;
20948
20949 case 'u':
20950 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20951 for use in unsigned operand. */
20952 if (! INT_P (x))
20953 {
20954 output_operand_lossage ("invalid %%u value");
20955 return;
20956 }
20957
20958 uval = INTVAL (x);
20959 if ((uval & 0xffff) == 0)
20960 uval >>= 16;
20961
20962 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20963 return;
20964
20965 case 'v':
20966 /* High-order 16 bits of constant for use in signed operand. */
20967 if (! INT_P (x))
20968 output_operand_lossage ("invalid %%v value");
20969 else
20970 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20971 (INTVAL (x) >> 16) & 0xffff);
20972 return;
20973
20974 case 'U':
20975 /* Print `u' if this has an auto-increment or auto-decrement. */
20976 if (MEM_P (x)
20977 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20978 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20979 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20980 putc ('u', file);
20981 return;
20982
20983 case 'V':
20984 /* Print the trap code for this operand. */
20985 switch (GET_CODE (x))
20986 {
20987 case EQ:
20988 fputs ("eq", file); /* 4 */
20989 break;
20990 case NE:
20991 fputs ("ne", file); /* 24 */
20992 break;
20993 case LT:
20994 fputs ("lt", file); /* 16 */
20995 break;
20996 case LE:
20997 fputs ("le", file); /* 20 */
20998 break;
20999 case GT:
21000 fputs ("gt", file); /* 8 */
21001 break;
21002 case GE:
21003 fputs ("ge", file); /* 12 */
21004 break;
21005 case LTU:
21006 fputs ("llt", file); /* 2 */
21007 break;
21008 case LEU:
21009 fputs ("lle", file); /* 6 */
21010 break;
21011 case GTU:
21012 fputs ("lgt", file); /* 1 */
21013 break;
21014 case GEU:
21015 fputs ("lge", file); /* 5 */
21016 break;
21017 default:
21018 gcc_unreachable ();
21019 }
21020 break;
21021
21022 case 'w':
21023 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21024 normally. */
21025 if (INT_P (x))
21026 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21027 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21028 else
21029 print_operand (file, x, 0);
21030 return;
21031
21032 case 'x':
21033 /* X is an FPR or Altivec register used in a VSX context. */
21034 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21035 output_operand_lossage ("invalid %%x value");
21036 else
21037 {
21038 int reg = REGNO (x);
21039 int vsx_reg = (FP_REGNO_P (reg)
21040 ? reg - 32
21041 : reg - FIRST_ALTIVEC_REGNO + 32);
21042
21043 #ifdef TARGET_REGNAMES
21044 if (TARGET_REGNAMES)
21045 fprintf (file, "%%vs%d", vsx_reg);
21046 else
21047 #endif
21048 fprintf (file, "%d", vsx_reg);
21049 }
21050 return;
21051
21052 case 'X':
21053 if (MEM_P (x)
21054 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21055 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21056 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21057 putc ('x', file);
21058 return;
21059
21060 case 'Y':
21061 /* Like 'L', for third word of TImode/PTImode. */
21062 if (REG_P (x))
21063 fputs (reg_names[REGNO (x) + 2], file);
21064 else if (MEM_P (x))
21065 {
21066 machine_mode mode = GET_MODE (x);
21067 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21068 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21069 output_address (mode, plus_constant (Pmode,
21070 XEXP (XEXP (x, 0), 0), 8));
21071 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21072 output_address (mode, plus_constant (Pmode,
21073 XEXP (XEXP (x, 0), 0), 8));
21074 else
21075 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21076 if (small_data_operand (x, GET_MODE (x)))
21077 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21078 reg_names[SMALL_DATA_REG]);
21079 }
21080 return;
21081
21082 case 'z':
21083 /* X is a SYMBOL_REF. Write out the name preceded by a
21084 period and without any trailing data in brackets. Used for function
21085 names. If we are configured for System V (or the embedded ABI) on
21086 the PowerPC, do not emit the period, since those systems do not use
21087 TOCs and the like. */
21088 gcc_assert (GET_CODE (x) == SYMBOL_REF);
21089
21090 /* For macho, check to see if we need a stub. */
21091 if (TARGET_MACHO)
21092 {
21093 const char *name = XSTR (x, 0);
21094 #if TARGET_MACHO
21095 if (darwin_emit_branch_islands
21096 && MACHOPIC_INDIRECT
21097 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21098 name = machopic_indirection_name (x, /*stub_p=*/true);
21099 #endif
21100 assemble_name (file, name);
21101 }
21102 else if (!DOT_SYMBOLS)
21103 assemble_name (file, XSTR (x, 0));
21104 else
21105 rs6000_output_function_entry (file, XSTR (x, 0));
21106 return;
21107
21108 case 'Z':
21109 /* Like 'L', for last word of TImode/PTImode. */
21110 if (REG_P (x))
21111 fputs (reg_names[REGNO (x) + 3], file);
21112 else if (MEM_P (x))
21113 {
21114 machine_mode mode = GET_MODE (x);
21115 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21116 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21117 output_address (mode, plus_constant (Pmode,
21118 XEXP (XEXP (x, 0), 0), 12));
21119 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21120 output_address (mode, plus_constant (Pmode,
21121 XEXP (XEXP (x, 0), 0), 12));
21122 else
21123 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21124 if (small_data_operand (x, GET_MODE (x)))
21125 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21126 reg_names[SMALL_DATA_REG]);
21127 }
21128 return;
21129
21130 /* Print AltiVec memory operand. */
21131 case 'y':
21132 {
21133 rtx tmp;
21134
21135 gcc_assert (MEM_P (x));
21136
21137 tmp = XEXP (x, 0);
21138
21139 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21140 && GET_CODE (tmp) == AND
21141 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21142 && INTVAL (XEXP (tmp, 1)) == -16)
21143 tmp = XEXP (tmp, 0);
21144 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21145 && GET_CODE (tmp) == PRE_MODIFY)
21146 tmp = XEXP (tmp, 1);
21147 if (REG_P (tmp))
21148 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21149 else
21150 {
21151 if (GET_CODE (tmp) != PLUS
21152 || !REG_P (XEXP (tmp, 0))
21153 || !REG_P (XEXP (tmp, 1)))
21154 {
21155 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21156 break;
21157 }
21158
21159 if (REGNO (XEXP (tmp, 0)) == 0)
21160 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21161 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21162 else
21163 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21164 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21165 }
21166 break;
21167 }
21168
21169 case 0:
21170 if (REG_P (x))
21171 fprintf (file, "%s", reg_names[REGNO (x)]);
21172 else if (MEM_P (x))
21173 {
21174 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21175 know the width from the mode. */
21176 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21177 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21178 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21179 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21180 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21181 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21182 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21183 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21184 else
21185 output_address (GET_MODE (x), XEXP (x, 0));
21186 }
21187 else
21188 {
21189 if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21190 /* This hack along with a corresponding hack in
21191 rs6000_output_addr_const_extra arranges to output addends
21192 where the assembler expects to find them. e.g.
21193 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21194 without this hack would be output as "x@toc+4". We
21195 want "x+4@toc". */
21196 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21197 else
21198 output_addr_const (file, x);
21199 }
21200 return;
21201
21202 case '&':
21203 if (const char *name = get_some_local_dynamic_name ())
21204 assemble_name (file, name);
21205 else
21206 output_operand_lossage ("'%%&' used without any "
21207 "local dynamic TLS references");
21208 return;
21209
21210 default:
21211 output_operand_lossage ("invalid %%xn code");
21212 }
21213 }
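/* Illustrative sketch (not part of the build) of the arithmetic behind two
   of the codes above: '%u' picks whichever 16-bit half of the constant is
   non-zero, and '%w' sign-extends the low 16 bits.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long long uval = 0x12340000ULL;	/* '%u' example.  */
  if ((uval & 0xffff) == 0)
    uval >>= 16;
  printf ("0x%llx\n", uval & 0xffff);		/* prints 0x1234 */

  long long x = 0x8000;				/* '%w' example.  */
  printf ("%lld\n", ((x & 0xffff) ^ 0x8000) - 0x8000);	/* prints -32768 */
  return 0;
}
#endif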
21214 \f
21215 /* Print the address of an operand. */
21216
21217 void
21218 print_operand_address (FILE *file, rtx x)
21219 {
21220 if (REG_P (x))
21221 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21222 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21223 || GET_CODE (x) == LABEL_REF)
21224 {
21225 output_addr_const (file, x);
21226 if (small_data_operand (x, GET_MODE (x)))
21227 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21228 reg_names[SMALL_DATA_REG]);
21229 else
21230 gcc_assert (!TARGET_TOC);
21231 }
21232 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21233 && REG_P (XEXP (x, 1)))
21234 {
21235 if (REGNO (XEXP (x, 0)) == 0)
21236 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21237 reg_names[ REGNO (XEXP (x, 0)) ]);
21238 else
21239 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21240 reg_names[ REGNO (XEXP (x, 1)) ]);
21241 }
21242 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21243 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21244 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21245 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21246 #if TARGET_MACHO
21247 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21248 && CONSTANT_P (XEXP (x, 1)))
21249 {
21250 fprintf (file, "lo16(");
21251 output_addr_const (file, XEXP (x, 1));
21252 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21253 }
21254 #endif
21255 #if TARGET_ELF
21256 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21257 && CONSTANT_P (XEXP (x, 1)))
21258 {
21259 output_addr_const (file, XEXP (x, 1));
21260 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21261 }
21262 #endif
21263 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21264 {
21265 /* This hack along with a corresponding hack in
21266 rs6000_output_addr_const_extra arranges to output addends
21267 where the assembler expects to find them. e.g.
21268 (lo_sum (reg 9)
21269 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21270 without this hack would be output as "x@toc+8@l(9)". We
21271 want "x+8@toc@l(9)". */
21272 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21273 if (GET_CODE (x) == LO_SUM)
21274 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21275 else
21276 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21277 }
21278 else
21279 output_addr_const (file, x);
21280 }
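/* Illustrative sketch (not part of the build) of the address syntax
   produced above, e.g. (reg 9) -> "0(9)", (plus (reg 9) (const_int 8)) ->
   "8(9)", (plus (reg 9) (reg 10)) -> "9,10".  In the indexed form, r0 in
   the first (RA) slot would read as a literal zero, hence the swap.  */
#if 0
#include <stdio.h>

static void
sketch_print_address (FILE *f, int base, int index, long off, int indexed)
{
  if (!indexed)
    fprintf (f, "%ld(%d)", off, base);
  else if (base == 0)
    fprintf (f, "%d,%d", index, base);	/* keep r0 out of the RA slot */
  else
    fprintf (f, "%d,%d", base, index);
}
#endif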
21281 \f
21282 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21283
21284 static bool
21285 rs6000_output_addr_const_extra (FILE *file, rtx x)
21286 {
21287 if (GET_CODE (x) == UNSPEC)
21288 switch (XINT (x, 1))
21289 {
21290 case UNSPEC_TOCREL:
21291 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21292 && REG_P (XVECEXP (x, 0, 1))
21293 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21294 output_addr_const (file, XVECEXP (x, 0, 0));
21295 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21296 {
21297 if (INTVAL (tocrel_offset_oac) >= 0)
21298 fprintf (file, "+");
21299 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21300 }
21301 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21302 {
21303 putc ('-', file);
21304 assemble_name (file, toc_label_name);
21305 need_toc_init = 1;
21306 }
21307 else if (TARGET_ELF)
21308 fputs ("@toc", file);
21309 return true;
21310
21311 #if TARGET_MACHO
21312 case UNSPEC_MACHOPIC_OFFSET:
21313 output_addr_const (file, XVECEXP (x, 0, 0));
21314 putc ('-', file);
21315 machopic_output_function_base_name (file);
21316 return true;
21317 #endif
21318 }
21319 return false;
21320 }
21321 \f
21322 /* Target hook for assembling integer objects. The PowerPC version has
21323 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21324 is defined. It also needs to handle DI-mode objects on 64-bit
21325 targets. */
21326
21327 static bool
21328 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21329 {
21330 #ifdef RELOCATABLE_NEEDS_FIXUP
21331 /* Special handling for SI values. */
21332 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21333 {
21334 static int recurse = 0;
21335
21336 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21337 the .fixup section. Since the TOC section is already relocated, we
21338 don't need to mark it here. We used to skip the text section, but it
21339 should never be valid for relocated addresses to be placed in the text
21340 section. */
21341 if (DEFAULT_ABI == ABI_V4
21342 && (TARGET_RELOCATABLE || flag_pic > 1)
21343 && in_section != toc_section
21344 && !recurse
21345 && !CONST_SCALAR_INT_P (x)
21346 && CONSTANT_P (x))
21347 {
21348 char buf[256];
21349
21350 recurse = 1;
21351 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21352 fixuplabelno++;
21353 ASM_OUTPUT_LABEL (asm_out_file, buf);
21354 fprintf (asm_out_file, "\t.long\t(");
21355 output_addr_const (asm_out_file, x);
21356 fprintf (asm_out_file, ")@fixup\n");
21357 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21358 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21359 fprintf (asm_out_file, "\t.long\t");
21360 assemble_name (asm_out_file, buf);
21361 fprintf (asm_out_file, "\n\t.previous\n");
21362 recurse = 0;
21363 return true;
21364 }
21365 /* Remove initial .'s to turn a -mcall-aixdesc function
21366 address into the address of the descriptor, not the function
21367 itself. */
21368 else if (GET_CODE (x) == SYMBOL_REF
21369 && XSTR (x, 0)[0] == '.'
21370 && DEFAULT_ABI == ABI_AIX)
21371 {
21372 const char *name = XSTR (x, 0);
21373 while (*name == '.')
21374 name++;
21375
21376 fprintf (asm_out_file, "\t.long\t%s\n", name);
21377 return true;
21378 }
21379 }
21380 #endif /* RELOCATABLE_NEEDS_FIXUP */
21381 return default_assemble_integer (x, size, aligned_p);
21382 }
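/* Worked example (label name illustrative) of the -mrelocatable fixup
   sequence emitted above for a relocatable SImode constant X, with the
   label number taken from fixuplabelno:

	.LCP5:
		.long	(X)@fixup
		.section ".fixup","aw"
		.align	2
		.long	.LCP5
		.previous

   so the word at .LCP5 can be found and patched when the image is
   relocated at run time.  */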
21383
21384 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21385 /* Emit an assembler directive to set symbol visibility for DECL to
21386 VISIBILITY_TYPE. */
21387
21388 static void
21389 rs6000_assemble_visibility (tree decl, int vis)
21390 {
21391 if (TARGET_XCOFF)
21392 return;
21393
21394 /* Functions need to have their entry point symbol visibility set as
21395 well as their descriptor symbol visibility. */
21396 if (DEFAULT_ABI == ABI_AIX
21397 && DOT_SYMBOLS
21398 && TREE_CODE (decl) == FUNCTION_DECL)
21399 {
21400 static const char * const visibility_types[] = {
21401 NULL, "protected", "hidden", "internal"
21402 };
21403
21404 const char *name, *type;
21405
21406 name = ((* targetm.strip_name_encoding)
21407 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21408 type = visibility_types[vis];
21409
21410 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21411 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21412 }
21413 else
21414 default_assemble_visibility (decl, vis);
21415 }
21416 #endif
21417 \f
21418 enum rtx_code
21419 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21420 {
21421 /* Reversing FP compares requires care -- an ordered compare
21422 becomes an unordered compare and vice versa. */
21423 if (mode == CCFPmode
21424 && (!flag_finite_math_only
21425 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21426 || code == UNEQ || code == LTGT))
21427 return reverse_condition_maybe_unordered (code);
21428 else
21429 return reverse_condition (code);
21430 }
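/* Illustrative sketch (not part of the build): with NaNs, !(a >= b) is
   "unordered or less than", not "less than", so a CCFPmode GE must reverse
   to UNLT rather than LT; both comparisons below print 0.  */
#if 0
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double a = NAN, b = 1.0;
  printf ("%d %d\n", a >= b, a < b);	/* prints "0 0" */
  return 0;
}
#endif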
21431
21432 /* Generate a compare for CODE. Return a brand-new rtx that
21433 represents the result of the compare. */
21434
21435 static rtx
21436 rs6000_generate_compare (rtx cmp, machine_mode mode)
21437 {
21438 machine_mode comp_mode;
21439 rtx compare_result;
21440 enum rtx_code code = GET_CODE (cmp);
21441 rtx op0 = XEXP (cmp, 0);
21442 rtx op1 = XEXP (cmp, 1);
21443
21444 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21445 comp_mode = CCmode;
21446 else if (FLOAT_MODE_P (mode))
21447 comp_mode = CCFPmode;
21448 else if (code == GTU || code == LTU
21449 || code == GEU || code == LEU)
21450 comp_mode = CCUNSmode;
21451 else if ((code == EQ || code == NE)
21452 && unsigned_reg_p (op0)
21453 && (unsigned_reg_p (op1)
21454 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21455 /* These are unsigned values; perhaps there will be a later
21456 ordering compare that can be shared with this one. */
21457 comp_mode = CCUNSmode;
21458 else
21459 comp_mode = CCmode;
21460
21461 /* If we have an unsigned compare, make sure we don't have a signed value as
21462 an immediate. */
21463 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21464 && INTVAL (op1) < 0)
21465 {
21466 op0 = copy_rtx_if_shared (op0);
21467 op1 = force_reg (GET_MODE (op0), op1);
21468 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21469 }
21470
21471 /* First, the compare. */
21472 compare_result = gen_reg_rtx (comp_mode);
21473
21474 /* IEEE 128-bit support in VSX registers when we do not have hardware
21475 support. */
21476 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21477 {
21478 rtx libfunc = NULL_RTX;
21479 bool check_nan = false;
21480 rtx dest;
21481
21482 switch (code)
21483 {
21484 case EQ:
21485 case NE:
21486 libfunc = optab_libfunc (eq_optab, mode);
21487 break;
21488
21489 case GT:
21490 case GE:
21491 libfunc = optab_libfunc (ge_optab, mode);
21492 break;
21493
21494 case LT:
21495 case LE:
21496 libfunc = optab_libfunc (le_optab, mode);
21497 break;
21498
21499 case UNORDERED:
21500 case ORDERED:
21501 libfunc = optab_libfunc (unord_optab, mode);
21502 code = (code == UNORDERED) ? NE : EQ;
21503 break;
21504
21505 case UNGE:
21506 case UNGT:
21507 check_nan = true;
21508 libfunc = optab_libfunc (ge_optab, mode);
21509 code = (code == UNGE) ? GE : GT;
21510 break;
21511
21512 case UNLE:
21513 case UNLT:
21514 check_nan = true;
21515 libfunc = optab_libfunc (le_optab, mode);
21516 code = (code == UNLE) ? LE : LT;
21517 break;
21518
21519 case UNEQ:
21520 case LTGT:
21521 check_nan = true;
21522 libfunc = optab_libfunc (eq_optab, mode);
21523 code = (code == UNEQ) ? EQ : NE;
21524 break;
21525
21526 default:
21527 gcc_unreachable ();
21528 }
21529
21530 gcc_assert (libfunc);
21531
21532 if (!check_nan)
21533 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21534 SImode, op0, mode, op1, mode);
21535
21536 /* The library signals an exception for signalling NaNs, so we need to
21537 handle isgreater, etc. by first checking isordered. */
21538 else
21539 {
21540 rtx ne_rtx, normal_dest, unord_dest;
21541 rtx unord_func = optab_libfunc (unord_optab, mode);
21542 rtx join_label = gen_label_rtx ();
21543 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21544 rtx unord_cmp = gen_reg_rtx (comp_mode);
21545
21546
21547 /* Test for either value being a NaN. */
21548 gcc_assert (unord_func);
21549 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21550 SImode, op0, mode, op1, mode);
21551
21552 /* Set value (1) if either value is a NaN, and jump to the join
21553 label. */
21554 dest = gen_reg_rtx (SImode);
21555 emit_move_insn (dest, const1_rtx);
21556 emit_insn (gen_rtx_SET (unord_cmp,
21557 gen_rtx_COMPARE (comp_mode, unord_dest,
21558 const0_rtx)));
21559
21560 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21561 emit_jump_insn (gen_rtx_SET (pc_rtx,
21562 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21563 join_ref,
21564 pc_rtx)));
21565
21566 /* Do the normal comparison, knowing that the values are not
21567 NaNs. */
21568 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21569 SImode, op0, mode, op1, mode);
21570
21571 emit_insn (gen_cstoresi4 (dest,
21572 gen_rtx_fmt_ee (code, SImode, normal_dest,
21573 const0_rtx),
21574 normal_dest, const0_rtx));
21575
21576 /* Join NaN and non-NaN paths. Compare dest against 0. */
21577 emit_label (join_label);
21578 code = NE;
21579 }
21580
21581 emit_insn (gen_rtx_SET (compare_result,
21582 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21583 }
21584
21585 else
21586 {
21587 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21588 CLOBBERs to match cmptf_internal2 pattern. */
21589 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21590 && FLOAT128_IBM_P (GET_MODE (op0))
21591 && TARGET_HARD_FLOAT)
21592 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21593 gen_rtvec (10,
21594 gen_rtx_SET (compare_result,
21595 gen_rtx_COMPARE (comp_mode, op0, op1)),
21596 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21597 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21598 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21599 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21600 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21601 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21602 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21603 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21604 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21605 else if (GET_CODE (op1) == UNSPEC
21606 && XINT (op1, 1) == UNSPEC_SP_TEST)
21607 {
21608 rtx op1b = XVECEXP (op1, 0, 0);
21609 comp_mode = CCEQmode;
21610 compare_result = gen_reg_rtx (CCEQmode);
21611 if (TARGET_64BIT)
21612 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21613 else
21614 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21615 }
21616 else
21617 emit_insn (gen_rtx_SET (compare_result,
21618 gen_rtx_COMPARE (comp_mode, op0, op1)));
21619 }
21620
21621 /* Some kinds of FP comparisons need an OR operation;
21622 under flag_finite_math_only we don't bother. */
21623 if (FLOAT_MODE_P (mode)
21624 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21625 && !flag_finite_math_only
21626 && (code == LE || code == GE
21627 || code == UNEQ || code == LTGT
21628 || code == UNGT || code == UNLT))
21629 {
21630 enum rtx_code or1, or2;
21631 rtx or1_rtx, or2_rtx, compare2_rtx;
21632 rtx or_result = gen_reg_rtx (CCEQmode);
21633
21634 switch (code)
21635 {
21636 case LE: or1 = LT; or2 = EQ; break;
21637 case GE: or1 = GT; or2 = EQ; break;
21638 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21639 case LTGT: or1 = LT; or2 = GT; break;
21640 case UNGT: or1 = UNORDERED; or2 = GT; break;
21641 case UNLT: or1 = UNORDERED; or2 = LT; break;
21642 default: gcc_unreachable ();
21643 }
21644 validate_condition_mode (or1, comp_mode);
21645 validate_condition_mode (or2, comp_mode);
21646 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21647 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21648 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21649 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21650 const_true_rtx);
21651 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21652
21653 compare_result = or_result;
21654 code = EQ;
21655 }
21656
21657 validate_condition_mode (code, GET_MODE (compare_result));
21658
21659 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21660 }
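/* Illustrative sketch (not part of the build) of the soft-float UNGT path
   above, written as plain C for a target where __float128 is available.
   The libgcc routine names follow the usual KFmode convention and are
   assumptions here.  */
#if 0
extern int __unordkf2 (__float128, __float128);
extern int __gekf2 (__float128, __float128);

static int
sketch_isungt (__float128 a, __float128 b)
{
  /* If either operand is a NaN, UNGT is true (the dest=1 branch above).  */
  if (__unordkf2 (a, b))
    return 1;
  /* Otherwise do the ordered compare; __gekf2 returns > 0 for a > b.  */
  return __gekf2 (a, b) > 0;
}
#endif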
21661
21662 \f
21663 /* Return the diagnostic message string if the binary operation OP is
21664 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21665
21666 static const char*
21667 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21668 const_tree type1,
21669 const_tree type2)
21670 {
21671 machine_mode mode1 = TYPE_MODE (type1);
21672 machine_mode mode2 = TYPE_MODE (type2);
21673
21674 /* For complex modes, use the inner type. */
21675 if (COMPLEX_MODE_P (mode1))
21676 mode1 = GET_MODE_INNER (mode1);
21677
21678 if (COMPLEX_MODE_P (mode2))
21679 mode2 = GET_MODE_INNER (mode2);
21680
21681 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21682 double to intermix unless -mfloat128-convert. */
21683 if (mode1 == mode2)
21684 return NULL;
21685
21686 if (!TARGET_FLOAT128_CVT)
21687 {
21688 if ((mode1 == KFmode && mode2 == IFmode)
21689 || (mode1 == IFmode && mode2 == KFmode))
21690 return N_("__float128 and __ibm128 cannot be used in the same "
21691 "expression");
21692
21693 if (TARGET_IEEEQUAD
21694 && ((mode1 == IFmode && mode2 == TFmode)
21695 || (mode1 == TFmode && mode2 == IFmode)))
21696 return N_("__ibm128 and long double cannot be used in the same "
21697 "expression");
21698
21699 if (!TARGET_IEEEQUAD
21700 && ((mode1 == KFmode && mode2 == TFmode)
21701 || (mode1 == TFmode && mode2 == KFmode)))
21702 return N_("__float128 and long double cannot be used in the same "
21703 "expression");
21704 }
21705
21706 return NULL;
21707 }
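/* User-level view (not part of the build) of the rule above: without
   -mfloat128-convert the two 128-bit formats may not meet in one
   expression, while explicit conversions remain fine.  */
#if 0
__float128 f128;
__ibm128 i128;

/* f128 + i128;    error: __float128 and __ibm128 cannot be used in the
		   same expression */

double ok (void)
{
  return (double) f128 + (double) i128;	/* explicit conversions are OK */
}
#endif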
21708
21709 \f
21710 /* Expand floating point conversion to/from __float128 and __ibm128. */
21711
21712 void
21713 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21714 {
21715 machine_mode dest_mode = GET_MODE (dest);
21716 machine_mode src_mode = GET_MODE (src);
21717 convert_optab cvt = unknown_optab;
21718 bool do_move = false;
21719 rtx libfunc = NULL_RTX;
21720 rtx dest2;
21721 typedef rtx (*rtx_2func_t) (rtx, rtx);
21722 rtx_2func_t hw_convert = (rtx_2func_t)0;
21723 size_t kf_or_tf;
21724
21725 struct hw_conv_t {
21726 rtx_2func_t from_df;
21727 rtx_2func_t from_sf;
21728 rtx_2func_t from_si_sign;
21729 rtx_2func_t from_si_uns;
21730 rtx_2func_t from_di_sign;
21731 rtx_2func_t from_di_uns;
21732 rtx_2func_t to_df;
21733 rtx_2func_t to_sf;
21734 rtx_2func_t to_si_sign;
21735 rtx_2func_t to_si_uns;
21736 rtx_2func_t to_di_sign;
21737 rtx_2func_t to_di_uns;
21738 } hw_conversions[2] = {
21739 /* Conversions to/from KFmode. */
21740 {
21741 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21742 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21743 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21744 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21745 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21746 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21747 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21748 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21749 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21750 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21751 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21752 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21753 },
21754
21755 /* Conversions to/from TFmode. */
21756 {
21757 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21758 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21759 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21760 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21761 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21762 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21763 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21764 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21765 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21766 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21767 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21768 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21769 },
21770 };
21771
21772 if (dest_mode == src_mode)
21773 gcc_unreachable ();
21774
21775 /* Eliminate memory operations. */
21776 if (MEM_P (src))
21777 src = force_reg (src_mode, src);
21778
21779 if (MEM_P (dest))
21780 {
21781 rtx tmp = gen_reg_rtx (dest_mode);
21782 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21783 rs6000_emit_move (dest, tmp, dest_mode);
21784 return;
21785 }
21786
21787 /* Convert to IEEE 128-bit floating point. */
21788 if (FLOAT128_IEEE_P (dest_mode))
21789 {
21790 if (dest_mode == KFmode)
21791 kf_or_tf = 0;
21792 else if (dest_mode == TFmode)
21793 kf_or_tf = 1;
21794 else
21795 gcc_unreachable ();
21796
21797 switch (src_mode)
21798 {
21799 case E_DFmode:
21800 cvt = sext_optab;
21801 hw_convert = hw_conversions[kf_or_tf].from_df;
21802 break;
21803
21804 case E_SFmode:
21805 cvt = sext_optab;
21806 hw_convert = hw_conversions[kf_or_tf].from_sf;
21807 break;
21808
21809 case E_KFmode:
21810 case E_IFmode:
21811 case E_TFmode:
21812 if (FLOAT128_IBM_P (src_mode))
21813 cvt = sext_optab;
21814 else
21815 do_move = true;
21816 break;
21817
21818 case E_SImode:
21819 if (unsigned_p)
21820 {
21821 cvt = ufloat_optab;
21822 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21823 }
21824 else
21825 {
21826 cvt = sfloat_optab;
21827 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21828 }
21829 break;
21830
21831 case E_DImode:
21832 if (unsigned_p)
21833 {
21834 cvt = ufloat_optab;
21835 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21836 }
21837 else
21838 {
21839 cvt = sfloat_optab;
21840 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21841 }
21842 break;
21843
21844 default:
21845 gcc_unreachable ();
21846 }
21847 }
21848
21849 /* Convert from IEEE 128-bit floating point. */
21850 else if (FLOAT128_IEEE_P (src_mode))
21851 {
21852 if (src_mode == KFmode)
21853 kf_or_tf = 0;
21854 else if (src_mode == TFmode)
21855 kf_or_tf = 1;
21856 else
21857 gcc_unreachable ();
21858
21859 switch (dest_mode)
21860 {
21861 case E_DFmode:
21862 cvt = trunc_optab;
21863 hw_convert = hw_conversions[kf_or_tf].to_df;
21864 break;
21865
21866 case E_SFmode:
21867 cvt = trunc_optab;
21868 hw_convert = hw_conversions[kf_or_tf].to_sf;
21869 break;
21870
21871 case E_KFmode:
21872 case E_IFmode:
21873 case E_TFmode:
21874 if (FLOAT128_IBM_P (dest_mode))
21875 cvt = trunc_optab;
21876 else
21877 do_move = true;
21878 break;
21879
21880 case E_SImode:
21881 if (unsigned_p)
21882 {
21883 cvt = ufix_optab;
21884 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
21885 }
21886 else
21887 {
21888 cvt = sfix_optab;
21889 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
21890 }
21891 break;
21892
21893 case E_DImode:
21894 if (unsigned_p)
21895 {
21896 cvt = ufix_optab;
21897 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
21898 }
21899 else
21900 {
21901 cvt = sfix_optab;
21902 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
21903 }
21904 break;
21905
21906 default:
21907 gcc_unreachable ();
21908 }
21909 }
21910
21911 /* Both IBM format. */
21912 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
21913 do_move = true;
21914
21915 else
21916 gcc_unreachable ();
21917
21918 /* Handle conversion between TFmode/KFmode/IFmode. */
21919 if (do_move)
21920 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
21921
21922 /* Handle conversion if we have hardware support. */
21923 else if (TARGET_FLOAT128_HW && hw_convert)
21924 emit_insn ((hw_convert) (dest, src));
21925
21926 /* Call an external function to do the conversion. */
21927 else if (cvt != unknown_optab)
21928 {
21929 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
21930 gcc_assert (libfunc != NULL_RTX);
21931
21932 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
21933 src, src_mode);
21934
21935 gcc_assert (dest2 != NULL_RTX);
21936 if (!rtx_equal_p (dest, dest2))
21937 emit_move_insn (dest, dest2);
21938 }
21939
21940 else
21941 gcc_unreachable ();
21942
21943 return;
21944 }
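/* User-level view (not part of the build): each conversion becomes one
   hardware insn under -mfloat128-hardware, else a libgcc call; the
   insn/libcall names shown are the usual ones and are assumptions here.  */
#if 0
static double
sketch_narrow (__float128 x)
{
  return (double) x;		/* xscvqpdp, else __trunckfdf2 */
}

static __float128
sketch_widen (double x)
{
  return (__float128) x;	/* xscvdpqp, else __extenddfkf2 */
}
#endif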
21945
21946 \f
21947 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
21948 can be used as that dest register. Return the dest register. */
21949
21950 rtx
21951 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
21952 {
21953 if (op2 == const0_rtx)
21954 return op1;
21955
21956 if (GET_CODE (scratch) == SCRATCH)
21957 scratch = gen_reg_rtx (mode);
21958
21959 if (logical_operand (op2, mode))
21960 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
21961 else
21962 emit_insn (gen_rtx_SET (scratch,
21963 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
21964
21965 return scratch;
21966 }
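/* Illustrative sketch (not part of the build): the equality test above is
   reduced to comparing a scratch against zero, via XOR when op2 fits the
   logical-immediate forms and subtraction otherwise (unsigned arithmetic,
   as on the machine, so there is no overflow concern).  */
#if 0
#include <stdbool.h>

static bool
sketch_eq_via_scratch (unsigned long op1, unsigned long op2,
		       bool logical_imm_ok)
{
  unsigned long scratch = logical_imm_ok ? (op1 ^ op2) : (op1 - op2);
  return scratch == 0;
}
#endif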
21967
21968 void
21969 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
21970 {
21971 rtx condition_rtx;
21972 machine_mode op_mode;
21973 enum rtx_code cond_code;
21974 rtx result = operands[0];
21975
21976 condition_rtx = rs6000_generate_compare (operands[1], mode);
21977 cond_code = GET_CODE (condition_rtx);
21978
21979 if (cond_code == NE
21980 || cond_code == GE || cond_code == LE
21981 || cond_code == GEU || cond_code == LEU
21982 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
21983 {
21984 rtx not_result = gen_reg_rtx (CCEQmode);
21985 rtx not_op, rev_cond_rtx;
21986 machine_mode cc_mode;
21987
21988 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
21989
21990 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
21991 SImode, XEXP (condition_rtx, 0), const0_rtx);
21992 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
21993 emit_insn (gen_rtx_SET (not_result, not_op));
21994 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
21995 }
21996
21997 op_mode = GET_MODE (XEXP (operands[1], 0));
21998 if (op_mode == VOIDmode)
21999 op_mode = GET_MODE (XEXP (operands[1], 1));
22000
22001 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22002 {
22003 PUT_MODE (condition_rtx, DImode);
22004 convert_move (result, condition_rtx, 0);
22005 }
22006 else
22007 {
22008 PUT_MODE (condition_rtx, SImode);
22009 emit_insn (gen_rtx_SET (result, condition_rtx));
22010 }
22011 }
22012
22013 /* Emit a conditional branch; the condition comes from OPERANDS[0] and the target label from OPERANDS[3]. */
22014
22015 void
22016 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22017 {
22018 rtx condition_rtx, loc_ref;
22019
22020 condition_rtx = rs6000_generate_compare (operands[0], mode);
22021 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22022 emit_jump_insn (gen_rtx_SET (pc_rtx,
22023 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22024 loc_ref, pc_rtx)));
22025 }
22026
22027 /* Return the string to output a conditional branch to LABEL, which is
22028 the operand template of the label; LABEL is NULL if the branch is
22029 really a conditional return.
22030
22031 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22032 condition code register and its mode specifies what kind of
22033 comparison we made.
22034
22035 REVERSED is nonzero if we should reverse the sense of the comparison.
22036
22037 INSN is the insn. */
22038
22039 char *
22040 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22041 {
22042 static char string[64];
22043 enum rtx_code code = GET_CODE (op);
22044 rtx cc_reg = XEXP (op, 0);
22045 machine_mode mode = GET_MODE (cc_reg);
22046 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22047 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22048 int really_reversed = reversed ^ need_longbranch;
22049 char *s = string;
22050 const char *ccode;
22051 const char *pred;
22052 rtx note;
22053
22054 validate_condition_mode (code, mode);
22055
22056 /* Work out which way this really branches. We could always use
22057 reverse_condition_maybe_unordered here, but using reverse_condition
22058 where possible makes the resulting assembler clearer. */
22059 if (really_reversed)
22060 {
22061 /* Reversal of FP compares takes care -- an ordered compare
22062 becomes an unordered compare and vice versa. */
22063 if (mode == CCFPmode)
22064 code = reverse_condition_maybe_unordered (code);
22065 else
22066 code = reverse_condition (code);
22067 }
22068
22069 switch (code)
22070 {
22071 /* Not all of these are actually distinct opcodes, but
22072 we distinguish them for clarity of the resulting assembler. */
22073 case NE: case LTGT:
22074 ccode = "ne"; break;
22075 case EQ: case UNEQ:
22076 ccode = "eq"; break;
22077 case GE: case GEU:
22078 ccode = "ge"; break;
22079 case GT: case GTU: case UNGT:
22080 ccode = "gt"; break;
22081 case LE: case LEU:
22082 ccode = "le"; break;
22083 case LT: case LTU: case UNLT:
22084 ccode = "lt"; break;
22085 case UNORDERED: ccode = "un"; break;
22086 case ORDERED: ccode = "nu"; break;
22087 case UNGE: ccode = "nl"; break;
22088 case UNLE: ccode = "ng"; break;
22089 default:
22090 gcc_unreachable ();
22091 }
22092
22093 /* Maybe we have a guess as to how likely the branch is. */
22094 pred = "";
22095 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22096 if (note != NULL_RTX)
22097 {
22098 /* PROB is the difference from 50%. */
22099 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22100 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22101
22102 /* Only hint for highly probable/improbable branches on newer cpus when
22103 we have real profile data, as static prediction overrides processor
22104 dynamic prediction. For older cpus we may as well always hint, but
22105 assume not taken for branches that are very close to 50% as a
22106 mispredicted taken branch is more expensive than a
22107 mispredicted not-taken branch. */
22108 if (rs6000_always_hint
22109 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22110 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22111 && br_prob_note_reliable_p (note)))
22112 {
22113 if (abs (prob) > REG_BR_PROB_BASE / 20
22114 && ((prob > 0) ^ need_longbranch))
22115 pred = "+";
22116 else
22117 pred = "-";
22118 }
22119 }
22120
22121 if (label == NULL)
22122 s += sprintf (s, "b%slr%s ", ccode, pred);
22123 else
22124 s += sprintf (s, "b%s%s ", ccode, pred);
22125
22126 /* We need to escape any '%' characters in the reg_names string.
22127 Assume they'd only be the first character.... */
22128 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22129 *s++ = '%';
22130 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22131
22132 if (label != NULL)
22133 {
22134 /* If the branch distance was too far, we may have to use an
22135 unconditional branch to go the distance. */
22136 if (need_longbranch)
22137 s += sprintf (s, ",$+8\n\tb %s", label);
22138 else
22139 s += sprintf (s, ",%s", label);
22140 }
22141
22142 return string;
22143 }
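
/* A small illustration, using a hypothetical helper that is not part of
   GCC, of how the pieces assembled above combine into the final mnemonic:
   condition code, "+"/"-" prediction hint, CR field and label, giving
   e.g. "beq+ cr0,.L5", or a conditional return such as "bnelr".  */
#include <stdio.h>

static void
sketch_cbranch (char *buf, size_t len, const char *ccode, const char *pred,
                int cr_field, const char *label)
{
  if (label == NULL)
    snprintf (buf, len, "b%slr%s", ccode, pred);   /* conditional return */
  else
    snprintf (buf, len, "b%s%s cr%d,%s", ccode, pred, cr_field, label);
}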
22144
22145 /* Return insn for VSX or Altivec comparisons. */
22146
22147 static rtx
22148 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22149 {
22150 rtx mask;
22151 machine_mode mode = GET_MODE (op0);
22152
22153 switch (code)
22154 {
22155 default:
22156 break;
22157
22158 case GE:
22159 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22160 return NULL_RTX;
22161 /* FALLTHRU */
22162
22163 case EQ:
22164 case GT:
22165 case GTU:
22166 case ORDERED:
22167 case UNORDERED:
22168 case UNEQ:
22169 case LTGT:
22170 mask = gen_reg_rtx (mode);
22171 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22172 return mask;
22173 }
22174
22175 return NULL_RTX;
22176 }
22177
22178 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22179 DMODE is the expected destination mode. This is a recursive function. */
22180
22181 static rtx
22182 rs6000_emit_vector_compare (enum rtx_code rcode,
22183 rtx op0, rtx op1,
22184 machine_mode dmode)
22185 {
22186 rtx mask;
22187 bool swap_operands = false;
22188 bool try_again = false;
22189
22190 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22191 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22192
22193 /* See if the comparison works as is. */
22194 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22195 if (mask)
22196 return mask;
22197
22198 switch (rcode)
22199 {
22200 case LT:
22201 rcode = GT;
22202 swap_operands = true;
22203 try_again = true;
22204 break;
22205 case LTU:
22206 rcode = GTU;
22207 swap_operands = true;
22208 try_again = true;
22209 break;
22210 case NE:
22211 case UNLE:
22212 case UNLT:
22213 case UNGE:
22214 case UNGT:
22215 /* Invert condition and try again.
22216 e.g., A != B becomes ~(A==B). */
22217 {
22218 enum rtx_code rev_code;
22219 enum insn_code nor_code;
22220 rtx mask2;
22221
22222 rev_code = reverse_condition_maybe_unordered (rcode);
22223 if (rev_code == UNKNOWN)
22224 return NULL_RTX;
22225
22226 nor_code = optab_handler (one_cmpl_optab, dmode);
22227 if (nor_code == CODE_FOR_nothing)
22228 return NULL_RTX;
22229
22230 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22231 if (!mask2)
22232 return NULL_RTX;
22233
22234 mask = gen_reg_rtx (dmode);
22235 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22236 return mask;
22237 }
22238 break;
22239 case GE:
22240 case GEU:
22241 case LE:
22242 case LEU:
22243 /* Try GT/GTU/LT/LTU OR EQ */
22244 {
22245 rtx c_rtx, eq_rtx;
22246 enum insn_code ior_code;
22247 enum rtx_code new_code;
22248
22249 switch (rcode)
22250 {
22251 case GE:
22252 new_code = GT;
22253 break;
22254
22255 case GEU:
22256 new_code = GTU;
22257 break;
22258
22259 case LE:
22260 new_code = LT;
22261 break;
22262
22263 case LEU:
22264 new_code = LTU;
22265 break;
22266
22267 default:
22268 gcc_unreachable ();
22269 }
22270
22271 ior_code = optab_handler (ior_optab, dmode);
22272 if (ior_code == CODE_FOR_nothing)
22273 return NULL_RTX;
22274
22275 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22276 if (!c_rtx)
22277 return NULL_RTX;
22278
22279 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22280 if (!eq_rtx)
22281 return NULL_RTX;
22282
22283 mask = gen_reg_rtx (dmode);
22284 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22285 return mask;
22286 }
22287 break;
22288 default:
22289 return NULL_RTX;
22290 }
22291
22292 if (try_again)
22293 {
22294 if (swap_operands)
22295 std::swap (op0, op1);
22296
22297 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22298 if (mask)
22299 return mask;
22300 }
22301
22302 /* You only get two chances. */
22303 return NULL_RTX;
22304 }
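
/* A per-lane scalar model, purely illustrative and not part of GCC, of the
   rewrites used above: each lane of a vector compare yields all-ones (-1)
   or all-zeros (0); LT is GT with swapped operands, NE is the complement
   of EQ (the one_cmpl fallback), and GE is GT ORed with EQ.  */
static int lane_gt (int a, int b) { return a > b ? -1 : 0; }
static int lane_eq (int a, int b) { return a == b ? -1 : 0; }
static int lane_lt (int a, int b) { return lane_gt (b, a); }
static int lane_ne (int a, int b) { return ~lane_eq (a, b); }
static int lane_ge (int a, int b) { return lane_gt (a, b) | lane_eq (a, b); }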
22305
22306 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22307 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22308 operands for the relation operation COND. */
22309
22310 int
22311 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22312 rtx cond, rtx cc_op0, rtx cc_op1)
22313 {
22314 machine_mode dest_mode = GET_MODE (dest);
22315 machine_mode mask_mode = GET_MODE (cc_op0);
22316 enum rtx_code rcode = GET_CODE (cond);
22317 machine_mode cc_mode = CCmode;
22318 rtx mask;
22319 rtx cond2;
22320 bool invert_move = false;
22321
22322 if (VECTOR_UNIT_NONE_P (dest_mode))
22323 return 0;
22324
22325 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22326 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22327
22328 switch (rcode)
22329 {
22330 /* Swap operands if we can; otherwise do the operation as
22331 specified and use a NOR to invert the test. */
22332 case NE:
22333 case UNLE:
22334 case UNLT:
22335 case UNGE:
22336 case UNGT:
22337 /* Invert condition and try again.
22338 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22339 invert_move = true;
22340 rcode = reverse_condition_maybe_unordered (rcode);
22341 if (rcode == UNKNOWN)
22342 return 0;
22343 break;
22344
22345 case GE:
22346 case LE:
22347 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22348 {
22349 /* Invert condition to avoid compound test. */
22350 invert_move = true;
22351 rcode = reverse_condition (rcode);
22352 }
22353 break;
22354
22355 case GTU:
22356 case GEU:
22357 case LTU:
22358 case LEU:
22359 /* Mark unsigned tests with CCUNSmode. */
22360 cc_mode = CCUNSmode;
22361
22362 /* Invert condition to avoid compound test if necessary. */
22363 if (rcode == GEU || rcode == LEU)
22364 {
22365 invert_move = true;
22366 rcode = reverse_condition (rcode);
22367 }
22368 break;
22369
22370 default:
22371 break;
22372 }
22373
22374 /* Get the vector mask for the given relational operations. */
22375 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22376
22377 if (!mask)
22378 return 0;
22379
22380 if (invert_move)
22381 std::swap (op_true, op_false);
22382
22383 /* The mask is known to be -1/0 in each element (as for vec1 == vec2), so optimize arms that are the constants -1 or 0. */
22384 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22385 && (GET_CODE (op_true) == CONST_VECTOR
22386 || GET_CODE (op_false) == CONST_VECTOR))
22387 {
22388 rtx constant_0 = CONST0_RTX (dest_mode);
22389 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22390
22391 if (op_true == constant_m1 && op_false == constant_0)
22392 {
22393 emit_move_insn (dest, mask);
22394 return 1;
22395 }
22396
22397 else if (op_true == constant_0 && op_false == constant_m1)
22398 {
22399 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22400 return 1;
22401 }
22402
22403 /* If we can't use the vector comparison directly, perhaps we can use
22404 the mask for the true or false fields, instead of loading up a
22405 constant. */
22406 if (op_true == constant_m1)
22407 op_true = mask;
22408
22409 if (op_false == constant_0)
22410 op_false = mask;
22411 }
22412
22413 if (!REG_P (op_true) && !SUBREG_P (op_true))
22414 op_true = force_reg (dest_mode, op_true);
22415
22416 if (!REG_P (op_false) && !SUBREG_P (op_false))
22417 op_false = force_reg (dest_mode, op_false);
22418
22419 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22420 CONST0_RTX (dest_mode));
22421 emit_insn (gen_rtx_SET (dest,
22422 gen_rtx_IF_THEN_ELSE (dest_mode,
22423 cond2,
22424 op_true,
22425 op_false)));
22426 return 1;
22427 }
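
/* A scalar model, illustrative only, of the final vector select emitted
   above: with a -1/0 mask per element, the vsel/xxsel-style operation is
   (mask & op_true) | (~mask & op_false), which also explains the constant
   shortcuts: arms of -1/0 reduce to the mask itself or its complement.  */
static int
lane_select (int mask, int op_true, int op_false)
{
  return (mask & op_true) | (~mask & op_false);
}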
22428
22429 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22430 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22431 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22432 hardware has no such operation. */
22433
22434 static int
22435 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22436 {
22437 enum rtx_code code = GET_CODE (op);
22438 rtx op0 = XEXP (op, 0);
22439 rtx op1 = XEXP (op, 1);
22440 machine_mode compare_mode = GET_MODE (op0);
22441 machine_mode result_mode = GET_MODE (dest);
22442 bool max_p = false;
22443
22444 if (result_mode != compare_mode)
22445 return 0;
22446
22447 if (code == GE || code == GT)
22448 max_p = true;
22449 else if (code == LE || code == LT)
22450 max_p = false;
22451 else
22452 return 0;
22453
22454 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22455 ;
22456
22457 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22458 max_p = !max_p;
22459
22460 else
22461 return 0;
22462
22463 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22464 return 1;
22465 }
22466
22467 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22468 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22469 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22470 zero/false. Return 0 if the hardware has no such operation. */
22471
22472 static int
22473 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22474 {
22475 enum rtx_code code = GET_CODE (op);
22476 rtx op0 = XEXP (op, 0);
22477 rtx op1 = XEXP (op, 1);
22478 machine_mode result_mode = GET_MODE (dest);
22479 rtx compare_rtx;
22480 rtx cmove_rtx;
22481 rtx clobber_rtx;
22482
22483 if (!can_create_pseudo_p ())
22484 return 0;
22485
22486 switch (code)
22487 {
22488 case EQ:
22489 case GE:
22490 case GT:
22491 break;
22492
22493 case NE:
22494 case LT:
22495 case LE:
22496 code = swap_condition (code);
22497 std::swap (op0, op1);
22498 break;
22499
22500 default:
22501 return 0;
22502 }
22503
22504 /* Generate: [(parallel [(set (dest)
22505 (if_then_else (op (cmp1) (cmp2))
22506 (true)
22507 (false)))
22508 (clobber (scratch))])]. */
22509
22510 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22511 cmove_rtx = gen_rtx_SET (dest,
22512 gen_rtx_IF_THEN_ELSE (result_mode,
22513 compare_rtx,
22514 true_cond,
22515 false_cond));
22516
22517 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22518 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22519 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22520
22521 return 1;
22522 }
22523
22524 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22525 operands of the last comparison is nonzero/true, FALSE_COND if it
22526 is zero/false. Return 0 if the hardware has no such operation. */
22527
22528 int
22529 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22530 {
22531 enum rtx_code code = GET_CODE (op);
22532 rtx op0 = XEXP (op, 0);
22533 rtx op1 = XEXP (op, 1);
22534 machine_mode compare_mode = GET_MODE (op0);
22535 machine_mode result_mode = GET_MODE (dest);
22536 rtx temp;
22537 bool is_against_zero;
22538
22539 /* These modes should always match. */
22540 if (GET_MODE (op1) != compare_mode
22541 /* In the isel case, however, we can use a compare immediate, so
22542 op1 may be a small constant. */
22543 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22544 return 0;
22545 if (GET_MODE (true_cond) != result_mode)
22546 return 0;
22547 if (GET_MODE (false_cond) != result_mode)
22548 return 0;
22549
22550 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22551 if (TARGET_P9_MINMAX
22552 && (compare_mode == SFmode || compare_mode == DFmode)
22553 && (result_mode == SFmode || result_mode == DFmode))
22554 {
22555 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22556 return 1;
22557
22558 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22559 return 1;
22560 }
22561
22562 /* Don't allow using floating point comparisons for integer results for
22563 now. */
22564 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22565 return 0;
22566
22567 /* First, work out if the hardware can do this at all, or
22568 if it's too slow.... */
22569 if (!FLOAT_MODE_P (compare_mode))
22570 {
22571 if (TARGET_ISEL)
22572 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22573 return 0;
22574 }
22575
22576 is_against_zero = op1 == CONST0_RTX (compare_mode);
22577
22578 /* A floating-point subtract might overflow, underflow, or produce
22579 an inexact result, thus changing the floating-point flags, so it
22580 can't be generated if we care about that. It's safe if one side
22581 of the construct is zero, since then no subtract will be
22582 generated. */
22583 if (SCALAR_FLOAT_MODE_P (compare_mode)
22584 && flag_trapping_math && ! is_against_zero)
22585 return 0;
22586
22587 /* Eliminate half of the comparisons by switching operands; this
22588 makes the remaining code simpler. */
22589 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22590 || code == LTGT || code == LT || code == UNLE)
22591 {
22592 code = reverse_condition_maybe_unordered (code);
22593 temp = true_cond;
22594 true_cond = false_cond;
22595 false_cond = temp;
22596 }
22597
22598 /* UNEQ and LTGT take four instructions for a comparison with zero, so
22599 it'll probably be faster to use a branch here too. */
22600 if (code == UNEQ && HONOR_NANS (compare_mode))
22601 return 0;
22602
22603 /* We're going to try to implement comparisons by performing
22604 a subtract, then comparing against zero. Unfortunately,
22605 Inf - Inf is NaN, which is not zero, so if we don't
22606 know that the operand is finite and the comparison
22607 would treat EQ differently from UNORDERED, we can't do it. */
22608 if (HONOR_INFINITIES (compare_mode)
22609 && code != GT && code != UNGE
22610 && (GET_CODE (op1) != CONST_DOUBLE
22611 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22612 /* Constructs of the form (a OP b ? a : b) are safe. */
22613 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22614 || (! rtx_equal_p (op0, true_cond)
22615 && ! rtx_equal_p (op1, true_cond))))
22616 return 0;
22617
22618 /* At this point we know we can use fsel. */
22619
22620 /* Reduce the comparison to a comparison against zero. */
22621 if (! is_against_zero)
22622 {
22623 temp = gen_reg_rtx (compare_mode);
22624 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22625 op0 = temp;
22626 op1 = CONST0_RTX (compare_mode);
22627 }
22628
22629 /* If we don't care about NaNs we can reduce some of the comparisons
22630 down to faster ones. */
22631 if (! HONOR_NANS (compare_mode))
22632 switch (code)
22633 {
22634 case GT:
22635 code = LE;
22636 temp = true_cond;
22637 true_cond = false_cond;
22638 false_cond = temp;
22639 break;
22640 case UNGE:
22641 code = GE;
22642 break;
22643 case UNEQ:
22644 code = EQ;
22645 break;
22646 default:
22647 break;
22648 }
22649
22650 /* Now, reduce everything down to a GE. */
22651 switch (code)
22652 {
22653 case GE:
22654 break;
22655
22656 case LE:
22657 temp = gen_reg_rtx (compare_mode);
22658 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22659 op0 = temp;
22660 break;
22661
22662 case ORDERED:
22663 temp = gen_reg_rtx (compare_mode);
22664 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22665 op0 = temp;
22666 break;
22667
22668 case EQ:
22669 temp = gen_reg_rtx (compare_mode);
22670 emit_insn (gen_rtx_SET (temp,
22671 gen_rtx_NEG (compare_mode,
22672 gen_rtx_ABS (compare_mode, op0))));
22673 op0 = temp;
22674 break;
22675
22676 case UNGE:
22677 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22678 temp = gen_reg_rtx (result_mode);
22679 emit_insn (gen_rtx_SET (temp,
22680 gen_rtx_IF_THEN_ELSE (result_mode,
22681 gen_rtx_GE (VOIDmode,
22682 op0, op1),
22683 true_cond, false_cond)));
22684 false_cond = true_cond;
22685 true_cond = temp;
22686
22687 temp = gen_reg_rtx (compare_mode);
22688 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22689 op0 = temp;
22690 break;
22691
22692 case GT:
22693 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22694 temp = gen_reg_rtx (result_mode);
22695 emit_insn (gen_rtx_SET (temp,
22696 gen_rtx_IF_THEN_ELSE (result_mode,
22697 gen_rtx_GE (VOIDmode,
22698 op0, op1),
22699 true_cond, false_cond)));
22700 true_cond = false_cond;
22701 false_cond = temp;
22702
22703 temp = gen_reg_rtx (compare_mode);
22704 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22705 op0 = temp;
22706 break;
22707
22708 default:
22709 gcc_unreachable ();
22710 }
22711
22712 emit_insn (gen_rtx_SET (dest,
22713 gen_rtx_IF_THEN_ELSE (result_mode,
22714 gen_rtx_GE (VOIDmode,
22715 op0, op1),
22716 true_cond, false_cond)));
22717 return 1;
22718 }
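
/* A host-side model, assuming NaNs are not honored and using illustrative
   helper names, of the fsel reductions above: fsel computes
   (a >= 0) ? b : c, so after the subtract LE negates the operand, ORDERED
   uses fabs, and EQ uses -fabs, since -fabs (x) >= 0 holds only for
   x == +/-0.  */
#include <math.h>

static double
model_fsel (double a, double b, double c)
{
  return a >= 0.0 ? b : c;
}

static double
model_cmove_eq_zero (double x, double t, double f)
{
  return model_fsel (-fabs (x), t, f);  /* x == 0.0 ? t : f */
}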
22719
22720 /* Same as above, but for ints (isel). */
22721
22722 int
22723 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22724 {
22725 rtx condition_rtx, cr;
22726 machine_mode mode = GET_MODE (dest);
22727 enum rtx_code cond_code;
22728 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22729 bool signedp;
22730
22731 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22732 return 0;
22733
22734 /* We still have to do the compare, because isel doesn't do a
22735 compare; it just looks at the CRx bits set by a previous compare
22736 instruction. */
22737 condition_rtx = rs6000_generate_compare (op, mode);
22738 cond_code = GET_CODE (condition_rtx);
22739 cr = XEXP (condition_rtx, 0);
22740 signedp = GET_MODE (cr) == CCmode;
22741
22742 isel_func = (mode == SImode
22743 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22744 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22745
22746 switch (cond_code)
22747 {
22748 case LT: case GT: case LTU: case GTU: case EQ:
22749 /* isel handles these directly. */
22750 break;
22751
22752 default:
22753 /* We need to swap the sense of the comparison. */
22754 {
22755 std::swap (false_cond, true_cond);
22756 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22757 }
22758 break;
22759 }
22760
22761 false_cond = force_reg (mode, false_cond);
22762 if (true_cond != const0_rtx)
22763 true_cond = force_reg (mode, true_cond);
22764
22765 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22766
22767 return 1;
22768 }
22769
22770 void
22771 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22772 {
22773 machine_mode mode = GET_MODE (op0);
22774 enum rtx_code c;
22775 rtx target;
22776
22777 /* VSX/altivec have direct min/max insns. */
22778 if ((code == SMAX || code == SMIN)
22779 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22780 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22781 {
22782 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22783 return;
22784 }
22785
22786 if (code == SMAX || code == SMIN)
22787 c = GE;
22788 else
22789 c = GEU;
22790
22791 if (code == SMAX || code == UMAX)
22792 target = emit_conditional_move (dest, c, op0, op1, mode,
22793 op0, op1, mode, 0);
22794 else
22795 target = emit_conditional_move (dest, c, op0, op1, mode,
22796 op1, op0, mode, 0);
22797 gcc_assert (target);
22798 if (target != dest)
22799 emit_move_insn (dest, target);
22800 }
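
/* A scalar model, illustrative only, of the fallback above: both SMAX and
   SMIN reduce to one GE-based conditional move, differing only in which
   operand is moved on the true branch.  */
static long
model_minmax (int is_max, long a, long b)
{
  return is_max ? (a >= b ? a : b) : (a >= b ? b : a);
}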
22801
22802 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22803 COND is true. Mark the jump as unlikely to be taken. */
22804
22805 static void
22806 emit_unlikely_jump (rtx cond, rtx label)
22807 {
22808 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22809 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22810 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22811 }
22812
22813 /* A subroutine of the atomic operation splitters. Emit a load-locked
22814 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22815 the zero_extend operation. */
22816
22817 static void
22818 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22819 {
22820 rtx (*fn) (rtx, rtx) = NULL;
22821
22822 switch (mode)
22823 {
22824 case E_QImode:
22825 fn = gen_load_lockedqi;
22826 break;
22827 case E_HImode:
22828 fn = gen_load_lockedhi;
22829 break;
22830 case E_SImode:
22831 if (GET_MODE (mem) == QImode)
22832 fn = gen_load_lockedqi_si;
22833 else if (GET_MODE (mem) == HImode)
22834 fn = gen_load_lockedhi_si;
22835 else
22836 fn = gen_load_lockedsi;
22837 break;
22838 case E_DImode:
22839 fn = gen_load_lockeddi;
22840 break;
22841 case E_TImode:
22842 fn = gen_load_lockedti;
22843 break;
22844 default:
22845 gcc_unreachable ();
22846 }
22847 emit_insn (fn (reg, mem));
22848 }
22849
22850 /* A subroutine of the atomic operation splitters. Emit a store-conditional
22851 instruction in MODE. */
22852
22853 static void
22854 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
22855 {
22856 rtx (*fn) (rtx, rtx, rtx) = NULL;
22857
22858 switch (mode)
22859 {
22860 case E_QImode:
22861 fn = gen_store_conditionalqi;
22862 break;
22863 case E_HImode:
22864 fn = gen_store_conditionalhi;
22865 break;
22866 case E_SImode:
22867 fn = gen_store_conditionalsi;
22868 break;
22869 case E_DImode:
22870 fn = gen_store_conditionaldi;
22871 break;
22872 case E_TImode:
22873 fn = gen_store_conditionalti;
22874 break;
22875 default:
22876 gcc_unreachable ();
22877 }
22878
22879 /* Emit sync before stwcx. to address PPC405 Erratum 77. */
22880 if (PPC405_ERRATUM77)
22881 emit_insn (gen_hwsync ());
22882
22883 emit_insn (fn (res, mem, val));
22884 }
22885
22886 /* Expand barriers before and after a load_locked/store_cond sequence. */
22887
22888 static rtx
22889 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
22890 {
22891 rtx addr = XEXP (mem, 0);
22892
22893 if (!legitimate_indirect_address_p (addr, reload_completed)
22894 && !legitimate_indexed_address_p (addr, reload_completed))
22895 {
22896 addr = force_reg (Pmode, addr);
22897 mem = replace_equiv_address_nv (mem, addr);
22898 }
22899
22900 switch (model)
22901 {
22902 case MEMMODEL_RELAXED:
22903 case MEMMODEL_CONSUME:
22904 case MEMMODEL_ACQUIRE:
22905 break;
22906 case MEMMODEL_RELEASE:
22907 case MEMMODEL_ACQ_REL:
22908 emit_insn (gen_lwsync ());
22909 break;
22910 case MEMMODEL_SEQ_CST:
22911 emit_insn (gen_hwsync ());
22912 break;
22913 default:
22914 gcc_unreachable ();
22915 }
22916 return mem;
22917 }
22918
22919 static void
22920 rs6000_post_atomic_barrier (enum memmodel model)
22921 {
22922 switch (model)
22923 {
22924 case MEMMODEL_RELAXED:
22925 case MEMMODEL_CONSUME:
22926 case MEMMODEL_RELEASE:
22927 break;
22928 case MEMMODEL_ACQUIRE:
22929 case MEMMODEL_ACQ_REL:
22930 case MEMMODEL_SEQ_CST:
22931 emit_insn (gen_isync ());
22932 break;
22933 default:
22934 gcc_unreachable ();
22935 }
22936 }
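
/* A table-style sketch with a hypothetical enum, not GCC code, of the
   barrier placement the two helpers above implement (relaxed and consume
   take no barrier on either side).  */
enum model_memmodel { M_RELAXED, M_CONSUME, M_ACQUIRE, M_RELEASE,
                      M_ACQ_REL, M_SEQ_CST };

static const char *
model_pre_barrier (enum model_memmodel m)
{
  if (m == M_SEQ_CST)
    return "hwsync";
  if (m == M_RELEASE || m == M_ACQ_REL)
    return "lwsync";
  return "";                    /* relaxed/consume/acquire: nothing */
}

static const char *
model_post_barrier (enum model_memmodel m)
{
  if (m == M_ACQUIRE || m == M_ACQ_REL || m == M_SEQ_CST)
    return "isync";
  return "";                    /* relaxed/consume/release: nothing */
}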
22937
22938 /* A subroutine of the various atomic expanders. For sub-word operations,
22939 we must adjust things to operate on SImode. Given the original MEM,
22940 return a new aligned memory. Also build and return the quantities by
22941 which to shift and mask. */
22942
22943 static rtx
22944 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
22945 {
22946 rtx addr, align, shift, mask, mem;
22947 HOST_WIDE_INT shift_mask;
22948 machine_mode mode = GET_MODE (orig_mem);
22949
22950 /* For smaller modes, we have to implement this via SImode. */
22951 shift_mask = (mode == QImode ? 0x18 : 0x10);
22952
22953 addr = XEXP (orig_mem, 0);
22954 addr = force_reg (GET_MODE (addr), addr);
22955
22956 /* Aligned memory containing subword. Generate a new memory. We
22957 do not want any of the existing MEM_ATTR data, as we're now
22958 accessing memory outside the original object. */
22959 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
22960 NULL_RTX, 1, OPTAB_LIB_WIDEN);
22961 mem = gen_rtx_MEM (SImode, align);
22962 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
22963 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
22964 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
22965
22966 /* Shift amount for subword relative to aligned word. */
22967 shift = gen_reg_rtx (SImode);
22968 addr = gen_lowpart (SImode, addr);
22969 rtx tmp = gen_reg_rtx (SImode);
22970 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
22971 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
22972 if (BYTES_BIG_ENDIAN)
22973 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
22974 shift, 1, OPTAB_LIB_WIDEN);
22975 *pshift = shift;
22976
22977 /* Mask for insertion. */
22978 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
22979 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
22980 *pmask = mask;
22981
22982 return mem;
22983 }
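
/* A plain-C model, illustrative only, of the address arithmetic above for
   a byte-wide access: the containing word is at ADDR & -4, the byte sits
   (ADDR & 3) * 8 bits into it (XORed with 24 on big-endian targets, where
   byte 0 is the most significant), and the insertion mask is 0xff shifted
   into place.  */
#include <stdint.h>

static void
model_subword_shift_mask (uintptr_t addr, int big_endian,
                          uintptr_t *aligned, unsigned *shift, uint32_t *mask)
{
  *aligned = addr & ~(uintptr_t) 3;
  *shift = (addr & 3) * 8;      /* same as (addr << 3) & 0x18 for QImode */
  if (big_endian)
    *shift ^= 24;               /* shift_mask == 0x18 == 24 for QImode */
  *mask = (uint32_t) 0xff << *shift;
}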
22984
22985 /* A subroutine of the various atomic expanders. For sub-word operands,
22986 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
22987
22988 static rtx
22989 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
22990 {
22991 rtx x;
22992
22993 x = gen_reg_rtx (SImode);
22994 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
22995 gen_rtx_NOT (SImode, mask),
22996 oldval)));
22997
22998 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
22999
23000 return x;
23001 }
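
/* A scalar model, illustrative only, of the merge above: keep the bits of
   OLDVAL outside MASK and insert the already-shifted NEWVAL, which is
   assumed to have no bits set outside MASK.  */
static unsigned int
model_mask_merge (unsigned int oldval, unsigned int newval, unsigned int mask)
{
  return (oldval & ~mask) | newval;
}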
23002
23003 /* A subroutine of the various atomic expanders. For sub-word operands,
23004 extract WIDE to NARROW via SHIFT. */
23005
23006 static void
23007 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23008 {
23009 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23010 wide, 1, OPTAB_LIB_WIDEN);
23011 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23012 }
23013
23014 /* Expand an atomic compare and swap operation. */
23015
23016 void
23017 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23018 {
23019 rtx boolval, retval, mem, oldval, newval, cond;
23020 rtx label1, label2, x, mask, shift;
23021 machine_mode mode, orig_mode;
23022 enum memmodel mod_s, mod_f;
23023 bool is_weak;
23024
23025 boolval = operands[0];
23026 retval = operands[1];
23027 mem = operands[2];
23028 oldval = operands[3];
23029 newval = operands[4];
23030 is_weak = (INTVAL (operands[5]) != 0);
23031 mod_s = memmodel_base (INTVAL (operands[6]));
23032 mod_f = memmodel_base (INTVAL (operands[7]));
23033 orig_mode = mode = GET_MODE (mem);
23034
23035 mask = shift = NULL_RTX;
23036 if (mode == QImode || mode == HImode)
23037 {
23038 /* Before power8, we didn't have access to lbarx/lharx, so generate
23039 lwarx and shift/mask operations. With power8, we need to do the
23040 comparison in SImode, but the store is still done in QI/HImode. */
23041 oldval = convert_modes (SImode, mode, oldval, 1);
23042
23043 if (!TARGET_SYNC_HI_QI)
23044 {
23045 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23046
23047 /* Shift and mask OLDVAL into position within the word. */
23048 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23049 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23050
23051 /* Shift and mask NEWVAL into position within the word. */
23052 newval = convert_modes (SImode, mode, newval, 1);
23053 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23054 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23055 }
23056
23057 /* Prepare to adjust the return value. */
23058 retval = gen_reg_rtx (SImode);
23059 mode = SImode;
23060 }
23061 else if (reg_overlap_mentioned_p (retval, oldval))
23062 oldval = copy_to_reg (oldval);
23063
23064 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23065 oldval = copy_to_mode_reg (mode, oldval);
23066
23067 if (reg_overlap_mentioned_p (retval, newval))
23068 newval = copy_to_reg (newval);
23069
23070 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23071
23072 label1 = NULL_RTX;
23073 if (!is_weak)
23074 {
23075 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23076 emit_label (XEXP (label1, 0));
23077 }
23078 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23079
23080 emit_load_locked (mode, retval, mem);
23081
23082 x = retval;
23083 if (mask)
23084 x = expand_simple_binop (SImode, AND, retval, mask,
23085 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23086
23087 cond = gen_reg_rtx (CCmode);
23088 /* If we have TImode, synthesize a comparison. */
23089 if (mode != TImode)
23090 x = gen_rtx_COMPARE (CCmode, x, oldval);
23091 else
23092 {
23093 rtx xor1_result = gen_reg_rtx (DImode);
23094 rtx xor2_result = gen_reg_rtx (DImode);
23095 rtx or_result = gen_reg_rtx (DImode);
23096 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23097 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23098 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23099 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23100
23101 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23102 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23103 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23104 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23105 }
23106
23107 emit_insn (gen_rtx_SET (cond, x));
23108
23109 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23110 emit_unlikely_jump (x, label2);
23111
23112 x = newval;
23113 if (mask)
23114 x = rs6000_mask_atomic_subword (retval, newval, mask);
23115
23116 emit_store_conditional (orig_mode, cond, mem, x);
23117
23118 if (!is_weak)
23119 {
23120 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23121 emit_unlikely_jump (x, label1);
23122 }
23123
23124 if (!is_mm_relaxed (mod_f))
23125 emit_label (XEXP (label2, 0));
23126
23127 rs6000_post_atomic_barrier (mod_s);
23128
23129 if (is_mm_relaxed (mod_f))
23130 emit_label (XEXP (label2, 0));
23131
23132 if (shift)
23133 rs6000_finish_atomic_subword (operands[1], retval, shift);
23134 else if (mode != GET_MODE (operands[1]))
23135 convert_move (operands[1], retval, 1);
23136
23137 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23138 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23139 emit_insn (gen_rtx_SET (boolval, x));
23140 }
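
/* A control-flow sketch of the strong compare-and-swap loop expanded above.
   The load-locked/store-conditional pair is simulated with hypothetical
   helpers; real lwarx/stwcx. semantics involve a hardware reservation.  */
static int model_reservation;

static unsigned int
model_load_locked (unsigned int *mem)
{
  model_reservation = 1;                /* lwarx sets the reservation */
  return *mem;
}

static int
model_store_conditional (unsigned int *mem, unsigned int val)
{
  if (!model_reservation)
    return 0;                           /* stwcx. failed; CR0 shows NE */
  *mem = val;
  model_reservation = 0;
  return 1;
}

static int
model_cas_strong (unsigned int *mem, unsigned int oldval, unsigned int newval,
                  unsigned int *retval)
{
  do
    {
      *retval = model_load_locked (mem);
      if (*retval != oldval)
        return 0;                       /* the unlikely jump to label2 */
    }
  while (!model_store_conditional (mem, newval));  /* retry at label1 */
  return 1;                             /* CR0 would show EQ on success */
}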
23141
23142 /* Expand an atomic exchange operation. */
23143
23144 void
23145 rs6000_expand_atomic_exchange (rtx operands[])
23146 {
23147 rtx retval, mem, val, cond;
23148 machine_mode mode;
23149 enum memmodel model;
23150 rtx label, x, mask, shift;
23151
23152 retval = operands[0];
23153 mem = operands[1];
23154 val = operands[2];
23155 model = memmodel_base (INTVAL (operands[3]));
23156 mode = GET_MODE (mem);
23157
23158 mask = shift = NULL_RTX;
23159 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23160 {
23161 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23162
23163 /* Shift and mask VAL into position within the word. */
23164 val = convert_modes (SImode, mode, val, 1);
23165 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23166 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23167
23168 /* Prepare to adjust the return value. */
23169 retval = gen_reg_rtx (SImode);
23170 mode = SImode;
23171 }
23172
23173 mem = rs6000_pre_atomic_barrier (mem, model);
23174
23175 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23176 emit_label (XEXP (label, 0));
23177
23178 emit_load_locked (mode, retval, mem);
23179
23180 x = val;
23181 if (mask)
23182 x = rs6000_mask_atomic_subword (retval, val, mask);
23183
23184 cond = gen_reg_rtx (CCmode);
23185 emit_store_conditional (mode, cond, mem, x);
23186
23187 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23188 emit_unlikely_jump (x, label);
23189
23190 rs6000_post_atomic_barrier (model);
23191
23192 if (shift)
23193 rs6000_finish_atomic_subword (operands[0], retval, shift);
23194 }
23195
23196 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23197 to perform. MEM is the memory on which to operate. VAL is the second
23198 operand of the binary operator. BEFORE and AFTER are optional locations to
23199 return the value of MEM either before or after the operation. MODEL_RTX
23200 is a CONST_INT containing the memory model to use. */
23201
23202 void
23203 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23204 rtx orig_before, rtx orig_after, rtx model_rtx)
23205 {
23206 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23207 machine_mode mode = GET_MODE (mem);
23208 machine_mode store_mode = mode;
23209 rtx label, x, cond, mask, shift;
23210 rtx before = orig_before, after = orig_after;
23211
23212 mask = shift = NULL_RTX;
23213 /* On power8, we want to use SImode for the operation. On previous systems,
23214 do the operation on a full SImode word and shift/mask to get the proper byte or
23215 halfword. */
23216 if (mode == QImode || mode == HImode)
23217 {
23218 if (TARGET_SYNC_HI_QI)
23219 {
23220 val = convert_modes (SImode, mode, val, 1);
23221
23222 /* Prepare to adjust the return value. */
23223 before = gen_reg_rtx (SImode);
23224 if (after)
23225 after = gen_reg_rtx (SImode);
23226 mode = SImode;
23227 }
23228 else
23229 {
23230 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23231
23232 /* Shift and mask VAL into position within the word. */
23233 val = convert_modes (SImode, mode, val, 1);
23234 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23235 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23236
23237 switch (code)
23238 {
23239 case IOR:
23240 case XOR:
23241 /* We've already zero-extended VAL. That is sufficient to
23242 make certain that it does not affect other bits. */
23243 mask = NULL;
23244 break;
23245
23246 case AND:
23247 /* If we make certain that all of the other bits in VAL are
23248 set, that will be sufficient to not affect other bits. */
23249 x = gen_rtx_NOT (SImode, mask);
23250 x = gen_rtx_IOR (SImode, x, val);
23251 emit_insn (gen_rtx_SET (val, x));
23252 mask = NULL;
23253 break;
23254
23255 case NOT:
23256 case PLUS:
23257 case MINUS:
23258 /* These will all affect bits outside the field and need
23259 adjustment via MASK within the loop. */
23260 break;
23261
23262 default:
23263 gcc_unreachable ();
23264 }
23265
23266 /* Prepare to adjust the return value. */
23267 before = gen_reg_rtx (SImode);
23268 if (after)
23269 after = gen_reg_rtx (SImode);
23270 store_mode = mode = SImode;
23271 }
23272 }
23273
23274 mem = rs6000_pre_atomic_barrier (mem, model);
23275
23276 label = gen_label_rtx ();
23277 emit_label (label);
23278 label = gen_rtx_LABEL_REF (VOIDmode, label);
23279
23280 if (before == NULL_RTX)
23281 before = gen_reg_rtx (mode);
23282
23283 emit_load_locked (mode, before, mem);
23284
23285 if (code == NOT)
23286 {
23287 x = expand_simple_binop (mode, AND, before, val,
23288 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23289 after = expand_simple_unop (mode, NOT, x, after, 1);
23290 }
23291 else
23292 {
23293 after = expand_simple_binop (mode, code, before, val,
23294 after, 1, OPTAB_LIB_WIDEN);
23295 }
23296
23297 x = after;
23298 if (mask)
23299 {
23300 x = expand_simple_binop (SImode, AND, after, mask,
23301 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23302 x = rs6000_mask_atomic_subword (before, x, mask);
23303 }
23304 else if (store_mode != mode)
23305 x = convert_modes (store_mode, mode, x, 1);
23306
23307 cond = gen_reg_rtx (CCmode);
23308 emit_store_conditional (store_mode, cond, mem, x);
23309
23310 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23311 emit_unlikely_jump (x, label);
23312
23313 rs6000_post_atomic_barrier (model);
23314
23315 if (shift)
23316 {
23317 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23318 then do the calculations in an SImode register. */
23319 if (orig_before)
23320 rs6000_finish_atomic_subword (orig_before, before, shift);
23321 if (orig_after)
23322 rs6000_finish_atomic_subword (orig_after, after, shift);
23323 }
23324 else if (store_mode != mode)
23325 {
23326 /* QImode/HImode on machines with lbarx/lharx where we do the native
23327 operation and then do the calculations in an SImode register. */
23328 if (orig_before)
23329 convert_move (orig_before, before, 1);
23330 if (orig_after)
23331 convert_move (orig_after, after, 1);
23332 }
23333 else if (orig_after && after != orig_after)
23334 emit_move_insn (orig_after, after);
23335 }
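
/* Scalar models, illustrative only, of two details above: for a subword
   AND, setting every bit of VAL outside MASK lets the full-word AND leave
   neighboring bytes untouched, and for CODE == NOT the loop computes a
   NAND, matching __atomic_fetch_nand semantics.  */
static unsigned int
model_subword_and_prep (unsigned int val, unsigned int mask)
{
  return val | ~mask;           /* bits outside MASK now have no effect */
}

static unsigned int
model_fetch_nand_step (unsigned int before, unsigned int val)
{
  return ~(before & val);       /* the AFTER value for the NOT case */
}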
23336
23337 /* Emit instructions to move SRC to DST. Called by splitters for
23338 multi-register moves. It will emit at most one instruction for
23339 each register that is accessed; that is, it won't emit li/lis pairs
23340 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23341 register. */
23342
23343 void
23344 rs6000_split_multireg_move (rtx dst, rtx src)
23345 {
23346 /* The register number of the first register being moved. */
23347 int reg;
23348 /* The mode that is to be moved. */
23349 machine_mode mode;
23350 /* The mode that the move is being done in, and its size. */
23351 machine_mode reg_mode;
23352 int reg_mode_size;
23353 /* The number of registers that will be moved. */
23354 int nregs;
23355
23356 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23357 mode = GET_MODE (dst);
23358 nregs = hard_regno_nregs (reg, mode);
23359 if (FP_REGNO_P (reg))
23360 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23361 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23362 else if (ALTIVEC_REGNO_P (reg))
23363 reg_mode = V16QImode;
23364 else
23365 reg_mode = word_mode;
23366 reg_mode_size = GET_MODE_SIZE (reg_mode);
23367
23368 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23369
23370 /* TDmode residing in FP registers is special, since the ISA requires that
23371 the lower-numbered word of a register pair is always the most significant
23372 word, even in little-endian mode. This does not match the usual subreg
23373 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23374 the appropriate constituent registers "by hand" in little-endian mode.
23375
23376 Note we do not need to check for destructive overlap here since TDmode
23377 can only reside in even/odd register pairs. */
23378 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23379 {
23380 rtx p_src, p_dst;
23381 int i;
23382
23383 for (i = 0; i < nregs; i++)
23384 {
23385 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23386 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23387 else
23388 p_src = simplify_gen_subreg (reg_mode, src, mode,
23389 i * reg_mode_size);
23390
23391 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23392 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23393 else
23394 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23395 i * reg_mode_size);
23396
23397 emit_insn (gen_rtx_SET (p_dst, p_src));
23398 }
23399
23400 return;
23401 }
23402
23403 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23404 {
23405 /* Move register range backwards, if we might have destructive
23406 overlap. */
23407 int i;
23408 for (i = nregs - 1; i >= 0; i--)
23409 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23410 i * reg_mode_size),
23411 simplify_gen_subreg (reg_mode, src, mode,
23412 i * reg_mode_size)));
23413 }
23414 else
23415 {
23416 int i;
23417 int j = -1;
23418 bool used_update = false;
23419 rtx restore_basereg = NULL_RTX;
23420
23421 if (MEM_P (src) && INT_REGNO_P (reg))
23422 {
23423 rtx breg;
23424
23425 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23426 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23427 {
23428 rtx delta_rtx;
23429 breg = XEXP (XEXP (src, 0), 0);
23430 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23431 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23432 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23433 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23434 src = replace_equiv_address (src, breg);
23435 }
23436 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23437 {
23438 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23439 {
23440 rtx basereg = XEXP (XEXP (src, 0), 0);
23441 if (TARGET_UPDATE)
23442 {
23443 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23444 emit_insn (gen_rtx_SET (ndst,
23445 gen_rtx_MEM (reg_mode,
23446 XEXP (src, 0))));
23447 used_update = true;
23448 }
23449 else
23450 emit_insn (gen_rtx_SET (basereg,
23451 XEXP (XEXP (src, 0), 1)));
23452 src = replace_equiv_address (src, basereg);
23453 }
23454 else
23455 {
23456 rtx basereg = gen_rtx_REG (Pmode, reg);
23457 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23458 src = replace_equiv_address (src, basereg);
23459 }
23460 }
23461
23462 breg = XEXP (src, 0);
23463 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23464 breg = XEXP (breg, 0);
23465
23466 /* If the base register we are using to address memory is
23467 also a destination reg, then change that register last. */
23468 if (REG_P (breg)
23469 && REGNO (breg) >= REGNO (dst)
23470 && REGNO (breg) < REGNO (dst) + nregs)
23471 j = REGNO (breg) - REGNO (dst);
23472 }
23473 else if (MEM_P (dst) && INT_REGNO_P (reg))
23474 {
23475 rtx breg;
23476
23477 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23478 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23479 {
23480 rtx delta_rtx;
23481 breg = XEXP (XEXP (dst, 0), 0);
23482 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23483 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23484 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23485
23486 /* We have to update the breg before doing the store.
23487 Use store with update, if available. */
23488
23489 if (TARGET_UPDATE)
23490 {
23491 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23492 emit_insn (TARGET_32BIT
23493 ? (TARGET_POWERPC64
23494 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23495 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23496 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23497 used_update = true;
23498 }
23499 else
23500 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23501 dst = replace_equiv_address (dst, breg);
23502 }
23503 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23504 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23505 {
23506 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23507 {
23508 rtx basereg = XEXP (XEXP (dst, 0), 0);
23509 if (TARGET_UPDATE)
23510 {
23511 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23512 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23513 XEXP (dst, 0)),
23514 nsrc));
23515 used_update = true;
23516 }
23517 else
23518 emit_insn (gen_rtx_SET (basereg,
23519 XEXP (XEXP (dst, 0), 1)));
23520 dst = replace_equiv_address (dst, basereg);
23521 }
23522 else
23523 {
23524 rtx basereg = XEXP (XEXP (dst, 0), 0);
23525 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23526 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23527 && REG_P (basereg)
23528 && REG_P (offsetreg)
23529 && REGNO (basereg) != REGNO (offsetreg));
23530 if (REGNO (basereg) == 0)
23531 {
23532 rtx tmp = offsetreg;
23533 offsetreg = basereg;
23534 basereg = tmp;
23535 }
23536 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23537 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23538 dst = replace_equiv_address (dst, basereg);
23539 }
23540 }
23541 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23542 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23543 }
23544
23545 for (i = 0; i < nregs; i++)
23546 {
23547 /* Calculate index to next subword. */
23548 ++j;
23549 if (j == nregs)
23550 j = 0;
23551
23552 /* If the compiler already emitted the move of the first word
23553 by store with update, there is no need to do anything. */
23554 if (j == 0 && used_update)
23555 continue;
23556
23557 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23558 j * reg_mode_size),
23559 simplify_gen_subreg (reg_mode, src, mode,
23560 j * reg_mode_size)));
23561 }
23562 if (restore_basereg != NULL_RTX)
23563 emit_insn (restore_basereg);
23564 }
23565 }
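
/* A plain-C analogue, illustrative only, of the ordering rule above: when
   the source and destination register ranges overlap and the source starts
   at a lower register number, copy from the highest subword down so no
   input is clobbered before it is read (the same reasoning that picks a
   copy direction in memmove).  */
static void
model_split_move (long *dst, const long *src, int nregs)
{
  int i;
  if (src < dst)                /* possible destructive overlap */
    for (i = nregs - 1; i >= 0; i--)
      dst[i] = src[i];
  else
    for (i = 0; i < nregs; i++)
      dst[i] = src[i];
}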
23566
23567 \f
23568 /* This page contains routines that are used to determine what the
23569 function prologue and epilogue code will do and write them out. */
23570
23571 /* Determine whether register REG really needs to be saved. */
23572
23573 static bool
23574 save_reg_p (int reg)
23575 {
23576 /* We need to mark the PIC offset register live for the same conditions
23577 as it is set up, or otherwise it won't be saved before we clobber it. */
23578
23579 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23580 {
23581 /* When calling eh_return, we must return true for all the cases
23582 where conditional_register_usage marks the PIC offset reg
23583 call used. */
23584 if (TARGET_TOC && TARGET_MINIMAL_TOC
23585 && (crtl->calls_eh_return
23586 || df_regs_ever_live_p (reg)
23587 || !constant_pool_empty_p ()))
23588 return true;
23589
23590 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23591 && flag_pic)
23592 return true;
23593 }
23594
23595 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23596 }
23597
23598 /* Return the first fixed-point register that is required to be
23599 saved. 32 if none. */
23600
23601 int
23602 first_reg_to_save (void)
23603 {
23604 int first_reg;
23605
23606 /* Find lowest numbered live register. */
23607 for (first_reg = 13; first_reg <= 31; first_reg++)
23608 if (save_reg_p (first_reg))
23609 break;
23610
23611 #if TARGET_MACHO
23612 if (flag_pic
23613 && crtl->uses_pic_offset_table
23614 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
23615 return RS6000_PIC_OFFSET_TABLE_REGNUM;
23616 #endif
23617
23618 return first_reg;
23619 }
23620
23621 /* Similar, for FP regs. */
23622
23623 int
23624 first_fp_reg_to_save (void)
23625 {
23626 int first_reg;
23627
23628 /* Find lowest numbered live register. */
23629 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23630 if (save_reg_p (first_reg))
23631 break;
23632
23633 return first_reg;
23634 }
23635
23636 /* Similar, for AltiVec regs. */
23637
23638 static int
23639 first_altivec_reg_to_save (void)
23640 {
23641 int i;
23642
23643 /* Stack frame remains as is unless we are in the AltiVec ABI. */
23644 if (! TARGET_ALTIVEC_ABI)
23645 return LAST_ALTIVEC_REGNO + 1;
23646
23647 /* On Darwin, the unwind routines are compiled without
23648 TARGET_ALTIVEC, and use save_world to save/restore the
23649 altivec registers when necessary. */
23650 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23651 && ! TARGET_ALTIVEC)
23652 return FIRST_ALTIVEC_REGNO + 20;
23653
23654 /* Find lowest numbered live register. */
23655 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23656 if (save_reg_p (i))
23657 break;
23658
23659 return i;
23660 }
23661
23662 /* Return a 32-bit mask of the AltiVec registers we need to set in
23663 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
23664 numbered from the MSB, so bit 0 is the MSB of the 32-bit word. */
23665
23666 static unsigned int
23667 compute_vrsave_mask (void)
23668 {
23669 unsigned int i, mask = 0;
23670
23671 /* On Darwin, the unwind routines are compiled without
23672 TARGET_ALTIVEC, and use save_world to save/restore the
23673 call-saved altivec registers when necessary. */
23674 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23675 && ! TARGET_ALTIVEC)
23676 mask |= 0xFFF;
23677
23678 /* First, find out if we use _any_ altivec registers. */
23679 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23680 if (df_regs_ever_live_p (i))
23681 mask |= ALTIVEC_REG_BIT (i);
23682
23683 if (mask == 0)
23684 return mask;
23685
23686 /* Next, remove the argument registers from the set. These must
23687 be in the VRSAVE mask set by the caller, so we don't need to add
23688 them in again. More importantly, the mask we compute here is
23689 used to generate CLOBBERs in the set_vrsave insn, and we do not
23690 wish the argument registers to die. */
23691 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23692 mask &= ~ALTIVEC_REG_BIT (i);
23693
23694 /* Similarly, remove the return value from the set. */
23695 {
23696 bool yes = false;
23697 diddle_return_value (is_altivec_return_reg, &yes);
23698 if (yes)
23699 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23700 }
23701
23702 return mask;
23703 }
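
/* A sketch, assuming the bit layout described above (V0 is the MSB), of
   how a vector register number maps to its VRSAVE bit; the helper name is
   hypothetical.  */
static unsigned int
model_vrsave_bit (int vn)
{
  return 0x80000000u >> vn;     /* Vn maps to bit n counted from the MSB */
}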
23704
23705 /* For a very restricted set of circumstances, we can cut down the
23706 size of prologues/epilogues by calling our own save/restore-the-world
23707 routines. */
23708
23709 static void
23710 compute_save_world_info (rs6000_stack_t *info)
23711 {
23712 info->world_save_p = 1;
23713 info->world_save_p
23714 = (WORLD_SAVE_P (info)
23715 && DEFAULT_ABI == ABI_DARWIN
23716 && !cfun->has_nonlocal_label
23717 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23718 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23719 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23720 && info->cr_save_p);
23721
23722 /* This will not work in conjunction with sibcalls. Make sure there
23723 are none. (This check is expensive, but seldom executed.) */
23724 if (WORLD_SAVE_P (info))
23725 {
23726 rtx_insn *insn;
23727 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23728 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23729 {
23730 info->world_save_p = 0;
23731 break;
23732 }
23733 }
23734
23735 if (WORLD_SAVE_P (info))
23736 {
23737 /* Even if we're not touching VRsave, make sure there's room on the
23738 stack for it, if it looks like we're calling SAVE_WORLD, which
23739 will attempt to save it. */
23740 info->vrsave_size = 4;
23741
23742 /* If we are going to save the world, we need to save the link register too. */
23743 info->lr_save_p = 1;
23744
23745 /* "Save" the VRsave register too if we're saving the world. */
23746 if (info->vrsave_mask == 0)
23747 info->vrsave_mask = compute_vrsave_mask ();
23748
23749 /* Because the Darwin register save/restore routines only handle
23750 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23751 check. */
23752 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23753 && (info->first_altivec_reg_save
23754 >= FIRST_SAVED_ALTIVEC_REGNO));
23755 }
23756
23757 return;
23758 }
23759
23760
23761 static void
23762 is_altivec_return_reg (rtx reg, void *xyes)
23763 {
23764 bool *yes = (bool *) xyes;
23765 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23766 *yes = true;
23767 }
23768
23769 \f
23770 /* Return whether REG is a global user reg or has been specified by
23771 -ffixed-REG. We should not restore these, and so cannot use
23772 lmw or out-of-line restore functions if there are any. We also
23773 can't save them (well, emit frame notes for them), because frame
23774 unwinding during exception handling will restore saved registers. */
23775
23776 static bool
23777 fixed_reg_p (int reg)
23778 {
23779 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23780 backend sets it, overriding anything the user might have given. */
23781 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23782 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23783 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23784 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23785 return false;
23786
23787 return fixed_regs[reg];
23788 }
23789
23790 /* Determine the strategy for saving/restoring registers. */
23791
23792 enum {
23793 SAVE_MULTIPLE = 0x1,
23794 SAVE_INLINE_GPRS = 0x2,
23795 SAVE_INLINE_FPRS = 0x4,
23796 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23797 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23798 SAVE_INLINE_VRS = 0x20,
23799 REST_MULTIPLE = 0x100,
23800 REST_INLINE_GPRS = 0x200,
23801 REST_INLINE_FPRS = 0x400,
23802 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23803 REST_INLINE_VRS = 0x1000
23804 };
23805
23806 static int
23807 rs6000_savres_strategy (rs6000_stack_t *info,
23808 bool using_static_chain_p)
23809 {
23810 int strategy = 0;
23811
23812 /* Select between in-line and out-of-line save and restore of regs.
23813 First, all the obvious cases where we don't use out-of-line. */
23814 if (crtl->calls_eh_return
23815 || cfun->machine->ra_need_lr)
23816 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23817 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23818 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23819
23820 if (info->first_gp_reg_save == 32)
23821 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23822
23823 if (info->first_fp_reg_save == 64)
23824 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23825
23826 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23827 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23828
23829 /* Define cutoff for using out-of-line functions to save registers. */
23830 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
23831 {
23832 if (!optimize_size)
23833 {
23834 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23835 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23836 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23837 }
23838 else
23839 {
23840 /* Prefer out-of-line restore if it will exit. */
23841 if (info->first_fp_reg_save > 61)
23842 strategy |= SAVE_INLINE_FPRS;
23843 if (info->first_gp_reg_save > 29)
23844 {
23845 if (info->first_fp_reg_save == 64)
23846 strategy |= SAVE_INLINE_GPRS;
23847 else
23848 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23849 }
23850 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
23851 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23852 }
23853 }
23854 else if (DEFAULT_ABI == ABI_DARWIN)
23855 {
23856 if (info->first_fp_reg_save > 60)
23857 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23858 if (info->first_gp_reg_save > 29)
23859 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23860 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23861 }
23862 else
23863 {
23864 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
23865 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
23866 || info->first_fp_reg_save > 61)
23867 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23868 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23869 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23870 }
23871
23872 /* Don't bother to try to save things out-of-line if r11 is occupied
23873 by the static chain. It would require too much fiddling and the
23874 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
23875 pointer on Darwin, and AIX uses r1 or r12. */
23876 if (using_static_chain_p
23877 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
23878 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
23879 | SAVE_INLINE_GPRS
23880 | SAVE_INLINE_VRS);
23881
23882 /* Don't ever restore fixed regs. That means we can't use the
23883 out-of-line register restore functions if a fixed reg is in the
23884 range of regs restored. */
23885 if (!(strategy & REST_INLINE_FPRS))
23886 for (int i = info->first_fp_reg_save; i < 64; i++)
23887 if (fixed_regs[i])
23888 {
23889 strategy |= REST_INLINE_FPRS;
23890 break;
23891 }
23892
23893 /* We can only use the out-of-line routines to restore fprs if we've
23894 saved all the registers from first_fp_reg_save in the prologue.
23895 Otherwise, we risk loading garbage. Of course, if we have saved
23896 out-of-line then we know we haven't skipped any fprs. */
23897 if ((strategy & SAVE_INLINE_FPRS)
23898 && !(strategy & REST_INLINE_FPRS))
23899 for (int i = info->first_fp_reg_save; i < 64; i++)
23900 if (!save_reg_p (i))
23901 {
23902 strategy |= REST_INLINE_FPRS;
23903 break;
23904 }
23905
23906 /* Similarly, for altivec regs. */
23907 if (!(strategy & REST_INLINE_VRS))
23908 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23909 if (fixed_regs[i])
23910 {
23911 strategy |= REST_INLINE_VRS;
23912 break;
23913 }
23914
23915 if ((strategy & SAVE_INLINE_VRS)
23916 && !(strategy & REST_INLINE_VRS))
23917 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
23918 if (!save_reg_p (i))
23919 {
23920 strategy |= REST_INLINE_VRS;
23921 break;
23922 }
23923
23924 /* info->lr_save_p isn't yet set if the only reason lr needs to be
23925 saved is an out-of-line save or restore. Set up the value for
23926 the next test (excluding out-of-line gprs). */
23927 bool lr_save_p = (info->lr_save_p
23928 || !(strategy & SAVE_INLINE_FPRS)
23929 || !(strategy & SAVE_INLINE_VRS)
23930 || !(strategy & REST_INLINE_FPRS)
23931 || !(strategy & REST_INLINE_VRS));
23932
23933 if (TARGET_MULTIPLE
23934 && !TARGET_POWERPC64
23935 && info->first_gp_reg_save < 31
23936 && !(flag_shrink_wrap
23937 && flag_shrink_wrap_separate
23938 && optimize_function_for_speed_p (cfun)))
23939 {
23940 int count = 0;
23941 for (int i = info->first_gp_reg_save; i < 32; i++)
23942 if (save_reg_p (i))
23943 count++;
23944
23945 if (count <= 1)
23946 /* Don't use store multiple if only one reg needs to be
23947 saved. This can occur for example when the ABI_V4 pic reg
23948 (r30) needs to be saved to make calls, but r31 is not
23949 used. */
23950 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23951 else
23952 {
23953 /* Prefer store multiple for saves over out-of-line
23954 routines, since the store-multiple instruction will
23955 always be smaller. */
23956 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
23957
23958 /* The situation is more complicated with load multiple.
23959 We'd prefer to use the out-of-line routines for restores,
23960 since the "exit" out-of-line routines can handle the
23961 restore of LR and the frame teardown. However it doesn't
23962 make sense to use the out-of-line routine if that is the
23963 only reason we'd need to save LR, and we can't use the
23964 "exit" out-of-line gpr restore if we have saved some
23965 fprs. In those cases it is advantageous to use load
23966 multiple when available. */
23967 if (info->first_fp_reg_save != 64 || !lr_save_p)
23968 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
23969 }
23970 }
23971
23972 /* Using the "exit" out-of-line routine does not improve code size
23973 if it would require lr to be saved and only one or two gprs are
23974 being saved. */
23975 else if (!lr_save_p && info->first_gp_reg_save > 29)
23976 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23977
23978 /* Don't ever restore fixed regs. */
23979 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23980 for (int i = info->first_gp_reg_save; i < 32; i++)
23981 if (fixed_reg_p (i))
23982 {
23983 strategy |= REST_INLINE_GPRS;
23984 strategy &= ~REST_MULTIPLE;
23985 break;
23986 }
23987
23988 /* We can only use load multiple or the out-of-line routines to
23989 restore gprs if we've saved all the registers from
23990 first_gp_reg_save. Otherwise, we risk loading garbage.
23991 Of course, if we have saved out-of-line or used stmw then we know
23992 we haven't skipped any gprs. */
23993 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
23994 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
23995 for (int i = info->first_gp_reg_save; i < 32; i++)
23996 if (!save_reg_p (i))
23997 {
23998 strategy |= REST_INLINE_GPRS;
23999 strategy &= ~REST_MULTIPLE;
24000 break;
24001 }
24002
24003 if (TARGET_ELF && TARGET_64BIT)
24004 {
24005 if (!(strategy & SAVE_INLINE_FPRS))
24006 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24007 else if (!(strategy & SAVE_INLINE_GPRS)
24008 && info->first_fp_reg_save == 64)
24009 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24010 }
24011 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24012 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24013
24014 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24015 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24016
24017 return strategy;
24018 }
24019
24020 /* Calculate the stack information for the current function. This is
24021 complicated by having two separate calling sequences, the AIX calling
24022 sequence and the V.4 calling sequence.
24023
24024 AIX (and Darwin/Mac OS X) stack frames look like:
24025 32-bit 64-bit
24026 SP----> +---------------------------------------+
24027 | back chain to caller | 0 0
24028 +---------------------------------------+
24029 | saved CR | 4 8 (8-11)
24030 +---------------------------------------+
24031 | saved LR | 8 16
24032 +---------------------------------------+
24033 | reserved for compilers | 12 24
24034 +---------------------------------------+
24035 | reserved for binders | 16 32
24036 +---------------------------------------+
24037 | saved TOC pointer | 20 40
24038 +---------------------------------------+
24039 | Parameter save area (+padding*) (P) | 24 48
24040 +---------------------------------------+
24041 | Alloca space (A) | 24+P etc.
24042 +---------------------------------------+
24043 | Local variable space (L) | 24+P+A
24044 +---------------------------------------+
24045 | Float/int conversion temporary (X) | 24+P+A+L
24046 +---------------------------------------+
24047 | Save area for AltiVec registers (W) | 24+P+A+L+X
24048 +---------------------------------------+
24049 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24050 +---------------------------------------+
24051 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24052 +---------------------------------------+
24053 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24054 +---------------------------------------+
24055 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24056 +---------------------------------------+
24057 old SP->| back chain to caller's caller |
24058 +---------------------------------------+
24059
24060 * If the alloca area is present, the parameter save area is
24061 padded so that the alloca area starts 16-byte aligned.
24062
24063 The required alignment for AIX configurations is two words (i.e., 8
24064 or 16 bytes).
24065
24066 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24067
24068 SP----> +---------------------------------------+
24069 | Back chain to caller | 0
24070 +---------------------------------------+
24071 | Save area for CR | 8
24072 +---------------------------------------+
24073 | Saved LR | 16
24074 +---------------------------------------+
24075 | Saved TOC pointer | 24
24076 +---------------------------------------+
24077 | Parameter save area (+padding*) (P) | 32
24078 +---------------------------------------+
24079 | Alloca space (A) | 32+P
24080 +---------------------------------------+
24081 | Local variable space (L) | 32+P+A
24082 +---------------------------------------+
24083 | Save area for AltiVec registers (W) | 32+P+A+L
24084 +---------------------------------------+
24085 | AltiVec alignment padding (Y) | 32+P+A+L+W
24086 +---------------------------------------+
24087 | Save area for GP registers (G) | 32+P+A+L+W+Y
24088 +---------------------------------------+
24089 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24090 +---------------------------------------+
24091 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24092 +---------------------------------------+
24093
24094 * If the alloca area is present, the parameter save area is
24095 padded so that the alloca area starts 16-byte aligned.
24096
24097 V.4 stack frames look like:
24098
24099 SP----> +---------------------------------------+
24100 | back chain to caller | 0
24101 +---------------------------------------+
24102 | caller's saved LR | 4
24103 +---------------------------------------+
24104 | Parameter save area (+padding*) (P) | 8
24105 +---------------------------------------+
24106 | Alloca space (A) | 8+P
24107 +---------------------------------------+
24108 | Varargs save area (V) | 8+P+A
24109 +---------------------------------------+
24110 | Local variable space (L) | 8+P+A+V
24111 +---------------------------------------+
24112 | Float/int conversion temporary (X) | 8+P+A+V+L
24113 +---------------------------------------+
24114 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24115 +---------------------------------------+
24116 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24117 +---------------------------------------+
24118 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24119 +---------------------------------------+
24120 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24121 +---------------------------------------+
24122 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24123 +---------------------------------------+
24124 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24125 +---------------------------------------+
24126 old SP->| back chain to caller's caller |
24127 +---------------------------------------+
24128
24129 * If the alloca area is present and the required alignment is
24130 16 bytes, the parameter save area is padded so that the
24131 alloca area starts 16-byte aligned.
24132
24133 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24134 given. (But note below and in sysv4.h that we require only 8 and
24135 may round up the size of our stack frame anyway. The historical
24136 reason is early versions of powerpc-linux which didn't properly
24137 align the stack at program startup. A happy side-effect is that
24138 -mno-eabi libraries can be used with -meabi programs.)
24139
24140 The EABI configuration defaults to the V.4 layout. However,
24141 the stack alignment requirements may differ. If -mno-eabi is not
24142 given, the required stack alignment is 8 bytes; if -mno-eabi is
24143 given, the required alignment is 16 bytes. (But see V.4 comment
24144 above.) */
24145
24146 #ifndef ABI_STACK_BOUNDARY
24147 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24148 #endif
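/* A worked example of the alignment arithmetic used throughout
   rs6000_stack_info below: RS6000_ALIGN (n, a) (from rs6000.h) rounds
   N up to a multiple of A, so RS6000_ALIGN (100, 16) == 112, and with
   a 128-bit ABI_STACK_BOUNDARY the byte divisor
   ABI_STACK_BOUNDARY / BITS_PER_UNIT is 16.  */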
24149
24150 static rs6000_stack_t *
24151 rs6000_stack_info (void)
24152 {
24153 /* We should never be called for thunks, we are not set up for that. */
24154 gcc_assert (!cfun->is_thunk);
24155
24156 rs6000_stack_t *info = &stack_info;
24157 int reg_size = TARGET_32BIT ? 4 : 8;
24158 int ehrd_size;
24159 int ehcr_size;
24160 int save_align;
24161 int first_gp;
24162 HOST_WIDE_INT non_fixed_size;
24163 bool using_static_chain_p;
24164
24165 if (reload_completed && info->reload_completed)
24166 return info;
24167
24168 memset (info, 0, sizeof (*info));
24169 info->reload_completed = reload_completed;
24170
24171 /* Select which calling sequence. */
24172 info->abi = DEFAULT_ABI;
24173
24174 /* Calculate which registers need to be saved & save area size. */
24175 info->first_gp_reg_save = first_reg_to_save ();
24176 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24177 even if it currently looks like we won't. Reload may need it to
24178 get at a constant; if so, it will have already created a constant
24179 pool entry for it. */
24180 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24181 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24182 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24183 && crtl->uses_const_pool
24184 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24185 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24186 else
24187 first_gp = info->first_gp_reg_save;
24188
24189 info->gp_size = reg_size * (32 - first_gp);
24190
24191 info->first_fp_reg_save = first_fp_reg_to_save ();
24192 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24193
24194 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24195 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24196 - info->first_altivec_reg_save);
24197
24198 /* Does this function call anything? */
24199 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24200
24201 /* Determine if we need to save the condition code registers. */
24202 if (save_reg_p (CR2_REGNO)
24203 || save_reg_p (CR3_REGNO)
24204 || save_reg_p (CR4_REGNO))
24205 {
24206 info->cr_save_p = 1;
24207 if (DEFAULT_ABI == ABI_V4)
24208 info->cr_size = reg_size;
24209 }
24210
24211 /* If the current function calls __builtin_eh_return, then we need
24212 to allocate stack space for registers that will hold data for
24213 the exception handler. */
24214 if (crtl->calls_eh_return)
24215 {
24216 unsigned int i;
24217 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24218 continue;
24219
24220 ehrd_size = i * UNITS_PER_WORD;
24221 }
24222 else
24223 ehrd_size = 0;
24224
24225 /* In the ELFv2 ABI, we also need to allocate space for separate
24226 CR field save areas if the function calls __builtin_eh_return. */
24227 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24228 {
24229 /* This hard-codes that we have three call-saved CR fields. */
24230 ehcr_size = 3 * reg_size;
24231 /* We do *not* use the regular CR save mechanism. */
24232 info->cr_save_p = 0;
24233 }
24234 else
24235 ehcr_size = 0;
24236
24237 /* Determine various sizes. */
24238 info->reg_size = reg_size;
24239 info->fixed_size = RS6000_SAVE_AREA;
24240 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24241 if (cfun->calls_alloca)
24242 info->parm_size =
24243 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24244 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24245 else
24246 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24247 TARGET_ALTIVEC ? 16 : 8);
24248 if (FRAME_GROWS_DOWNWARD)
24249 info->vars_size
24250 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24251 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24252 - (info->fixed_size + info->vars_size + info->parm_size);
24253
24254 if (TARGET_ALTIVEC_ABI)
24255 info->vrsave_mask = compute_vrsave_mask ();
24256
24257 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24258 info->vrsave_size = 4;
24259
24260 compute_save_world_info (info);
24261
24262 /* Calculate the offsets. */
24263 switch (DEFAULT_ABI)
24264 {
24265 case ABI_NONE:
24266 default:
24267 gcc_unreachable ();
24268
24269 case ABI_AIX:
24270 case ABI_ELFv2:
24271 case ABI_DARWIN:
24272 info->fp_save_offset = -info->fp_size;
24273 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24274
24275 if (TARGET_ALTIVEC_ABI)
24276 {
24277 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24278
24279 /* Align stack so vector save area is on a quadword boundary.
24280 The padding goes above the vectors. */
24281 if (info->altivec_size != 0)
24282 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24283
24284 info->altivec_save_offset = info->vrsave_save_offset
24285 - info->altivec_padding_size
24286 - info->altivec_size;
24287 gcc_assert (info->altivec_size == 0
24288 || info->altivec_save_offset % 16 == 0);
24289
24290 /* Adjust for AltiVec case. */
24291 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24292 }
24293 else
24294 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24295
24296 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24297 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24298 info->lr_save_offset = 2*reg_size;
24299 break;
24300
24301 case ABI_V4:
24302 info->fp_save_offset = -info->fp_size;
24303 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24304 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24305
24306 if (TARGET_ALTIVEC_ABI)
24307 {
24308 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24309
24310 /* Align stack so vector save area is on a quadword boundary. */
24311 if (info->altivec_size != 0)
24312 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24313
24314 info->altivec_save_offset = info->vrsave_save_offset
24315 - info->altivec_padding_size
24316 - info->altivec_size;
24317
24318 /* Adjust for AltiVec case. */
24319 info->ehrd_offset = info->altivec_save_offset;
24320 }
24321 else
24322 info->ehrd_offset = info->cr_save_offset;
24323
24324 info->ehrd_offset -= ehrd_size;
24325 info->lr_save_offset = reg_size;
24326 }
24327
24328 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24329 info->save_size = RS6000_ALIGN (info->fp_size
24330 + info->gp_size
24331 + info->altivec_size
24332 + info->altivec_padding_size
24333 + ehrd_size
24334 + ehcr_size
24335 + info->cr_size
24336 + info->vrsave_size,
24337 save_align);
24338
24339 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24340
24341 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24342 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24343
24344 /* Determine if we need to save the link register. */
24345 if (info->calls_p
24346 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24347 && crtl->profile
24348 && !TARGET_PROFILE_KERNEL)
24349 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24350 #ifdef TARGET_RELOCATABLE
24351 || (DEFAULT_ABI == ABI_V4
24352 && (TARGET_RELOCATABLE || flag_pic > 1)
24353 && !constant_pool_empty_p ())
24354 #endif
24355 || rs6000_ra_ever_killed ())
24356 info->lr_save_p = 1;
24357
24358 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24359 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24360 && call_used_regs[STATIC_CHAIN_REGNUM]);
24361 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24362
24363 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24364 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24365 || !(info->savres_strategy & SAVE_INLINE_VRS)
24366 || !(info->savres_strategy & REST_INLINE_GPRS)
24367 || !(info->savres_strategy & REST_INLINE_FPRS)
24368 || !(info->savres_strategy & REST_INLINE_VRS))
24369 info->lr_save_p = 1;
24370
24371 if (info->lr_save_p)
24372 df_set_regs_ever_live (LR_REGNO, true);
24373
24374 /* Determine if we need to allocate any stack frame:
24375
24376 For AIX we need to push the stack if a frame pointer is needed
24377 (because the stack might be dynamically adjusted), if we are
24378 debugging, if we make calls, or if the sum of fp_save, gp_save,
24379 and local variables are more than the space needed to save all
24380 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24381 + 18*8 = 288 (GPR13 reserved).
24382
24383 For V.4 we don't have the stack cushion that AIX uses, but assume
24384 that the debugger can handle stackless frames. */
24385
24386 if (info->calls_p)
24387 info->push_p = 1;
24388
24389 else if (DEFAULT_ABI == ABI_V4)
24390 info->push_p = non_fixed_size != 0;
24391
24392 else if (frame_pointer_needed)
24393 info->push_p = 1;
24394
24395 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24396 info->push_p = 1;
24397
24398 else
24399 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24400
24401 return info;
24402 }
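/* Worked example of the offsets computed above (a sketch): a 64-bit
   ELFv2 function saving r30-r31 and f30-f31 with no AltiVec state has
   reg_size == 8, gp_size == 16 and fp_size == 16, giving
   fp_save_offset == -16, gp_save_offset == -32, cr_save_offset == 8
   and lr_save_offset == 16; assuming TARGET_ALTIVEC_ABI the save area
   rounds to 16 bytes, so save_size == RS6000_ALIGN (16 + 16, 16) == 32.  */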
24403
24404 static void
24405 debug_stack_info (rs6000_stack_t *info)
24406 {
24407 const char *abi_string;
24408
24409 if (! info)
24410 info = rs6000_stack_info ();
24411
24412 fprintf (stderr, "\nStack information for function %s:\n",
24413 ((current_function_decl && DECL_NAME (current_function_decl))
24414 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24415 : "<unknown>"));
24416
24417 switch (info->abi)
24418 {
24419 default: abi_string = "Unknown"; break;
24420 case ABI_NONE: abi_string = "NONE"; break;
24421 case ABI_AIX: abi_string = "AIX"; break;
24422 case ABI_ELFv2: abi_string = "ELFv2"; break;
24423 case ABI_DARWIN: abi_string = "Darwin"; break;
24424 case ABI_V4: abi_string = "V.4"; break;
24425 }
24426
24427 fprintf (stderr, "\tABI = %5s\n", abi_string);
24428
24429 if (TARGET_ALTIVEC_ABI)
24430 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24431
24432 if (info->first_gp_reg_save != 32)
24433 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24434
24435 if (info->first_fp_reg_save != 64)
24436 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24437
24438 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24439 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24440 info->first_altivec_reg_save);
24441
24442 if (info->lr_save_p)
24443 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24444
24445 if (info->cr_save_p)
24446 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24447
24448 if (info->vrsave_mask)
24449 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24450
24451 if (info->push_p)
24452 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24453
24454 if (info->calls_p)
24455 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24456
24457 if (info->gp_size)
24458 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24459
24460 if (info->fp_size)
24461 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24462
24463 if (info->altivec_size)
24464 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24465 info->altivec_save_offset);
24466
24467 if (info->vrsave_size)
24468 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24469 info->vrsave_save_offset);
24470
24471 if (info->lr_save_p)
24472 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24473
24474 if (info->cr_save_p)
24475 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24476
24477 if (info->varargs_save_offset)
24478 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24479
24480 if (info->total_size)
24481 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24482 info->total_size);
24483
24484 if (info->vars_size)
24485 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24486 info->vars_size);
24487
24488 if (info->parm_size)
24489 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24490
24491 if (info->fixed_size)
24492 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24493
24494 if (info->gp_size)
24495 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24496
24497 if (info->fp_size)
24498 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24499
24500 if (info->altivec_size)
24501 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24502
24503 if (info->vrsave_size)
24504 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24505
24506 if (info->altivec_padding_size)
24507 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24508 info->altivec_padding_size);
24509
24510 if (info->cr_size)
24511 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24512
24513 if (info->save_size)
24514 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24515
24516 if (info->reg_size != 4)
24517 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24518
24519 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24520
24521 fprintf (stderr, "\n");
24522 }
24523
24524 rtx
24525 rs6000_return_addr (int count, rtx frame)
24526 {
24527 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24528 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24529 if (count != 0
24530 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24531 {
24532 cfun->machine->ra_needs_full_frame = 1;
24533
24534 if (count == 0)
24535 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24536 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24537 frame = stack_pointer_rtx;
24538 rtx prev_frame_addr = memory_address (Pmode, frame);
24539 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24540 rtx lr_save_off = plus_constant (Pmode,
24541 prev_frame, RETURN_ADDRESS_OFFSET);
24542 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24543 return gen_rtx_MEM (Pmode, lr_save_addr);
24544 }
24545
24546 cfun->machine->ra_need_lr = 1;
24547 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24548 }
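/* Illustration (hypothetical user code): for

     void *ra = __builtin_return_address (1);

   COUNT is 1, so the code above follows the back chain and loads the
   LR save word at RETURN_ADDRESS_OFFSET in the previous frame, forcing
   ra_needs_full_frame.  __builtin_return_address (0) without PIC on
   V.4/Darwin instead uses the pseudo holding the incoming LR value.  */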
24549
24550 /* Say whether a function is a candidate for sibcall handling or not. */
24551
24552 static bool
24553 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24554 {
24555 tree fntype;
24556
24557 /* The sibcall epilogue may clobber the static chain register.
24558 ??? We could work harder and avoid that, but it's probably
24559 not worth the hassle in practice. */
24560 if (CALL_EXPR_STATIC_CHAIN (exp))
24561 return false;
24562
24563 if (decl)
24564 fntype = TREE_TYPE (decl);
24565 else
24566 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24567
24568 /* We can't do it if the called function has more vector parameters
24569 than the current function; there's nowhere to put the VRsave code. */
24570 if (TARGET_ALTIVEC_ABI
24571 && TARGET_ALTIVEC_VRSAVE
24572 && !(decl && decl == current_function_decl))
24573 {
24574 function_args_iterator args_iter;
24575 tree type;
24576 int nvreg = 0;
24577
24578 /* Functions with vector parameters are required to have a
24579 prototype, so the argument type info must be available
24580 here. */
24581 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
24582 if (TREE_CODE (type) == VECTOR_TYPE
24583 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24584 nvreg++;
24585
24586 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
24587 if (TREE_CODE (type) == VECTOR_TYPE
24588 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24589 nvreg--;
24590
24591 if (nvreg > 0)
24592 return false;
24593 }
24594
24595 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24596 functions, because the callee may have a different TOC pointer to
24597 the caller and there's no way to ensure we restore the TOC when
24598 we return. With the secure-plt SYSV ABI we can't make non-local
24599 calls when -fpic/PIC because the plt call stubs use r30. */
24600 if (DEFAULT_ABI == ABI_DARWIN
24601 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24602 && decl
24603 && !DECL_EXTERNAL (decl)
24604 && !DECL_WEAK (decl)
24605 && (*targetm.binds_local_p) (decl))
24606 || (DEFAULT_ABI == ABI_V4
24607 && (!TARGET_SECURE_PLT
24608 || !flag_pic
24609 || (decl
24610 && (*targetm.binds_local_p) (decl)))))
24611 {
24612 tree attr_list = TYPE_ATTRIBUTES (fntype);
24613
24614 if (!lookup_attribute ("longcall", attr_list)
24615 || lookup_attribute ("shortcall", attr_list))
24616 return true;
24617 }
24618
24619 return false;
24620 }
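/* For example (hypothetical), on AIX or ELFv2 a tail call to a static
   function in the same translation unit:

     static int helper (int);
     int wrapper (int x) { return helper (x); }

   satisfies the binds_local_p test above and may become a sibcall,
   while a call to an external function is rejected because the callee
   might run with a different TOC pointer.  */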
24621
24622 static int
24623 rs6000_ra_ever_killed (void)
24624 {
24625 rtx_insn *top;
24626 rtx reg;
24627 rtx_insn *insn;
24628
24629 if (cfun->is_thunk)
24630 return 0;
24631
24632 if (cfun->machine->lr_save_state)
24633 return cfun->machine->lr_save_state - 1;
24634
24635 /* regs_ever_live has LR marked as used if any sibcalls are present,
24636 but this should not force saving and restoring in the
24637 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24638 clobbers LR, so that is inappropriate. */
24639
24640 /* Also, the prologue can generate a store into LR that
24641 doesn't really count, like this:
24642
24643 move LR->R0
24644 bcl to set PIC register
24645 move LR->R31
24646 move R0->LR
24647
24648 When we're called from the epilogue, we need to avoid counting
24649 this as a store. */
24650
24651 push_topmost_sequence ();
24652 top = get_insns ();
24653 pop_topmost_sequence ();
24654 reg = gen_rtx_REG (Pmode, LR_REGNO);
24655
24656 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24657 {
24658 if (INSN_P (insn))
24659 {
24660 if (CALL_P (insn))
24661 {
24662 if (!SIBLING_CALL_P (insn))
24663 return 1;
24664 }
24665 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24666 return 1;
24667 else if (set_of (reg, insn) != NULL_RTX
24668 && !prologue_epilogue_contains (insn))
24669 return 1;
24670 }
24671 }
24672 return 0;
24673 }
24674 \f
24675 /* Emit instructions needed to load the TOC register.
24676 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
24677 and there is a constant pool; or for SVR4 -fpic. */
24678
24679 void
24680 rs6000_emit_load_toc_table (int fromprolog)
24681 {
24682 rtx dest;
24683 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24684
24685 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24686 {
24687 char buf[30];
24688 rtx lab, tmp1, tmp2, got;
24689
24690 lab = gen_label_rtx ();
24691 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24692 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24693 if (flag_pic == 2)
24694 {
24695 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24696 need_toc_init = 1;
24697 }
24698 else
24699 got = rs6000_got_sym ();
24700 tmp1 = tmp2 = dest;
24701 if (!fromprolog)
24702 {
24703 tmp1 = gen_reg_rtx (Pmode);
24704 tmp2 = gen_reg_rtx (Pmode);
24705 }
24706 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24707 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24708 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24709 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24710 }
24711 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24712 {
24713 emit_insn (gen_load_toc_v4_pic_si ());
24714 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24715 }
24716 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24717 {
24718 char buf[30];
24719 rtx temp0 = (fromprolog
24720 ? gen_rtx_REG (Pmode, 0)
24721 : gen_reg_rtx (Pmode));
24722
24723 if (fromprolog)
24724 {
24725 rtx symF, symL;
24726
24727 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24728 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24729
24730 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24731 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24732
24733 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24734 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24735 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24736 }
24737 else
24738 {
24739 rtx tocsym, lab;
24740
24741 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24742 need_toc_init = 1;
24743 lab = gen_label_rtx ();
24744 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24745 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24746 if (TARGET_LINK_STACK)
24747 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24748 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24749 }
24750 emit_insn (gen_addsi3 (dest, temp0, dest));
24751 }
24752 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24753 {
24754 /* This is for AIX code running in non-PIC ELF32. */
24755 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24756
24757 need_toc_init = 1;
24758 emit_insn (gen_elf_high (dest, realsym));
24759 emit_insn (gen_elf_low (dest, dest, realsym));
24760 }
24761 else
24762 {
24763 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24764
24765 if (TARGET_32BIT)
24766 emit_insn (gen_load_toc_aix_si (dest));
24767 else
24768 emit_insn (gen_load_toc_aix_di (dest));
24769 }
24770 }
24771
24772 /* Emit instructions to restore the link register after determining where
24773 its value has been stored. */
24774
24775 void
24776 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24777 {
24778 rs6000_stack_t *info = rs6000_stack_info ();
24779 rtx operands[2];
24780
24781 operands[0] = source;
24782 operands[1] = scratch;
24783
24784 if (info->lr_save_p)
24785 {
24786 rtx frame_rtx = stack_pointer_rtx;
24787 HOST_WIDE_INT sp_offset = 0;
24788 rtx tmp;
24789
24790 if (frame_pointer_needed
24791 || cfun->calls_alloca
24792 || info->total_size > 32767)
24793 {
24794 tmp = gen_frame_mem (Pmode, frame_rtx);
24795 emit_move_insn (operands[1], tmp);
24796 frame_rtx = operands[1];
24797 }
24798 else if (info->push_p)
24799 sp_offset = info->total_size;
24800
24801 tmp = plus_constant (Pmode, frame_rtx,
24802 info->lr_save_offset + sp_offset);
24803 tmp = gen_frame_mem (Pmode, tmp);
24804 emit_move_insn (tmp, operands[0]);
24805 }
24806 else
24807 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24808
24809 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24810 state of lr_save_p so any change from here on would be a bug. In
24811 particular, stop rs6000_ra_ever_killed from considering the SET
24812 of lr we may have added just above. */
24813 cfun->machine->lr_save_state = info->lr_save_p + 1;
24814 }
24815
24816 static GTY(()) alias_set_type set = -1;
24817
24818 alias_set_type
24819 get_TOC_alias_set (void)
24820 {
24821 if (set == -1)
24822 set = new_alias_set ();
24823 return set;
24824 }
24825
24826 /* This returns nonzero if the current function uses the TOC. This is
24827 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
24828 is generated by the ABI_V4 load_toc_* patterns.
24829 Return 2 instead of 1 if the load_toc_* pattern is in the function
24830 partition that doesn't start the function. */
24831 #if TARGET_ELF
24832 static int
24833 uses_TOC (void)
24834 {
24835 rtx_insn *insn;
24836 int ret = 1;
24837
24838 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24839 {
24840 if (INSN_P (insn))
24841 {
24842 rtx pat = PATTERN (insn);
24843 int i;
24844
24845 if (GET_CODE (pat) == PARALLEL)
24846 for (i = 0; i < XVECLEN (pat, 0); i++)
24847 {
24848 rtx sub = XVECEXP (pat, 0, i);
24849 if (GET_CODE (sub) == USE)
24850 {
24851 sub = XEXP (sub, 0);
24852 if (GET_CODE (sub) == UNSPEC
24853 && XINT (sub, 1) == UNSPEC_TOC)
24854 return ret;
24855 }
24856 }
24857 }
24858 else if (crtl->has_bb_partition
24859 && NOTE_P (insn)
24860 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
24861 ret = 2;
24862 }
24863 return 0;
24864 }
24865 #endif
24866
24867 rtx
24868 create_TOC_reference (rtx symbol, rtx largetoc_reg)
24869 {
24870 rtx tocrel, tocreg, hi;
24871
24872 if (TARGET_DEBUG_ADDR)
24873 {
24874 if (GET_CODE (symbol) == SYMBOL_REF)
24875 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
24876 XSTR (symbol, 0));
24877 else
24878 {
24879 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
24880 GET_RTX_NAME (GET_CODE (symbol)));
24881 debug_rtx (symbol);
24882 }
24883 }
24884
24885 if (!can_create_pseudo_p ())
24886 df_set_regs_ever_live (TOC_REGISTER, true);
24887
24888 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
24889 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
24890 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
24891 return tocrel;
24892
24893 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
24894 if (largetoc_reg != NULL)
24895 {
24896 emit_move_insn (largetoc_reg, hi);
24897 hi = largetoc_reg;
24898 }
24899 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
24900 }
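/* For instance, with -mcmodel=medium once pseudos can no longer be
   created, the high/lo_sum pair built above for a symbol x looks
   roughly like (an RTL sketch):

     (lo_sum (high (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL))
             (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL))

   which the machine description renders as an addis/addi (or addis/ld)
   pair against the TOC pointer in r2.  */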
24901
24902 /* Issue assembly directives that create a reference to the given DWARF
24903 FRAME_TABLE_LABEL from the current function section. */
24904 void
24905 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
24906 {
24907 fprintf (asm_out_file, "\t.ref %s\n",
24908 (* targetm.strip_name_encoding) (frame_table_label));
24909 }
24910 \f
24911 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
24912 and the change to the stack pointer. */
24913
24914 static void
24915 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
24916 {
24917 rtvec p;
24918 int i;
24919 rtx regs[3];
24920
24921 i = 0;
24922 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
24923 if (hard_frame_needed)
24924 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
24925 if (!(REGNO (fp) == STACK_POINTER_REGNUM
24926 || (hard_frame_needed
24927 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
24928 regs[i++] = fp;
24929
24930 p = rtvec_alloc (i);
24931 while (--i >= 0)
24932 {
24933 rtx mem = gen_frame_mem (BLKmode, regs[i]);
24934 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
24935 }
24936
24937 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
24938 }
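/* In the common case where FP is the stack pointer itself, the tie is
   a single element (an RTL sketch):

     (parallel [(set (mem:BLK (reg 1)) (const_int 0))])

   The BLKmode store is not a real store; it exists only so the
   scheduler cannot move frame memory accesses across the stack
   pointer update.  */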
24939
24940 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
24941 and set the appropriate attributes for the generated insn. Return the
24942 first insn which adjusts the stack pointer or the last insn before
24943 the stack adjustment loop.
24944
24945 SIZE_INT is used to create the CFI note for the allocation.
24946
24947 SIZE_RTX is an rtx containing the size of the adjustment. Note that
24948 since stacks grow to lower addresses its runtime value is -SIZE_INT.
24949
24950 ORIG_SP contains the backchain value that must be stored at *sp. */
24951
24952 static rtx_insn *
24953 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
24954 {
24955 rtx_insn *insn;
24956
24957 rtx size_rtx = GEN_INT (-size_int);
24958 if (size_int > 32767)
24959 {
24960 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
24961 /* Need a note here so that try_split doesn't get confused. */
24962 if (get_last_insn () == NULL_RTX)
24963 emit_note (NOTE_INSN_DELETED);
24964 insn = emit_move_insn (tmp_reg, size_rtx);
24965 try_split (PATTERN (insn), insn, 0);
24966 size_rtx = tmp_reg;
24967 }
24968
24969 if (Pmode == SImode)
24970 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
24971 stack_pointer_rtx,
24972 size_rtx,
24973 orig_sp));
24974 else
24975 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
24976 stack_pointer_rtx,
24977 size_rtx,
24978 orig_sp));
24979 rtx par = PATTERN (insn);
24980 gcc_assert (GET_CODE (par) == PARALLEL);
24981 rtx set = XVECEXP (par, 0, 0);
24982 gcc_assert (GET_CODE (set) == SET);
24983 rtx mem = SET_DEST (set);
24984 gcc_assert (MEM_P (mem));
24985 MEM_NOTRAP_P (mem) = 1;
24986 set_mem_alias_set (mem, get_frame_alias_set ());
24987
24988 RTX_FRAME_RELATED_P (insn) = 1;
24989 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
24990 gen_rtx_SET (stack_pointer_rtx,
24991 gen_rtx_PLUS (Pmode,
24992 stack_pointer_rtx,
24993 GEN_INT (-size_int))));
24994
24995 /* Emit a blockage to ensure the allocation/probing insns are
24996 not optimized, combined, removed, etc. Add REG_STACK_CHECK
24997 note for similar reasons. */
24998 if (flag_stack_clash_protection)
24999 {
25000 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25001 emit_insn (gen_blockage ());
25002 }
25003
25004 return insn;
25005 }
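/* For a small 64-bit allocation such as SIZE_INT == 512, and assuming
   ORIG_SP is the stack pointer itself, the code above boils down to a
   single store-with-update instruction (assembly sketch):

     stdu 1,-512(1)

   which decrements r1 by 512 and stores the backchain at the new *r1
   in one insn.  For SIZE_INT > 32767 the negated size is first loaded
   into r0 and the indexed update form is used instead.  */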
25006
25007 static HOST_WIDE_INT
25008 get_stack_clash_protection_probe_interval (void)
25009 {
25010 return (HOST_WIDE_INT_1U
25011 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25012 }
25013
25014 static HOST_WIDE_INT
25015 get_stack_clash_protection_guard_size (void)
25016 {
25017 return (HOST_WIDE_INT_1U
25018 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25019 }
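/* With the default --param values (both stack-clash-protection
   parameters default to 12), each of these returns 1 << 12 == 4096,
   i.e. one 4 KiB page.  */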
25020
25021 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25022 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25023
25024 COPY_REG, if non-null, should contain a copy of the original
25025 stack pointer at exit from this function.
25026
25027 This is subtly different than the Ada probing in that it tries hard to
25028 prevent attacks that jump the stack guard. Thus it is never allowed to
25029 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25030 space without a suitable probe. */
25031 static rtx_insn *
25032 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25033 rtx copy_reg)
25034 {
25035 rtx orig_sp = copy_reg;
25036
25037 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25038
25039 /* Round the size down to a multiple of PROBE_INTERVAL. */
25040 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25041
25042 /* If explicitly requested,
25043 or the rounded size is not the same as the original size,
25044 or the rounded size is greater than a page,
25045 then we will need a copy of the original stack pointer. */
25046 if (rounded_size != orig_size
25047 || rounded_size > probe_interval
25048 || copy_reg)
25049 {
25050 /* If the caller did not request a copy of the incoming stack
25051 pointer, then we use r0 to hold the copy. */
25052 if (!copy_reg)
25053 orig_sp = gen_rtx_REG (Pmode, 0);
25054 emit_move_insn (orig_sp, stack_pointer_rtx);
25055 }
25056
25057 /* There are three cases here.
25058
25059 One is a single probe which is the most common and most efficiently
25060 implemented as it does not have to have a copy of the original
25061 stack pointer if there are no residuals.
25062
25063 Second is unrolled allocation/probes which we use if there's just
25064 a few of them. It needs to save the original stack pointer into a
25065 temporary for use as a source register in the allocation/probe.
25066
25067 Last is a loop. This is the most uncommon case and least efficient. */
25068 rtx_insn *retval = NULL;
25069 if (rounded_size == probe_interval)
25070 {
25071 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25072
25073 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25074 }
25075 else if (rounded_size <= 8 * probe_interval)
25076 {
25077 /* The ABI requires using the store with update insns to allocate
25078 space and store the backchain into the stack.
25079
25080 So we save the current stack pointer into a temporary, then
25081 emit the store-with-update insns to store the saved stack pointer
25082 into the right location in each new page. */
25083 for (int i = 0; i < rounded_size; i += probe_interval)
25084 {
25085 rtx_insn *insn
25086 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25087
25088 /* Save the first stack adjustment in RETVAL. */
25089 if (i == 0)
25090 retval = insn;
25091 }
25092
25093 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25094 }
25095 else
25096 {
25097 /* Compute the ending address. */
25098 rtx end_addr
25099 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25100 rtx rs = GEN_INT (-rounded_size);
25101 rtx_insn *insn;
25102 if (add_operand (rs, Pmode))
25103 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25104 else
25105 {
25106 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25107 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25108 stack_pointer_rtx));
25109 /* Describe the effect of INSN to the CFI engine. */
25110 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25111 gen_rtx_SET (end_addr,
25112 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25113 rs)));
25114 }
25115 RTX_FRAME_RELATED_P (insn) = 1;
25116
25117 /* Emit the loop. */
25118 if (TARGET_64BIT)
25119 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25120 stack_pointer_rtx, orig_sp,
25121 end_addr));
25122 else
25123 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25124 stack_pointer_rtx, orig_sp,
25125 end_addr));
25126 RTX_FRAME_RELATED_P (retval) = 1;
25127 /* Describe the effect of INSN to the CFI engine. */
25128 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25129 gen_rtx_SET (stack_pointer_rtx, end_addr));
25130
25131 /* Emit a blockage to ensure the allocation/probing insns are
25132 not optimized, combined, removed, etc. Other cases handle this
25133 within their call to rs6000_emit_allocate_stack_1. */
25134 emit_insn (gen_blockage ());
25135
25136 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25137 }
25138
25139 if (orig_size != rounded_size)
25140 {
25141 /* Allocate (and implicitly probe) any residual space. */
25142 HOST_WIDE_INT residual = orig_size - rounded_size;
25143
25144 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25145
25146 /* If the residual was the only allocation, then we can return the
25147 allocating insn. */
25148 if (!retval)
25149 retval = insn;
25150 }
25151
25152 return retval;
25153 }
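/* Worked example, assuming a 4 KiB probe interval: ORIG_SIZE == 4096
   hits the single-probe case; ORIG_SIZE == 20000 rounds down to 16384
   and gets four unrolled store-with-update allocations plus a
   3616-byte residual; ORIG_SIZE == 100000 exceeds 8 * 4096 and so
   falls through to the probe_stack_range loop.  */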
25154
25155 /* Emit the correct code for allocating stack space, as insns.
25156 If COPY_REG, make sure a copy of the old frame is left there.
25157 The generated code may use hard register 0 as a temporary. */
25158
25159 static rtx_insn *
25160 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25161 {
25162 rtx_insn *insn;
25163 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25164 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25165 rtx todec = gen_int_mode (-size, Pmode);
25166
25167 if (INTVAL (todec) != -size)
25168 {
25169 warning (0, "stack frame too large");
25170 emit_insn (gen_trap ());
25171 return 0;
25172 }
25173
25174 if (crtl->limit_stack)
25175 {
25176 if (REG_P (stack_limit_rtx)
25177 && REGNO (stack_limit_rtx) > 1
25178 && REGNO (stack_limit_rtx) <= 31)
25179 {
25180 rtx_insn *insn
25181 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25182 gcc_assert (insn);
25183 emit_insn (insn);
25184 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25185 }
25186 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25187 && TARGET_32BIT
25188 && DEFAULT_ABI == ABI_V4
25189 && !flag_pic)
25190 {
25191 rtx toload = gen_rtx_CONST (VOIDmode,
25192 gen_rtx_PLUS (Pmode,
25193 stack_limit_rtx,
25194 GEN_INT (size)));
25195
25196 emit_insn (gen_elf_high (tmp_reg, toload));
25197 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25198 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25199 const0_rtx));
25200 }
25201 else
25202 warning (0, "stack limit expression is not supported");
25203 }
25204
25205 if (flag_stack_clash_protection)
25206 {
25207 if (size < get_stack_clash_protection_guard_size ())
25208 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25209 else
25210 {
25211 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25212 copy_reg);
25213
25214 /* If we asked for a copy with an offset, then we still need to
25215 add in the offset. */
25216 if (copy_reg && copy_off)
25217 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25218 return insn;
25219 }
25220 }
25221
25222 if (copy_reg)
25223 {
25224 if (copy_off != 0)
25225 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25226 else
25227 emit_move_insn (copy_reg, stack_reg);
25228 }
25229
25230 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25231 it now and set the alias set/attributes. The above gen_*_update
25232 calls will generate a PARALLEL with the MEM set being the first
25233 operation. */
25234 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25235 return insn;
25236 }
25237
25238 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25239
25240 #if PROBE_INTERVAL > 32768
25241 #error Cannot use indexed addressing mode for stack probing
25242 #endif
25243
25244 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25245 inclusive. These are offsets from the current stack pointer. */
25246
25247 static void
25248 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25249 {
25250 /* See if we have a constant small number of probes to generate. If so,
25251 that's the easy case. */
25252 if (first + size <= 32768)
25253 {
25254 HOST_WIDE_INT i;
25255
25256 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25257 it exceeds SIZE. If only one probe is needed, this will not
25258 generate any code. Then probe at FIRST + SIZE. */
25259 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25260 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25261 -(first + i)));
25262
25263 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25264 -(first + size)));
25265 }
25266
25267 /* Otherwise, do the same as above, but in a loop. Note that we must be
25268 extra careful with variables wrapping around because we might be at
25269 the very top (or the very bottom) of the address space and we have
25270 to be able to handle this case properly; in particular, we use an
25271 equality test for the loop condition. */
25272 else
25273 {
25274 HOST_WIDE_INT rounded_size;
25275 rtx r12 = gen_rtx_REG (Pmode, 12);
25276 rtx r0 = gen_rtx_REG (Pmode, 0);
25277
25278 /* Sanity check for the addressing mode we're going to use. */
25279 gcc_assert (first <= 32768);
25280
25281 /* Step 1: round SIZE to the previous multiple of the interval. */
25282
25283 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25284
25285
25286 /* Step 2: compute initial and final value of the loop counter. */
25287
25288 /* TEST_ADDR = SP + FIRST. */
25289 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25290 -first)));
25291
25292 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25293 if (rounded_size > 32768)
25294 {
25295 emit_move_insn (r0, GEN_INT (-rounded_size));
25296 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25297 }
25298 else
25299 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25300 -rounded_size)));
25301
25302
25303 /* Step 3: the loop
25304
25305 do
25306 {
25307 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25308 probe at TEST_ADDR
25309 }
25310 while (TEST_ADDR != LAST_ADDR)
25311
25312 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25313 until it is equal to ROUNDED_SIZE. */
25314
25315 if (TARGET_64BIT)
25316 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25317 else
25318 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25319
25320
25321 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25322 that SIZE is equal to ROUNDED_SIZE. */
25323
25324 if (size != rounded_size)
25325 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25326 }
25327 }
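/* Worked example for the constant case above: with a PROBE_INTERVAL of
   4096, FIRST == 16384 and SIZE == 10000, probes are emitted at
   sp - 20480 and sp - 24576 (the loop), then at sp - 26384 (the final
   FIRST + SIZE probe).  */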
25328
25329 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25330 addresses, not offsets. */
25331
25332 static const char *
25333 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25334 {
25335 static int labelno = 0;
25336 char loop_lab[32];
25337 rtx xops[2];
25338
25339 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25340
25341 /* Loop. */
25342 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25343
25344 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25345 xops[0] = reg1;
25346 xops[1] = GEN_INT (-PROBE_INTERVAL);
25347 output_asm_insn ("addi %0,%0,%1", xops);
25348
25349 /* Probe at TEST_ADDR. */
25350 xops[1] = gen_rtx_REG (Pmode, 0);
25351 output_asm_insn ("stw %1,0(%0)", xops);
25352
25353 /* Test if TEST_ADDR == LAST_ADDR. */
25354 xops[1] = reg2;
25355 if (TARGET_64BIT)
25356 output_asm_insn ("cmpd 0,%0,%1", xops);
25357 else
25358 output_asm_insn ("cmpw 0,%0,%1", xops);
25359
25360 /* Branch. */
25361 fputs ("\tbne 0,", asm_out_file);
25362 assemble_name_raw (asm_out_file, loop_lab);
25363 fputc ('\n', asm_out_file);
25364
25365 return "";
25366 }
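/* Assembly sketch of the loop emitted above for a 32-bit target,
   assuming the default 4 KiB PROBE_INTERVAL, with TEST_ADDR in r12 and
   LAST_ADDR in r0 as passed by rs6000_emit_probe_stack_range:

     .LPSRL0:
             addi 12,12,-4096
             stw 0,0(12)
             cmpw 0,12,0
             bne 0,.LPSRL0
*/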
25367
25368 /* This function is called when rs6000_frame_related is processing
25369 SETs within a PARALLEL, and returns whether the REGNO save ought to
25370 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25371 for out-of-line register save functions, store multiple, and the
25372 Darwin world_save. They may contain registers that don't really
25373 need saving. */
25374
25375 static bool
25376 interesting_frame_related_regno (unsigned int regno)
25377 {
25378 /* Saves apparently of r0 are actually saving LR. It doesn't make
25379 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25380 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25381 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25382 as frame related. */
25383 if (regno == 0)
25384 return true;
25385 /* If we see CR2 then we are here on a Darwin world save. Saves of
25386 CR2 signify the whole CR is being saved. This is a long-standing
25387 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25388 that CR needs to be saved. */
25389 if (regno == CR2_REGNO)
25390 return true;
25391 /* Omit frame info for any user-defined global regs. If frame info
25392 is supplied for them, frame unwinding will restore a user reg.
25393 Also omit frame info for any reg we don't need to save, as that
25394 bloats frame info and can cause problems with shrink wrapping.
25395 Since global regs won't be seen as needing to be saved, both of
25396 these conditions are covered by save_reg_p. */
25397 return save_reg_p (regno);
25398 }
25399
25400 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25401 addresses, not offsets.
25402
25403 REG2 contains the backchain that must be stored into *sp at each allocation.
25404
25405 This is subtly different than the Ada probing above in that it tries hard
25406 to prevent attacks that jump the stack guard. Thus, it is never allowed
25407 to allocate more than PROBE_INTERVAL bytes of stack space without a
25408 suitable probe. */
25409
25410 static const char *
25411 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25412 {
25413 static int labelno = 0;
25414 char loop_lab[32];
25415 rtx xops[3];
25416
25417 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25418
25419 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25420
25421 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25422
25423 /* This allocates and probes. */
25424 xops[0] = reg1;
25425 xops[1] = reg2;
25426 xops[2] = GEN_INT (-probe_interval);
25427 if (TARGET_64BIT)
25428 output_asm_insn ("stdu %1,%2(%0)", xops);
25429 else
25430 output_asm_insn ("stwu %1,%2(%0)", xops);
25431
25432 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25433 xops[0] = reg1;
25434 xops[1] = reg3;
25435 if (TARGET_64BIT)
25436 output_asm_insn ("cmpd 0,%0,%1", xops);
25437 else
25438 output_asm_insn ("cmpw 0,%0,%1", xops);
25439
25440 fputs ("\tbne 0,", asm_out_file);
25441 assemble_name_raw (asm_out_file, loop_lab);
25442 fputc ('\n', asm_out_file);
25443
25444 return "";
25445 }
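/* Assembly sketch of the 64-bit stack-clash loop above, assuming no
   copy register was requested, so the backchain (REG2) is in r0 and
   LAST_ADDR (REG3) is in r12:

     .LPSRL1:
             stdu 0,-4096(1)
             cmpd 0,1,12
             bne 0,.LPSRL1

   Every 4 KiB page is written as it is allocated, so the stack guard
   can never be jumped over.  */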
25446
25447 /* Wrapper around the output_probe_stack_range routines. */
25448 const char *
25449 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25450 {
25451 if (flag_stack_clash_protection)
25452 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25453 else
25454 return output_probe_stack_range_1 (reg1, reg3);
25455 }
25456
25457 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25458 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25459 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25460 deduce these equivalences by itself so it wasn't necessary to hold
25461 its hand so much. Don't be tempted to always supply d2_f_d_e with
25462 the actual cfa register, ie. r31 when we are using a hard frame
25463 pointer. That fails when saving regs off r1, and sched moves the
25464 r31 setup past the reg saves. */
25465
25466 static rtx_insn *
25467 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25468 rtx reg2, rtx repl2)
25469 {
25470 rtx repl;
25471
25472 if (REGNO (reg) == STACK_POINTER_REGNUM)
25473 {
25474 gcc_checking_assert (val == 0);
25475 repl = NULL_RTX;
25476 }
25477 else
25478 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25479 GEN_INT (val));
25480
25481 rtx pat = PATTERN (insn);
25482 if (!repl && !reg2)
25483 {
25484 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25485 if (GET_CODE (pat) == PARALLEL)
25486 for (int i = 0; i < XVECLEN (pat, 0); i++)
25487 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25488 {
25489 rtx set = XVECEXP (pat, 0, i);
25490
25491 if (!REG_P (SET_SRC (set))
25492 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25493 RTX_FRAME_RELATED_P (set) = 1;
25494 }
25495 RTX_FRAME_RELATED_P (insn) = 1;
25496 return insn;
25497 }
25498
25499 /* We expect that 'pat' is either a SET or a PARALLEL containing
25500 SETs (and possibly other stuff). In a PARALLEL, each SET that saves
25501 an interesting register (see interesting_frame_related_regno) must be marked RTX_FRAME_RELATED_P.
25502 Call simplify_replace_rtx on the SETs rather than the whole insn
25503 so as to leave the other stuff alone (for example USE of r12). */
25504
25505 set_used_flags (pat);
25506 if (GET_CODE (pat) == SET)
25507 {
25508 if (repl)
25509 pat = simplify_replace_rtx (pat, reg, repl);
25510 if (reg2)
25511 pat = simplify_replace_rtx (pat, reg2, repl2);
25512 }
25513 else if (GET_CODE (pat) == PARALLEL)
25514 {
25515 pat = shallow_copy_rtx (pat);
25516 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25517
25518 for (int i = 0; i < XVECLEN (pat, 0); i++)
25519 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25520 {
25521 rtx set = XVECEXP (pat, 0, i);
25522
25523 if (repl)
25524 set = simplify_replace_rtx (set, reg, repl);
25525 if (reg2)
25526 set = simplify_replace_rtx (set, reg2, repl2);
25527 XVECEXP (pat, 0, i) = set;
25528
25529 if (!REG_P (SET_SRC (set))
25530 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25531 RTX_FRAME_RELATED_P (set) = 1;
25532 }
25533 }
25534 else
25535 gcc_unreachable ();
25536
25537 RTX_FRAME_RELATED_P (insn) = 1;
25538 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25539
25540 return insn;
25541 }
25542
25543 /* Return a PARALLEL rtx that sets VRSAVE, with the appropriate
25544 CLOBBERs; the caller emits it as an insn. */
25545
25546 static rtx
25547 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25548 {
25549 int nclobs, i;
25550 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25551 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25552
25553 clobs[0]
25554 = gen_rtx_SET (vrsave,
25555 gen_rtx_UNSPEC_VOLATILE (SImode,
25556 gen_rtvec (2, reg, vrsave),
25557 UNSPECV_SET_VRSAVE));
25558
25559 nclobs = 1;
25560
25561 /* We need to clobber the registers in the mask so the scheduler
25562 does not move sets to VRSAVE before sets of AltiVec registers.
25563
25564 However, if the function receives nonlocal gotos, reload will set
25565 all call saved registers live. We will end up with:
25566
25567 (set (reg 999) (mem))
25568 (parallel [ (set (reg vrsave) (unspec blah))
25569 (clobber (reg 999))])
25570
25571 The clobber will cause the store into reg 999 to be dead, and
25572 flow will attempt to delete an epilogue insn. In this case, we
25573 need an unspec use/set of the register. */
25574
25575 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25576 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25577 {
25578 if (!epiloguep || call_used_regs [i])
25579 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
25580 gen_rtx_REG (V4SImode, i));
25581 else
25582 {
25583 rtx reg = gen_rtx_REG (V4SImode, i);
25584
25585 clobs[nclobs++]
25586 = gen_rtx_SET (reg,
25587 gen_rtx_UNSPEC (V4SImode,
25588 gen_rtvec (1, reg), 27));
25589 }
25590 }
25591
25592 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25593
25594 for (i = 0; i < nclobs; ++i)
25595 XVECEXP (insn, 0, i) = clobs[i];
25596
25597 return insn;
25598 }
25599
25600 static rtx
25601 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25602 {
25603 rtx addr, mem;
25604
25605 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25606 mem = gen_frame_mem (GET_MODE (reg), addr);
25607 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25608 }
25609
25610 static rtx
25611 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25612 {
25613 return gen_frame_set (reg, frame_reg, offset, false);
25614 }
25615
25616 static rtx
25617 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25618 {
25619 return gen_frame_set (reg, frame_reg, offset, true);
25620 }
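/* For example, gen_frame_store (r0, r1, 16) builds the pattern
(set (mem (plus (reg 1) (const_int 16))) (reg 0)) without emitting
it; gen_frame_load builds the mirror-image load. */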
25621
25622 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25623 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25624
25625 static rtx_insn *
25626 emit_frame_save (rtx frame_reg, machine_mode mode,
25627 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25628 {
25629 rtx reg;
25630
25631 /* Reject cases that would need register indexed addressing: AltiVec modes are only allowed here when VSX offset addressing can handle them. */
25632 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25633 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25634
25635 reg = gen_rtx_REG (mode, regno);
25636 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25637 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25638 NULL_RTX, NULL_RTX);
25639 }
25640
25641 /* Emit an offset memory reference suitable for a frame store, while
25642 converting to a valid addressing mode. */
25643
25644 static rtx
25645 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25646 {
25647 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25648 }
25649
25650 #ifndef TARGET_FIX_AND_CONTINUE
25651 #define TARGET_FIX_AND_CONTINUE 0
25652 #endif
25653
25654 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest of these. */
25655 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25656 #define LAST_SAVRES_REGISTER 31
25657 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25658
25659 enum {
25660 SAVRES_LR = 0x1,
25661 SAVRES_SAVE = 0x2,
25662 SAVRES_REG = 0x0c,
25663 SAVRES_GPR = 0,
25664 SAVRES_FPR = 4,
25665 SAVRES_VR = 8
25666 };
25667
25668 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
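/* The second dimension is indexed by SEL, whose used bits are 0x1
(SAVRES_LR), 0x2 (SAVRES_SAVE) and 0x0c (the register class, one of
0, 4 or 8), so the largest value is 0x1 | 0x2 | 0x8 == 11 and 12
slots suffice. */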
25669
25670 /* Temporary holding space for an out-of-line register save/restore
25671 routine name. */
25672 static char savres_routine_name[30];
25673
25674 /* Return the name for an out-of-line register save/restore routine.
25675 SEL encodes the register class and the save/restore/LR variant (see the SAVRES_* flags above). */
25676
25677 static char *
25678 rs6000_savres_routine_name (int regno, int sel)
25679 {
25680 const char *prefix = "";
25681 const char *suffix = "";
25682
25683 /* Different targets are supposed to define
25684 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25685 routine name could be defined with:
25686
25687 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25688
25689 This is a nice idea in theory, but in reality, things are
25690 complicated in several ways:
25691
25692 - ELF targets have save/restore routines for GPRs.
25693
25694 - PPC64 ELF targets have routines for save/restore of GPRs that
25695 differ in what they do with the link register, so having a set
25696 prefix doesn't work. (We only use one of the save routines at
25697 the moment, though.)
25698
25699 - PPC32 ELF targets have "exit" versions of the restore routines
25700 that restore the link register and can save some extra space.
25701 These require an extra suffix. (There are also "tail" versions
25702 of the restore routines and "GOT" versions of the save routines,
25703 but we don't generate those at present. Same problems apply,
25704 though.)
25705
25706 We deal with all this by synthesizing our own prefix/suffix and
25707 using that for the simple sprintf call shown above. */
25708 if (DEFAULT_ABI == ABI_V4)
25709 {
25710 if (TARGET_64BIT)
25711 goto aix_names;
25712
25713 if ((sel & SAVRES_REG) == SAVRES_GPR)
25714 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25715 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25716 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25717 else if ((sel & SAVRES_REG) == SAVRES_VR)
25718 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25719 else
25720 abort ();
25721
25722 if ((sel & SAVRES_LR))
25723 suffix = "_x";
25724 }
25725 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25726 {
25727 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25728 /* No out-of-line save/restore routines for GPRs on AIX. */
25729 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25730 #endif
25731
25732 aix_names:
25733 if ((sel & SAVRES_REG) == SAVRES_GPR)
25734 prefix = ((sel & SAVRES_SAVE)
25735 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25736 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25737 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25738 {
25739 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25740 if ((sel & SAVRES_LR))
25741 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25742 else
25743 #endif
25744 {
25745 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25746 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25747 }
25748 }
25749 else if ((sel & SAVRES_REG) == SAVRES_VR)
25750 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25751 else
25752 abort ();
25753 }
25754
25755 if (DEFAULT_ABI == ABI_DARWIN)
25756 {
25757 /* The Darwin approach is (slightly) different, in order to be
25758 compatible with code generated by the system toolchain. There is a
25759 single symbol for the start of the save sequence, and the code here
25760 embeds an offset into that code on the basis of the first register
25761 to be saved. */
25762 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25763 if ((sel & SAVRES_REG) == SAVRES_GPR)
25764 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25765 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25766 (regno - 13) * 4, prefix, regno);
25767 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25768 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25769 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25770 else if ((sel & SAVRES_REG) == SAVRES_VR)
25771 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25772 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25773 else
25774 abort ();
25775 }
25776 else
25777 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25778
25779 return savres_routine_name;
25780 }
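/* Some sample results, derived from the rules above: on 32-bit SVR4,
sel = SAVRES_SAVE | SAVRES_GPR with regno 29 yields "_savegpr_29",
and the corresponding exit-variant restore (SAVRES_LR set) yields
"_restgpr_29_x". On AIX and ELFv2 the LR variant is encoded in the
prefix instead: "_savegpr0_29" (touches LR) vs. "_savegpr1_29". */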
25781
25782 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25783 SEL encodes the register class and the save/restore variant, as above. */
25784
25785 static rtx
25786 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25787 {
25788 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25789 ? info->first_gp_reg_save
25790 : (sel & SAVRES_REG) == SAVRES_FPR
25791 ? info->first_fp_reg_save - 32
25792 : (sel & SAVRES_REG) == SAVRES_VR
25793 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25794 : -1);
25795 rtx sym;
25796 int select = sel;
25797
25798 /* Don't generate bogus routine names. */
25799 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25800 && regno <= LAST_SAVRES_REGISTER
25801 && select >= 0 && select <= 11);
25802
25803 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25804
25805 if (sym == NULL)
25806 {
25807 char *name;
25808
25809 name = rs6000_savres_routine_name (regno, sel);
25810
25811 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25812 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25813 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25814 }
25815
25816 return sym;
25817 }
25818
25819 /* Emit a sequence of insns, including a stack tie if needed, for
25820 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25821 reset the stack pointer, but move the base of the frame into
25822 reg UPDT_REGNO for use by out-of-line register restore routines. */
25823
25824 static rtx
25825 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25826 unsigned updt_regno)
25827 {
25828 /* If there is nothing to do, don't do anything. */
25829 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25830 return NULL_RTX;
25831
25832 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25833
25834 /* This blockage is needed so that sched doesn't decide to move
25835 the sp change before the register restores. */
25836 if (DEFAULT_ABI == ABI_V4)
25837 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25838 GEN_INT (frame_off)));
25839
25840 /* If we are restoring registers out-of-line, we will be using the
25841 "exit" variants of the restore routines, which will reset the
25842 stack for us. But we do need to point updt_reg into the
25843 right place for those routines. */
25844 if (frame_off != 0)
25845 return emit_insn (gen_add3_insn (updt_reg_rtx,
25846 frame_reg_rtx, GEN_INT (frame_off)));
25847 else
25848 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25851 }
25852
25853 /* Return the register number used as a pointer by out-of-line
25854 save/restore functions. */
25855
25856 static inline unsigned
25857 ptr_regno_for_savres (int sel)
25858 {
25859 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25860 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
25861 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
25862 }
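/* In summary (a restatement of the logic above):
AIX/ELFv2: FPR routines, or any variant touching LR, use r1;
everything else uses r12.
Darwin: FPR routines use r1; everything else uses r11.
SVR4 (ABI_V4): always r11. */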
25863
25864 /* Construct a parallel rtx describing the effect of a call to an
25865 out-of-line register save/restore routine, and emit the insn
25866 or jump_insn as appropriate. */
25867
25868 static rtx_insn *
25869 rs6000_emit_savres_rtx (rs6000_stack_t *info,
25870 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
25871 machine_mode reg_mode, int sel)
25872 {
25873 int i;
25874 int offset, start_reg, end_reg, n_regs, use_reg;
25875 int reg_size = GET_MODE_SIZE (reg_mode);
25876 rtx sym;
25877 rtvec p;
25878 rtx par;
25879 rtx_insn *insn;
25880
25881 offset = 0;
25882 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25883 ? info->first_gp_reg_save
25884 : (sel & SAVRES_REG) == SAVRES_FPR
25885 ? info->first_fp_reg_save
25886 : (sel & SAVRES_REG) == SAVRES_VR
25887 ? info->first_altivec_reg_save
25888 : -1);
25889 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
25890 ? 32
25891 : (sel & SAVRES_REG) == SAVRES_FPR
25892 ? 64
25893 : (sel & SAVRES_REG) == SAVRES_VR
25894 ? LAST_ALTIVEC_REGNO + 1
25895 : -1);
25896 n_regs = end_reg - start_reg;
25897 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
25898 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
25899 + n_regs);
25900
25901 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25902 RTVEC_ELT (p, offset++) = ret_rtx;
25903
25904 RTVEC_ELT (p, offset++)
25905 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
25906
25907 sym = rs6000_savres_routine_sym (info, sel);
25908 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
25909
25910 use_reg = ptr_regno_for_savres (sel);
25911 if ((sel & SAVRES_REG) == SAVRES_VR)
25912 {
25913 /* Vector regs are saved/restored using [reg+reg] addressing. */
25914 RTVEC_ELT (p, offset++)
25915 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25916 RTVEC_ELT (p, offset++)
25917 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
25918 }
25919 else
25920 RTVEC_ELT (p, offset++)
25921 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
25922
25923 for (i = 0; i < end_reg - start_reg; i++)
25924 RTVEC_ELT (p, i + offset)
25925 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
25926 frame_reg_rtx, save_area_offset + reg_size * i,
25927 (sel & SAVRES_SAVE) != 0);
25928
25929 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25930 RTVEC_ELT (p, i + offset)
25931 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
25932
25933 par = gen_rtx_PARALLEL (VOIDmode, p);
25934
25935 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
25936 {
25937 insn = emit_jump_insn (par);
25938 JUMP_LABEL (insn) = ret_rtx;
25939 }
25940 else
25941 insn = emit_insn (par);
25942 return insn;
25943 }
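/* As an illustrative sketch, an ELFv2 exit-variant GPR restore
starting at r29 (reg_size == 8, OFF = save_area_offset) builds:

(parallel [(return)
(clobber (reg LR))
(use (symbol_ref "_restgpr0_29"))
(use (reg 1))
(set (reg 29) (mem (plus (reg 1) (const_int OFF))))
(set (reg 30) (mem (plus (reg 1) (const_int OFF+8))))
(set (reg 31) (mem (plus (reg 1) (const_int OFF+16))))])

and is emitted as a jump_insn, since the routine also returns. */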
25944
25945 /* Emit prologue code to store CR fields that need to be saved into REG. This
25946 function should only be called when moving the non-volatile CRs to REG, it
25947 is not a general purpose routine to move the entire set of CRs to REG.
25948 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
25949 volatile CRs. */
25950
25951 static void
25952 rs6000_emit_prologue_move_from_cr (rtx reg)
25953 {
25954 /* Only the ELFv2 ABI allows storing only selected fields. */
25955 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
25956 {
25957 int i, cr_reg[8], count = 0;
25958
25959 /* Collect CR fields that must be saved. */
25960 for (i = 0; i < 8; i++)
25961 if (save_reg_p (CR0_REGNO + i))
25962 cr_reg[count++] = i;
25963
25964 /* If it's just a single one, use mfcrf. */
25965 if (count == 1)
25966 {
25967 rtvec p = rtvec_alloc (1);
25968 rtvec r = rtvec_alloc (2);
25969 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
25970 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
25971 RTVEC_ELT (p, 0)
25972 = gen_rtx_SET (reg,
25973 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
25974
25975 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
25976 return;
25977 }
25978
25979 /* ??? It might be better to handle the count == 2 and count == 3 cases here
25980 as well, using logical operations to combine the values. */
25981 }
25982
25983 emit_insn (gen_prologue_movesi_from_cr (reg));
25984 }
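/* Worked example of the single-field case: if only CR2 needs saving,
cr_reg[0] == 2 and the mask is 1 << (7 - 2) == 0x20, which is the
FXM field-select value for CR2, so the pattern can match a
single-field mfocrf/mfcrf rather than a full mfcr. */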
25985
25986 /* Return whether the split-stack arg pointer (r12) is used. */
25987
25988 static bool
25989 split_stack_arg_pointer_used_p (void)
25990 {
25991 /* If the pseudo holding the arg pointer is no longer a pseudo,
25992 then the arg pointer is used. */
25993 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
25994 && (!REG_P (cfun->machine->split_stack_arg_pointer)
25995 || (REGNO (cfun->machine->split_stack_arg_pointer)
25996 < FIRST_PSEUDO_REGISTER)))
25997 return true;
25998
25999 /* Unfortunately we also need to do some code scanning, since
26000 r12 may have been substituted for the pseudo. */
26001 rtx_insn *insn;
26002 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26003 FOR_BB_INSNS (bb, insn)
26004 if (NONDEBUG_INSN_P (insn))
26005 {
26006 /* A call destroys r12. */
26007 if (CALL_P (insn))
26008 return false;
26009
26010 df_ref use;
26011 FOR_EACH_INSN_USE (use, insn)
26012 {
26013 rtx x = DF_REF_REG (use);
26014 if (REG_P (x) && REGNO (x) == 12)
26015 return true;
26016 }
26017 df_ref def;
26018 FOR_EACH_INSN_DEF (def, insn)
26019 {
26020 rtx x = DF_REF_REG (def);
26021 if (REG_P (x) && REGNO (x) == 12)
26022 return false;
26023 }
26024 }
26025 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26026 }
26027
26028 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26029
26030 static bool
26031 rs6000_global_entry_point_needed_p (void)
26032 {
26033 /* Only needed for the ELFv2 ABI. */
26034 if (DEFAULT_ABI != ABI_ELFv2)
26035 return false;
26036
26037 /* With -msingle-pic-base, we assume the whole program shares the same
26038 TOC, so no global entry point prologues are needed anywhere. */
26039 if (TARGET_SINGLE_PIC_BASE)
26040 return false;
26041
26042 /* Ensure we have a global entry point for thunks. ??? We could
26043 avoid that if the target routine doesn't need a global entry point,
26044 but we do not know whether this is the case at this point. */
26045 if (cfun->is_thunk)
26046 return true;
26047
26048 /* For regular functions, rs6000_emit_prologue sets this flag if the
26049 routine ever uses the TOC pointer. */
26050 return cfun->machine->r2_setup_needed;
26051 }
26052
26053 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26054 static sbitmap
26055 rs6000_get_separate_components (void)
26056 {
26057 rs6000_stack_t *info = rs6000_stack_info ();
26058
26059 if (WORLD_SAVE_P (info))
26060 return NULL;
26061
26062 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26063 && !(info->savres_strategy & REST_MULTIPLE));
26064
26065 /* Component 0 is the save/restore of LR (done via GPR0).
26066 Component 2 is the save of the TOC (GPR2).
26067 Components 13..31 are the save/restore of GPR13..GPR31.
26068 Components 46..63 are the save/restore of FPR14..FPR31. */
26069
26070 cfun->machine->n_components = 64;
26071
26072 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26073 bitmap_clear (components);
26074
26075 int reg_size = TARGET_32BIT ? 4 : 8;
26076 int fp_reg_size = 8;
26077
26078 /* The GPRs we need saved to the frame. */
26079 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26080 && (info->savres_strategy & REST_INLINE_GPRS))
26081 {
26082 int offset = info->gp_save_offset;
26083 if (info->push_p)
26084 offset += info->total_size;
26085
26086 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26087 {
26088 if (IN_RANGE (offset, -0x8000, 0x7fff)
26089 && save_reg_p (regno))
26090 bitmap_set_bit (components, regno);
26091
26092 offset += reg_size;
26093 }
26094 }
26095
26096 /* Don't mess with the hard frame pointer. */
26097 if (frame_pointer_needed)
26098 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26099
26100 /* Don't mess with the fixed TOC register. */
26101 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26102 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26103 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26104 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26105
26106 /* The FPRs we need saved to the frame. */
26107 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26108 && (info->savres_strategy & REST_INLINE_FPRS))
26109 {
26110 int offset = info->fp_save_offset;
26111 if (info->push_p)
26112 offset += info->total_size;
26113
26114 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26115 {
26116 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26117 bitmap_set_bit (components, regno);
26118
26119 offset += fp_reg_size;
26120 }
26121 }
26122
26123 /* Optimize LR save and restore if we can. This is component 0. Any
26124 out-of-line register save/restore routines need LR. */
26125 if (info->lr_save_p
26126 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26127 && (info->savres_strategy & SAVE_INLINE_GPRS)
26128 && (info->savres_strategy & REST_INLINE_GPRS)
26129 && (info->savres_strategy & SAVE_INLINE_FPRS)
26130 && (info->savres_strategy & REST_INLINE_FPRS)
26131 && (info->savres_strategy & SAVE_INLINE_VRS)
26132 && (info->savres_strategy & REST_INLINE_VRS))
26133 {
26134 int offset = info->lr_save_offset;
26135 if (info->push_p)
26136 offset += info->total_size;
26137 if (IN_RANGE (offset, -0x8000, 0x7fff))
26138 bitmap_set_bit (components, 0);
26139 }
26140
26141 /* Optimize saving the TOC. This is component 2. */
26142 if (cfun->machine->save_toc_in_prologue)
26143 bitmap_set_bit (components, 2);
26144
26145 return components;
26146 }
26147
26148 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26149 static sbitmap
26150 rs6000_components_for_bb (basic_block bb)
26151 {
26152 rs6000_stack_t *info = rs6000_stack_info ();
26153
26154 bitmap in = DF_LIVE_IN (bb);
26155 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26156 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26157
26158 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26159 bitmap_clear (components);
26160
26161 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26162
26163 /* GPRs. */
26164 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26165 if (bitmap_bit_p (in, regno)
26166 || bitmap_bit_p (gen, regno)
26167 || bitmap_bit_p (kill, regno))
26168 bitmap_set_bit (components, regno);
26169
26170 /* FPRs. */
26171 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26172 if (bitmap_bit_p (in, regno)
26173 || bitmap_bit_p (gen, regno)
26174 || bitmap_bit_p (kill, regno))
26175 bitmap_set_bit (components, regno);
26176
26177 /* The link register. */
26178 if (bitmap_bit_p (in, LR_REGNO)
26179 || bitmap_bit_p (gen, LR_REGNO)
26180 || bitmap_bit_p (kill, LR_REGNO))
26181 bitmap_set_bit (components, 0);
26182
26183 /* The TOC save. */
26184 if (bitmap_bit_p (in, TOC_REGNUM)
26185 || bitmap_bit_p (gen, TOC_REGNUM)
26186 || bitmap_bit_p (kill, TOC_REGNUM))
26187 bitmap_set_bit (components, 2);
26188
26189 return components;
26190 }
26191
26192 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26193 static void
26194 rs6000_disqualify_components (sbitmap components, edge e,
26195 sbitmap edge_components, bool /*is_prologue*/)
26196 {
26197 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26198 live where we want to place that code. */
26199 if (bitmap_bit_p (edge_components, 0)
26200 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26201 {
26202 if (dump_file)
26203 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26204 "on entry to bb %d\n", e->dest->index);
26205 bitmap_clear_bit (components, 0);
26206 }
26207 }
26208
26209 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26210 static void
26211 rs6000_emit_prologue_components (sbitmap components)
26212 {
26213 rs6000_stack_t *info = rs6000_stack_info ();
26214 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26215 ? HARD_FRAME_POINTER_REGNUM
26216 : STACK_POINTER_REGNUM);
26217
26218 machine_mode reg_mode = Pmode;
26219 int reg_size = TARGET_32BIT ? 4 : 8;
26220 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26221 int fp_reg_size = 8;
26222
26223 /* Prologue for LR. */
26224 if (bitmap_bit_p (components, 0))
26225 {
26226 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26227 rtx reg = gen_rtx_REG (reg_mode, 0);
26228 rtx_insn *insn = emit_move_insn (reg, lr);
26229 RTX_FRAME_RELATED_P (insn) = 1;
26230 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26231
26232 int offset = info->lr_save_offset;
26233 if (info->push_p)
26234 offset += info->total_size;
26235
26236 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26237 RTX_FRAME_RELATED_P (insn) = 1;
26238 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26239 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26240 }
26241
26242 /* Prologue for TOC. */
26243 if (bitmap_bit_p (components, 2))
26244 {
26245 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26246 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26247 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26248 }
26249
26250 /* Prologue for the GPRs. */
26251 int offset = info->gp_save_offset;
26252 if (info->push_p)
26253 offset += info->total_size;
26254
26255 for (int i = info->first_gp_reg_save; i < 32; i++)
26256 {
26257 if (bitmap_bit_p (components, i))
26258 {
26259 rtx reg = gen_rtx_REG (reg_mode, i);
26260 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26261 RTX_FRAME_RELATED_P (insn) = 1;
26262 rtx set = copy_rtx (single_set (insn));
26263 add_reg_note (insn, REG_CFA_OFFSET, set);
26264 }
26265
26266 offset += reg_size;
26267 }
26268
26269 /* Prologue for the FPRs. */
26270 offset = info->fp_save_offset;
26271 if (info->push_p)
26272 offset += info->total_size;
26273
26274 for (int i = info->first_fp_reg_save; i < 64; i++)
26275 {
26276 if (bitmap_bit_p (components, i))
26277 {
26278 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26279 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26280 RTX_FRAME_RELATED_P (insn) = 1;
26281 rtx set = copy_rtx (single_set (insn));
26282 add_reg_note (insn, REG_CFA_OFFSET, set);
26283 }
26284
26285 offset += fp_reg_size;
26286 }
26287 }
26288
26289 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26290 static void
26291 rs6000_emit_epilogue_components (sbitmap components)
26292 {
26293 rs6000_stack_t *info = rs6000_stack_info ();
26294 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26295 ? HARD_FRAME_POINTER_REGNUM
26296 : STACK_POINTER_REGNUM);
26297
26298 machine_mode reg_mode = Pmode;
26299 int reg_size = TARGET_32BIT ? 4 : 8;
26300
26301 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26302 int fp_reg_size = 8;
26303
26304 /* Epilogue for the FPRs. */
26305 int offset = info->fp_save_offset;
26306 if (info->push_p)
26307 offset += info->total_size;
26308
26309 for (int i = info->first_fp_reg_save; i < 64; i++)
26310 {
26311 if (bitmap_bit_p (components, i))
26312 {
26313 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26314 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26315 RTX_FRAME_RELATED_P (insn) = 1;
26316 add_reg_note (insn, REG_CFA_RESTORE, reg);
26317 }
26318
26319 offset += fp_reg_size;
26320 }
26321
26322 /* Epilogue for the GPRs. */
26323 offset = info->gp_save_offset;
26324 if (info->push_p)
26325 offset += info->total_size;
26326
26327 for (int i = info->first_gp_reg_save; i < 32; i++)
26328 {
26329 if (bitmap_bit_p (components, i))
26330 {
26331 rtx reg = gen_rtx_REG (reg_mode, i);
26332 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26333 RTX_FRAME_RELATED_P (insn) = 1;
26334 add_reg_note (insn, REG_CFA_RESTORE, reg);
26335 }
26336
26337 offset += reg_size;
26338 }
26339
26340 /* Epilogue for LR. */
26341 if (bitmap_bit_p (components, 0))
26342 {
26343 int offset = info->lr_save_offset;
26344 if (info->push_p)
26345 offset += info->total_size;
26346
26347 rtx reg = gen_rtx_REG (reg_mode, 0);
26348 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26349
26350 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26351 insn = emit_move_insn (lr, reg);
26352 RTX_FRAME_RELATED_P (insn) = 1;
26353 add_reg_note (insn, REG_CFA_RESTORE, lr);
26354 }
26355 }
26356
26357 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26358 static void
26359 rs6000_set_handled_components (sbitmap components)
26360 {
26361 rs6000_stack_t *info = rs6000_stack_info ();
26362
26363 for (int i = info->first_gp_reg_save; i < 32; i++)
26364 if (bitmap_bit_p (components, i))
26365 cfun->machine->gpr_is_wrapped_separately[i] = true;
26366
26367 for (int i = info->first_fp_reg_save; i < 64; i++)
26368 if (bitmap_bit_p (components, i))
26369 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26370
26371 if (bitmap_bit_p (components, 0))
26372 cfun->machine->lr_is_wrapped_separately = true;
26373
26374 if (bitmap_bit_p (components, 2))
26375 cfun->machine->toc_is_wrapped_separately = true;
26376 }
26377
26378 /* VRSAVE is a bit vector representing which AltiVec registers
26379 are used. The OS uses this to determine which vector
26380 registers to save on a context switch. We need to save
26381 VRSAVE on the stack frame, add whatever AltiVec registers we
26382 used in this function, and do the corresponding magic in the
26383 epilogue. */
26384 static void
26385 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26386 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26387 {
26388 /* Get VRSAVE into a GPR. */
26389 rtx reg = gen_rtx_REG (SImode, save_regno);
26390 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26391 if (TARGET_MACHO)
26392 emit_insn (gen_get_vrsave_internal (reg));
26393 else
26394 emit_insn (gen_rtx_SET (reg, vrsave));
26395
26396 /* Save VRSAVE. */
26397 int offset = info->vrsave_save_offset + frame_off;
26398 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26399
26400 /* Include the registers in the mask. */
26401 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26402
26403 emit_insn (generate_set_vrsave (reg, info, 0));
26404 }
26405
26406 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26407 called, it left the arg pointer to the old stack in r29. Otherwise, the
26408 arg pointer is the top of the current frame. */
26409 static void
26410 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26411 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26412 {
26413 cfun->machine->split_stack_argp_used = true;
26414
26415 if (sp_adjust)
26416 {
26417 rtx r12 = gen_rtx_REG (Pmode, 12);
26418 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26419 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26420 emit_insn_before (set_r12, sp_adjust);
26421 }
26422 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26423 {
26424 rtx r12 = gen_rtx_REG (Pmode, 12);
26425 if (frame_off == 0)
26426 emit_move_insn (r12, frame_reg_rtx);
26427 else
26428 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26429 }
26430
26431 if (info->push_p)
26432 {
26433 rtx r12 = gen_rtx_REG (Pmode, 12);
26434 rtx r29 = gen_rtx_REG (Pmode, 29);
26435 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26436 rtx not_more = gen_label_rtx ();
26437 rtx jump;
26438
26439 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26440 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26441 gen_rtx_LABEL_REF (VOIDmode, not_more),
26442 pc_rtx);
26443 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26444 JUMP_LABEL (jump) = not_more;
26445 LABEL_NUSES (not_more) += 1;
26446 emit_move_insn (r12, r29);
26447 emit_label (not_more);
26448 }
26449 }
26450
26451 /* Emit function prologue as insns. */
26452
26453 void
26454 rs6000_emit_prologue (void)
26455 {
26456 rs6000_stack_t *info = rs6000_stack_info ();
26457 machine_mode reg_mode = Pmode;
26458 int reg_size = TARGET_32BIT ? 4 : 8;
26459 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26460 int fp_reg_size = 8;
26461 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26462 rtx frame_reg_rtx = sp_reg_rtx;
26463 unsigned int cr_save_regno;
26464 rtx cr_save_rtx = NULL_RTX;
26465 rtx_insn *insn;
26466 int strategy;
26467 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26468 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26469 && call_used_regs[STATIC_CHAIN_REGNUM]);
26470 int using_split_stack = (flag_split_stack
26471 && (lookup_attribute ("no_split_stack",
26472 DECL_ATTRIBUTES (cfun->decl))
26473 == NULL));
26474
26475 /* Offset to top of frame for frame_reg and sp respectively. */
26476 HOST_WIDE_INT frame_off = 0;
26477 HOST_WIDE_INT sp_off = 0;
26478 /* sp_adjust is the stack adjusting instruction, tracked so that the
26479 insn setting up the split-stack arg pointer can be emitted just
26480 prior to it, when r12 is not used here for other purposes. */
26481 rtx_insn *sp_adjust = 0;
26482
26483 #if CHECKING_P
26484 /* Track and check usage of r0, r11, r12. */
26485 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26486 #define START_USE(R) do \
26487 { \
26488 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26489 reg_inuse |= 1 << (R); \
26490 } while (0)
26491 #define END_USE(R) do \
26492 { \
26493 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26494 reg_inuse &= ~(1 << (R)); \
26495 } while (0)
26496 #define NOT_INUSE(R) do \
26497 { \
26498 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26499 } while (0)
26500 #else
26501 #define START_USE(R) do {} while (0)
26502 #define END_USE(R) do {} while (0)
26503 #define NOT_INUSE(R) do {} while (0)
26504 #endif
26505
26506 if (DEFAULT_ABI == ABI_ELFv2
26507 && !TARGET_SINGLE_PIC_BASE)
26508 {
26509 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26510
26511 /* With -mminimal-toc we may generate an extra use of r2 below. */
26512 if (TARGET_TOC && TARGET_MINIMAL_TOC
26513 && !constant_pool_empty_p ())
26514 cfun->machine->r2_setup_needed = true;
26515 }
26516
26517
26518 if (flag_stack_usage_info)
26519 current_function_static_stack_size = info->total_size;
26520
26521 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26522 {
26523 HOST_WIDE_INT size = info->total_size;
26524
26525 if (crtl->is_leaf && !cfun->calls_alloca)
26526 {
26527 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26528 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26529 size - get_stack_check_protect ());
26530 }
26531 else if (size > 0)
26532 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26533 }
26534
26535 if (TARGET_FIX_AND_CONTINUE)
26536 {
26537 /* GDB on Darwin arranges to forward a function from the old
26538 address by modifying the first 5 instructions of the function
26539 to branch to the overriding function. This is necessary to
26540 permit function pointers that point to the old function to
26541 actually forward to the new function. */
26542 emit_insn (gen_nop ());
26543 emit_insn (gen_nop ());
26544 emit_insn (gen_nop ());
26545 emit_insn (gen_nop ());
26546 emit_insn (gen_nop ());
26547 }
26548
26549 /* Handle world saves specially here. */
26550 if (WORLD_SAVE_P (info))
26551 {
26552 int i, j, sz;
26553 rtx treg;
26554 rtvec p;
26555 rtx reg0;
26556
26557 /* save_world expects lr in r0. */
26558 reg0 = gen_rtx_REG (Pmode, 0);
26559 if (info->lr_save_p)
26560 {
26561 insn = emit_move_insn (reg0,
26562 gen_rtx_REG (Pmode, LR_REGNO));
26563 RTX_FRAME_RELATED_P (insn) = 1;
26564 }
26565
26566 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26567 assumptions about the offsets of various bits of the stack
26568 frame. */
26569 gcc_assert (info->gp_save_offset == -220
26570 && info->fp_save_offset == -144
26571 && info->lr_save_offset == 8
26572 && info->cr_save_offset == 4
26573 && info->push_p
26574 && info->lr_save_p
26575 && (!crtl->calls_eh_return
26576 || info->ehrd_offset == -432)
26577 && info->vrsave_save_offset == -224
26578 && info->altivec_save_offset == -416);
26579
26580 treg = gen_rtx_REG (SImode, 11);
26581 emit_move_insn (treg, GEN_INT (-info->total_size));
26582
26583 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26584 in R11. It also clobbers R12, so beware! */
26585
26586 /* Preserve CR2 for save_world prologues. */
26587 sz = 5;
26588 sz += 32 - info->first_gp_reg_save;
26589 sz += 64 - info->first_fp_reg_save;
26590 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26591 p = rtvec_alloc (sz);
26592 j = 0;
26593 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
26594 gen_rtx_REG (SImode,
26595 LR_REGNO));
26596 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26597 gen_rtx_SYMBOL_REF (Pmode,
26598 "*save_world"));
26599 /* We do floats first so that the instruction pattern matches
26600 properly. */
26601 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26602 RTVEC_ELT (p, j++)
26603 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26604 info->first_fp_reg_save + i),
26605 frame_reg_rtx,
26606 info->fp_save_offset + frame_off + 8 * i);
26607 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26608 RTVEC_ELT (p, j++)
26609 = gen_frame_store (gen_rtx_REG (V4SImode,
26610 info->first_altivec_reg_save + i),
26611 frame_reg_rtx,
26612 info->altivec_save_offset + frame_off + 16 * i);
26613 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26614 RTVEC_ELT (p, j++)
26615 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26616 frame_reg_rtx,
26617 info->gp_save_offset + frame_off + reg_size * i);
26618
26619 /* CR register traditionally saved as CR2. */
26620 RTVEC_ELT (p, j++)
26621 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26622 frame_reg_rtx, info->cr_save_offset + frame_off);
26623 /* Explain the use of R0. */
26624 if (info->lr_save_p)
26625 RTVEC_ELT (p, j++)
26626 = gen_frame_store (reg0,
26627 frame_reg_rtx, info->lr_save_offset + frame_off);
26628 /* Explain what happens to the stack pointer. */
26629 {
26630 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26631 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26632 }
26633
26634 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26635 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26636 treg, GEN_INT (-info->total_size));
26637 sp_off = frame_off = info->total_size;
26638 }
26639
26640 strategy = info->savres_strategy;
26641
26642 /* For V.4, update stack before we do any saving and set back pointer. */
26643 if (! WORLD_SAVE_P (info)
26644 && info->push_p
26645 && (DEFAULT_ABI == ABI_V4
26646 || crtl->calls_eh_return))
26647 {
26648 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26649 || !(strategy & SAVE_INLINE_GPRS)
26650 || !(strategy & SAVE_INLINE_VRS));
26651 int ptr_regno = -1;
26652 rtx ptr_reg = NULL_RTX;
26653 int ptr_off = 0;
26654
26655 if (info->total_size < 32767)
26656 frame_off = info->total_size;
26657 else if (need_r11)
26658 ptr_regno = 11;
26659 else if (info->cr_save_p
26660 || info->lr_save_p
26661 || info->first_fp_reg_save < 64
26662 || info->first_gp_reg_save < 32
26663 || info->altivec_size != 0
26664 || info->vrsave_size != 0
26665 || crtl->calls_eh_return)
26666 ptr_regno = 12;
26667 else
26668 {
26669 /* The prologue won't be saving any regs so there is no need
26670 to set up a frame register to access any frame save area.
26671 We also won't be using frame_off anywhere below, but set
26672 the correct value anyway to protect against future
26673 changes to this function. */
26674 frame_off = info->total_size;
26675 }
26676 if (ptr_regno != -1)
26677 {
26678 /* Set up the frame offset to that needed by the first
26679 out-of-line save function. */
26680 START_USE (ptr_regno);
26681 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26682 frame_reg_rtx = ptr_reg;
26683 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26684 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26685 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26686 ptr_off = info->gp_save_offset + info->gp_size;
26687 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26688 ptr_off = info->altivec_save_offset + info->altivec_size;
26689 frame_off = -ptr_off;
26690 }
26691 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26692 ptr_reg, ptr_off);
26693 if (REGNO (frame_reg_rtx) == 12)
26694 sp_adjust = 0;
26695 sp_off = info->total_size;
26696 if (frame_reg_rtx != sp_reg_rtx)
26697 rs6000_emit_stack_tie (frame_reg_rtx, false);
26698 }
26699
26700 /* If we use the link register, get it into r0. */
26701 if (!WORLD_SAVE_P (info) && info->lr_save_p
26702 && !cfun->machine->lr_is_wrapped_separately)
26703 {
26704 rtx addr, reg, mem;
26705
26706 reg = gen_rtx_REG (Pmode, 0);
26707 START_USE (0);
26708 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26709 RTX_FRAME_RELATED_P (insn) = 1;
26710
26711 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26712 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26713 {
26714 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26715 GEN_INT (info->lr_save_offset + frame_off));
26716 mem = gen_rtx_MEM (Pmode, addr);
26717 /* This should not be of rs6000_sr_alias_set, because of
26718 __builtin_return_address. */
26719
26720 insn = emit_move_insn (mem, reg);
26721 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26722 NULL_RTX, NULL_RTX);
26723 END_USE (0);
26724 }
26725 }
26726
26727 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26728 r12 will be needed by out-of-line gpr save. */
26729 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26730 && !(strategy & (SAVE_INLINE_GPRS
26731 | SAVE_NOINLINE_GPRS_SAVES_LR))
26732 ? 11 : 12);
26733 if (!WORLD_SAVE_P (info)
26734 && info->cr_save_p
26735 && REGNO (frame_reg_rtx) != cr_save_regno
26736 && !(using_static_chain_p && cr_save_regno == 11)
26737 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26738 {
26739 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26740 START_USE (cr_save_regno);
26741 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26742 }
26743
26744 /* Do any required saving of FPRs. If only one or two need saving,
26745 do it ourselves. Otherwise, call an out-of-line routine. */
26746 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26747 {
26748 int offset = info->fp_save_offset + frame_off;
26749 for (int i = info->first_fp_reg_save; i < 64; i++)
26750 {
26751 if (save_reg_p (i)
26752 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26753 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26754 sp_off - frame_off);
26755
26756 offset += fp_reg_size;
26757 }
26758 }
26759 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26760 {
26761 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26762 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26763 unsigned ptr_regno = ptr_regno_for_savres (sel);
26764 rtx ptr_reg = frame_reg_rtx;
26765
26766 if (REGNO (frame_reg_rtx) == ptr_regno)
26767 gcc_checking_assert (frame_off == 0);
26768 else
26769 {
26770 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26771 NOT_INUSE (ptr_regno);
26772 emit_insn (gen_add3_insn (ptr_reg,
26773 frame_reg_rtx, GEN_INT (frame_off)));
26774 }
26775 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26776 info->fp_save_offset,
26777 info->lr_save_offset,
26778 DFmode, sel);
26779 rs6000_frame_related (insn, ptr_reg, sp_off,
26780 NULL_RTX, NULL_RTX);
26781 if (lr)
26782 END_USE (0);
26783 }
26784
26785 /* Save GPRs. This is done as a PARALLEL if we are using
26786 the store-multiple instructions. */
26787 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26788 {
26789 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26790 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26791 unsigned ptr_regno = ptr_regno_for_savres (sel);
26792 rtx ptr_reg = frame_reg_rtx;
26793 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26794 int end_save = info->gp_save_offset + info->gp_size;
26795 int ptr_off;
26796
26797 if (ptr_regno == 12)
26798 sp_adjust = 0;
26799 if (!ptr_set_up)
26800 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26801
26802 /* Need to adjust r11 (r12) if we saved any FPRs. */
26803 if (end_save + frame_off != 0)
26804 {
26805 rtx offset = GEN_INT (end_save + frame_off);
26806
26807 if (ptr_set_up)
26808 frame_off = -end_save;
26809 else
26810 NOT_INUSE (ptr_regno);
26811 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26812 }
26813 else if (!ptr_set_up)
26814 {
26815 NOT_INUSE (ptr_regno);
26816 emit_move_insn (ptr_reg, frame_reg_rtx);
26817 }
26818 ptr_off = -end_save;
26819 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26820 info->gp_save_offset + ptr_off,
26821 info->lr_save_offset + ptr_off,
26822 reg_mode, sel);
26823 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26824 NULL_RTX, NULL_RTX);
26825 if (lr)
26826 END_USE (0);
26827 }
26828 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26829 {
26830 rtvec p;
26831 int i;
26832 p = rtvec_alloc (32 - info->first_gp_reg_save);
26833 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26834 RTVEC_ELT (p, i)
26835 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26836 frame_reg_rtx,
26837 info->gp_save_offset + frame_off + reg_size * i);
26838 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26839 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26840 NULL_RTX, NULL_RTX);
26841 }
26842 else if (!WORLD_SAVE_P (info))
26843 {
26844 int offset = info->gp_save_offset + frame_off;
26845 for (int i = info->first_gp_reg_save; i < 32; i++)
26846 {
26847 if (save_reg_p (i)
26848 && !cfun->machine->gpr_is_wrapped_separately[i])
26849 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26850 sp_off - frame_off);
26851
26852 offset += reg_size;
26853 }
26854 }
26855
26856 if (crtl->calls_eh_return)
26857 {
26858 unsigned int i;
26859 rtvec p;
26860
26861 for (i = 0; ; ++i)
26862 {
26863 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26864 if (regno == INVALID_REGNUM)
26865 break;
26866 }
26867
26868 p = rtvec_alloc (i);
26869
26870 for (i = 0; ; ++i)
26871 {
26872 unsigned int regno = EH_RETURN_DATA_REGNO (i);
26873 if (regno == INVALID_REGNUM)
26874 break;
26875
26876 rtx set
26877 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
26878 sp_reg_rtx,
26879 info->ehrd_offset + sp_off + reg_size * (int) i);
26880 RTVEC_ELT (p, i) = set;
26881 RTX_FRAME_RELATED_P (set) = 1;
26882 }
26883
26884 insn = emit_insn (gen_blockage ());
26885 RTX_FRAME_RELATED_P (insn) = 1;
26886 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
26887 }
26888
26889 /* In the AIX ABI we need to make sure r2 is really saved. */
26890 if (TARGET_AIX && crtl->calls_eh_return)
26891 {
26892 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
26893 rtx join_insn, note;
26894 rtx_insn *save_insn;
26895 long toc_restore_insn;
26896
26897 tmp_reg = gen_rtx_REG (Pmode, 11);
26898 tmp_reg_si = gen_rtx_REG (SImode, 11);
26899 if (using_static_chain_p)
26900 {
26901 START_USE (0);
26902 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
26903 }
26904 else
26905 START_USE (11);
26906 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
26907 /* Peek at the instruction to which this function returns. If it's
26908 restoring r2, then we know we've already saved r2. We can't
26909 unconditionally save r2 because the value we have will already
26910 be updated if we arrived at this function via a plt call or
26911 toc adjusting stub. */
26912 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
26913 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
26914 + RS6000_TOC_SAVE_SLOT);
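/* For example, under ELFv2 the TOC save slot is normally 24, giving
0xE8410018, i.e. "ld 2,24(1)"; on 32-bit AIX (slot 20) this is
0x80410014, i.e. "lwz 2,20(1)". The slot values are assumptions
based on the usual RS6000_TOC_SAVE_SLOT definitions. */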
26915 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
26916 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
26917 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
26918 validate_condition_mode (EQ, CCUNSmode);
26919 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
26920 emit_insn (gen_rtx_SET (compare_result,
26921 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
26922 toc_save_done = gen_label_rtx ();
26923 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26924 gen_rtx_EQ (VOIDmode, compare_result,
26925 const0_rtx),
26926 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
26927 pc_rtx);
26928 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26929 JUMP_LABEL (jump) = toc_save_done;
26930 LABEL_NUSES (toc_save_done) += 1;
26931
26932 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
26933 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
26934 sp_off - frame_off);
26935
26936 emit_label (toc_save_done);
26937
26938 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
26939 have a CFG that has different saves along different paths.
26940 Move the note to a dummy blockage insn, which describes that
26941 R2 is unconditionally saved after the label. */
26942 /* ??? An alternate representation might be a special insn pattern
26943 containing both the branch and the store. That might give the
26944 code that minimizes the number of DW_CFA_advance opcodes more
26945 freedom in placing the annotations. */
26946 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
26947 if (note)
26948 remove_note (save_insn, note);
26949 else
26950 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
26951 copy_rtx (PATTERN (save_insn)), NULL_RTX);
26952 RTX_FRAME_RELATED_P (save_insn) = 0;
26953
26954 join_insn = emit_insn (gen_blockage ());
26955 REG_NOTES (join_insn) = note;
26956 RTX_FRAME_RELATED_P (join_insn) = 1;
26957
26958 if (using_static_chain_p)
26959 {
26960 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
26961 END_USE (0);
26962 }
26963 else
26964 END_USE (11);
26965 }
26966
26967 /* Save CR if we use any that must be preserved. */
26968 if (!WORLD_SAVE_P (info) && info->cr_save_p)
26969 {
26970 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26971 GEN_INT (info->cr_save_offset + frame_off));
26972 rtx mem = gen_frame_mem (SImode, addr);
26973
26974 /* If we didn't copy cr before, do so now using r0. */
26975 if (cr_save_rtx == NULL_RTX)
26976 {
26977 START_USE (0);
26978 cr_save_rtx = gen_rtx_REG (SImode, 0);
26979 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26980 }
26981
26982 /* Saving CR requires a two-instruction sequence: one instruction
26983 to move the CR to a general-purpose register, and a second
26984 instruction that stores the GPR to memory.
26985
26986 We do not emit any DWARF CFI records for the first of these,
26987 because we cannot properly represent the fact that CR is saved in
26988 a register. One reason is that we cannot express that multiple
26989 CR fields are saved; another reason is that on 64-bit, the size
26990 of the CR register in DWARF (4 bytes) differs from the size of
26991 a general-purpose register.
26992
26993 This means if any intervening instruction were to clobber one of
26994 the call-saved CR fields, we'd have incorrect CFI. To prevent
26995 this from happening, we mark the store to memory as a use of
26996 those CR fields, which prevents any such instruction from being
26997 scheduled in between the two instructions. */
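/* A sketch of the resulting pattern when CR2 and CR3 must be saved
and the CR copy lives in, say, r12 (CR0 is hard register 68 here,
so CR2 and CR3 are 70 and 71):

(parallel [(set (mem:SI ...) (reg:SI 12))
(use (reg:CC 70))
(use (reg:CC 71))])

The USEs make the store depend on those CR fields, so nothing that
writes them can be scheduled between the mfcr and the store. */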
26998 rtx crsave_v[9];
26999 int n_crsave = 0;
27000 int i;
27001
27002 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27003 for (i = 0; i < 8; i++)
27004 if (save_reg_p (CR0_REGNO + i))
27005 crsave_v[n_crsave++]
27006 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27007
27008 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27009 gen_rtvec_v (n_crsave, crsave_v)));
27010 END_USE (REGNO (cr_save_rtx));
27011
27012 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27013 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27014 so we need to construct a frame expression manually. */
27015 RTX_FRAME_RELATED_P (insn) = 1;
27016
27017 /* Update address to be stack-pointer relative, like
27018 rs6000_frame_related would do. */
27019 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27020 GEN_INT (info->cr_save_offset + sp_off));
27021 mem = gen_frame_mem (SImode, addr);
27022
27023 if (DEFAULT_ABI == ABI_ELFv2)
27024 {
27025 /* In the ELFv2 ABI we generate separate CFI records for each
27026 CR field that was actually saved. They all point to the
27027 same 32-bit stack slot. */
27028 rtx crframe[8];
27029 int n_crframe = 0;
27030
27031 for (i = 0; i < 8; i++)
27032 if (save_reg_p (CR0_REGNO + i))
27033 {
27034 crframe[n_crframe]
27035 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27036
27037 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27038 n_crframe++;
27039 }
27040
27041 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27042 gen_rtx_PARALLEL (VOIDmode,
27043 gen_rtvec_v (n_crframe, crframe)));
27044 }
27045 else
27046 {
27047 /* In other ABIs, by convention, we use a single CR regnum to
27048 represent the fact that all call-saved CR fields are saved.
27049 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27050 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27051 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27052 }
27053 }
27054
27055 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27056 *separate* slots if the routine calls __builtin_eh_return, so
27057 that they can be independently restored by the unwinder. */
27058 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27059 {
27060 int i, cr_off = info->ehcr_offset;
27061 rtx crsave;
27062
27063 /* ??? We might get better performance by using multiple mfocrf
27064 instructions. */
27065 crsave = gen_rtx_REG (SImode, 0);
27066 emit_insn (gen_prologue_movesi_from_cr (crsave));
27067
27068 for (i = 0; i < 8; i++)
27069 if (!call_used_regs[CR0_REGNO + i])
27070 {
27071 rtvec p = rtvec_alloc (2);
27072 RTVEC_ELT (p, 0)
27073 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27074 RTVEC_ELT (p, 1)
27075 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27076
27077 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27078
27079 RTX_FRAME_RELATED_P (insn) = 1;
27080 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27081 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27082 sp_reg_rtx, cr_off + sp_off));
27083
27084 cr_off += reg_size;
27085 }
27086 }
27087
27088 /* If we are emitting stack probes, but allocate no stack, then
27089 just note that in the dump file. */
27090 if (flag_stack_clash_protection
27091 && dump_file
27092 && !info->push_p)
27093 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27094
27095 /* Update stack and set back pointer unless this is V.4,
27096 for which it was done previously. */
27097 if (!WORLD_SAVE_P (info) && info->push_p
27098 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27099 {
27100 rtx ptr_reg = NULL;
27101 int ptr_off = 0;
27102
27103 /* If saving AltiVec regs, we need to be able to address all save
27104 locations using a 16-bit offset. */
27105 if ((strategy & SAVE_INLINE_VRS) == 0
27106 || (info->altivec_size != 0
27107 && (info->altivec_save_offset + info->altivec_size - 16
27108 + info->total_size - frame_off) > 32767)
27109 || (info->vrsave_size != 0
27110 && (info->vrsave_save_offset
27111 + info->total_size - frame_off) > 32767))
27112 {
27113 int sel = SAVRES_SAVE | SAVRES_VR;
27114 unsigned ptr_regno = ptr_regno_for_savres (sel);
27115
27116 if (using_static_chain_p
27117 && ptr_regno == STATIC_CHAIN_REGNUM)
27118 ptr_regno = 12;
27119 if (REGNO (frame_reg_rtx) != ptr_regno)
27120 START_USE (ptr_regno);
27121 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27122 frame_reg_rtx = ptr_reg;
27123 ptr_off = info->altivec_save_offset + info->altivec_size;
27124 frame_off = -ptr_off;
27125 }
27126 else if (REGNO (frame_reg_rtx) == 1)
27127 frame_off = info->total_size;
27128 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27129 ptr_reg, ptr_off);
27130 if (REGNO (frame_reg_rtx) == 12)
27131 sp_adjust = 0;
27132 sp_off = info->total_size;
27133 if (frame_reg_rtx != sp_reg_rtx)
27134 rs6000_emit_stack_tie (frame_reg_rtx, false);
27135 }
27136
27137 /* Set frame pointer, if needed. */
27138 if (frame_pointer_needed)
27139 {
27140 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27141 sp_reg_rtx);
27142 RTX_FRAME_RELATED_P (insn) = 1;
27143 }
27144
27145 /* Save AltiVec registers if needed. Save here because the red zone does
27146 not always include AltiVec registers. */
27147 if (!WORLD_SAVE_P (info)
27148 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27149 {
27150 int end_save = info->altivec_save_offset + info->altivec_size;
27151 int ptr_off;
27152 /* Oddly, the vector save/restore functions point r0 at the end
27153 of the save area, then use r11 or r12 to load offsets for
27154 [reg+reg] addressing. */
27155 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27156 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27157 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27158
27159 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27160 NOT_INUSE (0);
27161 if (scratch_regno == 12)
27162 sp_adjust = 0;
27163 if (end_save + frame_off != 0)
27164 {
27165 rtx offset = GEN_INT (end_save + frame_off);
27166
27167 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27168 }
27169 else
27170 emit_move_insn (ptr_reg, frame_reg_rtx);
27171
27172 ptr_off = -end_save;
27173 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27174 info->altivec_save_offset + ptr_off,
27175 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27176 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27177 NULL_RTX, NULL_RTX);
27178 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27179 {
27180 /* The oddity mentioned above clobbered our frame reg. */
27181 emit_move_insn (frame_reg_rtx, ptr_reg);
27182 frame_off = ptr_off;
27183 }
27184 }
27185 else if (!WORLD_SAVE_P (info)
27186 && info->altivec_size != 0)
27187 {
27188 int i;
27189
27190 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27191 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27192 {
27193 rtx areg, savereg, mem;
27194 HOST_WIDE_INT offset;
27195
27196 offset = (info->altivec_save_offset + frame_off
27197 + 16 * (i - info->first_altivec_reg_save));
27198
27199 savereg = gen_rtx_REG (V4SImode, i);
27200
27201 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27202 {
27203 mem = gen_frame_mem (V4SImode,
27204 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27205 GEN_INT (offset)));
27206 insn = emit_insn (gen_rtx_SET (mem, savereg));
27207 areg = NULL_RTX;
27208 }
27209 else
27210 {
27211 NOT_INUSE (0);
27212 areg = gen_rtx_REG (Pmode, 0);
27213 emit_move_insn (areg, GEN_INT (offset));
27214
27215 /* AltiVec addressing mode is [reg+reg]. */
27216 mem = gen_frame_mem (V4SImode,
27217 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27218
27219 /* Rather than emitting a generic move, force use of the stvx
27220 instruction, which we always want on ISA 2.07 (power8) systems.
27221 In particular we don't want xxpermdi/stxvd2x for little
27222 endian. */
27223 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27224 }
27225
27226 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27227 areg, GEN_INT (offset));
27228 }
27229 }
27230
27231 /* VRSAVE is a bit vector representing which AltiVec registers
27232 are used. The OS uses this to determine which vector
27233 registers to save on a context switch. We need to save
27234 VRSAVE on the stack frame, add whatever AltiVec registers we
27235 used in this function, and do the corresponding magic in the
27236 epilogue. */
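/* (As a reminder of the VRSAVE layout assumed here: the most
significant bit corresponds to v0 and the least significant bit to
v31, matching ALTIVEC_REG_BIT.)  */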
27237
27238 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27239 {
27240 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27241 be using r12 as frame_reg_rtx and r11 as the static chain
27242 pointer for nested functions. */
27243 int save_regno = 12;
27244 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27245 && !using_static_chain_p)
27246 save_regno = 11;
27247 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27248 {
27249 save_regno = 11;
27250 if (using_static_chain_p)
27251 save_regno = 0;
27252 }
27253 NOT_INUSE (save_regno);
27254
27255 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27256 }
27257
27258 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27259 if (!TARGET_SINGLE_PIC_BASE
27260 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27261 && !constant_pool_empty_p ())
27262 || (DEFAULT_ABI == ABI_V4
27263 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27264 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27265 {
27266 /* If emit_load_toc_table will use the link register, we need to save
27267 it. We use R12 for this purpose because emit_load_toc_table
27268 can use register 0. This allows us to use a plain 'blr' to return
27269 from the procedure more often. */
27270 int save_LR_around_toc_setup = (TARGET_ELF
27271 && DEFAULT_ABI == ABI_V4
27272 && flag_pic
27273 && ! info->lr_save_p
27274 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27275 if (save_LR_around_toc_setup)
27276 {
27277 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27278 rtx tmp = gen_rtx_REG (Pmode, 12);
27279
27280 sp_adjust = 0;
27281 insn = emit_move_insn (tmp, lr);
27282 RTX_FRAME_RELATED_P (insn) = 1;
27283
27284 rs6000_emit_load_toc_table (TRUE);
27285
27286 insn = emit_move_insn (lr, tmp);
27287 add_reg_note (insn, REG_CFA_RESTORE, lr);
27288 RTX_FRAME_RELATED_P (insn) = 1;
27289 }
27290 else
27291 rs6000_emit_load_toc_table (TRUE);
27292 }
27293
27294 #if TARGET_MACHO
27295 if (!TARGET_SINGLE_PIC_BASE
27296 && DEFAULT_ABI == ABI_DARWIN
27297 && flag_pic && crtl->uses_pic_offset_table)
27298 {
27299 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27300 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27301
27302 /* Save and restore LR locally around this call (in R0). */
27303 if (!info->lr_save_p)
27304 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27305
27306 emit_insn (gen_load_macho_picbase (src));
27307
27308 emit_move_insn (gen_rtx_REG (Pmode,
27309 RS6000_PIC_OFFSET_TABLE_REGNUM),
27310 lr);
27311
27312 if (!info->lr_save_p)
27313 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27314 }
27315 #endif
27316
27317 /* If we need to, save the TOC register after doing the stack setup.
27318 Do not emit eh frame info for this save. The unwinder wants info,
27319 conceptually attached to instructions in this function, about
27320 register values in the caller of this function. This R2 may have
27321 already been changed from the value in the caller.
27322 We don't attempt to write accurate DWARF EH frame info for R2
27323 because code emitted by gcc for a (non-pointer) function call
27324 doesn't save and restore R2. Instead, R2 is managed out-of-line
27325 by a linker generated plt call stub when the function resides in
27326 a shared library. This behavior is costly to describe in DWARF,
27327 both in terms of the size of DWARF info and the time taken in the
27328 unwinder to interpret it. R2 changes, apart from the
27329 calls_eh_return case earlier in this function, are handled by
27330 linux-unwind.h frob_update_context. */
27331 if (rs6000_save_toc_in_prologue_p ()
27332 && !cfun->machine->toc_is_wrapped_separately)
27333 {
27334 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27335 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27336 }
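/* RS6000_TOC_SAVE_SLOT is the ABI-defined TOC save offset in the
frame -- conventionally sp+24 under ELFv2 and sp+40 under the 64-bit
AIX/ELFv1 ABIs.  */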
27337
27338 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27339 if (using_split_stack && split_stack_arg_pointer_used_p ())
27340 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27341 }
27342
27343 /* Output .extern statements for the save/restore routines we use. */
27344
27345 static void
27346 rs6000_output_savres_externs (FILE *file)
27347 {
27348 rs6000_stack_t *info = rs6000_stack_info ();
27349
27350 if (TARGET_DEBUG_STACK)
27351 debug_stack_info (info);
27352
27353 /* Write .extern for any function we will call to save and restore
27354 fp values. */
27355 if (info->first_fp_reg_save < 64
27356 && !TARGET_MACHO
27357 && !TARGET_ELF)
27358 {
27359 char *name;
27360 int regno = info->first_fp_reg_save - 32;
27361
27362 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27363 {
27364 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27365 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27366 name = rs6000_savres_routine_name (regno, sel);
27367 fprintf (file, "\t.extern %s\n", name);
27368 }
27369 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27370 {
27371 bool lr = (info->savres_strategy
27372 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27373 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27374 name = rs6000_savres_routine_name (regno, sel);
27375 fprintf (file, "\t.extern %s\n", name);
27376 }
27377 }
27378 }
27379
27380 /* Write function prologue. */
27381
27382 static void
27383 rs6000_output_function_prologue (FILE *file)
27384 {
27385 if (!cfun->is_thunk)
27386 rs6000_output_savres_externs (file);
27387
27388 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27389 immediately after the global entry point label. */
27390 if (rs6000_global_entry_point_needed_p ())
27391 {
27392 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27393
27394 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27395
27396 if (TARGET_CMODEL != CMODEL_LARGE)
27397 {
27398 /* In the small and medium code models, we assume the TOC is less
27399 than 2 GB away from the text section, so it can be computed via the
27400 following two-instruction sequence. */
27401 char buf[256];
27402
27403 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27404 fprintf (file, "0:\taddis 2,12,.TOC.-");
27405 assemble_name (file, buf);
27406 fprintf (file, "@ha\n");
27407 fprintf (file, "\taddi 2,2,.TOC.-");
27408 assemble_name (file, buf);
27409 fprintf (file, "@l\n");
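/* With internal label .LCF0, for example, the sequence emitted above
reads:
0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l
relying on r12 holding the global entry point address, as the ELFv2
ABI requires at function entry.  */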
27410 }
27411 else
27412 {
27413 /* In the large code model, we allow arbitrary offsets between the
27414 TOC and the text section, so we have to load the offset from
27415 memory. The data field is emitted directly before the global
27416 entry point in rs6000_elf_declare_function_name. */
27417 char buf[256];
27418
27419 #ifdef HAVE_AS_ENTRY_MARKERS
27420 /* If supported by the linker, emit a marker relocation. If the
27421 total code size of the final executable or shared library
27422 happens to fit into 2 GB after all, the linker will replace
27423 this code sequence with the sequence for the small or medium
27424 code model. */
27425 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27426 #endif
27427 fprintf (file, "\tld 2,");
27428 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27429 assemble_name (file, buf);
27430 fprintf (file, "-");
27431 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27432 assemble_name (file, buf);
27433 fprintf (file, "(12)\n");
27434 fprintf (file, "\tadd 2,2,12\n");
27435 }
27436
27437 fputs ("\t.localentry\t", file);
27438 assemble_name (file, name);
27439 fputs (",.-", file);
27440 assemble_name (file, name);
27441 fputs ("\n", file);
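/* For a function named foo this produces ".localentry foo,.-foo",
recording the distance between the global and local entry points
(encoded in the symbol's st_other field).  */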
27442 }
27443
27444 /* Output -mprofile-kernel code. This needs to be done here instead of
27445 in output_function_profile since it must go after the ELFv2 ABI
27446 local entry point. */
27447 if (TARGET_PROFILE_KERNEL && crtl->profile)
27448 {
27449 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27450 gcc_assert (!TARGET_32BIT);
27451
27452 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27453
27454 /* In the ELFv2 ABI we have no compiler stack word. It must be
27455 the responsibility of _mcount to preserve the static chain
27456 register if required. */
27457 if (DEFAULT_ABI != ABI_ELFv2
27458 && cfun->static_chain_decl != NULL)
27459 {
27460 asm_fprintf (file, "\tstd %s,24(%s)\n",
27461 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27462 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27463 asm_fprintf (file, "\tld %s,24(%s)\n",
27464 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27465 }
27466 else
27467 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27468 }
27469
27470 rs6000_pic_labelno++;
27471 }
27472
27473 /* -mprofile-kernel code calls mcount before the function prolog,
27474 so a profiled leaf function should stay a leaf function. */
27475 static bool
27476 rs6000_keep_leaf_when_profiled ()
27477 {
27478 return TARGET_PROFILE_KERNEL;
27479 }
27480
27481 /* Non-zero if vmx regs are restored before the frame pop, zero if
27482 we restore after the pop when possible. */
27483 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27484
27485 /* Restoring cr is a two step process: loading a reg from the frame
27486 save, then moving the reg to cr. For ABI_V4 we must let the
27487 unwinder know that the stack location is no longer valid at or
27488 before the stack deallocation, but we can't emit a cfa_restore for
27489 cr at the stack deallocation like we do for other registers.
27490 The trouble is that it is possible for the move to cr to be
27491 scheduled after the stack deallocation. So say exactly where cr
27492 is located on each of the two insns. */
27493
27494 static rtx
27495 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27496 {
27497 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27498 rtx reg = gen_rtx_REG (SImode, regno);
27499 rtx_insn *insn = emit_move_insn (reg, mem);
27500
27501 if (!exit_func && DEFAULT_ABI == ABI_V4)
27502 {
27503 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27504 rtx set = gen_rtx_SET (reg, cr);
27505
27506 add_reg_note (insn, REG_CFA_REGISTER, set);
27507 RTX_FRAME_RELATED_P (insn) = 1;
27508 }
27509 return reg;
27510 }
27511
27512 /* Reload CR from REG. */
27513
27514 static void
27515 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27516 {
27517 int count = 0;
27518 int i;
27519
27520 if (using_mfcr_multiple)
27521 {
27522 for (i = 0; i < 8; i++)
27523 if (save_reg_p (CR0_REGNO + i))
27524 count++;
27525 gcc_assert (count);
27526 }
27527
27528 if (using_mfcr_multiple && count > 1)
27529 {
27530 rtx_insn *insn;
27531 rtvec p;
27532 int ndx;
27533
27534 p = rtvec_alloc (count);
27535
27536 ndx = 0;
27537 for (i = 0; i < 8; i++)
27538 if (save_reg_p (CR0_REGNO + i))
27539 {
27540 rtvec r = rtvec_alloc (2);
27541 RTVEC_ELT (r, 0) = reg;
27542 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
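/* The mask 1 << (7-i) is the mtcrf-style field select: the most
significant of the eight mask bits names CR0, so field i maps to
bit 7-i.  */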
27543 RTVEC_ELT (p, ndx) =
27544 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27545 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27546 ndx++;
27547 }
27548 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27549 gcc_assert (ndx == count);
27550
27551 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27552 CR field separately. */
27553 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27554 {
27555 for (i = 0; i < 8; i++)
27556 if (save_reg_p (CR0_REGNO + i))
27557 add_reg_note (insn, REG_CFA_RESTORE,
27558 gen_rtx_REG (SImode, CR0_REGNO + i));
27559
27560 RTX_FRAME_RELATED_P (insn) = 1;
27561 }
27562 }
27563 else
27564 for (i = 0; i < 8; i++)
27565 if (save_reg_p (CR0_REGNO + i))
27566 {
27567 rtx insn = emit_insn (gen_movsi_to_cr_one
27568 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27569
27570 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27571 CR field separately, attached to the insn that in fact
27572 restores this particular CR field. */
27573 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27574 {
27575 add_reg_note (insn, REG_CFA_RESTORE,
27576 gen_rtx_REG (SImode, CR0_REGNO + i));
27577
27578 RTX_FRAME_RELATED_P (insn) = 1;
27579 }
27580 }
27581
27582 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27583 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27584 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27585 {
27586 rtx_insn *insn = get_last_insn ();
27587 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27588
27589 add_reg_note (insn, REG_CFA_RESTORE, cr);
27590 RTX_FRAME_RELATED_P (insn) = 1;
27591 }
27592 }
27593
27594 /* Like cr, the move to lr instruction can be scheduled after the
27595 stack deallocation, but unlike cr, its stack frame save is still
27596 valid. So we only need to emit the cfa_restore on the correct
27597 instruction. */
27598
27599 static void
27600 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27601 {
27602 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27603 rtx reg = gen_rtx_REG (Pmode, regno);
27604
27605 emit_move_insn (reg, mem);
27606 }
27607
27608 static void
27609 restore_saved_lr (int regno, bool exit_func)
27610 {
27611 rtx reg = gen_rtx_REG (Pmode, regno);
27612 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27613 rtx_insn *insn = emit_move_insn (lr, reg);
27614
27615 if (!exit_func && flag_shrink_wrap)
27616 {
27617 add_reg_note (insn, REG_CFA_RESTORE, lr);
27618 RTX_FRAME_RELATED_P (insn) = 1;
27619 }
27620 }
27621
27622 static rtx
27623 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27624 {
27625 if (DEFAULT_ABI == ABI_ELFv2)
27626 {
27627 int i;
27628 for (i = 0; i < 8; i++)
27629 if (save_reg_p (CR0_REGNO + i))
27630 {
27631 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27632 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27633 cfa_restores);
27634 }
27635 }
27636 else if (info->cr_save_p)
27637 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27638 gen_rtx_REG (SImode, CR2_REGNO),
27639 cfa_restores);
27640
27641 if (info->lr_save_p)
27642 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27643 gen_rtx_REG (Pmode, LR_REGNO),
27644 cfa_restores);
27645 return cfa_restores;
27646 }
27647
27648 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27649 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27650 below the stack pointer that are not clobbered by signals. */
27651
27652 static inline bool
27653 offset_below_red_zone_p (HOST_WIDE_INT offset)
27654 {
27655 return offset < (DEFAULT_ABI == ABI_V4
27656 ? 0
27657 : TARGET_32BIT ? -220 : -288);
27658 }
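/* For example, under the 64-bit AIX and ELFv2 ABIs the 288-byte
cushion means offset_below_red_zone_p (-300) is true while
offset_below_red_zone_p (-288) is false; under ABI_V4 any negative
offset counts as below the (nonexistent) cushion.  */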
27659
27660 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27661
27662 static void
27663 emit_cfa_restores (rtx cfa_restores)
27664 {
27665 rtx_insn *insn = get_last_insn ();
27666 rtx *loc = &REG_NOTES (insn);
27667
27668 while (*loc)
27669 loc = &XEXP (*loc, 1);
27670 *loc = cfa_restores;
27671 RTX_FRAME_RELATED_P (insn) = 1;
27672 }
27673
27674 /* Emit function epilogue as insns. */
27675
27676 void
27677 rs6000_emit_epilogue (int sibcall)
27678 {
27679 rs6000_stack_t *info;
27680 int restoring_GPRs_inline;
27681 int restoring_FPRs_inline;
27682 int using_load_multiple;
27683 int using_mtcr_multiple;
27684 int use_backchain_to_restore_sp;
27685 int restore_lr;
27686 int strategy;
27687 HOST_WIDE_INT frame_off = 0;
27688 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27689 rtx frame_reg_rtx = sp_reg_rtx;
27690 rtx cfa_restores = NULL_RTX;
27691 rtx insn;
27692 rtx cr_save_reg = NULL_RTX;
27693 machine_mode reg_mode = Pmode;
27694 int reg_size = TARGET_32BIT ? 4 : 8;
27695 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27696 int fp_reg_size = 8;
27697 int i;
27698 bool exit_func;
27699 unsigned ptr_regno;
27700
27701 info = rs6000_stack_info ();
27702
27703 strategy = info->savres_strategy;
27704 using_load_multiple = strategy & REST_MULTIPLE;
27705 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27706 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27707 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27708 || rs6000_tune == PROCESSOR_PPC603
27709 || rs6000_tune == PROCESSOR_PPC750
27710 || optimize_size);
27711 /* Restore via the backchain when we have a large frame, since this
27712 is more efficient than an addis, addi pair. The second condition
27713 here will not trigger at the moment; we don't actually need a
27714 frame pointer for alloca, but the generic parts of the compiler
27715 give us one anyway. */
27716 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27717 ? info->lr_save_offset
27718 : 0) > 32767
27719 || (cfun->calls_alloca
27720 && !frame_pointer_needed));
27721 restore_lr = (info->lr_save_p
27722 && (restoring_FPRs_inline
27723 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27724 && (restoring_GPRs_inline
27725 || info->first_fp_reg_save < 64)
27726 && !cfun->machine->lr_is_wrapped_separately);
27727
27728
27729 if (WORLD_SAVE_P (info))
27730 {
27731 int i, j;
27732 char rname[30];
27733 const char *alloc_rname;
27734 rtvec p;
27735
27736 /* eh_rest_world_r10 will return to the location saved in the LR
27737 stack slot (which is not likely to be our caller).
27738 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27739 rest_world is similar, except any R10 parameter is ignored.
27740 The exception-handling stuff that was here in 2.95 is no
27741 longer necessary. */
27742
27743 p = rtvec_alloc (9
27744 + 32 - info->first_gp_reg_save
27745 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27746 + 63 + 1 - info->first_fp_reg_save);
27747
27748 strcpy (rname, ((crtl->calls_eh_return) ?
27749 "*eh_rest_world_r10" : "*rest_world"));
27750 alloc_rname = ggc_strdup (rname);
27751
27752 j = 0;
27753 RTVEC_ELT (p, j++) = ret_rtx;
27754 RTVEC_ELT (p, j++)
27755 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27756 /* The instruction pattern requires a clobber here;
27757 it is shared with the restVEC helper. */
27758 RTVEC_ELT (p, j++)
27759 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
27760
27761 {
27762 /* CR register traditionally saved as CR2. */
27763 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27764 RTVEC_ELT (p, j++)
27765 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27766 if (flag_shrink_wrap)
27767 {
27768 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27769 gen_rtx_REG (Pmode, LR_REGNO),
27770 cfa_restores);
27771 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27772 }
27773 }
27774
27775 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27776 {
27777 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27778 RTVEC_ELT (p, j++)
27779 = gen_frame_load (reg,
27780 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27781 if (flag_shrink_wrap
27782 && save_reg_p (info->first_gp_reg_save + i))
27783 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27784 }
27785 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27786 {
27787 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27788 RTVEC_ELT (p, j++)
27789 = gen_frame_load (reg,
27790 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27791 if (flag_shrink_wrap
27792 && save_reg_p (info->first_altivec_reg_save + i))
27793 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27794 }
27795 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27796 {
27797 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27798 info->first_fp_reg_save + i);
27799 RTVEC_ELT (p, j++)
27800 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27801 if (flag_shrink_wrap
27802 && save_reg_p (info->first_fp_reg_save + i))
27803 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27804 }
27805 RTVEC_ELT (p, j++)
27806 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
27807 RTVEC_ELT (p, j++)
27808 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
27809 RTVEC_ELT (p, j++)
27810 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
27811 RTVEC_ELT (p, j++)
27812 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
27813 RTVEC_ELT (p, j++)
27814 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27815 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27816
27817 if (flag_shrink_wrap)
27818 {
27819 REG_NOTES (insn) = cfa_restores;
27820 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27821 RTX_FRAME_RELATED_P (insn) = 1;
27822 }
27823 return;
27824 }
27825
27826 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27827 if (info->push_p)
27828 frame_off = info->total_size;
27829
27830 /* Restore AltiVec registers if we must do so before adjusting the
27831 stack. */
27832 if (info->altivec_size != 0
27833 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27834 || (DEFAULT_ABI != ABI_V4
27835 && offset_below_red_zone_p (info->altivec_save_offset))))
27836 {
27837 int i;
27838 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27839
27840 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27841 if (use_backchain_to_restore_sp)
27842 {
27843 int frame_regno = 11;
27844
27845 if ((strategy & REST_INLINE_VRS) == 0)
27846 {
27847 /* Of r11 and r12, select the one not clobbered by an
27848 out-of-line restore function for the frame register. */
27849 frame_regno = 11 + 12 - scratch_regno;
27850 }
27851 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27852 emit_move_insn (frame_reg_rtx,
27853 gen_rtx_MEM (Pmode, sp_reg_rtx));
27854 frame_off = 0;
27855 }
27856 else if (frame_pointer_needed)
27857 frame_reg_rtx = hard_frame_pointer_rtx;
27858
27859 if ((strategy & REST_INLINE_VRS) == 0)
27860 {
27861 int end_save = info->altivec_save_offset + info->altivec_size;
27862 int ptr_off;
27863 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27864 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27865
27866 if (end_save + frame_off != 0)
27867 {
27868 rtx offset = GEN_INT (end_save + frame_off);
27869
27870 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27871 }
27872 else
27873 emit_move_insn (ptr_reg, frame_reg_rtx);
27874
27875 ptr_off = -end_save;
27876 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27877 info->altivec_save_offset + ptr_off,
27878 0, V4SImode, SAVRES_VR);
27879 }
27880 else
27881 {
27882 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27883 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27884 {
27885 rtx addr, areg, mem, insn;
27886 rtx reg = gen_rtx_REG (V4SImode, i);
27887 HOST_WIDE_INT offset
27888 = (info->altivec_save_offset + frame_off
27889 + 16 * (i - info->first_altivec_reg_save));
27890
27891 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27892 {
27893 mem = gen_frame_mem (V4SImode,
27894 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27895 GEN_INT (offset)));
27896 insn = gen_rtx_SET (reg, mem);
27897 }
27898 else
27899 {
27900 areg = gen_rtx_REG (Pmode, 0);
27901 emit_move_insn (areg, GEN_INT (offset));
27902
27903 /* AltiVec addressing mode is [reg+reg]. */
27904 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
27905 mem = gen_frame_mem (V4SImode, addr);
27906
27907 /* Rather than emitting a generic move, force use of the
27908 lvx instruction, which we always want. In particular we
27909 don't want lxvd2x/xxpermdi for little endian. */
27910 insn = gen_altivec_lvx_v4si_internal (reg, mem);
27911 }
27912
27913 (void) emit_insn (insn);
27914 }
27915 }
27916
27917 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27918 if (((strategy & REST_INLINE_VRS) == 0
27919 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
27920 && (flag_shrink_wrap
27921 || (offset_below_red_zone_p
27922 (info->altivec_save_offset
27923 + 16 * (i - info->first_altivec_reg_save))))
27924 && save_reg_p (i))
27925 {
27926 rtx reg = gen_rtx_REG (V4SImode, i);
27927 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27928 }
27929 }
27930
27931 /* Restore VRSAVE if we must do so before adjusting the stack. */
27932 if (info->vrsave_size != 0
27933 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27934 || (DEFAULT_ABI != ABI_V4
27935 && offset_below_red_zone_p (info->vrsave_save_offset))))
27936 {
27937 rtx reg;
27938
27939 if (frame_reg_rtx == sp_reg_rtx)
27940 {
27941 if (use_backchain_to_restore_sp)
27942 {
27943 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27944 emit_move_insn (frame_reg_rtx,
27945 gen_rtx_MEM (Pmode, sp_reg_rtx));
27946 frame_off = 0;
27947 }
27948 else if (frame_pointer_needed)
27949 frame_reg_rtx = hard_frame_pointer_rtx;
27950 }
27951
27952 reg = gen_rtx_REG (SImode, 12);
27953 emit_insn (gen_frame_load (reg, frame_reg_rtx,
27954 info->vrsave_save_offset + frame_off));
27955
27956 emit_insn (generate_set_vrsave (reg, info, 1));
27957 }
27958
27959 insn = NULL_RTX;
27960 /* If we have a large stack frame, restore the old stack pointer
27961 using the backchain. */
27962 if (use_backchain_to_restore_sp)
27963 {
27964 if (frame_reg_rtx == sp_reg_rtx)
27965 {
27966 /* Under V.4, don't reset the stack pointer until after we're done
27967 loading the saved registers. */
27968 if (DEFAULT_ABI == ABI_V4)
27969 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27970
27971 insn = emit_move_insn (frame_reg_rtx,
27972 gen_rtx_MEM (Pmode, sp_reg_rtx));
27973 frame_off = 0;
27974 }
27975 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27976 && DEFAULT_ABI == ABI_V4)
27977 /* frame_reg_rtx has been set up by the altivec restore. */
27978 ;
27979 else
27980 {
27981 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
27982 frame_reg_rtx = sp_reg_rtx;
27983 }
27984 }
27985 /* If we have a frame pointer, we can restore the old stack pointer
27986 from it. */
27987 else if (frame_pointer_needed)
27988 {
27989 frame_reg_rtx = sp_reg_rtx;
27990 if (DEFAULT_ABI == ABI_V4)
27991 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
27992 /* Prevent reordering memory accesses against stack pointer restore. */
27993 else if (cfun->calls_alloca
27994 || offset_below_red_zone_p (-info->total_size))
27995 rs6000_emit_stack_tie (frame_reg_rtx, true);
27996
27997 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
27998 GEN_INT (info->total_size)));
27999 frame_off = 0;
28000 }
28001 else if (info->push_p
28002 && DEFAULT_ABI != ABI_V4
28003 && !crtl->calls_eh_return)
28004 {
28005 /* Prevent reordering memory accesses against stack pointer restore. */
28006 if (cfun->calls_alloca
28007 || offset_below_red_zone_p (-info->total_size))
28008 rs6000_emit_stack_tie (frame_reg_rtx, false);
28009 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28010 GEN_INT (info->total_size)));
28011 frame_off = 0;
28012 }
28013 if (insn && frame_reg_rtx == sp_reg_rtx)
28014 {
28015 if (cfa_restores)
28016 {
28017 REG_NOTES (insn) = cfa_restores;
28018 cfa_restores = NULL_RTX;
28019 }
28020 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28021 RTX_FRAME_RELATED_P (insn) = 1;
28022 }
28023
28024 /* Restore AltiVec registers if we have not done so already. */
28025 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28026 && info->altivec_size != 0
28027 && (DEFAULT_ABI == ABI_V4
28028 || !offset_below_red_zone_p (info->altivec_save_offset)))
28029 {
28030 int i;
28031
28032 if ((strategy & REST_INLINE_VRS) == 0)
28033 {
28034 int end_save = info->altivec_save_offset + info->altivec_size;
28035 int ptr_off;
28036 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28037 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28038 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28039
28040 if (end_save + frame_off != 0)
28041 {
28042 rtx offset = GEN_INT (end_save + frame_off);
28043
28044 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28045 }
28046 else
28047 emit_move_insn (ptr_reg, frame_reg_rtx);
28048
28049 ptr_off = -end_save;
28050 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28051 info->altivec_save_offset + ptr_off,
28052 0, V4SImode, SAVRES_VR);
28053 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28054 {
28055 /* Frame reg was clobbered by out-of-line save. Restore it
28056 from ptr_reg, and if we are calling out-of-line gpr or
28057 fpr restore set up the correct pointer and offset. */
28058 unsigned newptr_regno = 1;
28059 if (!restoring_GPRs_inline)
28060 {
28061 bool lr = info->gp_save_offset + info->gp_size == 0;
28062 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28063 newptr_regno = ptr_regno_for_savres (sel);
28064 end_save = info->gp_save_offset + info->gp_size;
28065 }
28066 else if (!restoring_FPRs_inline)
28067 {
28068 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28069 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28070 newptr_regno = ptr_regno_for_savres (sel);
28071 end_save = info->fp_save_offset + info->fp_size;
28072 }
28073
28074 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28075 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28076
28077 if (end_save + ptr_off != 0)
28078 {
28079 rtx offset = GEN_INT (end_save + ptr_off);
28080
28081 frame_off = -end_save;
28082 if (TARGET_32BIT)
28083 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28084 ptr_reg, offset));
28085 else
28086 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28087 ptr_reg, offset));
28088 }
28089 else
28090 {
28091 frame_off = ptr_off;
28092 emit_move_insn (frame_reg_rtx, ptr_reg);
28093 }
28094 }
28095 }
28096 else
28097 {
28098 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28099 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28100 {
28101 rtx addr, areg, mem, insn;
28102 rtx reg = gen_rtx_REG (V4SImode, i);
28103 HOST_WIDE_INT offset
28104 = (info->altivec_save_offset + frame_off
28105 + 16 * (i - info->first_altivec_reg_save));
28106
28107 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28108 {
28109 mem = gen_frame_mem (V4SImode,
28110 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28111 GEN_INT (offset)));
28112 insn = gen_rtx_SET (reg, mem);
28113 }
28114 else
28115 {
28116 areg = gen_rtx_REG (Pmode, 0);
28117 emit_move_insn (areg, GEN_INT (offset));
28118
28119 /* AltiVec addressing mode is [reg+reg]. */
28120 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28121 mem = gen_frame_mem (V4SImode, addr);
28122
28123 /* Rather than emitting a generic move, force use of the
28124 lvx instruction, which we always want. In particular we
28125 don't want lxvd2x/xxpermdi for little endian. */
28126 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28127 }
28128
28129 (void) emit_insn (insn);
28130 }
28131 }
28132
28133 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28134 if (((strategy & REST_INLINE_VRS) == 0
28135 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28136 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28137 && save_reg_p (i))
28138 {
28139 rtx reg = gen_rtx_REG (V4SImode, i);
28140 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28141 }
28142 }
28143
28144 /* Restore VRSAVE if we have not done so already. */
28145 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28146 && info->vrsave_size != 0
28147 && (DEFAULT_ABI == ABI_V4
28148 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28149 {
28150 rtx reg;
28151
28152 reg = gen_rtx_REG (SImode, 12);
28153 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28154 info->vrsave_save_offset + frame_off));
28155
28156 emit_insn (generate_set_vrsave (reg, info, 1));
28157 }
28158
28159 /* If we exit by an out-of-line restore function on ABI_V4 then that
28160 function will deallocate the stack, so we don't need to worry
28161 about the unwinder restoring cr from an invalid stack frame
28162 location. */
28163 exit_func = (!restoring_FPRs_inline
28164 || (!restoring_GPRs_inline
28165 && info->first_fp_reg_save == 64));
28166
28167 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28168 *separate* slots if the routine calls __builtin_eh_return, so
28169 that they can be independently restored by the unwinder. */
28170 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28171 {
28172 int i, cr_off = info->ehcr_offset;
28173
28174 for (i = 0; i < 8; i++)
28175 if (!call_used_regs[CR0_REGNO + i])
28176 {
28177 rtx reg = gen_rtx_REG (SImode, 0);
28178 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28179 cr_off + frame_off));
28180
28181 insn = emit_insn (gen_movsi_to_cr_one
28182 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28183
28184 if (!exit_func && flag_shrink_wrap)
28185 {
28186 add_reg_note (insn, REG_CFA_RESTORE,
28187 gen_rtx_REG (SImode, CR0_REGNO + i));
28188
28189 RTX_FRAME_RELATED_P (insn) = 1;
28190 }
28191
28192 cr_off += reg_size;
28193 }
28194 }
28195
28196 /* Get the old lr if we saved it. If we are restoring registers
28197 out-of-line, then the out-of-line routines can do this for us. */
28198 if (restore_lr && restoring_GPRs_inline)
28199 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28200
28201 /* Get the old cr if we saved it. */
28202 if (info->cr_save_p)
28203 {
28204 unsigned cr_save_regno = 12;
28205
28206 if (!restoring_GPRs_inline)
28207 {
28208 /* Ensure we don't use the register used by the out-of-line
28209 gpr register restore below. */
28210 bool lr = info->gp_save_offset + info->gp_size == 0;
28211 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28212 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28213
28214 if (gpr_ptr_regno == 12)
28215 cr_save_regno = 11;
28216 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28217 }
28218 else if (REGNO (frame_reg_rtx) == 12)
28219 cr_save_regno = 11;
28220
28221 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28222 info->cr_save_offset + frame_off,
28223 exit_func);
28224 }
28225
28226 /* Set LR here to try to overlap restores below. */
28227 if (restore_lr && restoring_GPRs_inline)
28228 restore_saved_lr (0, exit_func);
28229
28230 /* Load exception handler data registers, if needed. */
28231 if (crtl->calls_eh_return)
28232 {
28233 unsigned int i, regno;
28234
28235 if (TARGET_AIX)
28236 {
28237 rtx reg = gen_rtx_REG (reg_mode, 2);
28238 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28239 frame_off + RS6000_TOC_SAVE_SLOT));
28240 }
28241
28242 for (i = 0; ; ++i)
28243 {
28244 rtx mem;
28245
28246 regno = EH_RETURN_DATA_REGNO (i);
28247 if (regno == INVALID_REGNUM)
28248 break;
28249
28250 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28251 info->ehrd_offset + frame_off
28252 + reg_size * (int) i);
28253
28254 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28255 }
28256 }
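/* (On rs6000 the EH return data registers are conventionally r3..r6,
so the loop above reloads at most four GPRs from the ehrd area.)  */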
28257
28258 /* Restore GPRs. This is done as a PARALLEL if we are using
28259 the load-multiple instructions. */
28260 if (!restoring_GPRs_inline)
28261 {
28262 /* We are jumping to an out-of-line function. */
28263 rtx ptr_reg;
28264 int end_save = info->gp_save_offset + info->gp_size;
28265 bool can_use_exit = end_save == 0;
28266 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28267 int ptr_off;
28268
28269 /* Emit stack reset code if we need it. */
28270 ptr_regno = ptr_regno_for_savres (sel);
28271 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28272 if (can_use_exit)
28273 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28274 else if (end_save + frame_off != 0)
28275 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28276 GEN_INT (end_save + frame_off)));
28277 else if (REGNO (frame_reg_rtx) != ptr_regno)
28278 emit_move_insn (ptr_reg, frame_reg_rtx);
28279 if (REGNO (frame_reg_rtx) == ptr_regno)
28280 frame_off = -end_save;
28281
28282 if (can_use_exit && info->cr_save_p)
28283 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28284
28285 ptr_off = -end_save;
28286 rs6000_emit_savres_rtx (info, ptr_reg,
28287 info->gp_save_offset + ptr_off,
28288 info->lr_save_offset + ptr_off,
28289 reg_mode, sel);
28290 }
28291 else if (using_load_multiple)
28292 {
28293 rtvec p;
28294 p = rtvec_alloc (32 - info->first_gp_reg_save);
28295 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28296 RTVEC_ELT (p, i)
28297 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28298 frame_reg_rtx,
28299 info->gp_save_offset + frame_off + reg_size * i);
28300 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28301 }
28302 else
28303 {
28304 int offset = info->gp_save_offset + frame_off;
28305 for (i = info->first_gp_reg_save; i < 32; i++)
28306 {
28307 if (save_reg_p (i)
28308 && !cfun->machine->gpr_is_wrapped_separately[i])
28309 {
28310 rtx reg = gen_rtx_REG (reg_mode, i);
28311 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28312 }
28313
28314 offset += reg_size;
28315 }
28316 }
28317
28318 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28319 {
28320 /* If the frame pointer was used then we can't delay emitting
28321 a REG_CFA_DEF_CFA note. This must happen on the insn that
28322 restores the frame pointer, r31. We may have already emitted
28323 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28324 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28325 be harmless if emitted. */
28326 if (frame_pointer_needed)
28327 {
28328 insn = get_last_insn ();
28329 add_reg_note (insn, REG_CFA_DEF_CFA,
28330 plus_constant (Pmode, frame_reg_rtx, frame_off));
28331 RTX_FRAME_RELATED_P (insn) = 1;
28332 }
28333
28334 /* Set up cfa_restores. We always need these when
28335 shrink-wrapping. If not shrink-wrapping then we only need
28336 the cfa_restore when the stack location is no longer valid.
28337 The cfa_restores must be emitted on or before the insn that
28338 invalidates the stack, and of course must not be emitted
28339 before the insn that actually does the restore. The latter
28340 is why it is a bad idea to emit the cfa_restores as a group
28341 on the last instruction here that actually does a restore:
28342 That insn may be reordered with respect to others doing
28343 restores. */
28344 if (flag_shrink_wrap
28345 && !restoring_GPRs_inline
28346 && info->first_fp_reg_save == 64)
28347 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28348
28349 for (i = info->first_gp_reg_save; i < 32; i++)
28350 if (save_reg_p (i)
28351 && !cfun->machine->gpr_is_wrapped_separately[i])
28352 {
28353 rtx reg = gen_rtx_REG (reg_mode, i);
28354 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28355 }
28356 }
28357
28358 if (!restoring_GPRs_inline
28359 && info->first_fp_reg_save == 64)
28360 {
28361 /* We are jumping to an out-of-line function. */
28362 if (cfa_restores)
28363 emit_cfa_restores (cfa_restores);
28364 return;
28365 }
28366
28367 if (restore_lr && !restoring_GPRs_inline)
28368 {
28369 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28370 restore_saved_lr (0, exit_func);
28371 }
28372
28373 /* Restore fpr's if we need to do it without calling a function. */
28374 if (restoring_FPRs_inline)
28375 {
28376 int offset = info->fp_save_offset + frame_off;
28377 for (i = info->first_fp_reg_save; i < 64; i++)
28378 {
28379 if (save_reg_p (i)
28380 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28381 {
28382 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28383 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28384 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28385 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28386 cfa_restores);
28387 }
28388
28389 offset += fp_reg_size;
28390 }
28391 }
28392
28393 /* If we saved cr, restore it here. Just those that were used. */
28394 if (info->cr_save_p)
28395 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28396
28397 /* If this is V.4, unwind the stack pointer after all of the loads
28398 have been done, or set up r11 if we are restoring fp out of line. */
28399 ptr_regno = 1;
28400 if (!restoring_FPRs_inline)
28401 {
28402 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28403 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28404 ptr_regno = ptr_regno_for_savres (sel);
28405 }
28406
28407 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28408 if (REGNO (frame_reg_rtx) == ptr_regno)
28409 frame_off = 0;
28410
28411 if (insn && restoring_FPRs_inline)
28412 {
28413 if (cfa_restores)
28414 {
28415 REG_NOTES (insn) = cfa_restores;
28416 cfa_restores = NULL_RTX;
28417 }
28418 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28419 RTX_FRAME_RELATED_P (insn) = 1;
28420 }
28421
28422 if (crtl->calls_eh_return)
28423 {
28424 rtx sa = EH_RETURN_STACKADJ_RTX;
28425 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28426 }
28427
28428 if (!sibcall && restoring_FPRs_inline)
28429 {
28430 if (cfa_restores)
28431 {
28432 /* We can't hang the cfa_restores off a simple return,
28433 since the shrink-wrap code sometimes uses an existing
28434 return. This means there might be a path from
28435 pre-prologue code to this return, and dwarf2cfi code
28436 wants the eh_frame unwinder state to be the same on
28437 all paths to any point. So we need to emit the
28438 cfa_restores before the return. For -m64 we really
28439 don't need epilogue cfa_restores at all, except for
28440 this irritating dwarf2cfi-with-shrink-wrap
28441 requirement; the stack red-zone means eh_frame info
28442 from the prologue telling the unwinder to restore
28443 from the stack is perfectly good right to the end of
28444 the function. */
28445 emit_insn (gen_blockage ());
28446 emit_cfa_restores (cfa_restores);
28447 cfa_restores = NULL_RTX;
28448 }
28449
28450 emit_jump_insn (targetm.gen_simple_return ());
28451 }
28452
28453 if (!sibcall && !restoring_FPRs_inline)
28454 {
28455 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28456 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28457 int elt = 0;
28458 RTVEC_ELT (p, elt++) = ret_rtx;
28459 if (lr)
28460 RTVEC_ELT (p, elt++)
28461 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28462
28463 /* We have to restore more than two FP registers, so branch to the
28464 restore function. It will return to our caller. */
28465 int i;
28466 int reg;
28467 rtx sym;
28468
28469 if (flag_shrink_wrap)
28470 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28471
28472 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28473 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28474 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28475 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28476
28477 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28478 {
28479 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28480
28481 RTVEC_ELT (p, elt++)
28482 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28483 if (flag_shrink_wrap
28484 && save_reg_p (info->first_fp_reg_save + i))
28485 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28486 }
28487
28488 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28489 }
28490
28491 if (cfa_restores)
28492 {
28493 if (sibcall)
28494 /* Ensure the cfa_restores are hung off an insn that won't
28495 be reordered above other restores. */
28496 emit_insn (gen_blockage ());
28497
28498 emit_cfa_restores (cfa_restores);
28499 }
28500 }
28501
28502 /* Write function epilogue. */
28503
28504 static void
28505 rs6000_output_function_epilogue (FILE *file)
28506 {
28507 #if TARGET_MACHO
28508 macho_branch_islands ();
28509
28510 {
28511 rtx_insn *insn = get_last_insn ();
28512 rtx_insn *deleted_debug_label = NULL;
28513
28514 /* Mach-O doesn't support labels at the end of objects, so if
28515 it looks like we might want one, take special action.
28516
28517 First, collect any sequence of deleted debug labels. */
28518 while (insn
28519 && NOTE_P (insn)
28520 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28521 {
28522 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28523 notes only, instead set their CODE_LABEL_NUMBER to -1,
28524 otherwise there would be code generation differences
28525 in between -g and -g0. */
28526 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28527 deleted_debug_label = insn;
28528 insn = PREV_INSN (insn);
28529 }
28530
28531 /* Second, if we have:
28532 label:
28533 barrier
28534 then this needs to be detected, so skip past the barrier. */
28535
28536 if (insn && BARRIER_P (insn))
28537 insn = PREV_INSN (insn);
28538
28539 /* Up to now we've only seen notes or barriers. */
28540 if (insn)
28541 {
28542 if (LABEL_P (insn)
28543 || (NOTE_P (insn)
28544 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28545 /* Trailing label: <barrier>. */
28546 fputs ("\tnop\n", file);
28547 else
28548 {
28549 /* Lastly, see if we have a completely empty function body. */
28550 while (insn && ! INSN_P (insn))
28551 insn = PREV_INSN (insn);
28552 /* If we don't find any insns, we've got an empty function body;
28553 i.e. completely empty, without a return or branch. This is
28554 taken as the case where a function body has been removed
28555 because it contains an inline __builtin_unreachable(). GCC
28556 states that reaching __builtin_unreachable() means UB so we're
28557 not obliged to do anything special; however, we want
28558 non-zero-sized function bodies. To meet this, and help the
28559 user out, let's trap the case. */
28560 if (insn == NULL)
28561 fputs ("\ttrap\n", file);
28562 }
28563 }
28564 else if (deleted_debug_label)
28565 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28566 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28567 CODE_LABEL_NUMBER (insn) = -1;
28568 }
28569 #endif
28570
28571 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28572 on its format.
28573
28574 We don't output a traceback table if -finhibit-size-directive was
28575 used. The documentation for -finhibit-size-directive reads
28576 ``don't output a @code{.size} assembler directive, or anything
28577 else that would cause trouble if the function is split in the
28578 middle, and the two halves are placed at locations far apart in
28579 memory.'' The traceback table has this property, since it
28580 includes the offset from the start of the function to the
28581 traceback table itself.
28582
28583 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28584 different traceback table. */
28585 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28586 && ! flag_inhibit_size_directive
28587 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28588 {
28589 const char *fname = NULL;
28590 const char *language_string = lang_hooks.name;
28591 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28592 int i;
28593 int optional_tbtab;
28594 rs6000_stack_t *info = rs6000_stack_info ();
28595
28596 if (rs6000_traceback == traceback_full)
28597 optional_tbtab = 1;
28598 else if (rs6000_traceback == traceback_part)
28599 optional_tbtab = 0;
28600 else
28601 optional_tbtab = !optimize_size && !TARGET_ELF;
28602
28603 if (optional_tbtab)
28604 {
28605 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28606 while (*fname == '.') /* V.4 encodes . in the name */
28607 fname++;
28608
28609 /* Need label immediately before tbtab, so we can compute
28610 its offset from the function start. */
28611 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28612 ASM_OUTPUT_LABEL (file, fname);
28613 }
28614
28615 /* The .tbtab pseudo-op can only be used for the first eight
28616 expressions, since it can't handle the possibly variable
28617 length fields that follow. However, if you omit the optional
28618 fields, the assembler outputs zeros for all optional fields
28619 anyway, giving each variable length field its minimum length
28620 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28621 pseudo-op at all. */
28622
28623 /* An all-zero word flags the start of the tbtab, for debuggers
28624 that have to find it by searching forward from the entry
28625 point or from the current pc. */
28626 fputs ("\t.long 0\n", file);
28627
28628 /* Tbtab format type. Use format type 0. */
28629 fputs ("\t.byte 0,", file);
28630
28631 /* Language type. Unfortunately, there does not seem to be any
28632 official way to discover the language being compiled, so we
28633 use language_string.
28634 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28635 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28636 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
28637 numbers either, so for now use 0. */
28638 if (lang_GNU_C ()
28639 || ! strcmp (language_string, "GNU GIMPLE")
28640 || ! strcmp (language_string, "GNU Go")
28641 || ! strcmp (language_string, "GNU D")
28642 || ! strcmp (language_string, "libgccjit"))
28643 i = 0;
28644 else if (! strcmp (language_string, "GNU F77")
28645 || lang_GNU_Fortran ())
28646 i = 1;
28647 else if (! strcmp (language_string, "GNU Ada"))
28648 i = 3;
28649 else if (lang_GNU_CXX ()
28650 || ! strcmp (language_string, "GNU Objective-C++"))
28651 i = 9;
28652 else if (! strcmp (language_string, "GNU Java"))
28653 i = 13;
28654 else if (! strcmp (language_string, "GNU Objective-C"))
28655 i = 14;
28656 else
28657 gcc_unreachable ();
28658 fprintf (file, "%d,", i);
28659
28660 /* 8 single bit fields: global linkage (not set for C extern linkage,
28661 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28662 from start of procedure stored in tbtab, internal function, function
28663 has controlled storage, function has no toc, function uses fp,
28664 function logs/aborts fp operations. */
28665 /* Assume that fp operations are used if any fp reg must be saved. */
28666 fprintf (file, "%d,",
28667 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28668
28669 /* 6 bitfields: function is interrupt handler, name present in
28670 proc table, function calls alloca, on condition directives
28671 (controls stack walks, 3 bits), saves condition reg, saves
28672 link reg. */
28673 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28674 set up as a frame pointer, even when there is no alloca call. */
28675 fprintf (file, "%d,",
28676 ((optional_tbtab << 6)
28677 | ((optional_tbtab & frame_pointer_needed) << 5)
28678 | (info->cr_save_p << 1)
28679 | (info->lr_save_p)));
28680
28681 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28682 (6 bits). */
28683 fprintf (file, "%d,",
28684 (info->push_p << 7) | (64 - info->first_fp_reg_save));
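/* For example, a pushed frame saving f62 and f63 emits 130 here
(0x80 | 2).  */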
28685
28686 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28687 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28688
28689 if (optional_tbtab)
28690 {
28691 /* Compute the parameter info from the function decl argument
28692 list. */
28693 tree decl;
28694 int next_parm_info_bit = 31;
28695
28696 for (decl = DECL_ARGUMENTS (current_function_decl);
28697 decl; decl = DECL_CHAIN (decl))
28698 {
28699 rtx parameter = DECL_INCOMING_RTL (decl);
28700 machine_mode mode = GET_MODE (parameter);
28701
28702 if (GET_CODE (parameter) == REG)
28703 {
28704 if (SCALAR_FLOAT_MODE_P (mode))
28705 {
28706 int bits;
28707
28708 float_parms++;
28709
28710 switch (mode)
28711 {
28712 case E_SFmode:
28713 case E_SDmode:
28714 bits = 0x2;
28715 break;
28716
28717 case E_DFmode:
28718 case E_DDmode:
28719 case E_TFmode:
28720 case E_TDmode:
28721 case E_IFmode:
28722 case E_KFmode:
28723 bits = 0x3;
28724 break;
28725
28726 default:
28727 gcc_unreachable ();
28728 }
28729
28730 /* If only one bit will fit, don't or in this entry. */
28731 if (next_parm_info_bit > 0)
28732 parm_info |= (bits << (next_parm_info_bit - 1));
28733 next_parm_info_bit -= 2;
28734 }
28735 else
28736 {
28737 fixed_parms += ((GET_MODE_SIZE (mode)
28738 + (UNITS_PER_WORD - 1))
28739 / UNITS_PER_WORD);
28740 next_parm_info_bit -= 1;
28741 }
28742 }
28743 }
28744 }
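/* A sketch of the packing above: parm_info is filled from bit 31
downward, two bits per floating point parameter (0b10 single, 0b11
double) and one zero bit per fixed point parameter, so an (int,
double) list yields 0 at bit 31 followed by 0b11 at bits 30-29.  */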
28745
28746 /* Number of fixed point parameters. */
28747 /* This is actually the number of words of fixed point parameters; thus
28748 an 8 byte struct counts as 2, and thus the maximum value is 8. */
28749 fprintf (file, "%d,", fixed_parms);
28750
28751 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28752 all on stack. */
28753 /* This is actually the number of fp registers that hold parameters;
28754 and thus the maximum value is 13. */
28755 /* Set parameters on stack bit if parameters are not in their original
28756 registers, regardless of whether they are on the stack? Xlc
28757 seems to set the bit when not optimizing. */
28758 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28759
28760 if (optional_tbtab)
28761 {
28762 /* Optional fields follow. Some are variable length. */
28763
28764 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28765 float, 11 double float. */
28766 /* There is an entry for each parameter in a register, in the order
28767 that they occur in the parameter list. Any intervening arguments
28768 on the stack are ignored. If the list overflows a long (max
28769 possible length 34 bits) then completely leave off all elements
28770 that don't fit. */
28771 /* Only emit this long if there was at least one parameter. */
28772 if (fixed_parms || float_parms)
28773 fprintf (file, "\t.long %d\n", parm_info);
28774
28775 /* Offset from start of code to tb table. */
28776 fputs ("\t.long ", file);
28777 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28778 RS6000_OUTPUT_BASENAME (file, fname);
28779 putc ('-', file);
28780 rs6000_output_function_entry (file, fname);
28781 putc ('\n', file);
28782
28783 /* Interrupt handler mask. */
28784 /* Omit this long, since we never set the interrupt handler bit
28785 above. */
28786
28787 /* Number of CTL (controlled storage) anchors. */
28788 /* Omit this long, since the has_ctl bit is never set above. */
28789
28790 /* Displacement into stack of each CTL anchor. */
28791 /* Omit this list of longs, because there are no CTL anchors. */
28792
28793 /* Length of function name. */
28794 if (*fname == '*')
28795 ++fname;
28796 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28797
28798 /* Function name. */
28799 assemble_string (fname, strlen (fname));
28800
28801 /* Register for alloca automatic storage; this is always reg 31.
28802 Only emit this if the alloca bit was set above. */
28803 if (frame_pointer_needed)
28804 fputs ("\t.byte 31\n", file);
28805
28806 fputs ("\t.align 2\n", file);
28807 }
28808 }
28809
28810 /* Arrange to define .LCTOC1 label, if not already done. */
28811 if (need_toc_init)
28812 {
28813 need_toc_init = 0;
28814 if (!toc_initialized)
28815 {
28816 switch_to_section (toc_section);
28817 switch_to_section (current_function_section ());
28818 }
28819 }
28820 }
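
/* Illustration (not from the original source): for a function declared
   f (int i, double d, float s), the traceback code above computes
   fixed_parms = 1 and float_parms = 2, and packs parm_info as

     bit 31      : 0   (the int, one word)
     bits 30..29 : 11  (the double)
     bits 28..27 : 10  (the float)

   i.e. parm_info == 0x70000000.  A minimal sketch of that packing;
   the helper is hypothetical and for exposition only.  */
#if 0
static unsigned
pack_parm_info_example (void)
{
  int next_parm_info_bit = 31;
  unsigned parm_info = 0;

  next_parm_info_bit -= 1;			  /* int: a single 0 bit.  */
  parm_info |= 0x3u << (next_parm_info_bit - 1);  /* double: 11.  */
  next_parm_info_bit -= 2;
  parm_info |= 0x2u << (next_parm_info_bit - 1);  /* float: 10.  */
  next_parm_info_bit -= 2;

  return parm_info;				  /* 0x70000000.  */
}
#endif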
28821
28822 /* -fsplit-stack support. */
28823
28824 /* A SYMBOL_REF for __morestack. */
28825 static GTY(()) rtx morestack_ref;
28826
28827 static rtx
28828 gen_add3_const (rtx rt, rtx ra, long c)
28829 {
28830 if (TARGET_64BIT)
28831 return gen_adddi3 (rt, ra, GEN_INT (c));
28832 else
28833 return gen_addsi3 (rt, ra, GEN_INT (c));
28834 }
28835
28836 /* Emit -fsplit-stack prologue, which goes before the regular function
28837 prologue (at local entry point in the case of ELFv2). */
28838
28839 void
28840 rs6000_expand_split_stack_prologue (void)
28841 {
28842 rs6000_stack_t *info = rs6000_stack_info ();
28843 unsigned HOST_WIDE_INT allocate;
28844 long alloc_hi, alloc_lo;
28845 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28846 rtx_insn *insn;
28847
28848 gcc_assert (flag_split_stack && reload_completed);
28849
28850 if (!info->push_p)
28851 return;
28852
28853 if (global_regs[29])
28854 {
28855 error ("%qs uses register r29", "-fsplit-stack");
28856 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
28857 "conflicts with %qD", global_regs_decl[29]);
28858 }
28859
28860 allocate = info->total_size;
28861 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
28862 {
28863 sorry ("stack frame larger than 2G is not supported for %qs", "-fsplit-stack");
28864 return;
28865 }
28866 if (morestack_ref == NULL_RTX)
28867 {
28868 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
28869 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
28870 | SYMBOL_FLAG_FUNCTION);
28871 }
28872
28873 r0 = gen_rtx_REG (Pmode, 0);
28874 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28875 r12 = gen_rtx_REG (Pmode, 12);
28876 emit_insn (gen_load_split_stack_limit (r0));
28877 /* Always emit two insns here to calculate the requested stack,
28878 so that the linker can edit them when adjusting size for calling
28879 non-split-stack code. */
28880 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
28881 alloc_lo = -allocate - alloc_hi;
28882 if (alloc_hi != 0)
28883 {
28884 emit_insn (gen_add3_const (r12, r1, alloc_hi));
28885 if (alloc_lo != 0)
28886 emit_insn (gen_add3_const (r12, r12, alloc_lo));
28887 else
28888 emit_insn (gen_nop ());
28889 }
28890 else
28891 {
28892 emit_insn (gen_add3_const (r12, r1, alloc_lo));
28893 emit_insn (gen_nop ());
28894 }
28895
28896 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
28897 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
28898 ok_label = gen_label_rtx ();
28899 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
28900 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
28901 gen_rtx_LABEL_REF (VOIDmode, ok_label),
28902 pc_rtx);
28903 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
28904 JUMP_LABEL (insn) = ok_label;
28905 /* Mark the jump as very likely to be taken. */
28906 add_reg_br_prob_note (insn, profile_probability::very_likely ());
28907
28908 lr = gen_rtx_REG (Pmode, LR_REGNO);
28909 insn = emit_move_insn (r0, lr);
28910 RTX_FRAME_RELATED_P (insn) = 1;
28911 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
28912 RTX_FRAME_RELATED_P (insn) = 1;
28913
28914 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
28915 const0_rtx, const0_rtx));
28916 call_fusage = NULL_RTX;
28917 use_reg (&call_fusage, r12);
28918 /* Say the call uses r0, even though it doesn't, to stop regrename
28919 from twiddling with the insns saving lr, trashing args for cfun.
28920 The insns restoring lr are similarly protected by making
28921 split_stack_return use r0. */
28922 use_reg (&call_fusage, r0);
28923 add_function_usage_to (insn, call_fusage);
28924 /* Indicate that this function can't jump to non-local gotos. */
28925 make_reg_eh_region_note_nothrow_nononlocal (insn);
28926 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
28927 insn = emit_move_insn (lr, r0);
28928 add_reg_note (insn, REG_CFA_RESTORE, lr);
28929 RTX_FRAME_RELATED_P (insn) = 1;
28930 emit_insn (gen_split_stack_return ());
28931
28932 emit_label (ok_label);
28933 LABEL_NUSES (ok_label) = 1;
28934 }
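
/* Illustration (not from the original source): the hi/lo decomposition
   above can be checked in isolation.  For allocate == 0x12345:

     alloc_hi = (-0x12345 + 0x8000) & ~0xffffL  ==  -0x10000
     alloc_lo = -0x12345 - (-0x10000)           ==  -0x2345

   so the prologue emits the pair

     addis r12,r1,-1          # r12 = r1 - 0x10000
     addi  r12,r12,-0x2345    # r12 = r1 - 0x12345

   and alloc_lo always lands in the signed 16-bit immediate range of
   addi.  A minimal sketch, assuming a 64-bit long; the helper is
   hypothetical, purely for exposition.  */
#if 0
static void
split_stack_hi_lo_example (void)
{
  long allocate = 0x12345;
  long alloc_hi = (-allocate + 0x8000) & ~0xffffL;
  long alloc_lo = -allocate - alloc_hi;

  gcc_assert (alloc_hi == -0x10000 && alloc_lo == -0x2345);
  gcc_assert (alloc_lo >= -0x8000 && alloc_lo < 0x8000);
  gcc_assert (alloc_hi + alloc_lo == -allocate);
}
#endif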
28935
28936 /* Return the internal arg pointer used for function incoming
28937 arguments. When -fsplit-stack is in effect, the arg pointer is r12,
28938 so we need to copy it to a pseudo in order for it to be preserved
28939 over calls and suchlike. We would really like to use a pseudo here
28940 for the internal arg pointer, but data-flow analysis is not prepared
28941 to accept pseudos as live at the beginning of a function. */
28942
28943 static rtx
28944 rs6000_internal_arg_pointer (void)
28945 {
28946 if (flag_split_stack
28947 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
28948 == NULL))
28949 {
28951 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
28952 {
28953 rtx pat;
28954
28955 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
28956 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
28957
28958 /* Put the pseudo initialization right after the note at the
28959 beginning of the function. */
28960 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
28961 gen_rtx_REG (Pmode, 12));
28962 push_topmost_sequence ();
28963 emit_insn_after (pat, get_insns ());
28964 pop_topmost_sequence ();
28965 }
28966 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
28967 FIRST_PARM_OFFSET (current_function_decl));
28968 return copy_to_reg (ret);
28969 }
28970 return virtual_incoming_args_rtx;
28971 }
28972
28973 /* We may have to tell the dataflow pass that the split stack prologue
28974 is initializing a register. */
28975
28976 static void
28977 rs6000_live_on_entry (bitmap regs)
28978 {
28979 if (flag_split_stack)
28980 bitmap_set_bit (regs, 12);
28981 }
28982
28983 /* Emit -fsplit-stack dynamic stack allocation space check. */
28984
28985 void
28986 rs6000_split_stack_space_check (rtx size, rtx label)
28987 {
28988 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28989 rtx limit = gen_reg_rtx (Pmode);
28990 rtx requested = gen_reg_rtx (Pmode);
28991 rtx cmp = gen_reg_rtx (CCUNSmode);
28992 rtx jump;
28993
28994 emit_insn (gen_load_split_stack_limit (limit));
28995 if (CONST_INT_P (size))
28996 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
28997 else
28998 {
28999 size = force_reg (Pmode, size);
29000 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29001 }
29002 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29003 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29004 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29005 gen_rtx_LABEL_REF (VOIDmode, label),
29006 pc_rtx);
29007 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29008 JUMP_LABEL (jump) = label;
29009 }
29010 \f
29011 /* Output the assembler code for a thunk function, used to implement
29012 C++ virtual function calls with
29013 multiple inheritance. The thunk acts as a wrapper around a virtual
29014 function, adjusting the implicit object parameter before handing
29015 control off to the real function.
29016
29017 First, emit code to add the integer DELTA to the location that
29018 contains the incoming first argument. Assume that this argument
29019 contains a pointer, and is the one used to pass the `this' pointer
29020 in C++. This is the incoming argument *before* the function
29021 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29022 values of all other incoming arguments.
29023
29024 After the addition, emit code to jump to FUNCTION, which is a
29025 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29026 not touch the return address. Hence returning from FUNCTION will
29027 return to whoever called the current `thunk'.
29028
29029 The effect must be as if FUNCTION had been called directly with the
29030 adjusted first argument. This macro is responsible for emitting
29031 all of the code for a thunk function; output_function_prologue()
29032 and output_function_epilogue() are not invoked.
29033
29034 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29035 been extracted from it.) It might possibly be useful on some
29036 targets, but probably not.
29037
29038 If you do not define this hook, the target-independent code in the
29039 C++ frontend will generate a less efficient heavyweight thunk that
29040 calls FUNCTION instead of jumping to it. The generic approach does
29041 not support varargs. */
29042
29043 static void
29044 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29045 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29046 tree function)
29047 {
29048 rtx this_rtx, funexp;
29049 rtx_insn *insn;
29050
29051 reload_completed = 1;
29052 epilogue_completed = 1;
29053
29054 /* Mark the end of the (empty) prologue. */
29055 emit_note (NOTE_INSN_PROLOGUE_END);
29056
29057 /* Find the "this" pointer. If the function returns a structure,
29058 the structure return pointer is in r3. */
29059 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29060 this_rtx = gen_rtx_REG (Pmode, 4);
29061 else
29062 this_rtx = gen_rtx_REG (Pmode, 3);
29063
29064 /* Apply the constant offset, if required. */
29065 if (delta)
29066 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29067
29068 /* Apply the offset from the vtable, if required. */
29069 if (vcall_offset)
29070 {
29071 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29072 rtx tmp = gen_rtx_REG (Pmode, 12);
29073
29074 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29075 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29076 {
29077 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29078 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29079 }
29080 else
29081 {
29082 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29083
29084 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29085 }
29086 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29087 }
29088
29089 /* Generate a tail call to the target function. */
29090 if (!TREE_USED (function))
29091 {
29092 assemble_external (function);
29093 TREE_USED (function) = 1;
29094 }
29095 funexp = XEXP (DECL_RTL (function), 0);
29096 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29097
29098 #if TARGET_MACHO
29099 if (MACHOPIC_INDIRECT)
29100 funexp = machopic_indirect_call_target (funexp);
29101 #endif
29102
29103 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29104 generate sibcall RTL explicitly. */
29105 insn = emit_call_insn (
29106 gen_rtx_PARALLEL (VOIDmode,
29107 gen_rtvec (3,
29108 gen_rtx_CALL (VOIDmode,
29109 funexp, const0_rtx),
29110 gen_rtx_USE (VOIDmode, const0_rtx),
29111 simple_return_rtx)));
29112 SIBLING_CALL_P (insn) = 1;
29113 emit_barrier ();
29114
29115 /* Run just enough of rest_of_compilation to get the insns emitted.
29116 There's not really enough bulk here to make other passes such as
29117 instruction scheduling worthwhile. Note that use_thunk calls
29118 assemble_start_function and assemble_end_function. */
29119 insn = get_insns ();
29120 shorten_branches (insn);
29121 final_start_function (insn, file, 1);
29122 final (insn, file, 1);
29123 final_end_function ();
29124
29125 reload_completed = 0;
29126 epilogue_completed = 0;
29127 }
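
/* Illustration (not from the original source): in C terms, the insns
   emitted above compute the adjusted `this' pointer as sketched below.
   The helper and the vtbl_offset_t typedef are hypothetical, purely
   for exposition; DELTA is applied first, then the offset loaded from
   the vtable slot at VCALL_OFFSET.  */
#if 0
typedef long vtbl_offset_t;	/* Pmode-sized vtable entry.  */

static void *
thunk_adjust_this (void *this_ptr, long delta, long vcall_offset)
{
  char *p = (char *) this_ptr + delta;	/* Constant adjustment.  */

  if (vcall_offset)
    {
      /* *p is the vtable pointer; add the offset stored in the
	 vtable at VCALL_OFFSET.  */
      char *vtbl = *(char **) p;
      p += *(vtbl_offset_t *) (vtbl + vcall_offset);
    }
  return p;
}
#endif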
29128 \f
29129 /* A quick summary of the various types of 'constant-pool tables'
29130 under PowerPC:
29131
29132 Target       Flags           Name             One table per
29133 AIX          (none)          AIX TOC          object file
29134 AIX          -mfull-toc      AIX TOC          object file
29135 AIX          -mminimal-toc   AIX minimal TOC  translation unit
29136 SVR4/EABI    (none)          SVR4 SDATA       object file
29137 SVR4/EABI    -fpic           SVR4 pic         object file
29138 SVR4/EABI    -fPIC           SVR4 PIC         translation unit
29139 SVR4/EABI    -mrelocatable   EABI TOC         function
29140 SVR4/EABI    -maix           AIX TOC          object file
29141 SVR4/EABI    -maix -mminimal-toc
29142                              AIX minimal TOC  translation unit
29143
29144 Name             Reg.  Set by  entries  contains:
29145                                made by  addrs?  fp?      sum?
29146
29147 AIX TOC          2     crt0    as       Y       option   option
29148 AIX minimal TOC  30    prolog  gcc      Y       Y        option
29149 SVR4 SDATA       13    crt0    gcc      N       Y        N
29150 SVR4 pic         30    prolog  ld       Y       not yet  N
29151 SVR4 PIC         30    prolog  gcc      Y       option   option
29152 EABI TOC         30    prolog  gcc      Y       option   option
29153
29154 */
29155
29156 /* Hash functions for the hash table. */
29157
29158 static unsigned
29159 rs6000_hash_constant (rtx k)
29160 {
29161 enum rtx_code code = GET_CODE (k);
29162 machine_mode mode = GET_MODE (k);
29163 unsigned result = (code << 3) ^ mode;
29164 const char *format;
29165 int flen, fidx;
29166
29167 format = GET_RTX_FORMAT (code);
29168 flen = strlen (format);
29169 fidx = 0;
29170
29171 switch (code)
29172 {
29173 case LABEL_REF:
29174 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29175
29176 case CONST_WIDE_INT:
29177 {
29178 int i;
29179 flen = CONST_WIDE_INT_NUNITS (k);
29180 for (i = 0; i < flen; i++)
29181 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29182 return result;
29183 }
29184
29185 case CONST_DOUBLE:
29186 if (mode != VOIDmode)
29187 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29188 flen = 2;
29189 break;
29190
29191 case CODE_LABEL:
29192 fidx = 3;
29193 break;
29194
29195 default:
29196 break;
29197 }
29198
29199 for (; fidx < flen; fidx++)
29200 switch (format[fidx])
29201 {
29202 case 's':
29203 {
29204 unsigned i, len;
29205 const char *str = XSTR (k, fidx);
29206 len = strlen (str);
29207 result = result * 613 + len;
29208 for (i = 0; i < len; i++)
29209 result = result * 613 + (unsigned) str[i];
29210 break;
29211 }
29212 case 'u':
29213 case 'e':
29214 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29215 break;
29216 case 'i':
29217 case 'n':
29218 result = result * 613 + (unsigned) XINT (k, fidx);
29219 break;
29220 case 'w':
29221 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29222 result = result * 613 + (unsigned) XWINT (k, fidx);
29223 else
29224 {
29225 size_t i;
29226 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29227 result = result * 613 + (unsigned) (XWINT (k, fidx)
29228 >> CHAR_BIT * i);
29229 }
29230 break;
29231 case '0':
29232 break;
29233 default:
29234 gcc_unreachable ();
29235 }
29236
29237 return result;
29238 }
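
/* Illustration (not from the original source): the hash above folds
   operands in with small prime multipliers (613 for data operands,
   1231 for code-affecting ones).  A standalone sketch of the string
   case, equivalent to the 's' arm of the loop; the helper name is
   hypothetical.  */
#if 0
static unsigned
hash_string_613 (unsigned result, const char *str)
{
  unsigned i, len = (unsigned) strlen (str);

  result = result * 613 + len;
  for (i = 0; i < len; i++)
    result = result * 613 + (unsigned) str[i];
  return result;
}
#endif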
29239
29240 hashval_t
29241 toc_hasher::hash (toc_hash_struct *thc)
29242 {
29243 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29244 }
29245
29246 /* Compare H1 and H2 for equivalence. */
29247
29248 bool
29249 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29250 {
29251 rtx r1 = h1->key;
29252 rtx r2 = h2->key;
29253
29254 if (h1->key_mode != h2->key_mode)
29255 return 0;
29256
29257 return rtx_equal_p (r1, r2);
29258 }
29259
29260 /* These are the names given by the C++ front-end to vtables, and
29261 vtable-like objects. Ideally, this logic should not be here;
29262 instead, there should be some programmatic way of inquiring as
29263 to whether or not an object is a vtable. */
29264
29265 #define VTABLE_NAME_P(NAME) \
29266 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
29267 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
29268 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
29269 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
29270 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
29271
29272 #ifdef NO_DOLLAR_IN_LABEL
29273 /* Return a GGC-allocated character string translating dollar signs in
29274 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29275
29276 const char *
29277 rs6000_xcoff_strip_dollar (const char *name)
29278 {
29279 char *strip, *p;
29280 const char *q;
29281 size_t len;
29282
29283 q = (const char *) strchr (name, '$');
29284
29285 if (q == 0 || q == name)
29286 return name;
29287
29288 len = strlen (name);
29289 strip = XALLOCAVEC (char, len + 1);
29290 strcpy (strip, name);
29291 p = strip + (q - name);
29292 while (p)
29293 {
29294 *p = '_';
29295 p = strchr (p + 1, '$');
29296 }
29297
29298 return ggc_alloc_string (strip, len);
29299 }
29300 #endif
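
/* Illustration (not from the original source, and only meaningful when
   NO_DOLLAR_IN_LABEL is defined): every '$' except a leading one is
   rewritten, so "foo$bar$baz" becomes "foo_bar_baz" while "$foo" is
   returned unchanged.  A minimal usage sketch: */
#if 0
static void
strip_dollar_example (void)
{
  gcc_assert (strcmp (rs6000_xcoff_strip_dollar ("foo$bar$baz"),
		      "foo_bar_baz") == 0);
  gcc_assert (strcmp (rs6000_xcoff_strip_dollar ("$foo"), "$foo") == 0);
}
#endif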
29301
29302 void
29303 rs6000_output_symbol_ref (FILE *file, rtx x)
29304 {
29305 const char *name = XSTR (x, 0);
29306
29307 /* Currently C++ toc references to vtables can be emitted before it
29308 is decided whether the vtable is public or private. If this is
29309 the case, then the linker will eventually complain that there is
29310 a reference to an unknown section. Thus, for vtables only,
29311 we emit the TOC reference to reference the identifier and not the
29312 symbol. */
29313 if (VTABLE_NAME_P (name))
29314 {
29315 RS6000_OUTPUT_BASENAME (file, name);
29316 }
29317 else
29318 assemble_name (file, name);
29319 }
29320
29321 /* Output a TOC entry. We derive the entry name from what is being
29322 written. */
29323
29324 void
29325 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29326 {
29327 char buf[256];
29328 const char *name = buf;
29329 rtx base = x;
29330 HOST_WIDE_INT offset = 0;
29331
29332 gcc_assert (!TARGET_NO_TOC);
29333
29334 /* When the linker won't eliminate them, don't output duplicate
29335 TOC entries (this happens on AIX if there is any kind of TOC,
29336 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29337 CODE_LABELs. */
29338 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29339 {
29340 struct toc_hash_struct *h;
29341
29342 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29343 time because GGC is not initialized at that point. */
29344 if (toc_hash_table == NULL)
29345 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29346
29347 h = ggc_alloc<toc_hash_struct> ();
29348 h->key = x;
29349 h->key_mode = mode;
29350 h->labelno = labelno;
29351
29352 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29353 if (*found == NULL)
29354 *found = h;
29355 else /* This is indeed a duplicate.
29356 Set this label equal to that label. */
29357 {
29358 fputs ("\t.set ", file);
29359 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29360 fprintf (file, "%d,", labelno);
29361 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29362 fprintf (file, "%d\n", ((*found)->labelno));
29363
29364 #ifdef HAVE_AS_TLS
29365 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29366 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29367 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29368 {
29369 fputs ("\t.set ", file);
29370 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29371 fprintf (file, "%d,", labelno);
29372 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29373 fprintf (file, "%d\n", ((*found)->labelno));
29374 }
29375 #endif
29376 return;
29377 }
29378 }
29379
29380 /* If we're going to put a double constant in the TOC, make sure it's
29381 aligned properly when strict alignment is on. */
29382 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29383 && STRICT_ALIGNMENT
29384 && GET_MODE_BITSIZE (mode) >= 64
29385 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29386 ASM_OUTPUT_ALIGN (file, 3);
29388
29389 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29390
29391 /* Handle FP constants specially. Note that if we have a minimal
29392 TOC, things we put here aren't actually in the TOC, so we can allow
29393 FP constants. */
29394 if (GET_CODE (x) == CONST_DOUBLE
29395 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29396 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29397 {
29398 long k[4];
29399
29400 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29401 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29402 else
29403 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29404
29405 if (TARGET_64BIT)
29406 {
29407 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29408 fputs (DOUBLE_INT_ASM_OP, file);
29409 else
29410 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29411 k[0] & 0xffffffff, k[1] & 0xffffffff,
29412 k[2] & 0xffffffff, k[3] & 0xffffffff);
29413 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29414 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29415 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29416 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29417 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29418 return;
29419 }
29420 else
29421 {
29422 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29423 fputs ("\t.long ", file);
29424 else
29425 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29426 k[0] & 0xffffffff, k[1] & 0xffffffff,
29427 k[2] & 0xffffffff, k[3] & 0xffffffff);
29428 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29429 k[0] & 0xffffffff, k[1] & 0xffffffff,
29430 k[2] & 0xffffffff, k[3] & 0xffffffff);
29431 return;
29432 }
29433 }
29434 else if (GET_CODE (x) == CONST_DOUBLE
29435 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29436 {
29437 long k[2];
29438
29439 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29440 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29441 else
29442 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29443
29444 if (TARGET_64BIT)
29445 {
29446 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29447 fputs (DOUBLE_INT_ASM_OP, file);
29448 else
29449 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29450 k[0] & 0xffffffff, k[1] & 0xffffffff);
29451 fprintf (file, "0x%lx%08lx\n",
29452 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29453 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29454 return;
29455 }
29456 else
29457 {
29458 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29459 fputs ("\t.long ", file);
29460 else
29461 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29462 k[0] & 0xffffffff, k[1] & 0xffffffff);
29463 fprintf (file, "0x%lx,0x%lx\n",
29464 k[0] & 0xffffffff, k[1] & 0xffffffff);
29465 return;
29466 }
29467 }
29468 else if (GET_CODE (x) == CONST_DOUBLE
29469 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29470 {
29471 long l;
29472
29473 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29474 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29475 else
29476 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29477
29478 if (TARGET_64BIT)
29479 {
29480 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29481 fputs (DOUBLE_INT_ASM_OP, file);
29482 else
29483 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29484 if (WORDS_BIG_ENDIAN)
29485 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29486 else
29487 fprintf (file, "0x%lx\n", l & 0xffffffff);
29488 return;
29489 }
29490 else
29491 {
29492 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29493 fputs ("\t.long ", file);
29494 else
29495 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29496 fprintf (file, "0x%lx\n", l & 0xffffffff);
29497 return;
29498 }
29499 }
29500 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29501 {
29502 unsigned HOST_WIDE_INT low;
29503 HOST_WIDE_INT high;
29504
29505 low = INTVAL (x) & 0xffffffff;
29506 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29507
29508 /* TOC entries are always Pmode-sized, so when big-endian
29509 smaller integer constants in the TOC need to be padded.
29510 (This is still a win over putting the constants in
29511 a separate constant pool, because then we'd have
29512 to have both a TOC entry _and_ the actual constant.)
29513
29514 For a 32-bit target, CONST_INT values are loaded and shifted
29515 entirely within `low' and can be stored in one TOC entry. */
29516
29517 /* It would be easy to make this work, but it doesn't now. */
29518 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29519
29520 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29521 {
29522 low |= high << 32;
29523 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29524 high = (HOST_WIDE_INT) low >> 32;
29525 low &= 0xffffffff;
29526 }
29527
29528 if (TARGET_64BIT)
29529 {
29530 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29531 fputs (DOUBLE_INT_ASM_OP, file);
29532 else
29533 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29534 (long) high & 0xffffffff, (long) low & 0xffffffff);
29535 fprintf (file, "0x%lx%08lx\n",
29536 (long) high & 0xffffffff, (long) low & 0xffffffff);
29537 return;
29538 }
29539 else
29540 {
29541 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29542 {
29543 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29544 fputs ("\t.long ", file);
29545 else
29546 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29547 (long) high & 0xffffffff, (long) low & 0xffffffff);
29548 fprintf (file, "0x%lx,0x%lx\n",
29549 (long) high & 0xffffffff, (long) low & 0xffffffff);
29550 }
29551 else
29552 {
29553 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29554 fputs ("\t.long ", file);
29555 else
29556 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29557 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29558 }
29559 return;
29560 }
29561 }
29562
29563 if (GET_CODE (x) == CONST)
29564 {
29565 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29566 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29567
29568 base = XEXP (XEXP (x, 0), 0);
29569 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29570 }
29571
29572 switch (GET_CODE (base))
29573 {
29574 case SYMBOL_REF:
29575 name = XSTR (base, 0);
29576 break;
29577
29578 case LABEL_REF:
29579 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29580 CODE_LABEL_NUMBER (XEXP (base, 0)));
29581 break;
29582
29583 case CODE_LABEL:
29584 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29585 break;
29586
29587 default:
29588 gcc_unreachable ();
29589 }
29590
29591 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29592 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29593 else
29594 {
29595 fputs ("\t.tc ", file);
29596 RS6000_OUTPUT_BASENAME (file, name);
29597
29598 if (offset < 0)
29599 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29600 else if (offset)
29601 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29602
29603 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29604 after other TOC symbols, reducing overflow of small TOC access
29605 to [TC] symbols. */
29606 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29607 ? "[TE]," : "[TC],", file);
29608 }
29609
29610 /* Currently C++ toc references to vtables can be emitted before it
29611 is decided whether the vtable is public or private. If this is
29612 the case, then the linker will eventually complain that there is
29613 a TOC reference to an unknown section. Thus, for vtables only,
29614 we emit the TOC reference to reference the symbol and not the
29615 section. */
29616 if (VTABLE_NAME_P (name))
29617 {
29618 RS6000_OUTPUT_BASENAME (file, name);
29619 if (offset < 0)
29620 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29621 else if (offset > 0)
29622 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29623 }
29624 else
29625 output_addr_const (file, x);
29626
29627 #if HAVE_AS_TLS
29628 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29629 {
29630 switch (SYMBOL_REF_TLS_MODEL (base))
29631 {
29632 case 0:
29633 break;
29634 case TLS_MODEL_LOCAL_EXEC:
29635 fputs ("@le", file);
29636 break;
29637 case TLS_MODEL_INITIAL_EXEC:
29638 fputs ("@ie", file);
29639 break;
29640 /* Use global-dynamic for local-dynamic. */
29641 case TLS_MODEL_GLOBAL_DYNAMIC:
29642 case TLS_MODEL_LOCAL_DYNAMIC:
29643 putc ('\n', file);
29644 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29645 fputs ("\t.tc .", file);
29646 RS6000_OUTPUT_BASENAME (file, name);
29647 fputs ("[TC],", file);
29648 output_addr_const (file, x);
29649 fputs ("@m", file);
29650 break;
29651 default:
29652 gcc_unreachable ();
29653 }
29654 }
29655 #endif
29656
29657 putc ('\n', file);
29658 }
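
/* Illustration (not from the original source): for the DFmode constant
   1.0, whose IEEE bit pattern is 0x3ff0000000000000,
   REAL_VALUE_TO_TARGET_DOUBLE yields k[0] = 0x3ff00000 and
   k[1] = 0x00000000 on a big-endian target (most significant word
   first).  With labelno == 0 the DFmode arm above therefore emits, on
   64-bit big-endian ELF (where DOUBLE_INT_ASM_OP is .quad):

	.LC0:
		.quad 0x3ff0000000000000

   and on 32-bit big-endian ELF:

	.LC0:
		.long 0x3ff00000,0x0

   The ".tc FD_..." spelling is the AIX TOC equivalent of the same
   entry.  */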
29659 \f
29660 /* Output an assembler pseudo-op to write an ASCII string of N characters
29661 starting at P to FILE.
29662
29663 On the RS/6000, we have to do this using the .byte operation and
29664 write out special characters outside the quoted string.
29665 Also, the assembler is broken; very long strings are truncated,
29666 so we must artificially break them up early. */
29667
29668 void
29669 output_ascii (FILE *file, const char *p, int n)
29670 {
29671 char c;
29672 int i, count_string;
29673 const char *for_string = "\t.byte \"";
29674 const char *for_decimal = "\t.byte ";
29675 const char *to_close = NULL;
29676
29677 count_string = 0;
29678 for (i = 0; i < n; i++)
29679 {
29680 c = *p++;
29681 if (c >= ' ' && c < 0177)
29682 {
29683 if (for_string)
29684 fputs (for_string, file);
29685 putc (c, file);
29686
29687 /* Write two quotes to get one. */
29688 if (c == '"')
29689 {
29690 putc (c, file);
29691 ++count_string;
29692 }
29693
29694 for_string = NULL;
29695 for_decimal = "\"\n\t.byte ";
29696 to_close = "\"\n";
29697 ++count_string;
29698
29699 if (count_string >= 512)
29700 {
29701 fputs (to_close, file);
29702
29703 for_string = "\t.byte \"";
29704 for_decimal = "\t.byte ";
29705 to_close = NULL;
29706 count_string = 0;
29707 }
29708 }
29709 else
29710 {
29711 if (for_decimal)
29712 fputs (for_decimal, file);
29713 fprintf (file, "%d", c);
29714
29715 for_string = "\n\t.byte \"";
29716 for_decimal = ", ";
29717 to_close = "\n";
29718 count_string = 0;
29719 }
29720 }
29721
29722 /* Now close the string if we have written one. Then end the line. */
29723 if (to_close)
29724 fputs (to_close, file);
29725 }
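
/* Illustration (not from the original source): for p = "hi\nok" with
   n == 6 (five characters plus the terminating NUL), the loop above
   emits

	.byte "hi"
	.byte 10
	.byte "ok"
	.byte 0

   Printable runs are collected into one quoted .byte string, and each
   unprintable byte is written as a decimal .byte value.  */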
29726 \f
29727 /* Generate a unique section name for FILENAME for a section type
29728 represented by SECTION_DESC. Output goes into BUF.
29729
29730 SECTION_DESC can be any string, as long as it is different for each
29731 possible section type.
29732
29733 We name the section in the same manner as xlc. The name begins with an
29734 underscore followed by the filename (after stripping any leading directory
29735 names) with the last period replaced by the string SECTION_DESC. If
29736 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29737 the name. */
29738
29739 void
29740 rs6000_gen_section_name (char **buf, const char *filename,
29741 const char *section_desc)
29742 {
29743 const char *q, *after_last_slash, *last_period = 0;
29744 char *p;
29745 int len;
29746
29747 after_last_slash = filename;
29748 for (q = filename; *q; q++)
29749 {
29750 if (*q == '/')
29751 after_last_slash = q + 1;
29752 else if (*q == '.')
29753 last_period = q;
29754 }
29755
29756 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29757 *buf = (char *) xmalloc (len);
29758
29759 p = *buf;
29760 *p++ = '_';
29761
29762 for (q = after_last_slash; *q; q++)
29763 {
29764 if (q == last_period)
29765 {
29766 strcpy (p, section_desc);
29767 p += strlen (section_desc);
29768 break;
29769 }
29770
29771 else if (ISALNUM (*q))
29772 *p++ = *q;
29773 }
29774
29775 if (last_period == 0)
29776 strcpy (p, section_desc);
29777 else
29778 *p = '\0';
29779 }
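
/* Illustration (not from the original source): following the xlc
   convention described above, filename "src/foo.c" with SECTION_DESC
   "_bss_" yields "_foo_bss_", and a filename with no period, say
   "README", yields "_README_bss_".  A minimal usage sketch; the
   SECTION_DESC string is hypothetical.  */
#if 0
static void
section_name_example (void)
{
  char *buf;

  rs6000_gen_section_name (&buf, "src/foo.c", "_bss_");
  gcc_assert (strcmp (buf, "_foo_bss_") == 0);
  free (buf);
}
#endif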
29780 \f
29781 /* Emit profile function. */
29782
29783 void
29784 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29785 {
29786 /* Non-standard profiling for kernels, which just saves LR then calls
29787 _mcount without worrying about arg saves. The idea is to change
29788 the function prologue as little as possible as it isn't easy to
29789 account for arg save/restore code added just for _mcount. */
29790 if (TARGET_PROFILE_KERNEL)
29791 return;
29792
29793 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29794 {
29795 #ifndef NO_PROFILE_COUNTERS
29796 # define NO_PROFILE_COUNTERS 0
29797 #endif
29798 if (NO_PROFILE_COUNTERS)
29799 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29800 LCT_NORMAL, VOIDmode);
29801 else
29802 {
29803 char buf[30];
29804 const char *label_name;
29805 rtx fun;
29806
29807 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29808 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29809 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29810
29811 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29812 LCT_NORMAL, VOIDmode, fun, Pmode);
29813 }
29814 }
29815 else if (DEFAULT_ABI == ABI_DARWIN)
29816 {
29817 const char *mcount_name = RS6000_MCOUNT;
29818 int caller_addr_regno = LR_REGNO;
29819
29820 /* Be conservative and always set this, at least for now. */
29821 crtl->uses_pic_offset_table = 1;
29822
29823 #if TARGET_MACHO
29824 /* For PIC code, set up a stub and collect the caller's address
29825 from r0, which is where the prologue puts it. */
29826 if (MACHOPIC_INDIRECT
29827 && crtl->uses_pic_offset_table)
29828 caller_addr_regno = 0;
29829 #endif
29830 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29831 LCT_NORMAL, VOIDmode,
29832 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29833 }
29834 }
29835
29836 /* Write function profiler code. */
29837
29838 void
29839 output_function_profiler (FILE *file, int labelno)
29840 {
29841 char buf[100];
29842
29843 switch (DEFAULT_ABI)
29844 {
29845 default:
29846 gcc_unreachable ();
29847
29848 case ABI_V4:
29849 if (!TARGET_32BIT)
29850 {
29851 warning (0, "no profiling of 64-bit code for this ABI");
29852 return;
29853 }
29854 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29855 fprintf (file, "\tmflr %s\n", reg_names[0]);
29856 if (NO_PROFILE_COUNTERS)
29857 {
29858 asm_fprintf (file, "\tstw %s,4(%s)\n",
29859 reg_names[0], reg_names[1]);
29860 }
29861 else if (TARGET_SECURE_PLT && flag_pic)
29862 {
29863 if (TARGET_LINK_STACK)
29864 {
29865 char name[32];
29866 get_ppc476_thunk_name (name);
29867 asm_fprintf (file, "\tbl %s\n", name);
29868 }
29869 else
29870 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
29871 asm_fprintf (file, "\tstw %s,4(%s)\n",
29872 reg_names[0], reg_names[1]);
29873 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29874 asm_fprintf (file, "\taddis %s,%s,",
29875 reg_names[12], reg_names[12]);
29876 assemble_name (file, buf);
29877 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
29878 assemble_name (file, buf);
29879 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
29880 }
29881 else if (flag_pic == 1)
29882 {
29883 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
29884 asm_fprintf (file, "\tstw %s,4(%s)\n",
29885 reg_names[0], reg_names[1]);
29886 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
29887 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
29888 assemble_name (file, buf);
29889 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
29890 }
29891 else if (flag_pic > 1)
29892 {
29893 asm_fprintf (file, "\tstw %s,4(%s)\n",
29894 reg_names[0], reg_names[1]);
29895 /* Now, we need to get the address of the label. */
29896 if (TARGET_LINK_STACK)
29897 {
29898 char name[32];
29899 get_ppc476_thunk_name (name);
29900 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
29901 assemble_name (file, buf);
29902 fputs ("-.\n1:", file);
29903 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29904 asm_fprintf (file, "\taddi %s,%s,4\n",
29905 reg_names[11], reg_names[11]);
29906 }
29907 else
29908 {
29909 fputs ("\tbcl 20,31,1f\n\t.long ", file);
29910 assemble_name (file, buf);
29911 fputs ("-.\n1:", file);
29912 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
29913 }
29914 asm_fprintf (file, "\tlwz %s,0(%s)\n",
29915 reg_names[0], reg_names[11]);
29916 asm_fprintf (file, "\tadd %s,%s,%s\n",
29917 reg_names[0], reg_names[0], reg_names[11]);
29918 }
29919 else
29920 {
29921 asm_fprintf (file, "\tlis %s,", reg_names[12]);
29922 assemble_name (file, buf);
29923 fputs ("@ha\n", file);
29924 asm_fprintf (file, "\tstw %s,4(%s)\n",
29925 reg_names[0], reg_names[1]);
29926 asm_fprintf (file, "\tla %s,", reg_names[0]);
29927 assemble_name (file, buf);
29928 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
29929 }
29930
29931 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
29932 fprintf (file, "\tbl %s%s\n",
29933 RS6000_MCOUNT, flag_pic ? "@plt" : "");
29934 break;
29935
29936 case ABI_AIX:
29937 case ABI_ELFv2:
29938 case ABI_DARWIN:
29939 /* Don't do anything, done in output_profile_hook (). */
29940 break;
29941 }
29942 }
29943
29944 \f
29945
29946 /* The following variable holds the last issued insn. */
29947
29948 static rtx_insn *last_scheduled_insn;
29949
29950 /* The following variable helps to balance the issuing of load and
29951 store instructions. */
29952
29953 static int load_store_pendulum;
29954
29955 /* The following variable helps pair divide insns during scheduling. */
29956 static int divide_cnt;
29957 /* The following variable helps pair and alternate vector and vector load
29958 insns during scheduling. */
29959 static int vec_pairing;
29960
29961
29962 /* Power4 load update and store update instructions are cracked into a
29963 load or store and an integer insn which are executed in the same cycle.
29964 Branches have their own dispatch slot which does not count against the
29965 GCC issue rate, but it changes the program flow so there are no other
29966 instructions to issue in this cycle. */
29967
29968 static int
29969 rs6000_variable_issue_1 (rtx_insn *insn, int more)
29970 {
29971 last_scheduled_insn = insn;
29972 if (GET_CODE (PATTERN (insn)) == USE
29973 || GET_CODE (PATTERN (insn)) == CLOBBER)
29974 {
29975 cached_can_issue_more = more;
29976 return cached_can_issue_more;
29977 }
29978
29979 if (insn_terminates_group_p (insn, current_group))
29980 {
29981 cached_can_issue_more = 0;
29982 return cached_can_issue_more;
29983 }
29984
29985 /* Insns with no machine reservation can still reach here; leave the issue count unchanged. */
29986 if (recog_memoized (insn) < 0)
29987 return more;
29988
29989 if (rs6000_sched_groups)
29990 {
29991 if (is_microcoded_insn (insn))
29992 cached_can_issue_more = 0;
29993 else if (is_cracked_insn (insn))
29994 cached_can_issue_more = more > 2 ? more - 2 : 0;
29995 else
29996 cached_can_issue_more = more - 1;
29997
29998 return cached_can_issue_more;
29999 }
30000
30001 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30002 return 0;
30003
30004 cached_can_issue_more = more - 1;
30005 return cached_can_issue_more;
30006 }
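
/* Illustration (not from the original source): with the Power4/Power5
   issue rate of 5, the accounting above leaves 0 slots after an insn
   that terminates a dispatch group or is microcoded, 3 slots (5 - 2)
   after a cracked insn, and 4 slots (5 - 1) after any other recognized
   insn.  */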
30007
30008 static int
30009 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30010 {
30011 int r = rs6000_variable_issue_1 (insn, more);
30012 if (verbose)
30013 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30014 return r;
30015 }
30016
30017 /* Adjust the cost of a scheduling dependency. Return the new cost of
30018 a dependency of type DEP_TYPE between INSN and DEP_INSN; COST is the current cost. */
30019
30020 static int
30021 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30022 unsigned int)
30023 {
30024 enum attr_type attr_type;
30025
30026 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30027 return cost;
30028
30029 switch (dep_type)
30030 {
30031 case REG_DEP_TRUE:
30032 {
30033 /* Data dependency; DEP_INSN writes a register that INSN reads
30034 some cycles later. */
30035
30036 /* Separate a load from a narrower, dependent store. */
30037 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30038 && GET_CODE (PATTERN (insn)) == SET
30039 && GET_CODE (PATTERN (dep_insn)) == SET
30040 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30041 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30042 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30043 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30044 return cost + 14;
30045
30046 attr_type = get_attr_type (insn);
30047
30048 switch (attr_type)
30049 {
30050 case TYPE_JMPREG:
30051 /* Tell the first scheduling pass about the latency between
30052 a mtctr and bctr (and mtlr and br/blr). The first
30053 scheduling pass will not know about this latency since
30054 the mtctr instruction, which has the latency associated
30055 to it, will be generated by reload. */
30056 return 4;
30057 case TYPE_BRANCH:
30058 /* Leave some extra cycles between a compare and its
30059 dependent branch, to inhibit expensive mispredicts. */
30060 if ((rs6000_tune == PROCESSOR_PPC603
30061 || rs6000_tune == PROCESSOR_PPC604
30062 || rs6000_tune == PROCESSOR_PPC604e
30063 || rs6000_tune == PROCESSOR_PPC620
30064 || rs6000_tune == PROCESSOR_PPC630
30065 || rs6000_tune == PROCESSOR_PPC750
30066 || rs6000_tune == PROCESSOR_PPC7400
30067 || rs6000_tune == PROCESSOR_PPC7450
30068 || rs6000_tune == PROCESSOR_PPCE5500
30069 || rs6000_tune == PROCESSOR_PPCE6500
30070 || rs6000_tune == PROCESSOR_POWER4
30071 || rs6000_tune == PROCESSOR_POWER5
30072 || rs6000_tune == PROCESSOR_POWER7
30073 || rs6000_tune == PROCESSOR_POWER8
30074 || rs6000_tune == PROCESSOR_POWER9
30075 || rs6000_tune == PROCESSOR_CELL)
30076 && recog_memoized (dep_insn)
30077 && (INSN_CODE (dep_insn) >= 0))
30078
30079 switch (get_attr_type (dep_insn))
30080 {
30081 case TYPE_CMP:
30082 case TYPE_FPCOMPARE:
30083 case TYPE_CR_LOGICAL:
30084 return cost + 2;
30085 case TYPE_EXTS:
30086 case TYPE_MUL:
30087 if (get_attr_dot (dep_insn) == DOT_YES)
30088 return cost + 2;
30089 else
30090 break;
30091 case TYPE_SHIFT:
30092 if (get_attr_dot (dep_insn) == DOT_YES
30093 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30094 return cost + 2;
30095 else
30096 break;
30097 default:
30098 break;
30099 }
30100 break;
30101
30102 case TYPE_STORE:
30103 case TYPE_FPSTORE:
30104 if ((rs6000_tune == PROCESSOR_POWER6)
30105 && recog_memoized (dep_insn)
30106 && (INSN_CODE (dep_insn) >= 0))
30107 {
30108
30109 if (GET_CODE (PATTERN (insn)) != SET)
30110 /* If this happens, we have to extend this to schedule
30111 optimally. Return default for now. */
30112 return cost;
30113
30114 /* Adjust the cost for the case where the value written
30115 by a fixed point operation is used as the address
30116 gen value on a store. */
30117 switch (get_attr_type (dep_insn))
30118 {
30119 case TYPE_LOAD:
30120 case TYPE_CNTLZ:
30121 {
30122 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30123 return get_attr_sign_extend (dep_insn)
30124 == SIGN_EXTEND_YES ? 6 : 4;
30125 break;
30126 }
30127 case TYPE_SHIFT:
30128 {
30129 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30130 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30131 6 : 3;
30132 break;
30133 }
30134 case TYPE_INTEGER:
30135 case TYPE_ADD:
30136 case TYPE_LOGICAL:
30137 case TYPE_EXTS:
30138 case TYPE_INSERT:
30139 {
30140 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30141 return 3;
30142 break;
30143 }
30144 case TYPE_STORE:
30145 case TYPE_FPLOAD:
30146 case TYPE_FPSTORE:
30147 {
30148 if (get_attr_update (dep_insn) == UPDATE_YES
30149 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30150 return 3;
30151 break;
30152 }
30153 case TYPE_MUL:
30154 {
30155 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30156 return 17;
30157 break;
30158 }
30159 case TYPE_DIV:
30160 {
30161 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30162 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30163 break;
30164 }
30165 default:
30166 break;
30167 }
30168 }
30169 break;
30170
30171 case TYPE_LOAD:
30172 if ((rs6000_tune == PROCESSOR_POWER6)
30173 && recog_memoized (dep_insn)
30174 && (INSN_CODE (dep_insn) >= 0))
30175 {
30176
30177 /* Adjust the cost for the case where the value written
30178 by a fixed point instruction is used within the address
30179 gen portion of a subsequent load(u)(x). */
30180 switch (get_attr_type (dep_insn))
30181 {
30182 case TYPE_LOAD:
30183 case TYPE_CNTLZ:
30184 {
30185 if (set_to_load_agen (dep_insn, insn))
30186 return get_attr_sign_extend (dep_insn)
30187 == SIGN_EXTEND_YES ? 6 : 4;
30188 break;
30189 }
30190 case TYPE_SHIFT:
30191 {
30192 if (set_to_load_agen (dep_insn, insn))
30193 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30194 6 : 3;
30195 break;
30196 }
30197 case TYPE_INTEGER:
30198 case TYPE_ADD:
30199 case TYPE_LOGICAL:
30200 case TYPE_EXTS:
30201 case TYPE_INSERT:
30202 {
30203 if (set_to_load_agen (dep_insn, insn))
30204 return 3;
30205 break;
30206 }
30207 case TYPE_STORE:
30208 case TYPE_FPLOAD:
30209 case TYPE_FPSTORE:
30210 {
30211 if (get_attr_update (dep_insn) == UPDATE_YES
30212 && set_to_load_agen (dep_insn, insn))
30213 return 3;
30214 break;
30215 }
30216 case TYPE_MUL:
30217 {
30218 if (set_to_load_agen (dep_insn, insn))
30219 return 17;
30220 break;
30221 }
30222 case TYPE_DIV:
30223 {
30224 if (set_to_load_agen (dep_insn, insn))
30225 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30226 break;
30227 }
30228 default:
30229 break;
30230 }
30231 }
30232 break;
30233
30234 case TYPE_FPLOAD:
30235 if ((rs6000_tune == PROCESSOR_POWER6)
30236 && get_attr_update (insn) == UPDATE_NO
30237 && recog_memoized (dep_insn)
30238 && (INSN_CODE (dep_insn) >= 0)
30239 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30240 return 2;
30241 break;
30242 default:
30243 break;
30244 }
30245
30246 /* Fall out to return default cost. */
30247 }
30248 break;
30249
30250 case REG_DEP_OUTPUT:
30251 /* Output dependency; DEP_INSN writes a register that INSN writes some
30252 cycles later. */
30253 if ((rs6000_tune == PROCESSOR_POWER6)
30254 && recog_memoized (dep_insn)
30255 && (INSN_CODE (dep_insn) >= 0))
30256 {
30257 attr_type = get_attr_type (insn);
30258
30259 switch (attr_type)
30260 {
30261 case TYPE_FP:
30262 case TYPE_FPSIMPLE:
30263 if (get_attr_type (dep_insn) == TYPE_FP
30264 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30265 return 1;
30266 break;
30267 case TYPE_FPLOAD:
30268 if (get_attr_update (insn) == UPDATE_NO
30269 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30270 return 2;
30271 break;
30272 default:
30273 break;
30274 }
30275 }
30276 /* Fall through, no cost for output dependency. */
30277 /* FALLTHRU */
30278
30279 case REG_DEP_ANTI:
30280 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30281 cycles later. */
30282 return 0;
30283
30284 default:
30285 gcc_unreachable ();
30286 }
30287
30288 return cost;
30289 }
30290
30291 /* Debug version of rs6000_adjust_cost. */
30292
30293 static int
30294 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30295 int cost, unsigned int dw)
30296 {
30297 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30298
30299 if (ret != cost)
30300 {
30301 const char *dep;
30302
30303 switch (dep_type)
30304 {
30305 default: dep = "unknown dependency"; break;
30306 case REG_DEP_TRUE: dep = "data dependency"; break;
30307 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30308 case REG_DEP_ANTI: dep = "anti dependency"; break;
30309 }
30310
30311 fprintf (stderr,
30312 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30313 "%s, insn:\n", ret, cost, dep);
30314
30315 debug_rtx (insn);
30316 }
30317
30318 return ret;
30319 }
30320
30321 /* The function returns true if INSN is microcoded.
30322 Return false otherwise. */
30323
30324 static bool
30325 is_microcoded_insn (rtx_insn *insn)
30326 {
30327 if (!insn || !NONDEBUG_INSN_P (insn)
30328 || GET_CODE (PATTERN (insn)) == USE
30329 || GET_CODE (PATTERN (insn)) == CLOBBER)
30330 return false;
30331
30332 if (rs6000_tune == PROCESSOR_CELL)
30333 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30334
30335 if (rs6000_sched_groups
30336 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30337 {
30338 enum attr_type type = get_attr_type (insn);
30339 if ((type == TYPE_LOAD
30340 && get_attr_update (insn) == UPDATE_YES
30341 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30342 || ((type == TYPE_LOAD || type == TYPE_STORE)
30343 && get_attr_update (insn) == UPDATE_YES
30344 && get_attr_indexed (insn) == INDEXED_YES)
30345 || type == TYPE_MFCR)
30346 return true;
30347 }
30348
30349 return false;
30350 }
30351
30352 /* The function returns true if INSN is cracked into 2 instructions
30353 by the processor (and therefore occupies 2 issue slots). */
30354
30355 static bool
30356 is_cracked_insn (rtx_insn *insn)
30357 {
30358 if (!insn || !NONDEBUG_INSN_P (insn)
30359 || GET_CODE (PATTERN (insn)) == USE
30360 || GET_CODE (PATTERN (insn)) == CLOBBER)
30361 return false;
30362
30363 if (rs6000_sched_groups
30364 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30365 {
30366 enum attr_type type = get_attr_type (insn);
30367 if ((type == TYPE_LOAD
30368 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30369 && get_attr_update (insn) == UPDATE_NO)
30370 || (type == TYPE_LOAD
30371 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30372 && get_attr_update (insn) == UPDATE_YES
30373 && get_attr_indexed (insn) == INDEXED_NO)
30374 || (type == TYPE_STORE
30375 && get_attr_update (insn) == UPDATE_YES
30376 && get_attr_indexed (insn) == INDEXED_NO)
30377 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30378 && get_attr_update (insn) == UPDATE_YES)
30379 || (type == TYPE_CR_LOGICAL
30380 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30381 || (type == TYPE_EXTS
30382 && get_attr_dot (insn) == DOT_YES)
30383 || (type == TYPE_SHIFT
30384 && get_attr_dot (insn) == DOT_YES
30385 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30386 || (type == TYPE_MUL
30387 && get_attr_dot (insn) == DOT_YES)
30388 || type == TYPE_DIV
30389 || (type == TYPE_INSERT
30390 && get_attr_size (insn) == SIZE_32))
30391 return true;
30392 }
30393
30394 return false;
30395 }
30396
30397 /* The function returns true if INSN can be issued only from
30398 the branch slot. */
30399
30400 static bool
30401 is_branch_slot_insn (rtx_insn *insn)
30402 {
30403 if (!insn || !NONDEBUG_INSN_P (insn)
30404 || GET_CODE (PATTERN (insn)) == USE
30405 || GET_CODE (PATTERN (insn)) == CLOBBER)
30406 return false;
30407
30408 if (rs6000_sched_groups)
30409 {
30410 enum attr_type type = get_attr_type (insn);
30411 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30412 return true;
30413 return false;
30414 }
30415
30416 return false;
30417 }
30418
30419 /* The function returns true if OUT_INSN sets a value that is
30420 used in the address generation computation of IN_INSN. */
30421 static bool
30422 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30423 {
30424 rtx out_set, in_set;
30425
30426 /* For performance reasons, only handle the simple case where
30427 both loads are a single_set. */
30428 out_set = single_set (out_insn);
30429 if (out_set)
30430 {
30431 in_set = single_set (in_insn);
30432 if (in_set)
30433 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30434 }
30435
30436 return false;
30437 }
30438
30439 /* Try to determine base/offset/size parts of the given MEM.
30440 Return true if successful, false if the values could not all
30441 be determined.
30442
30443 This function only looks for REG or REG+CONST address forms.
30444 REG+REG address form will return false. */
30445
30446 static bool
30447 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30448 HOST_WIDE_INT *size)
30449 {
30450 rtx addr_rtx;
30451 if (MEM_SIZE_KNOWN_P (mem))
30452 *size = MEM_SIZE (mem);
30453 else
30454 return false;
30455
30456 addr_rtx = (XEXP (mem, 0));
30457 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30458 addr_rtx = XEXP (addr_rtx, 1);
30459
30460 *offset = 0;
30461 while (GET_CODE (addr_rtx) == PLUS
30462 && CONST_INT_P (XEXP (addr_rtx, 1)))
30463 {
30464 *offset += INTVAL (XEXP (addr_rtx, 1));
30465 addr_rtx = XEXP (addr_rtx, 0);
30466 }
30467 if (!REG_P (addr_rtx))
30468 return false;
30469
30470 *base = addr_rtx;
30471 return true;
30472 }
30473
30474 /* Return true if the target storage location of MEM1 is adjacent
30475 to the target storage location of MEM2. */
30477
30478 static bool
30479 adjacent_mem_locations (rtx mem1, rtx mem2)
30480 {
30481 rtx reg1, reg2;
30482 HOST_WIDE_INT off1, size1, off2, size2;
30483
30484 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30485 && get_memref_parts (mem2, &reg2, &off2, &size2))
30486 return ((REGNO (reg1) == REGNO (reg2))
30487 && ((off1 + size1 == off2)
30488 || (off2 + size2 == off1)));
30489
30490 return false;
30491 }
30492
30493 /* This function returns true if it can be determined that the two MEM
30494 locations overlap by at least 1 byte based on base reg/offset/size. */
30495
30496 static bool
30497 mem_locations_overlap (rtx mem1, rtx mem2)
30498 {
30499 rtx reg1, reg2;
30500 HOST_WIDE_INT off1, size1, off2, size2;
30501
30502 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30503 && get_memref_parts (mem2, &reg2, &off2, &size2))
30504 return ((REGNO (reg1) == REGNO (reg2))
30505 && (((off1 <= off2) && (off1 + size1 > off2))
30506 || ((off2 <= off1) && (off2 + size2 > off1))));
30507
30508 return false;
30509 }
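
/* Illustration (not from the original source): given a common base
   register, the two predicates above reduce to pure interval tests on
   (offset, size) pairs.  E.g. 8(r9) with size 8 and 16(r9) with size 4
   are adjacent (8 + 8 == 16), while 8(r9) with size 8 and 12(r9) with
   size 4 overlap (8 <= 12 < 8 + 8).  A standalone sketch of the
   interval logic, with hypothetical plain integers instead of rtxes: */
#if 0
static bool
intervals_adjacent_p (long off1, long size1, long off2, long size2)
{
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static bool
intervals_overlap_p (long off1, long size1, long off2, long size2)
{
  return (off1 <= off2 && off1 + size1 > off2)
	 || (off2 <= off1 && off2 + size2 > off1);
}
#endif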
30510
30511 /* Update the integer scheduling priority INSN_PRIORITY (INSN):
30512 increase the priority to execute INSN earlier, reduce the priority
30513 to execute INSN later. Return the priority unchanged when no
30514 adjustment is needed. */
30516
30517 static int
30518 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30519 {
30520 rtx load_mem, str_mem;
30521 /* On machines (like the 750) which have asymmetric integer units,
30522 where one integer unit can do multiply and divides and the other
30523 can't, reduce the priority of multiply/divide so it is scheduled
30524 before other integer operations. */
30525
30526 #if 0
30527 if (! INSN_P (insn))
30528 return priority;
30529
30530 if (GET_CODE (PATTERN (insn)) == USE)
30531 return priority;
30532
30533 switch (rs6000_tune) {
30534 case PROCESSOR_PPC750:
30535 switch (get_attr_type (insn))
30536 {
30537 default:
30538 break;
30539
30540 case TYPE_MUL:
30541 case TYPE_DIV:
30542 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30543 priority, priority);
30544 if (priority >= 0 && priority < 0x01000000)
30545 priority >>= 3;
30546 break;
30547 }
30548 }
30549 #endif
30550
30551 if (insn_must_be_first_in_group (insn)
30552 && reload_completed
30553 && current_sched_info->sched_max_insns_priority
30554 && rs6000_sched_restricted_insns_priority)
30555 {
30556
30557 /* Prioritize insns that can be dispatched only in the first
30558 dispatch slot. */
30559 if (rs6000_sched_restricted_insns_priority == 1)
30560 /* Attach highest priority to insn. This means that in
30561 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30562 precede 'priority' (critical path) considerations. */
30563 return current_sched_info->sched_max_insns_priority;
30564 else if (rs6000_sched_restricted_insns_priority == 2)
30565 /* Increase priority of insn by a minimal amount. This means that in
30566 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30567 considerations precede dispatch-slot restriction considerations. */
30568 return (priority + 1);
30569 }
30570
30571 if (rs6000_tune == PROCESSOR_POWER6
30572 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30573 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30574 /* Attach highest priority to insn if the scheduler has just issued two
30575 stores and this instruction is a load, or two loads and this instruction
30576 is a store. Power6 wants loads and stores scheduled alternately
30577 when possible */
30578 return current_sched_info->sched_max_insns_priority;
30579
30580 return priority;
30581 }
30582
30583 /* Return true if the instruction is nonpipelined on the Cell. */
30584 static bool
30585 is_nonpipeline_insn (rtx_insn *insn)
30586 {
30587 enum attr_type type;
30588 if (!insn || !NONDEBUG_INSN_P (insn)
30589 || GET_CODE (PATTERN (insn)) == USE
30590 || GET_CODE (PATTERN (insn)) == CLOBBER)
30591 return false;
30592
30593 type = get_attr_type (insn);
30594 if (type == TYPE_MUL
30595 || type == TYPE_DIV
30596 || type == TYPE_SDIV
30597 || type == TYPE_DDIV
30598 || type == TYPE_SSQRT
30599 || type == TYPE_DSQRT
30600 || type == TYPE_MFCR
30601 || type == TYPE_MFCRF
30602 || type == TYPE_MFJMPR)
30603 {
30604 return true;
30605 }
30606 return false;
30607 }
30608
30609
30610 /* Return how many instructions the machine can issue per cycle. */
30611
30612 static int
30613 rs6000_issue_rate (void)
30614 {
30615 /* Unless scheduling for register pressure, use an issue rate of 1
30616 for the first scheduling pass to limit code degradation. */
30617 if (!reload_completed && !flag_sched_pressure)
30618 return 1;
30619
30620 switch (rs6000_tune) {
30621 case PROCESSOR_RS64A:
30622 case PROCESSOR_PPC601: /* ? */
30623 case PROCESSOR_PPC7450:
30624 return 3;
30625 case PROCESSOR_PPC440:
30626 case PROCESSOR_PPC603:
30627 case PROCESSOR_PPC750:
30628 case PROCESSOR_PPC7400:
30629 case PROCESSOR_PPC8540:
30630 case PROCESSOR_PPC8548:
30631 case PROCESSOR_CELL:
30632 case PROCESSOR_PPCE300C2:
30633 case PROCESSOR_PPCE300C3:
30634 case PROCESSOR_PPCE500MC:
30635 case PROCESSOR_PPCE500MC64:
30636 case PROCESSOR_PPCE5500:
30637 case PROCESSOR_PPCE6500:
30638 case PROCESSOR_TITAN:
30639 return 2;
30640 case PROCESSOR_PPC476:
30641 case PROCESSOR_PPC604:
30642 case PROCESSOR_PPC604e:
30643 case PROCESSOR_PPC620:
30644 case PROCESSOR_PPC630:
30645 return 4;
30646 case PROCESSOR_POWER4:
30647 case PROCESSOR_POWER5:
30648 case PROCESSOR_POWER6:
30649 case PROCESSOR_POWER7:
30650 return 5;
30651 case PROCESSOR_POWER8:
30652 return 7;
30653 case PROCESSOR_POWER9:
30654 return 6;
30655 default:
30656 return 1;
30657 }
30658 }
30659
30660 /* Return how many instructions to look ahead for better insn
30661 scheduling. */
30662
30663 static int
30664 rs6000_use_sched_lookahead (void)
30665 {
30666 switch (rs6000_tune)
30667 {
30668 case PROCESSOR_PPC8540:
30669 case PROCESSOR_PPC8548:
30670 return 4;
30671
30672 case PROCESSOR_CELL:
30673 return (reload_completed ? 8 : 0);
30674
30675 default:
30676 return 0;
30677 }
30678 }
30679
30680 /* We are choosing insn from the ready queue. Return zero if INSN can be
30681 chosen. */
30682 static int
30683 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30684 {
30685 if (ready_index == 0)
30686 return 0;
30687
30688 if (rs6000_tune != PROCESSOR_CELL)
30689 return 0;
30690
30691 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30692
30693 if (!reload_completed
30694 || is_nonpipeline_insn (insn)
30695 || is_microcoded_insn (insn))
30696 return 1;
30697
30698 return 0;
30699 }
30700
30701 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30702 and return true. */
30703
30704 static bool
30705 find_mem_ref (rtx pat, rtx *mem_ref)
30706 {
30707 const char * fmt;
30708 int i, j;
30709
30710 /* stack_tie does not produce any real memory traffic. */
30711 if (tie_operand (pat, VOIDmode))
30712 return false;
30713
30714 if (GET_CODE (pat) == MEM)
30715 {
30716 *mem_ref = pat;
30717 return true;
30718 }
30719
30720 /* Recursively process the pattern. */
30721 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30722
30723 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30724 {
30725 if (fmt[i] == 'e')
30726 {
30727 if (find_mem_ref (XEXP (pat, i), mem_ref))
30728 return true;
30729 }
30730 else if (fmt[i] == 'E')
30731 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30732 {
30733 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30734 return true;
30735 }
30736 }
30737
30738 return false;
30739 }
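
/* Illustrative example of the walk above: applied to the fragment
     (plus:DI (mem:DI (reg:DI 4)) (const_int 8))
   the recursion descends the 'e' operands of the PLUS, matches the MEM
   one level down, stores it in *MEM_REF and returns true; a fragment
   containing no MEM anywhere returns false.  */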
30740
30741 /* Determine if PAT is a PATTERN of a load insn. */
30742
30743 static bool
30744 is_load_insn1 (rtx pat, rtx *load_mem)
30745 {
30746 if (!pat)
30747 return false;
30748
30749 if (GET_CODE (pat) == SET)
30750 return find_mem_ref (SET_SRC (pat), load_mem);
30751
30752 if (GET_CODE (pat) == PARALLEL)
30753 {
30754 int i;
30755
30756 for (i = 0; i < XVECLEN (pat, 0); i++)
30757 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30758 return true;
30759 }
30760
30761 return false;
30762 }
30763
30764 /* Determine if INSN loads from memory. */
30765
30766 static bool
30767 is_load_insn (rtx insn, rtx *load_mem)
30768 {
30769 if (!insn || !INSN_P (insn))
30770 return false;
30771
30772 if (CALL_P (insn))
30773 return false;
30774
30775 return is_load_insn1 (PATTERN (insn), load_mem);
30776 }
30777
30778 /* Determine if PAT is a PATTERN of a store insn. */
30779
30780 static bool
30781 is_store_insn1 (rtx pat, rtx *str_mem)
30782 {
30783 if (!pat)
30784 return false;
30785
30786 if (GET_CODE (pat) == SET)
30787 return find_mem_ref (SET_DEST (pat), str_mem);
30788
30789 if (GET_CODE (pat) == PARALLEL)
30790 {
30791 int i;
30792
30793 for (i = 0; i < XVECLEN (pat, 0); i++)
30794 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30795 return true;
30796 }
30797
30798 return false;
30799 }
30800
30801 /* Determine if INSN stores to memory. */
30802
30803 static bool
30804 is_store_insn (rtx insn, rtx *str_mem)
30805 {
30806 if (!insn || !INSN_P (insn))
30807 return false;
30808
30809 return is_store_insn1 (PATTERN (insn), str_mem);
30810 }
30811
30812 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30813
30814 static bool
30815 is_power9_pairable_vec_type (enum attr_type type)
30816 {
30817 switch (type)
30818 {
30819 case TYPE_VECSIMPLE:
30820 case TYPE_VECCOMPLEX:
30821 case TYPE_VECDIV:
30822 case TYPE_VECCMP:
30823 case TYPE_VECPERM:
30824 case TYPE_VECFLOAT:
30825 case TYPE_VECFDIV:
30826 case TYPE_VECDOUBLE:
30827 return true;
30828 default:
30829 break;
30830 }
30831 return false;
30832 }
30833
30834 /* Returns whether the dependence between INSN and NEXT is considered
30835 costly by the given target. */
30836
30837 static bool
30838 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30839 {
30840 rtx insn;
30841 rtx next;
30842 rtx load_mem, str_mem;
30843
30844 /* If the flag is not enabled, no dependence is considered costly;
30845 allow all dependent insns in the same group.
30846 This is the most aggressive option. */
30847 if (rs6000_sched_costly_dep == no_dep_costly)
30848 return false;
30849
30850 /* If the flag is set to 1, a dependence is always considered costly;
30851 do not allow dependent instructions in the same group.
30852 This is the most conservative option. */
30853 if (rs6000_sched_costly_dep == all_deps_costly)
30854 return true;
30855
30856 insn = DEP_PRO (dep);
30857 next = DEP_CON (dep);
30858
30859 if (rs6000_sched_costly_dep == store_to_load_dep_costly
30860 && is_load_insn (next, &load_mem)
30861 && is_store_insn (insn, &str_mem))
30862 /* Prevent load after store in the same group. */
30863 return true;
30864
30865 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
30866 && is_load_insn (next, &load_mem)
30867 && is_store_insn (insn, &str_mem)
30868 && DEP_TYPE (dep) == REG_DEP_TRUE
30869 && mem_locations_overlap(str_mem, load_mem))
30870 /* Prevent load after store in the same group if it is a true
30871 dependence. */
30872 return true;
30873
30874 /* The flag is set to X; dependences with latency >= X are considered costly,
30875 and will not be scheduled in the same group. */
30876 if (rs6000_sched_costly_dep <= max_dep_latency
30877 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
30878 return true;
30879
30880 return false;
30881 }
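
/* Worked example (illustrative, assuming the numeric form of the
   -msched-costly-dep= option): with -msched-costly-dep=5 the variable
   holds 5, which is below max_dep_latency, so a dependence of cost 6
   at distance 0 satisfies (6 - 0) >= 5 and is considered costly, while
   a single-cycle dependence (cost 1) is not.  */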
30882
30883 /* Return the next insn after INSN that is found before TAIL is reached,
30884 skipping any "non-active" insns - insns that will not actually occupy
30885 an issue slot. Return NULL_RTX if such an insn is not found. */
30886
30887 static rtx_insn *
30888 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
30889 {
30890 if (insn == NULL_RTX || insn == tail)
30891 return NULL;
30892
30893 while (1)
30894 {
30895 insn = NEXT_INSN (insn);
30896 if (insn == NULL_RTX || insn == tail)
30897 return NULL;
30898
30899 if (CALL_P (insn)
30900 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
30901 || (NONJUMP_INSN_P (insn)
30902 && GET_CODE (PATTERN (insn)) != USE
30903 && GET_CODE (PATTERN (insn)) != CLOBBER
30904 && INSN_CODE (insn) != CODE_FOR_stack_tie))
30905 break;
30906 }
30907 return insn;
30908 }
30909
30910 /* Do Power9 specific sched_reorder2 reordering of ready list. */
30911
30912 static int
30913 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
30914 {
30915 int pos;
30916 int i;
30917 rtx_insn *tmp;
30918 enum attr_type type, type2;
30919
30920 type = get_attr_type (last_scheduled_insn);
30921
30922 /* Try to issue fixed point divides back-to-back in pairs so they will be
30923 routed to separate execution units and execute in parallel. */
30924 if (type == TYPE_DIV && divide_cnt == 0)
30925 {
30926 /* First divide has been scheduled. */
30927 divide_cnt = 1;
30928
30929 /* Scan the ready list looking for another divide, if found move it
30930 to the end of the list so it is chosen next. */
30931 pos = lastpos;
30932 while (pos >= 0)
30933 {
30934 if (recog_memoized (ready[pos]) >= 0
30935 && get_attr_type (ready[pos]) == TYPE_DIV)
30936 {
30937 tmp = ready[pos];
30938 for (i = pos; i < lastpos; i++)
30939 ready[i] = ready[i + 1];
30940 ready[lastpos] = tmp;
30941 break;
30942 }
30943 pos--;
30944 }
30945 }
30946 else
30947 {
30948 /* Last insn was the 2nd divide or not a divide, reset the counter. */
30949 divide_cnt = 0;
30950
30951 /* The best dispatch throughput for vector and vector load insns can be
30952 achieved by interleaving a vector and vector load such that they'll
30953 dispatch to the same superslice. If this pairing cannot be achieved
30954 then it is best to pair vector insns together and vector load insns
30955 together.
30956
30957 To aid in this pairing, vec_pairing maintains the current state with
30958 the following values:
30959
30960 0 : Initial state, no vecload/vector pairing has been started.
30961
30962 1 : A vecload or vector insn has been issued and a candidate for
30963 pairing has been found and moved to the end of the ready
30964 list. */
30965 if (type == TYPE_VECLOAD)
30966 {
30967 /* Issued a vecload. */
30968 if (vec_pairing == 0)
30969 {
30970 int vecload_pos = -1;
30971 /* We issued a single vecload, look for a vector insn to pair it
30972 with. If one isn't found, try to pair another vecload. */
30973 pos = lastpos;
30974 while (pos >= 0)
30975 {
30976 if (recog_memoized (ready[pos]) >= 0)
30977 {
30978 type2 = get_attr_type (ready[pos]);
30979 if (is_power9_pairable_vec_type (type2))
30980 {
30981 /* Found a vector insn to pair with, move it to the
30982 end of the ready list so it is scheduled next. */
30983 tmp = ready[pos];
30984 for (i = pos; i < lastpos; i++)
30985 ready[i] = ready[i + 1];
30986 ready[lastpos] = tmp;
30987 vec_pairing = 1;
30988 return cached_can_issue_more;
30989 }
30990 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
30991 /* Remember position of first vecload seen. */
30992 vecload_pos = pos;
30993 }
30994 pos--;
30995 }
30996 if (vecload_pos >= 0)
30997 {
30998 /* Didn't find a vector to pair with but did find a vecload,
30999 move it to the end of the ready list. */
31000 tmp = ready[vecload_pos];
31001 for (i = vecload_pos; i < lastpos; i++)
31002 ready[i] = ready[i + 1];
31003 ready[lastpos] = tmp;
31004 vec_pairing = 1;
31005 return cached_can_issue_more;
31006 }
31007 }
31008 }
31009 else if (is_power9_pairable_vec_type (type))
31010 {
31011 /* Issued a vector operation. */
31012 if (vec_pairing == 0)
31013 {
31014 int vec_pos = -1;
31015 /* We issued a single vector insn, look for a vecload to pair it
31016 with. If one isn't found, try to pair another vector. */
31017 pos = lastpos;
31018 while (pos >= 0)
31019 {
31020 if (recog_memoized (ready[pos]) >= 0)
31021 {
31022 type2 = get_attr_type (ready[pos]);
31023 if (type2 == TYPE_VECLOAD)
31024 {
31025 /* Found a vecload insn to pair with, move it to the
31026 end of the ready list so it is scheduled next. */
31027 tmp = ready[pos];
31028 for (i = pos; i < lastpos; i++)
31029 ready[i] = ready[i + 1];
31030 ready[lastpos] = tmp;
31031 vec_pairing = 1;
31032 return cached_can_issue_more;
31033 }
31034 else if (is_power9_pairable_vec_type (type2)
31035 && vec_pos == -1)
31036 /* Remember position of first vector insn seen. */
31037 vec_pos = pos;
31038 }
31039 pos--;
31040 }
31041 if (vec_pos >= 0)
31042 {
31043 /* Didn't find a vecload to pair with but did find a vector
31044 insn, move it to the end of the ready list. */
31045 tmp = ready[vec_pos];
31046 for (i = vec_pos; i < lastpos; i++)
31047 ready[i] = ready[i + 1];
31048 ready[lastpos] = tmp;
31049 vec_pairing = 1;
31050 return cached_can_issue_more;
31051 }
31052 }
31053 }
31054
31055 /* We've either finished a vec/vecload pair, couldn't find an insn to
31056 continue the current pair, or the last insn had nothing to do
31057 with pairing.  In any case, reset the state. */
31058 vec_pairing = 0;
31059 }
31060
31061 return cached_can_issue_more;
31062 }
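
/* Illustrative trace of the pairing logic above: after a vecload issues
   with vec_pairing == 0, a ready list of { add, vecsimple, mul } (insns
   issue from the highest index) is rotated to { add, mul, vecsimple },
   the vecsimple insn is chosen next, and vec_pairing becomes 1.  Had
   only another vecload been ready, that vecload would have been moved
   instead, so that at least two vecloads pair together.  */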
31063
31064 /* We are about to begin issuing insns for this clock cycle. */
31065
31066 static int
31067 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31068 rtx_insn **ready ATTRIBUTE_UNUSED,
31069 int *pn_ready ATTRIBUTE_UNUSED,
31070 int clock_var ATTRIBUTE_UNUSED)
31071 {
31072 int n_ready = *pn_ready;
31073
31074 if (sched_verbose)
31075 fprintf (dump, "// rs6000_sched_reorder :\n");
31076
31077 /* Reorder the ready list if the next insn to be issued (the last
31078 element of the ready list) is a nonpipelined insn. */
31079 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31080 {
31081 if (is_nonpipeline_insn (ready[n_ready - 1])
31082 && (recog_memoized (ready[n_ready - 2]) > 0))
31083 /* Simply swap first two insns. */
31084 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31085 }
31086
31087 if (rs6000_tune == PROCESSOR_POWER6)
31088 load_store_pendulum = 0;
31089
31090 return rs6000_issue_rate ();
31091 }
31092
31093 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31094
31095 static int
31096 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31097 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31098 {
31099 if (sched_verbose)
31100 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31101
31102 /* For Power6, we need to handle some special cases to try to keep the
31103 store queue from overflowing and triggering expensive flushes.
31104
31105 This code monitors how load and store instructions are being issued
31106 and skews the ready list one way or the other to increase the likelihood
31107 that a desired instruction is issued at the proper time.
31108
31109 A couple of things are done. First, we maintain a "load_store_pendulum"
31110 to track the current state of load/store issue.
31111
31112 - If the pendulum is at zero, then no loads or stores have been
31113 issued in the current cycle so we do nothing.
31114
31115 - If the pendulum is 1, then a single load has been issued in this
31116 cycle and we attempt to locate another load in the ready list to
31117 issue with it.
31118
31119 - If the pendulum is -2, then two stores have already been
31120 issued in this cycle, so we increase the priority of the first load
31121 in the ready list to increase its likelihood of being chosen first
31122 in the next cycle.
31123
31124 - If the pendulum is -1, then a single store has been issued in this
31125 cycle and we attempt to locate another store in the ready list to
31126 issue with it, preferring a store to an adjacent memory location to
31127 facilitate store pairing in the store queue.
31128
31129 - If the pendulum is 2, then two loads have already been
31130 issued in this cycle, so we increase the priority of the first store
31131 in the ready list to increase its likelihood of being chosen first
31132 in the next cycle.
31133
31134 - If the pendulum < -2 or > 2, then do nothing.
31135
31136 Note: This code covers the most common scenarios. There exist
31137 non-load/store instructions which make use of the LSU and which
31138 would need to be accounted for to strictly model the behavior
31139 of the machine. Those instructions are currently left unaccounted
31140 for, to help minimize the compile-time overhead of this code.
31141 */
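
  /* A concrete (illustrative) trace of the rules above: issuing two
     stores in one cycle drives the pendulum 0 -> -1 -> -2; at -2 the
     first load found on the ready list gets INSN_PRIORITY++ and the
     pendulum is pushed to -3, so no further loads are boosted before
     rs6000_sched_reorder resets the pendulum for the next cycle.  */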
31142 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31143 {
31144 int pos;
31145 int i;
31146 rtx_insn *tmp;
31147 rtx load_mem, str_mem;
31148
31149 if (is_store_insn (last_scheduled_insn, &str_mem))
31150 /* Issuing a store, swing the load_store_pendulum to the left. */
31151 load_store_pendulum--;
31152 else if (is_load_insn (last_scheduled_insn, &load_mem))
31153 /* Issuing a load, swing the load_store_pendulum to the right. */
31154 load_store_pendulum++;
31155 else
31156 return cached_can_issue_more;
31157
31158 /* If the pendulum is balanced, or there is only one instruction on
31159 the ready list, then all is well, so return. */
31160 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31161 return cached_can_issue_more;
31162
31163 if (load_store_pendulum == 1)
31164 {
31165 /* A load has been issued in this cycle. Scan the ready list
31166 for another load to issue with it. */
31167 pos = *pn_ready-1;
31168
31169 while (pos >= 0)
31170 {
31171 if (is_load_insn (ready[pos], &load_mem))
31172 {
31173 /* Found a load. Move it to the head of the ready list,
31174 and adjust its priority so that it is more likely to
31175 stay there. */
31176 tmp = ready[pos];
31177 for (i=pos; i<*pn_ready-1; i++)
31178 ready[i] = ready[i + 1];
31179 ready[*pn_ready-1] = tmp;
31180
31181 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31182 INSN_PRIORITY (tmp)++;
31183 break;
31184 }
31185 pos--;
31186 }
31187 }
31188 else if (load_store_pendulum == -2)
31189 {
31190 /* Two stores have been issued in this cycle. Increase the
31191 priority of the first load in the ready list to favor it for
31192 issuing in the next cycle. */
31193 pos = *pn_ready-1;
31194
31195 while (pos >= 0)
31196 {
31197 if (is_load_insn (ready[pos], &load_mem)
31198 && !sel_sched_p ()
31199 && INSN_PRIORITY_KNOWN (ready[pos]))
31200 {
31201 INSN_PRIORITY (ready[pos])++;
31202
31203 /* Adjust the pendulum to account for the fact that a load
31204 was found and increased in priority. This is to prevent
31205 increasing the priority of multiple loads */
31206 load_store_pendulum--;
31207
31208 break;
31209 }
31210 pos--;
31211 }
31212 }
31213 else if (load_store_pendulum == -1)
31214 {
31215 /* A store has been issued in this cycle. Scan the ready list for
31216 another store to issue with it, preferring a store to an adjacent
31217 memory location. */
31218 int first_store_pos = -1;
31219
31220 pos = *pn_ready-1;
31221
31222 while (pos >= 0)
31223 {
31224 if (is_store_insn (ready[pos], &str_mem))
31225 {
31226 rtx str_mem2;
31227 /* Maintain the index of the first store found on the
31228 list. */
31229 if (first_store_pos == -1)
31230 first_store_pos = pos;
31231
31232 if (is_store_insn (last_scheduled_insn, &str_mem2)
31233 && adjacent_mem_locations (str_mem, str_mem2))
31234 {
31235 /* Found an adjacent store. Move it to the head of the
31236 ready list, and adjust its priority so that it is
31237 more likely to stay there. */
31238 tmp = ready[pos];
31239 for (i=pos; i<*pn_ready-1; i++)
31240 ready[i] = ready[i + 1];
31241 ready[*pn_ready-1] = tmp;
31242
31243 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31244 INSN_PRIORITY (tmp)++;
31245
31246 first_store_pos = -1;
31247
31248 break;
31249 }
31250 }
31251 pos--;
31252 }
31253
31254 if (first_store_pos >= 0)
31255 {
31256 /* An adjacent store wasn't found, but a non-adjacent store was,
31257 so move the non-adjacent store to the front of the ready
31258 list, and adjust its priority so that it is more likely to
31259 stay there. */
31260 tmp = ready[first_store_pos];
31261 for (i=first_store_pos; i<*pn_ready-1; i++)
31262 ready[i] = ready[i + 1];
31263 ready[*pn_ready-1] = tmp;
31264 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31265 INSN_PRIORITY (tmp)++;
31266 }
31267 }
31268 else if (load_store_pendulum == 2)
31269 {
31270 /* Two loads have been issued in this cycle. Increase the priority
31271 of the first store in the ready list to favor it for issuing in
31272 the next cycle. */
31273 pos = *pn_ready-1;
31274
31275 while (pos >= 0)
31276 {
31277 if (is_store_insn (ready[pos], &str_mem)
31278 && !sel_sched_p ()
31279 && INSN_PRIORITY_KNOWN (ready[pos]))
31280 {
31281 INSN_PRIORITY (ready[pos])++;
31282
31283 /* Adjust the pendulum to account for the fact that a store
31284 was found and increased in priority. This is to prevent
31285 increasing the priority of multiple stores */
31286 load_store_pendulum++;
31287
31288 break;
31289 }
31290 pos--;
31291 }
31292 }
31293 }
31294
31295 /* Do Power9 dependent reordering if necessary. */
31296 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31297 && recog_memoized (last_scheduled_insn) >= 0)
31298 return power9_sched_reorder2 (ready, *pn_ready - 1);
31299
31300 return cached_can_issue_more;
31301 }
31302
31303 /* Return whether the presence of INSN causes a dispatch group termination
31304 of group WHICH_GROUP.
31305
31306 If WHICH_GROUP == current_group, this function will return true if INSN
31307 causes the termination of the current group (i.e, the dispatch group to
31308 which INSN belongs). This means that INSN will be the last insn in the
31309 group it belongs to.
31310
31311 If WHICH_GROUP == previous_group, this function will return true if INSN
31312 causes the termination of the previous group (i.e, the dispatch group that
31313 precedes the group to which INSN belongs). This means that INSN will be
31314 the first insn in the group it belongs to). */
31315
31316 static bool
31317 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31318 {
31319 bool first, last;
31320
31321 if (! insn)
31322 return false;
31323
31324 first = insn_must_be_first_in_group (insn);
31325 last = insn_must_be_last_in_group (insn);
31326
31327 if (first && last)
31328 return true;
31329
31330 if (which_group == current_group)
31331 return last;
31332 else if (which_group == previous_group)
31333 return first;
31334
31335 return false;
31336 }
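
/* Example (taken from the tables below): on Power6 a TYPE_SYNC insn
   must be both first and last in its dispatch group, so the function
   above returns true for either value of WHICH_GROUP.  */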
31337
31338
31339 static bool
31340 insn_must_be_first_in_group (rtx_insn *insn)
31341 {
31342 enum attr_type type;
31343
31344 if (!insn
31345 || NOTE_P (insn)
31346 || DEBUG_INSN_P (insn)
31347 || GET_CODE (PATTERN (insn)) == USE
31348 || GET_CODE (PATTERN (insn)) == CLOBBER)
31349 return false;
31350
31351 switch (rs6000_tune)
31352 {
31353 case PROCESSOR_POWER5:
31354 if (is_cracked_insn (insn))
31355 return true;
31356 /* FALLTHRU */
31357 case PROCESSOR_POWER4:
31358 if (is_microcoded_insn (insn))
31359 return true;
31360
31361 if (!rs6000_sched_groups)
31362 return false;
31363
31364 type = get_attr_type (insn);
31365
31366 switch (type)
31367 {
31368 case TYPE_MFCR:
31369 case TYPE_MFCRF:
31370 case TYPE_MTCR:
31371 case TYPE_CR_LOGICAL:
31372 case TYPE_MTJMPR:
31373 case TYPE_MFJMPR:
31374 case TYPE_DIV:
31375 case TYPE_LOAD_L:
31376 case TYPE_STORE_C:
31377 case TYPE_ISYNC:
31378 case TYPE_SYNC:
31379 return true;
31380 default:
31381 break;
31382 }
31383 break;
31384 case PROCESSOR_POWER6:
31385 type = get_attr_type (insn);
31386
31387 switch (type)
31388 {
31389 case TYPE_EXTS:
31390 case TYPE_CNTLZ:
31391 case TYPE_TRAP:
31392 case TYPE_MUL:
31393 case TYPE_INSERT:
31394 case TYPE_FPCOMPARE:
31395 case TYPE_MFCR:
31396 case TYPE_MTCR:
31397 case TYPE_MFJMPR:
31398 case TYPE_MTJMPR:
31399 case TYPE_ISYNC:
31400 case TYPE_SYNC:
31401 case TYPE_LOAD_L:
31402 case TYPE_STORE_C:
31403 return true;
31404 case TYPE_SHIFT:
31405 if (get_attr_dot (insn) == DOT_NO
31406 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31407 return true;
31408 else
31409 break;
31410 case TYPE_DIV:
31411 if (get_attr_size (insn) == SIZE_32)
31412 return true;
31413 else
31414 break;
31415 case TYPE_LOAD:
31416 case TYPE_STORE:
31417 case TYPE_FPLOAD:
31418 case TYPE_FPSTORE:
31419 if (get_attr_update (insn) == UPDATE_YES)
31420 return true;
31421 else
31422 break;
31423 default:
31424 break;
31425 }
31426 break;
31427 case PROCESSOR_POWER7:
31428 type = get_attr_type (insn);
31429
31430 switch (type)
31431 {
31432 case TYPE_CR_LOGICAL:
31433 case TYPE_MFCR:
31434 case TYPE_MFCRF:
31435 case TYPE_MTCR:
31436 case TYPE_DIV:
31437 case TYPE_ISYNC:
31438 case TYPE_LOAD_L:
31439 case TYPE_STORE_C:
31440 case TYPE_MFJMPR:
31441 case TYPE_MTJMPR:
31442 return true;
31443 case TYPE_MUL:
31444 case TYPE_SHIFT:
31445 case TYPE_EXTS:
31446 if (get_attr_dot (insn) == DOT_YES)
31447 return true;
31448 else
31449 break;
31450 case TYPE_LOAD:
31451 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31452 || get_attr_update (insn) == UPDATE_YES)
31453 return true;
31454 else
31455 break;
31456 case TYPE_STORE:
31457 case TYPE_FPLOAD:
31458 case TYPE_FPSTORE:
31459 if (get_attr_update (insn) == UPDATE_YES)
31460 return true;
31461 else
31462 break;
31463 default:
31464 break;
31465 }
31466 break;
31467 case PROCESSOR_POWER8:
31468 type = get_attr_type (insn);
31469
31470 switch (type)
31471 {
31472 case TYPE_CR_LOGICAL:
31473 case TYPE_MFCR:
31474 case TYPE_MFCRF:
31475 case TYPE_MTCR:
31476 case TYPE_SYNC:
31477 case TYPE_ISYNC:
31478 case TYPE_LOAD_L:
31479 case TYPE_STORE_C:
31480 case TYPE_VECSTORE:
31481 case TYPE_MFJMPR:
31482 case TYPE_MTJMPR:
31483 return true;
31484 case TYPE_SHIFT:
31485 case TYPE_EXTS:
31486 case TYPE_MUL:
31487 if (get_attr_dot (insn) == DOT_YES)
31488 return true;
31489 else
31490 break;
31491 case TYPE_LOAD:
31492 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31493 || get_attr_update (insn) == UPDATE_YES)
31494 return true;
31495 else
31496 break;
31497 case TYPE_STORE:
31498 if (get_attr_update (insn) == UPDATE_YES
31499 && get_attr_indexed (insn) == INDEXED_YES)
31500 return true;
31501 else
31502 break;
31503 default:
31504 break;
31505 }
31506 break;
31507 default:
31508 break;
31509 }
31510
31511 return false;
31512 }
31513
31514 static bool
31515 insn_must_be_last_in_group (rtx_insn *insn)
31516 {
31517 enum attr_type type;
31518
31519 if (!insn
31520 || NOTE_P (insn)
31521 || DEBUG_INSN_P (insn)
31522 || GET_CODE (PATTERN (insn)) == USE
31523 || GET_CODE (PATTERN (insn)) == CLOBBER)
31524 return false;
31525
31526 switch (rs6000_tune) {
31527 case PROCESSOR_POWER4:
31528 case PROCESSOR_POWER5:
31529 if (is_microcoded_insn (insn))
31530 return true;
31531
31532 if (is_branch_slot_insn (insn))
31533 return true;
31534
31535 break;
31536 case PROCESSOR_POWER6:
31537 type = get_attr_type (insn);
31538
31539 switch (type)
31540 {
31541 case TYPE_EXTS:
31542 case TYPE_CNTLZ:
31543 case TYPE_TRAP:
31544 case TYPE_MUL:
31545 case TYPE_FPCOMPARE:
31546 case TYPE_MFCR:
31547 case TYPE_MTCR:
31548 case TYPE_MFJMPR:
31549 case TYPE_MTJMPR:
31550 case TYPE_ISYNC:
31551 case TYPE_SYNC:
31552 case TYPE_LOAD_L:
31553 case TYPE_STORE_C:
31554 return true;
31555 case TYPE_SHIFT:
31556 if (get_attr_dot (insn) == DOT_NO
31557 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31558 return true;
31559 else
31560 break;
31561 case TYPE_DIV:
31562 if (get_attr_size (insn) == SIZE_32)
31563 return true;
31564 else
31565 break;
31566 default:
31567 break;
31568 }
31569 break;
31570 case PROCESSOR_POWER7:
31571 type = get_attr_type (insn);
31572
31573 switch (type)
31574 {
31575 case TYPE_ISYNC:
31576 case TYPE_SYNC:
31577 case TYPE_LOAD_L:
31578 case TYPE_STORE_C:
31579 return true;
31580 case TYPE_LOAD:
31581 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31582 && get_attr_update (insn) == UPDATE_YES)
31583 return true;
31584 else
31585 break;
31586 case TYPE_STORE:
31587 if (get_attr_update (insn) == UPDATE_YES
31588 && get_attr_indexed (insn) == INDEXED_YES)
31589 return true;
31590 else
31591 break;
31592 default:
31593 break;
31594 }
31595 break;
31596 case PROCESSOR_POWER8:
31597 type = get_attr_type (insn);
31598
31599 switch (type)
31600 {
31601 case TYPE_MFCR:
31602 case TYPE_MTCR:
31603 case TYPE_ISYNC:
31604 case TYPE_SYNC:
31605 case TYPE_LOAD_L:
31606 case TYPE_STORE_C:
31607 return true;
31608 case TYPE_LOAD:
31609 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31610 && get_attr_update (insn) == UPDATE_YES)
31611 return true;
31612 else
31613 break;
31614 case TYPE_STORE:
31615 if (get_attr_update (insn) == UPDATE_YES
31616 && get_attr_indexed (insn) == INDEXED_YES)
31617 return true;
31618 else
31619 break;
31620 default:
31621 break;
31622 }
31623 break;
31624 default:
31625 break;
31626 }
31627
31628 return false;
31629 }
31630
31631 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31632 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31633
31634 static bool
31635 is_costly_group (rtx *group_insns, rtx next_insn)
31636 {
31637 int i;
31638 int issue_rate = rs6000_issue_rate ();
31639
31640 for (i = 0; i < issue_rate; i++)
31641 {
31642 sd_iterator_def sd_it;
31643 dep_t dep;
31644 rtx insn = group_insns[i];
31645
31646 if (!insn)
31647 continue;
31648
31649 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31650 {
31651 rtx next = DEP_CON (dep);
31652
31653 if (next == next_insn
31654 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31655 return true;
31656 }
31657 }
31658
31659 return false;
31660 }
31661
31662 /* Helper for the function redefine_groups.
31663 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31664 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31665 to keep it "far" (in a separate group) from GROUP_INSNS, following
31666 one of the following schemes, depending on the value of the flag
31667 -minsert_sched_nops = X:
31668 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31669 in order to force NEXT_INSN into a separate group.
31670 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31671 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31672 insertion (has a group just ended, how many vacant issue slots remain in the
31673 last group, and how many dispatch groups were encountered so far). */
31674
31675 static int
31676 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31677 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31678 int *group_count)
31679 {
31680 rtx nop;
31681 bool force;
31682 int issue_rate = rs6000_issue_rate ();
31683 bool end = *group_end;
31684 int i;
31685
31686 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31687 return can_issue_more;
31688
31689 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31690 return can_issue_more;
31691
31692 force = is_costly_group (group_insns, next_insn);
31693 if (!force)
31694 return can_issue_more;
31695
31696 if (sched_verbose > 6)
31697 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31698 *group_count ,can_issue_more);
31699
31700 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31701 {
31702 if (*group_end)
31703 can_issue_more = 0;
31704
31705 /* Since only a branch can be issued in the last issue_slot, it is
31706 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31707 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31708 in this case the last nop will start a new group and the branch
31709 will be forced to the new group. */
31710 if (can_issue_more && !is_branch_slot_insn (next_insn))
31711 can_issue_more--;
31712
31713 /* Do we have a special group ending nop? */
31714 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31715 || rs6000_tune == PROCESSOR_POWER8)
31716 {
31717 nop = gen_group_ending_nop ();
31718 emit_insn_before (nop, next_insn);
31719 can_issue_more = 0;
31720 }
31721 else
31722 while (can_issue_more > 0)
31723 {
31724 nop = gen_nop ();
31725 emit_insn_before (nop, next_insn);
31726 can_issue_more--;
31727 }
31728
31729 *group_end = true;
31730 return 0;
31731 }
31732
31733 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31734 {
31735 int n_nops = rs6000_sched_insert_nops;
31736
31737 /* Nops can't be issued from the branch slot, so the effective
31738 issue_rate for nops is 'issue_rate - 1'. */
31739 if (can_issue_more == 0)
31740 can_issue_more = issue_rate;
31741 can_issue_more--;
31742 if (can_issue_more == 0)
31743 {
31744 can_issue_more = issue_rate - 1;
31745 (*group_count)++;
31746 end = true;
31747 for (i = 0; i < issue_rate; i++)
31748 {
31749 group_insns[i] = 0;
31750 }
31751 }
31752
31753 while (n_nops > 0)
31754 {
31755 nop = gen_nop ();
31756 emit_insn_before (nop, next_insn);
31757 if (can_issue_more == issue_rate - 1) /* new group begins */
31758 end = false;
31759 can_issue_more--;
31760 if (can_issue_more == 0)
31761 {
31762 can_issue_more = issue_rate - 1;
31763 (*group_count)++;
31764 end = true;
31765 for (i = 0; i < issue_rate; i++)
31766 {
31767 group_insns[i] = 0;
31768 }
31769 }
31770 n_nops--;
31771 }
31772
31773 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31774 can_issue_more++;
31775
31776 /* Is next_insn going to start a new group? */
31777 *group_end
31778 = (end
31779 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31780 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31781 || (can_issue_more < issue_rate &&
31782 insn_terminates_group_p (next_insn, previous_group)));
31783 if (*group_end && end)
31784 (*group_count)--;
31785
31786 if (sched_verbose > 6)
31787 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31788 *group_count, can_issue_more);
31789 return can_issue_more;
31790 }
31791
31792 return can_issue_more;
31793 }
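
/* Illustrative trace of the fixed-count scheme (X < regroup_exact):
   with issue_rate == 4, -minsert-sched-nops=2 and two vacant slots on
   entry, one slot is first reserved for the branch position; the first
   nop then fills the last remaining slot and closes the current group
   (bumping *group_count), and the second nop opens a new group, which
   next_insn may join or terminate according to the test above.  */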
31794
31795 /* This function tries to synchronize the dispatch groups that the compiler "sees"
31796 with the dispatch groups that the processor dispatcher is expected to
31797 form in practice. It tries to achieve this synchronization by forcing the
31798 estimated processor grouping on the compiler (as opposed to the function
31799 'pad_groups' which tries to force the scheduler's grouping on the processor).
31800
31801 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31802 examines the (estimated) dispatch groups that will be formed by the processor
31803 dispatcher. It marks these group boundaries to reflect the estimated
31804 processor grouping, overriding the grouping that the scheduler had marked.
31805 Depending on the value of the flag '-minsert-sched-nops' this function can
31806 force certain insns into separate groups or force a certain distance between
31807 them by inserting nops, for example, if there exists a "costly dependence"
31808 between the insns.
31809
31810 The function estimates the group boundaries that the processor will form as
31811 follows: It keeps track of how many vacant issue slots are available after
31812 each insn. A subsequent insn will start a new group if one of the following
31813 4 cases applies:
31814 - no more vacant issue slots remain in the current dispatch group.
31815 - only the last issue slot, which is the branch slot, is vacant, but the next
31816 insn is not a branch.
31817 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31818 which means that a cracked insn (which occupies two issue slots) can't be
31819 issued in this group.
31820 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31821 start a new group. */
31822
31823 static int
31824 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31825 rtx_insn *tail)
31826 {
31827 rtx_insn *insn, *next_insn;
31828 int issue_rate;
31829 int can_issue_more;
31830 int slot, i;
31831 bool group_end;
31832 int group_count = 0;
31833 rtx *group_insns;
31834
31835 /* Initialize. */
31836 issue_rate = rs6000_issue_rate ();
31837 group_insns = XALLOCAVEC (rtx, issue_rate);
31838 for (i = 0; i < issue_rate; i++)
31839 {
31840 group_insns[i] = 0;
31841 }
31842 can_issue_more = issue_rate;
31843 slot = 0;
31844 insn = get_next_active_insn (prev_head_insn, tail);
31845 group_end = false;
31846
31847 while (insn != NULL_RTX)
31848 {
31849 slot = (issue_rate - can_issue_more);
31850 group_insns[slot] = insn;
31851 can_issue_more =
31852 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31853 if (insn_terminates_group_p (insn, current_group))
31854 can_issue_more = 0;
31855
31856 next_insn = get_next_active_insn (insn, tail);
31857 if (next_insn == NULL_RTX)
31858 return group_count + 1;
31859
31860 /* Is next_insn going to start a new group? */
31861 group_end
31862 = (can_issue_more == 0
31863 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31864 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31865 || (can_issue_more < issue_rate &&
31866 insn_terminates_group_p (next_insn, previous_group)));
31867
31868 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
31869 next_insn, &group_end, can_issue_more,
31870 &group_count);
31871
31872 if (group_end)
31873 {
31874 group_count++;
31875 can_issue_more = 0;
31876 for (i = 0; i < issue_rate; i++)
31877 {
31878 group_insns[i] = 0;
31879 }
31880 }
31881
31882 if (GET_MODE (next_insn) == TImode && can_issue_more)
31883 PUT_MODE (next_insn, VOIDmode);
31884 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
31885 PUT_MODE (next_insn, TImode);
31886
31887 insn = next_insn;
31888 if (can_issue_more == 0)
31889 can_issue_more = issue_rate;
31890 } /* while */
31891
31892 return group_count;
31893 }
31894
31895 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
31896 dispatch group boundaries that the scheduler had marked. Pad with nops
31897 any dispatch groups which have vacant issue slots, in order to force the
31898 scheduler's grouping on the processor dispatcher. The function
31899 returns the number of dispatch groups found. */
31900
31901 static int
31902 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31903 rtx_insn *tail)
31904 {
31905 rtx_insn *insn, *next_insn;
31906 rtx nop;
31907 int issue_rate;
31908 int can_issue_more;
31909 int group_end;
31910 int group_count = 0;
31911
31912 /* Initialize issue_rate. */
31913 issue_rate = rs6000_issue_rate ();
31914 can_issue_more = issue_rate;
31915
31916 insn = get_next_active_insn (prev_head_insn, tail);
31917 next_insn = get_next_active_insn (insn, tail);
31918
31919 while (insn != NULL_RTX)
31920 {
31921 can_issue_more =
31922 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31923
31924 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
31925
31926 if (next_insn == NULL_RTX)
31927 break;
31928
31929 if (group_end)
31930 {
31931 /* If the scheduler had marked group termination at this location
31932 (between insn and next_insn), and neither insn nor next_insn will
31933 force group termination, pad the group with nops to force group
31934 termination. */
31935 if (can_issue_more
31936 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
31937 && !insn_terminates_group_p (insn, current_group)
31938 && !insn_terminates_group_p (next_insn, previous_group))
31939 {
31940 if (!is_branch_slot_insn (next_insn))
31941 can_issue_more--;
31942
31943 while (can_issue_more)
31944 {
31945 nop = gen_nop ();
31946 emit_insn_before (nop, next_insn);
31947 can_issue_more--;
31948 }
31949 }
31950
31951 can_issue_more = issue_rate;
31952 group_count++;
31953 }
31954
31955 insn = next_insn;
31956 next_insn = get_next_active_insn (insn, tail);
31957 }
31958
31959 return group_count;
31960 }
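
/* Illustrative: if the scheduler marked a group boundary while three
   issue slots were still vacant and next_insn is not a branch, one
   slot is left for the branch position and two nops are emitted,
   forcing the hardware dispatcher to end the group where the
   scheduler did.  */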
31961
31962 /* We're beginning a new block. Initialize data structures as necessary. */
31963
31964 static void
31965 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
31966 int sched_verbose ATTRIBUTE_UNUSED,
31967 int max_ready ATTRIBUTE_UNUSED)
31968 {
31969 last_scheduled_insn = NULL;
31970 load_store_pendulum = 0;
31971 divide_cnt = 0;
31972 vec_pairing = 0;
31973 }
31974
31975 /* The following function is called at the end of scheduling a basic block.
31976 After reload, it inserts nops to enforce the insn group bundling. */
31977
31978 static void
31979 rs6000_sched_finish (FILE *dump, int sched_verbose)
31980 {
31981 int n_groups;
31982
31983 if (sched_verbose)
31984 fprintf (dump, "=== Finishing schedule.\n");
31985
31986 if (reload_completed && rs6000_sched_groups)
31987 {
31988 /* Do not run sched_finish hook when selective scheduling enabled. */
31989 if (sel_sched_p ())
31990 return;
31991
31992 if (rs6000_sched_insert_nops == sched_finish_none)
31993 return;
31994
31995 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
31996 n_groups = pad_groups (dump, sched_verbose,
31997 current_sched_info->prev_head,
31998 current_sched_info->next_tail);
31999 else
32000 n_groups = redefine_groups (dump, sched_verbose,
32001 current_sched_info->prev_head,
32002 current_sched_info->next_tail);
32003
32004 if (sched_verbose >= 6)
32005 {
32006 fprintf (dump, "ngroups = %d\n", n_groups);
32007 print_rtl (dump, current_sched_info->prev_head);
32008 fprintf (dump, "Done finish_sched\n");
32009 }
32010 }
32011 }
32012
32013 struct rs6000_sched_context
32014 {
32015 short cached_can_issue_more;
32016 rtx_insn *last_scheduled_insn;
32017 int load_store_pendulum;
32018 int divide_cnt;
32019 int vec_pairing;
32020 };
32021
32022 typedef struct rs6000_sched_context rs6000_sched_context_def;
32023 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32024
32025 /* Allocate store for new scheduling context. */
32026 static void *
32027 rs6000_alloc_sched_context (void)
32028 {
32029 return xmalloc (sizeof (rs6000_sched_context_def));
32030 }
32031
32032 /* If CLEAN_P is true, initialize _SC with clean data;
32033 otherwise initialize it from the global context. */
32034 static void
32035 rs6000_init_sched_context (void *_sc, bool clean_p)
32036 {
32037 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32038
32039 if (clean_p)
32040 {
32041 sc->cached_can_issue_more = 0;
32042 sc->last_scheduled_insn = NULL;
32043 sc->load_store_pendulum = 0;
32044 sc->divide_cnt = 0;
32045 sc->vec_pairing = 0;
32046 }
32047 else
32048 {
32049 sc->cached_can_issue_more = cached_can_issue_more;
32050 sc->last_scheduled_insn = last_scheduled_insn;
32051 sc->load_store_pendulum = load_store_pendulum;
32052 sc->divide_cnt = divide_cnt;
32053 sc->vec_pairing = vec_pairing;
32054 }
32055 }
32056
32057 /* Sets the global scheduling context to the one pointed to by _SC. */
32058 static void
32059 rs6000_set_sched_context (void *_sc)
32060 {
32061 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32062
32063 gcc_assert (sc != NULL);
32064
32065 cached_can_issue_more = sc->cached_can_issue_more;
32066 last_scheduled_insn = sc->last_scheduled_insn;
32067 load_store_pendulum = sc->load_store_pendulum;
32068 divide_cnt = sc->divide_cnt;
32069 vec_pairing = sc->vec_pairing;
32070 }
32071
32072 /* Free _SC. */
32073 static void
32074 rs6000_free_sched_context (void *_sc)
32075 {
32076 gcc_assert (_sc != NULL);
32077
32078 free (_sc);
32079 }
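
/* Usage sketch (illustrative; the actual caller lives in the scheduler,
   not in this file): the selective scheduler drives the four hooks
   above roughly as

     void *sc = targetm.sched.alloc_sched_context ();
     targetm.sched.init_sched_context (sc, true);
     ...
     targetm.sched.set_sched_context (sc);
     ...
     targetm.sched.free_sched_context (sc);

   saving and restoring the pendulum/pairing state around each fence.  */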
32080
32081 static bool
32082 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32083 {
32084 switch (get_attr_type (insn))
32085 {
32086 case TYPE_DIV:
32087 case TYPE_SDIV:
32088 case TYPE_DDIV:
32089 case TYPE_VECDIV:
32090 case TYPE_SSQRT:
32091 case TYPE_DSQRT:
32092 return false;
32093
32094 default:
32095 return true;
32096 }
32097 }
32098 \f
32099 /* Length in units of the trampoline for entering a nested function. */
32100
32101 int
32102 rs6000_trampoline_size (void)
32103 {
32104 int ret = 0;
32105
32106 switch (DEFAULT_ABI)
32107 {
32108 default:
32109 gcc_unreachable ();
32110
32111 case ABI_AIX:
32112 ret = (TARGET_32BIT) ? 12 : 24;
32113 break;
32114
32115 case ABI_ELFv2:
32116 gcc_assert (!TARGET_32BIT);
32117 ret = 32;
32118 break;
32119
32120 case ABI_DARWIN:
32121 case ABI_V4:
32122 ret = (TARGET_32BIT) ? 40 : 48;
32123 break;
32124 }
32125
32126 return ret;
32127 }
32128
32129 /* Emit RTL insns to initialize the variable parts of a trampoline.
32130 FNADDR is an RTX for the address of the function's pure code.
32131 CXT is an RTX for the static chain value for the function. */
32132
32133 static void
32134 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32135 {
32136 int regsize = (TARGET_32BIT) ? 4 : 8;
32137 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32138 rtx ctx_reg = force_reg (Pmode, cxt);
32139 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32140
32141 switch (DEFAULT_ABI)
32142 {
32143 default:
32144 gcc_unreachable ();
32145
32146 /* Under AIX, just build the 3-word function descriptor. */
32147 case ABI_AIX:
32148 {
32149 rtx fnmem, fn_reg, toc_reg;
32150
32151 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32152 error ("you cannot take the address of a nested function if you use "
32153 "the %qs option", "-mno-pointers-to-nested-functions");
32154
32155 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32156 fn_reg = gen_reg_rtx (Pmode);
32157 toc_reg = gen_reg_rtx (Pmode);
32158
32159 /* Macro to shorten the code expansions below. */
32160 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32161
32162 m_tramp = replace_equiv_address (m_tramp, addr);
32163
32164 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32165 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32166 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32167 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32168 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32169
32170 # undef MEM_PLUS
32171 }
32172 break;
32173
32174 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32175 case ABI_ELFv2:
32176 case ABI_DARWIN:
32177 case ABI_V4:
32178 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32179 LCT_NORMAL, VOIDmode,
32180 addr, Pmode,
32181 GEN_INT (rs6000_trampoline_size ()), SImode,
32182 fnaddr, Pmode,
32183 ctx_reg, Pmode);
32184 break;
32185 }
32186 }
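
/* The AIX case above lays the trampoline out as a 3-word function
   descriptor; for 64-bit code (regsize == 8) that is, illustratively:

     offset  0: entry address, copied from the callee's descriptor
     offset  8: TOC pointer, copied from the callee's descriptor
     offset 16: static chain value (CXT)  */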
32187
32188 \f
32189 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32190 identifier as an argument, so the front end shouldn't look it up. */
32191
32192 static bool
32193 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32194 {
32195 return is_attribute_p ("altivec", attr_id);
32196 }
32197
32198 /* Handle the "altivec" attribute. The attribute may have
32199 arguments as follows:
32200
32201 __attribute__((altivec(vector__)))
32202 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32203 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32204
32205 and may appear more than once (e.g., 'vector bool char') in a
32206 given declaration. */
32207
32208 static tree
32209 rs6000_handle_altivec_attribute (tree *node,
32210 tree name ATTRIBUTE_UNUSED,
32211 tree args,
32212 int flags ATTRIBUTE_UNUSED,
32213 bool *no_add_attrs)
32214 {
32215 tree type = *node, result = NULL_TREE;
32216 machine_mode mode;
32217 int unsigned_p;
32218 char altivec_type
32219 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32220 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32221 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32222 : '?');
32223
32224 while (POINTER_TYPE_P (type)
32225 || TREE_CODE (type) == FUNCTION_TYPE
32226 || TREE_CODE (type) == METHOD_TYPE
32227 || TREE_CODE (type) == ARRAY_TYPE)
32228 type = TREE_TYPE (type);
32229
32230 mode = TYPE_MODE (type);
32231
32232 /* Check for invalid AltiVec type qualifiers. */
32233 if (type == long_double_type_node)
32234 error ("use of %<long double%> in AltiVec types is invalid");
32235 else if (type == boolean_type_node)
32236 error ("use of boolean types in AltiVec types is invalid");
32237 else if (TREE_CODE (type) == COMPLEX_TYPE)
32238 error ("use of %<complex%> in AltiVec types is invalid");
32239 else if (DECIMAL_FLOAT_MODE_P (mode))
32240 error ("use of decimal floating point types in AltiVec types is invalid");
32241 else if (!TARGET_VSX)
32242 {
32243 if (type == long_unsigned_type_node || type == long_integer_type_node)
32244 {
32245 if (TARGET_64BIT)
32246 error ("use of %<long%> in AltiVec types is invalid for "
32247 "64-bit code without %qs", "-mvsx");
32248 else if (rs6000_warn_altivec_long)
32249 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32250 "use %<int%>");
32251 }
32252 else if (type == long_long_unsigned_type_node
32253 || type == long_long_integer_type_node)
32254 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32255 "-mvsx");
32256 else if (type == double_type_node)
32257 error ("use of %<double%> in AltiVec types is invalid without %qs",
32258 "-mvsx");
32259 }
32260
32261 switch (altivec_type)
32262 {
32263 case 'v':
32264 unsigned_p = TYPE_UNSIGNED (type);
32265 switch (mode)
32266 {
32267 case E_TImode:
32268 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32269 break;
32270 case E_DImode:
32271 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32272 break;
32273 case E_SImode:
32274 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32275 break;
32276 case E_HImode:
32277 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32278 break;
32279 case E_QImode:
32280 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32281 break;
32282 case E_SFmode: result = V4SF_type_node; break;
32283 case E_DFmode: result = V2DF_type_node; break;
32284 /* If the user says 'vector int bool', we may be handed the 'bool'
32285 attribute _before_ the 'vector' attribute, and so select the
32286 proper type in the 'b' case below. */
32287 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32288 case E_V2DImode: case E_V2DFmode:
32289 result = type;
32290 default: break;
32291 }
32292 break;
32293 case 'b':
32294 switch (mode)
32295 {
32296 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32297 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32298 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32299 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32300 default: break;
32301 }
32302 break;
32303 case 'p':
32304 switch (mode)
32305 {
32306 case E_V8HImode: result = pixel_V8HI_type_node;
32307 default: break;
32308 }
32309 default: break;
32310 }
32311
32312 /* Propagate qualifiers attached to the element type
32313 onto the vector type. */
32314 if (result && result != type && TYPE_QUALS (type))
32315 result = build_qualified_type (result, TYPE_QUALS (type));
32316
32317 *no_add_attrs = true; /* No need to hang on to the attribute. */
32318
32319 if (result)
32320 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32321
32322 return NULL_TREE;
32323 }
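
/* Example (illustrative) of how the cases above compose: a source-level
     vector bool int b;
   reaches this handler as roughly
     __attribute__((altivec(vector__)))
     __attribute__((altivec(bool__))) unsigned int b;
   The 'b' case maps the SImode base type to bool_V4SI_type_node, and if
   the 'vector' attribute is then processed on the resulting V4SImode
   type, the 'v' case returns it unchanged, as noted in the code.  */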
32324
32325 /* AltiVec defines five built-in scalar types that serve as vector
32326 elements; we must teach the compiler how to mangle them. The 128-bit
32327 floating point mangling is target-specific as well. */
32328
32329 static const char *
32330 rs6000_mangle_type (const_tree type)
32331 {
32332 type = TYPE_MAIN_VARIANT (type);
32333
32334 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32335 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32336 return NULL;
32337
32338 if (type == bool_char_type_node) return "U6__boolc";
32339 if (type == bool_short_type_node) return "U6__bools";
32340 if (type == pixel_type_node) return "u7__pixel";
32341 if (type == bool_int_type_node) return "U6__booli";
32342 if (type == bool_long_long_type_node) return "U6__boolx";
32343
32344 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32345 return "g";
32346 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32347 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32348
32349 /* For all other types, use the default mangling. */
32350 return NULL;
32351 }
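
/* Example (illustrative): under the rules above a declaration such as
     void f (__pixel);
   mangles as _Z1fu7__pixel, i.e. the Itanium ABI's vendor-extended
   builtin type encoding: 'u' followed by a length-prefixed source
   name.  */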
32352
32353 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32354 struct attribute_spec.handler. */
32355
32356 static tree
32357 rs6000_handle_longcall_attribute (tree *node, tree name,
32358 tree args ATTRIBUTE_UNUSED,
32359 int flags ATTRIBUTE_UNUSED,
32360 bool *no_add_attrs)
32361 {
32362 if (TREE_CODE (*node) != FUNCTION_TYPE
32363 && TREE_CODE (*node) != FIELD_DECL
32364 && TREE_CODE (*node) != TYPE_DECL)
32365 {
32366 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32367 name);
32368 *no_add_attrs = true;
32369 }
32370
32371 return NULL_TREE;
32372 }
32373
32374 /* Set longcall attributes on all functions declared when
32375 rs6000_default_long_calls is true. */
32376 static void
32377 rs6000_set_default_type_attributes (tree type)
32378 {
32379 if (rs6000_default_long_calls
32380 && (TREE_CODE (type) == FUNCTION_TYPE
32381 || TREE_CODE (type) == METHOD_TYPE))
32382 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32383 NULL_TREE,
32384 TYPE_ATTRIBUTES (type));
32385
32386 #if TARGET_MACHO
32387 darwin_set_default_type_attributes (type);
32388 #endif
32389 }
32390
32391 /* Return a reference suitable for calling a function with the
32392 longcall attribute. */
32393
32394 rtx
32395 rs6000_longcall_ref (rtx call_ref)
32396 {
32397 const char *call_name;
32398 tree node;
32399
32400 if (GET_CODE (call_ref) != SYMBOL_REF)
32401 return call_ref;
32402
32403 /* System V adds '.' to the internal name, so skip any leading dots. */
32404 call_name = XSTR (call_ref, 0);
32405 if (*call_name == '.')
32406 {
32407 while (*call_name == '.')
32408 call_name++;
32409
32410 node = get_identifier (call_name);
32411 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32412 }
32413
32414 return force_reg (Pmode, call_ref);
32415 }
32416 \f
32417 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32418 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32419 #endif
32420
32421 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32422 struct attribute_spec.handler. */
32423 static tree
32424 rs6000_handle_struct_attribute (tree *node, tree name,
32425 tree args ATTRIBUTE_UNUSED,
32426 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32427 {
32428 tree *type = NULL;
32429 if (DECL_P (*node))
32430 {
32431 if (TREE_CODE (*node) == TYPE_DECL)
32432 type = &TREE_TYPE (*node);
32433 }
32434 else
32435 type = node;
32436
32437 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32438 || TREE_CODE (*type) == UNION_TYPE)))
32439 {
32440 warning (OPT_Wattributes, "%qE attribute ignored", name);
32441 *no_add_attrs = true;
32442 }
32443
32444 else if ((is_attribute_p ("ms_struct", name)
32445 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32446 || ((is_attribute_p ("gcc_struct", name)
32447 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32448 {
32449 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32450 name);
32451 *no_add_attrs = true;
32452 }
32453
32454 return NULL_TREE;
32455 }
32456
32457 static bool
32458 rs6000_ms_bitfield_layout_p (const_tree record_type)
32459 {
32460 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32461 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32462 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32463 }
32464 \f
32465 #ifdef USING_ELFOS_H
32466
32467 /* A get_unnamed_section callback, used for switching to toc_section. */
32468
32469 static void
32470 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32471 {
32472 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32473 && TARGET_MINIMAL_TOC)
32474 {
32475 if (!toc_initialized)
32476 {
32477 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32478 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32479 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32480 fprintf (asm_out_file, "\t.tc ");
32481 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32482 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32483 fprintf (asm_out_file, "\n");
32484
32485 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32486 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32487 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32488 fprintf (asm_out_file, " = .+32768\n");
32489 toc_initialized = 1;
32490 }
32491 else
32492 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32493 }
32494 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32495 {
32496 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32497 if (!toc_initialized)
32498 {
32499 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32500 toc_initialized = 1;
32501 }
32502 }
32503 else
32504 {
32505 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32506 if (!toc_initialized)
32507 {
32508 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32509 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32510 fprintf (asm_out_file, " = .+32768\n");
32511 toc_initialized = 1;
32512 }
32513 }
32514 }
32515
32516 /* Implement TARGET_ASM_INIT_SECTIONS. */
32517
32518 static void
32519 rs6000_elf_asm_init_sections (void)
32520 {
32521 toc_section
32522 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32523
32524 sdata2_section
32525 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32526 SDATA2_SECTION_ASM_OP);
32527 }
32528
32529 /* Implement TARGET_SELECT_RTX_SECTION. */
32530
32531 static section *
32532 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32533 unsigned HOST_WIDE_INT align)
32534 {
32535 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32536 return toc_section;
32537 else
32538 return default_elf_select_rtx_section (mode, x, align);
32539 }
32540 \f
32541 /* For a SYMBOL_REF, set generic flags and then perform some
32542 target-specific processing.
32543
32544 When the AIX ABI is requested on a non-AIX system, replace the
32545 function name with the real name (with a leading .) rather than the
32546 function descriptor name. This saves a lot of overriding code to
32547 read the prefixes. */
32548
32549 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32550 static void
32551 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32552 {
32553 default_encode_section_info (decl, rtl, first);
32554
32555 if (first
32556 && TREE_CODE (decl) == FUNCTION_DECL
32557 && !TARGET_AIX
32558 && DEFAULT_ABI == ABI_AIX)
32559 {
32560 rtx sym_ref = XEXP (rtl, 0);
32561 size_t len = strlen (XSTR (sym_ref, 0));
32562 char *str = XALLOCAVEC (char, len + 2);
32563 str[0] = '.';
32564 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32565 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32566 }
32567 }
32568
32569 static inline bool
32570 compare_section_name (const char *section, const char *templ)
32571 {
32572 int len;
32573
32574 len = strlen (templ);
32575 return (strncmp (section, templ, len) == 0
32576 && (section[len] == 0 || section[len] == '.'));
32577 }
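/* E.g., compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are true, while
   compare_section_name (".sdata2", ".sdata") is false: the prefix
   must be followed by end-of-string or a '.' separator.  */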
32578
32579 bool
32580 rs6000_elf_in_small_data_p (const_tree decl)
32581 {
32582 if (rs6000_sdata == SDATA_NONE)
32583 return false;
32584
32585 /* We want to merge strings, so we never consider them small data. */
32586 if (TREE_CODE (decl) == STRING_CST)
32587 return false;
32588
32589 /* Functions are never in the small data area. */
32590 if (TREE_CODE (decl) == FUNCTION_DECL)
32591 return false;
32592
32593 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32594 {
32595 const char *section = DECL_SECTION_NAME (decl);
32596 if (compare_section_name (section, ".sdata")
32597 || compare_section_name (section, ".sdata2")
32598 || compare_section_name (section, ".gnu.linkonce.s")
32599 || compare_section_name (section, ".sbss")
32600 || compare_section_name (section, ".sbss2")
32601 || compare_section_name (section, ".gnu.linkonce.sb")
32602 || strcmp (section, ".PPC.EMB.sdata0") == 0
32603 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32604 return true;
32605 }
32606 else
32607 {
32608 /* If we are told not to put readonly data in sdata, then don't. */
32609 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32610 && !rs6000_readonly_in_sdata)
32611 return false;
32612
32613 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32614
32615 if (size > 0
32616 && size <= g_switch_value
32617 /* If it's not public, and we're not going to reference it there,
32618 there's no need to put it in the small data section. */
32619 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32620 return true;
32621 }
32622
32623 return false;
32624 }
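/* E.g., with -G 8 on a suitable target, an 8-byte global variable
   qualifies for the small data area, while a larger object (or one
   whose explicit section is not one of the names above) does not.  */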
32625
32626 #endif /* USING_ELFOS_H */
32627 \f
32628 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32629
32630 static bool
32631 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32632 {
32633 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32634 }
32635
32636 /* Do not place thread-local symbols refs in the object blocks. */
32637
32638 static bool
32639 rs6000_use_blocks_for_decl_p (const_tree decl)
32640 {
32641 return !DECL_THREAD_LOCAL_P (decl);
32642 }
32643 \f
32644 /* Return a REG that occurs in ADDR with coefficient 1.
32645 ADDR can be effectively incremented by incrementing REG.
32646
32647 r0 is special and we must not select it as an address
32648 register by this routine since our caller will try to
32649 increment the returned register via an "la" instruction. */
32650
32651 rtx
32652 find_addr_reg (rtx addr)
32653 {
32654 while (GET_CODE (addr) == PLUS)
32655 {
32656 if (GET_CODE (XEXP (addr, 0)) == REG
32657 && REGNO (XEXP (addr, 0)) != 0)
32658 addr = XEXP (addr, 0);
32659 else if (GET_CODE (XEXP (addr, 1)) == REG
32660 && REGNO (XEXP (addr, 1)) != 0)
32661 addr = XEXP (addr, 1);
32662 else if (CONSTANT_P (XEXP (addr, 0)))
32663 addr = XEXP (addr, 1);
32664 else if (CONSTANT_P (XEXP (addr, 1)))
32665 addr = XEXP (addr, 0);
32666 else
32667 gcc_unreachable ();
32668 }
32669 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
32670 return addr;
32671 }
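/* E.g., find_addr_reg on (plus (reg 9) (const_int 64)) returns
   (reg 9); on (plus (plus (reg 9) (reg 10)) (const_int 8)) the
   constant term is stripped first and (reg 9) is returned.  */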
32672
32673 void
32674 rs6000_fatal_bad_address (rtx op)
32675 {
32676 fatal_insn ("bad address", op);
32677 }
32678
32679 #if TARGET_MACHO
32680
32681 typedef struct branch_island_d {
32682 tree function_name;
32683 tree label_name;
32684 int line_number;
32685 } branch_island;
32686
32687
32688 static vec<branch_island, va_gc> *branch_islands;
32689
32690 /* Remember to generate a branch island for far calls to the given
32691 function. */
32692
32693 static void
32694 add_compiler_branch_island (tree label_name, tree function_name,
32695 int line_number)
32696 {
32697 branch_island bi = {function_name, label_name, line_number};
32698 vec_safe_push (branch_islands, bi);
32699 }
32700
32701 /* Generate far-jump branch islands for everything recorded in
32702 branch_islands. Invoked immediately after the last instruction of
32703 the epilogue has been emitted; the branch islands must be appended
32704 to, and contiguous with, the function body. Mach-O stubs are
32705 generated in machopic_output_stub(). */
32706
32707 static void
32708 macho_branch_islands (void)
32709 {
32710 char tmp_buf[512];
32711
32712 while (!vec_safe_is_empty (branch_islands))
32713 {
32714 branch_island *bi = &branch_islands->last ();
32715 const char *label = IDENTIFIER_POINTER (bi->label_name);
32716 const char *name = IDENTIFIER_POINTER (bi->function_name);
32717 char name_buf[512];
32718 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32719 if (name[0] == '*' || name[0] == '&')
32720 strcpy (name_buf, name+1);
32721 else
32722 {
32723 name_buf[0] = '_';
32724 strcpy (name_buf+1, name);
32725 }
32726 strcpy (tmp_buf, "\n");
32727 strcat (tmp_buf, label);
32728 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32729 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32730 dbxout_stabd (N_SLINE, bi->line_number);
32731 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32732 if (flag_pic)
32733 {
32734 if (TARGET_LINK_STACK)
32735 {
32736 char name[32];
32737 get_ppc476_thunk_name (name);
32738 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32739 strcat (tmp_buf, name);
32740 strcat (tmp_buf, "\n");
32741 strcat (tmp_buf, label);
32742 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32743 }
32744 else
32745 {
32746 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32747 strcat (tmp_buf, label);
32748 strcat (tmp_buf, "_pic\n");
32749 strcat (tmp_buf, label);
32750 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32751 }
32752
32753 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32754 strcat (tmp_buf, name_buf);
32755 strcat (tmp_buf, " - ");
32756 strcat (tmp_buf, label);
32757 strcat (tmp_buf, "_pic)\n");
32758
32759 strcat (tmp_buf, "\tmtlr r0\n");
32760
32761 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32762 strcat (tmp_buf, name_buf);
32763 strcat (tmp_buf, " - ");
32764 strcat (tmp_buf, label);
32765 strcat (tmp_buf, "_pic)\n");
32766
32767 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32768 }
32769 else
32770 {
32771 strcat (tmp_buf, ":\nlis r12,hi16(");
32772 strcat (tmp_buf, name_buf);
32773 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32774 strcat (tmp_buf, name_buf);
32775 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32776 }
32777 output_asm_insn (tmp_buf, 0);
32778 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32779 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32780 dbxout_stabd (N_SLINE, bi->line_number);
32781 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32782 branch_islands->pop ();
32783 }
32784 }
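/* For a far call to foo recorded under a (hypothetical) island label
   L42, the non-PIC branch above appends roughly:

   L42:
	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr
*/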
32785
32786 /* NO_PREVIOUS_DEF checks in the linked list whether the function name is
32787 already there or not. */
32788
32789 static int
32790 no_previous_def (tree function_name)
32791 {
32792 branch_island *bi;
32793 unsigned ix;
32794
32795 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32796 if (function_name == bi->function_name)
32797 return 0;
32798 return 1;
32799 }
32800
32801 /* GET_PREV_LABEL gets the label name from the previous definition of
32802 the function. */
32803
32804 static tree
32805 get_prev_label (tree function_name)
32806 {
32807 branch_island *bi;
32808 unsigned ix;
32809
32810 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32811 if (function_name == bi->function_name)
32812 return bi->label_name;
32813 return NULL_TREE;
32814 }
32815
32816 /* INSN is either a function call or a millicode call.
32817
32818 OPERANDS[DEST_OPERAND_NUMBER] is the routine we are calling, and
32819 OPERANDS[COOKIE_OPERAND_NUMBER] holds the call cookie. */
32820
32821 char *
32822 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
32823 int cookie_operand_number)
32824 {
32825 static char buf[256];
32826 if (darwin_emit_branch_islands
32827 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
32828 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
32829 {
32830 tree labelname;
32831 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
32832
32833 if (no_previous_def (funname))
32834 {
32835 rtx label_rtx = gen_label_rtx ();
32836 char *label_buf, temp_buf[256];
32837 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
32838 CODE_LABEL_NUMBER (label_rtx));
32839 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
32840 labelname = get_identifier (label_buf);
32841 add_compiler_branch_island (labelname, funname, insn_line (insn));
32842 }
32843 else
32844 labelname = get_prev_label (funname);
32845
32846 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
32847 instruction will reach 'foo', otherwise link as 'bl L42'".
32848 "L42" should be a 'branch island', that will do a far jump to
32849 'foo'. Branch islands are generated in
32850 macho_branch_islands(). */
32851 sprintf (buf, "jbsr %%z%d,%.246s",
32852 dest_operand_number, IDENTIFIER_POINTER (labelname));
32853 }
32854 else
32855 sprintf (buf, "bl %%z%d", dest_operand_number);
32856 return buf;
32857 }
32858
32859 /* Generate PIC and indirect symbol stubs. */
32860
32861 void
32862 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32863 {
32864 unsigned int length;
32865 char *symbol_name, *lazy_ptr_name;
32866 char *local_label_0;
32867 static int label = 0;
32868
32869 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32870 symb = (*targetm.strip_name_encoding) (symb);
32871
32872
32873 length = strlen (symb);
32874 symbol_name = XALLOCAVEC (char, length + 32);
32875 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32876
32877 lazy_ptr_name = XALLOCAVEC (char, length + 32);
32878 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
32879
32880 if (flag_pic == 2)
32881 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
32882 else
32883 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
32884
32885 if (flag_pic == 2)
32886 {
32887 fprintf (file, "\t.align 5\n");
32888
32889 fprintf (file, "%s:\n", stub);
32890 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32891
32892 label++;
32893 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
32894 sprintf (local_label_0, "\"L%011d$spb\"", label);
32895
32896 fprintf (file, "\tmflr r0\n");
32897 if (TARGET_LINK_STACK)
32898 {
32899 char name[32];
32900 get_ppc476_thunk_name (name);
32901 fprintf (file, "\tbl %s\n", name);
32902 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32903 }
32904 else
32905 {
32906 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
32907 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
32908 }
32909 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
32910 lazy_ptr_name, local_label_0);
32911 fprintf (file, "\tmtlr r0\n");
32912 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
32913 (TARGET_64BIT ? "ldu" : "lwzu"),
32914 lazy_ptr_name, local_label_0);
32915 fprintf (file, "\tmtctr r12\n");
32916 fprintf (file, "\tbctr\n");
32917 }
32918 else
32919 {
32920 fprintf (file, "\t.align 4\n");
32921
32922 fprintf (file, "%s:\n", stub);
32923 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32924
32925 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
32926 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
32927 (TARGET_64BIT ? "ldu" : "lwzu"),
32928 lazy_ptr_name);
32929 fprintf (file, "\tmtctr r12\n");
32930 fprintf (file, "\tbctr\n");
32931 }
32932
32933 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
32934 fprintf (file, "%s:\n", lazy_ptr_name);
32935 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
32936 fprintf (file, "%sdyld_stub_binding_helper\n",
32937 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
32938 }
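/* For flag_pic == 0 the stub emitted above looks roughly like this
   (the symbol and lazy-pointer names are illustrative; 64-bit uses
   "ldu" instead of "lwzu"):

   stub:
	.indirect_symbol _foo
	lis r11,ha16(Lfoo$lazy_ptr)
	lwzu r12,lo16(Lfoo$lazy_ptr)(r11)
	mtctr r12
	bctr
   Lfoo$lazy_ptr:
	.indirect_symbol _foo
	.long dyld_stub_binding_helper
*/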
32939
32940 /* Legitimize PIC addresses. If the address is already
32941 position-independent, we return ORIG. Newly generated
32942 position-independent addresses go into a reg. This is REG if
32943 nonzero, otherwise we allocate register(s) as necessary. */
32944
32945 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
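/* SMALL_INT accepts exactly the signed 16-bit range: -0x8000 and
   0x7fff pass, 0x8000 does not.  */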
32946
32947 rtx
32948 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
32949 rtx reg)
32950 {
32951 rtx base, offset;
32952
32953 if (reg == NULL && !reload_completed)
32954 reg = gen_reg_rtx (Pmode);
32955
32956 if (GET_CODE (orig) == CONST)
32957 {
32958 rtx reg_temp;
32959
32960 if (GET_CODE (XEXP (orig, 0)) == PLUS
32961 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
32962 return orig;
32963
32964 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
32965
32966 /* Use a different reg for the intermediate value, as
32967 it will be marked UNCHANGING. */
32968 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
32969 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
32970 Pmode, reg_temp);
32971 offset =
32972 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
32973 Pmode, reg);
32974
32975 if (GET_CODE (offset) == CONST_INT)
32976 {
32977 if (SMALL_INT (offset))
32978 return plus_constant (Pmode, base, INTVAL (offset));
32979 else if (!reload_completed)
32980 offset = force_reg (Pmode, offset);
32981 else
32982 {
32983 rtx mem = force_const_mem (Pmode, orig);
32984 return machopic_legitimize_pic_address (mem, Pmode, reg);
32985 }
32986 }
32987 return gen_rtx_PLUS (Pmode, base, offset);
32988 }
32989
32990 /* Fall back on generic machopic code. */
32991 return machopic_legitimize_pic_address (orig, mode, reg);
32992 }
32993
32994 /* Output a .machine directive for the Darwin assembler, and call
32995 the generic start_file routine. */
32996
32997 static void
32998 rs6000_darwin_file_start (void)
32999 {
33000 static const struct
33001 {
33002 const char *arg;
33003 const char *name;
33004 HOST_WIDE_INT if_set;
33005 } mapping[] = {
33006 { "ppc64", "ppc64", MASK_64BIT },
33007 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33008 { "power4", "ppc970", 0 },
33009 { "G5", "ppc970", 0 },
33010 { "7450", "ppc7450", 0 },
33011 { "7400", "ppc7400", MASK_ALTIVEC },
33012 { "G4", "ppc7400", 0 },
33013 { "750", "ppc750", 0 },
33014 { "740", "ppc750", 0 },
33015 { "G3", "ppc750", 0 },
33016 { "604e", "ppc604e", 0 },
33017 { "604", "ppc604", 0 },
33018 { "603e", "ppc603", 0 },
33019 { "603", "ppc603", 0 },
33020 { "601", "ppc601", 0 },
33021 { NULL, "ppc", 0 } };
33022 const char *cpu_id = "";
33023 size_t i;
33024
33025 rs6000_file_start ();
33026 darwin_file_start ();
33027
33028 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33029
33030 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33031 cpu_id = rs6000_default_cpu;
33032
33033 if (global_options_set.x_rs6000_cpu_index)
33034 cpu_id = processor_target_table[rs6000_cpu_index].name;
33035
33036 /* Look through the mapping array. Pick the first name that either
33037 matches the argument, has a bit set in IF_SET that is also set
33038 in the target flags, or has a NULL name. */
33039
33040 i = 0;
33041 while (mapping[i].arg != NULL
33042 && strcmp (mapping[i].arg, cpu_id) != 0
33043 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33044 i++;
33045
33046 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33047 }
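/* E.g., a 32-bit -mcpu=G5 compile maps to ".machine ppc970", and a
   64-bit compile whose -mcpu string is not in the table still selects
   ".machine ppc64", because MASK_64BIT is set in rs6000_isa_flags.  */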
33048
33049 #endif /* TARGET_MACHO */
33050
33051 #if TARGET_ELF
33052 static int
33053 rs6000_elf_reloc_rw_mask (void)
33054 {
33055 if (flag_pic)
33056 return 3;
33057 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33058 return 2;
33059 else
33060 return 0;
33061 }
33062
33063 /* Record an element in the table of global constructors. SYMBOL is
33064 a SYMBOL_REF of the function to be called; PRIORITY is a number
33065 between 0 and MAX_INIT_PRIORITY.
33066
33067 This differs from default_named_section_asm_out_constructor in
33068 that we have special handling for -mrelocatable. */
33069
33070 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33071 static void
33072 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33073 {
33074 const char *section = ".ctors";
33075 char buf[18];
33076
33077 if (priority != DEFAULT_INIT_PRIORITY)
33078 {
33079 sprintf (buf, ".ctors.%.5u",
33080 /* Invert the numbering so the linker puts us in the proper
33081 order; constructors are run from right to left, and the
33082 linker sorts in increasing order. */
33083 MAX_INIT_PRIORITY - priority);
33084 section = buf;
33085 }
33086
33087 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33088 assemble_align (POINTER_SIZE);
33089
33090 if (DEFAULT_ABI == ABI_V4
33091 && (TARGET_RELOCATABLE || flag_pic > 1))
33092 {
33093 fputs ("\t.long (", asm_out_file);
33094 output_addr_const (asm_out_file, symbol);
33095 fputs (")@fixup\n", asm_out_file);
33096 }
33097 else
33098 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33099 }
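/* E.g., a constructor with priority 65000 lands in section
   ".ctors.00535" (MAX_INIT_PRIORITY being 65535), so the linker's
   increasing sort produces the required reverse-priority order.  */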
33100
33101 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33102 static void
33103 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33104 {
33105 const char *section = ".dtors";
33106 char buf[18];
33107
33108 if (priority != DEFAULT_INIT_PRIORITY)
33109 {
33110 sprintf (buf, ".dtors.%.5u",
33111 /* Invert the numbering so the linker puts us in the proper
33112 order; constructors are run from right to left, and the
33113 linker sorts in increasing order. */
33114 MAX_INIT_PRIORITY - priority);
33115 section = buf;
33116 }
33117
33118 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33119 assemble_align (POINTER_SIZE);
33120
33121 if (DEFAULT_ABI == ABI_V4
33122 && (TARGET_RELOCATABLE || flag_pic > 1))
33123 {
33124 fputs ("\t.long (", asm_out_file);
33125 output_addr_const (asm_out_file, symbol);
33126 fputs (")@fixup\n", asm_out_file);
33127 }
33128 else
33129 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33130 }
33131
33132 void
33133 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33134 {
33135 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33136 {
33137 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33138 ASM_OUTPUT_LABEL (file, name);
33139 fputs (DOUBLE_INT_ASM_OP, file);
33140 rs6000_output_function_entry (file, name);
33141 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33142 if (DOT_SYMBOLS)
33143 {
33144 fputs ("\t.size\t", file);
33145 assemble_name (file, name);
33146 fputs (",24\n\t.type\t.", file);
33147 assemble_name (file, name);
33148 fputs (",@function\n", file);
33149 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33150 {
33151 fputs ("\t.globl\t.", file);
33152 assemble_name (file, name);
33153 putc ('\n', file);
33154 }
33155 }
33156 else
33157 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33158 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33159 rs6000_output_function_entry (file, name);
33160 fputs (":\n", file);
33161 return;
33162 }
33163
33164 int uses_toc;
33165 if (DEFAULT_ABI == ABI_V4
33166 && (TARGET_RELOCATABLE || flag_pic > 1)
33167 && !TARGET_SECURE_PLT
33168 && (!constant_pool_empty_p () || crtl->profile)
33169 && (uses_toc = uses_TOC ()))
33170 {
33171 char buf[256];
33172
33173 if (uses_toc == 2)
33174 switch_to_other_text_partition ();
33175 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33176
33177 fprintf (file, "\t.long ");
33178 assemble_name (file, toc_label_name);
33179 need_toc_init = 1;
33180 putc ('-', file);
33181 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33182 assemble_name (file, buf);
33183 putc ('\n', file);
33184 if (uses_toc == 2)
33185 switch_to_other_text_partition ();
33186 }
33187
33188 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33189 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33190
33191 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33192 {
33193 char buf[256];
33194
33195 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33196
33197 fprintf (file, "\t.quad .TOC.-");
33198 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33199 assemble_name (file, buf);
33200 putc ('\n', file);
33201 }
33202
33203 if (DEFAULT_ABI == ABI_AIX)
33204 {
33205 const char *desc_name, *orig_name;
33206
33207 orig_name = (*targetm.strip_name_encoding) (name);
33208 desc_name = orig_name;
33209 while (*desc_name == '.')
33210 desc_name++;
33211
33212 if (TREE_PUBLIC (decl))
33213 fprintf (file, "\t.globl %s\n", desc_name);
33214
33215 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33216 fprintf (file, "%s:\n", desc_name);
33217 fprintf (file, "\t.long %s\n", orig_name);
33218 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33219 fputs ("\t.long 0\n", file);
33220 fprintf (file, "\t.previous\n");
33221 }
33222 ASM_OUTPUT_LABEL (file, name);
33223 }
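/* For 64-bit ELFv1 with dot symbols, the first branch above emits
   roughly (assuming DOUBLE_INT_ASM_OP is "\t.quad\t"):

	.section ".opd","aw"
	.align 3
   foo:
	.quad .foo,.TOC.@tocbase,0
	.previous
	.size foo,24
	.type .foo,@function
	.globl .foo
   .foo:
*/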
33224
33225 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33226 static void
33227 rs6000_elf_file_end (void)
33228 {
33229 #ifdef HAVE_AS_GNU_ATTRIBUTE
33230 /* ??? The value emitted depends on options active at file end.
33231 Assume anyone using #pragma or attributes that might change
33232 options knows what they are doing. */
33233 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33234 && rs6000_passes_float)
33235 {
33236 int fp;
33237
33238 if (TARGET_HARD_FLOAT)
33239 fp = 1;
33240 else
33241 fp = 2;
33242 if (rs6000_passes_long_double)
33243 {
33244 if (!TARGET_LONG_DOUBLE_128)
33245 fp |= 2 * 4;
33246 else if (TARGET_IEEEQUAD)
33247 fp |= 3 * 4;
33248 else
33249 fp |= 1 * 4;
33250 }
33251 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33252 }
33253 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33254 {
33255 if (rs6000_passes_vector)
33256 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33257 (TARGET_ALTIVEC_ABI ? 2 : 1));
33258 if (rs6000_returns_struct)
33259 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33260 aix_struct_return ? 2 : 1);
33261 }
33262 #endif
33263 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33264 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33265 file_end_indicate_exec_stack ();
33266 #endif
33267
33268 if (flag_split_stack)
33269 file_end_indicate_split_stack ();
33270
33271 if (cpu_builtin_p)
33272 {
33273 /* We have expanded a CPU builtin, so we need to emit a reference to
33274 the special symbol that LIBC uses to declare it supports the
33275 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33276 switch_to_section (data_section);
33277 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33278 fprintf (asm_out_file, "\t%s %s\n",
33279 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33280 }
33281 }
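/* Decoding the .gnu_attribute 4 value built above: the low two bits
   give the FP ABI (1 = hard float, 2 = soft float), and bits 2-3 give
   the long double format (1 = 128-bit IBM, 2 = 64-bit, 3 = 128-bit
   IEEE), matching the fp |= N * 4 updates.  */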
33282 #endif
33283
33284 #if TARGET_XCOFF
33285
33286 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33287 #define HAVE_XCOFF_DWARF_EXTRAS 0
33288 #endif
33289
33290 static enum unwind_info_type
33291 rs6000_xcoff_debug_unwind_info (void)
33292 {
33293 return UI_NONE;
33294 }
33295
33296 static void
33297 rs6000_xcoff_asm_output_anchor (rtx symbol)
33298 {
33299 char buffer[100];
33300
33301 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33302 SYMBOL_REF_BLOCK_OFFSET (symbol));
33303 fprintf (asm_out_file, "%s", SET_ASM_OP);
33304 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33305 fprintf (asm_out_file, ",");
33306 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33307 fprintf (asm_out_file, "\n");
33308 }
33309
33310 static void
33311 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33312 {
33313 fputs (GLOBAL_ASM_OP, stream);
33314 RS6000_OUTPUT_BASENAME (stream, name);
33315 putc ('\n', stream);
33316 }
33317
33318 /* A get_unnamed_section callback, used for read-only sections.
33319 DIRECTIVE points to the section string variable. */
33320
33321 static void
33322 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33323 {
33324 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33325 *(const char *const *) directive,
33326 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33327 }
33328
33329 /* Likewise for read-write sections. */
33330
33331 static void
33332 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33333 {
33334 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33335 *(const char *const *) directive,
33336 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33337 }
33338
33339 static void
33340 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33341 {
33342 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33343 *(const char *const *) directive,
33344 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33345 }
33346
33347 /* A get_unnamed_section callback, used for switching to toc_section. */
33348
33349 static void
33350 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33351 {
33352 if (TARGET_MINIMAL_TOC)
33353 {
33354 /* toc_section is always selected at least once from
33355 rs6000_xcoff_file_start, so this is guaranteed to be
33356 defined exactly once in each file. */
33357 if (!toc_initialized)
33358 {
33359 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33360 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33361 toc_initialized = 1;
33362 }
33363 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33364 (TARGET_32BIT ? "" : ",3"));
33365 }
33366 else
33367 fputs ("\t.toc\n", asm_out_file);
33368 }
33369
33370 /* Implement TARGET_ASM_INIT_SECTIONS. */
33371
33372 static void
33373 rs6000_xcoff_asm_init_sections (void)
33374 {
33375 read_only_data_section
33376 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33377 &xcoff_read_only_section_name);
33378
33379 private_data_section
33380 = get_unnamed_section (SECTION_WRITE,
33381 rs6000_xcoff_output_readwrite_section_asm_op,
33382 &xcoff_private_data_section_name);
33383
33384 tls_data_section
33385 = get_unnamed_section (SECTION_TLS,
33386 rs6000_xcoff_output_tls_section_asm_op,
33387 &xcoff_tls_data_section_name);
33388
33389 tls_private_data_section
33390 = get_unnamed_section (SECTION_TLS,
33391 rs6000_xcoff_output_tls_section_asm_op,
33392 &xcoff_private_data_section_name);
33393
33394 read_only_private_data_section
33395 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33396 &xcoff_private_data_section_name);
33397
33398 toc_section
33399 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33400
33401 readonly_data_section = read_only_data_section;
33402 }
33403
33404 static int
33405 rs6000_xcoff_reloc_rw_mask (void)
33406 {
33407 return 3;
33408 }
33409
33410 static void
33411 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33412 tree decl ATTRIBUTE_UNUSED)
33413 {
33414 int smclass;
33415 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33416
33417 if (flags & SECTION_EXCLUDE)
33418 smclass = 4;
33419 else if (flags & SECTION_DEBUG)
33420 {
33421 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33422 return;
33423 }
33424 else if (flags & SECTION_CODE)
33425 smclass = 0;
33426 else if (flags & SECTION_TLS)
33427 smclass = 3;
33428 else if (flags & SECTION_WRITE)
33429 smclass = 2;
33430 else
33431 smclass = 1;
33432
33433 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33434 (flags & SECTION_CODE) ? "." : "",
33435 name, suffix[smclass], flags & SECTION_ENTSIZE);
33436 }
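/* E.g., a writable named section "mydata" with 8-byte alignment comes
   out as

	.csect mydata[RW],3

   where the trailing operand is the log2 alignment stored in the
   SECTION_ENTSIZE bits by rs6000_xcoff_section_type_flags below.  */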
33437
33438 #define IN_NAMED_SECTION(DECL) \
33439 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33440 && DECL_SECTION_NAME (DECL) != NULL)
33441
33442 static section *
33443 rs6000_xcoff_select_section (tree decl, int reloc,
33444 unsigned HOST_WIDE_INT align)
33445 {
33446 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33447 named section. */
33448 if (align > BIGGEST_ALIGNMENT)
33449 {
33450 resolve_unique_section (decl, reloc, true);
33451 if (IN_NAMED_SECTION (decl))
33452 return get_named_section (decl, NULL, reloc);
33453 }
33454
33455 if (decl_readonly_section (decl, reloc))
33456 {
33457 if (TREE_PUBLIC (decl))
33458 return read_only_data_section;
33459 else
33460 return read_only_private_data_section;
33461 }
33462 else
33463 {
33464 #if HAVE_AS_TLS
33465 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33466 {
33467 if (TREE_PUBLIC (decl))
33468 return tls_data_section;
33469 else if (bss_initializer_p (decl))
33470 {
33471 /* Convert to COMMON to emit in BSS. */
33472 DECL_COMMON (decl) = 1;
33473 return tls_comm_section;
33474 }
33475 else
33476 return tls_private_data_section;
33477 }
33478 else
33479 #endif
33480 if (TREE_PUBLIC (decl))
33481 return data_section;
33482 else
33483 return private_data_section;
33484 }
33485 }
33486
33487 static void
33488 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33489 {
33490 const char *name;
33491
33492 /* Use select_section for private data and uninitialized data with
33493 alignment <= BIGGEST_ALIGNMENT. */
33494 if (!TREE_PUBLIC (decl)
33495 || DECL_COMMON (decl)
33496 || (DECL_INITIAL (decl) == NULL_TREE
33497 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33498 || DECL_INITIAL (decl) == error_mark_node
33499 || (flag_zero_initialized_in_bss
33500 && initializer_zerop (DECL_INITIAL (decl))))
33501 return;
33502
33503 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33504 name = (*targetm.strip_name_encoding) (name);
33505 set_decl_section_name (decl, name);
33506 }
33507
33508 /* Select section for constant in constant pool.
33509
33510 On RS/6000, all constants are in the private read-only data area.
33511 However, if this is being placed in the TOC it must be output as a
33512 toc entry. */
33513
33514 static section *
33515 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33516 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33517 {
33518 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33519 return toc_section;
33520 else
33521 return read_only_private_data_section;
33522 }
33523
33524 /* Remove any trailing [DS] or the like from the symbol name. */
33525
33526 static const char *
33527 rs6000_xcoff_strip_name_encoding (const char *name)
33528 {
33529 size_t len;
33530 if (*name == '*')
33531 name++;
33532 len = strlen (name);
33533 if (name[len - 1] == ']')
33534 return ggc_alloc_string (name, len - 4);
33535 else
33536 return name;
33537 }
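/* E.g., "foo[DS]" and "*foo[RW]" both become "foo"; the code assumes
   any trailing bracketed suffix is exactly four characters long.  */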
33538
33539 /* Section attributes. AIX is always PIC. */
33540
33541 static unsigned int
33542 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33543 {
33544 unsigned int align;
33545 unsigned int flags = default_section_type_flags (decl, name, reloc);
33546
33547 /* Align to at least UNIT size. */
33548 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33549 align = MIN_UNITS_PER_WORD;
33550 else
33551 /* Increase alignment of large objects if not already stricter. */
33552 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33553 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33554 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33555
33556 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33557 }
33558
33559 /* Output at beginning of assembler file.
33560
33561 Initialize the section names for the RS/6000 at this point.
33562
33563 Specify filename, including full path, to assembler.
33564
33565 We want to go into the TOC section so at least one .toc will be emitted.
33566 Also, in order to output proper .bs/.es pairs, we need at least one static
33567 [RW] section emitted.
33568
33569 Finally, declare mcount when profiling to make the assembler happy. */
33570
33571 static void
33572 rs6000_xcoff_file_start (void)
33573 {
33574 rs6000_gen_section_name (&xcoff_bss_section_name,
33575 main_input_filename, ".bss_");
33576 rs6000_gen_section_name (&xcoff_private_data_section_name,
33577 main_input_filename, ".rw_");
33578 rs6000_gen_section_name (&xcoff_read_only_section_name,
33579 main_input_filename, ".ro_");
33580 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33581 main_input_filename, ".tls_");
33582 rs6000_gen_section_name (&xcoff_tbss_section_name,
33583 main_input_filename, ".tbss_[UL]");
33584
33585 fputs ("\t.file\t", asm_out_file);
33586 output_quoted_string (asm_out_file, main_input_filename);
33587 fputc ('\n', asm_out_file);
33588 if (write_symbols != NO_DEBUG)
33589 switch_to_section (private_data_section);
33590 switch_to_section (toc_section);
33591 switch_to_section (text_section);
33592 if (profile_flag)
33593 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33594 rs6000_file_start ();
33595 }
33596
33597 /* Output at end of assembler file.
33598 On the RS/6000, referencing data should automatically pull in text. */
33599
33600 static void
33601 rs6000_xcoff_file_end (void)
33602 {
33603 switch_to_section (text_section);
33604 fputs ("_section_.text:\n", asm_out_file);
33605 switch_to_section (data_section);
33606 fputs (TARGET_32BIT
33607 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33608 asm_out_file);
33609 }
33610
33611 struct declare_alias_data
33612 {
33613 FILE *file;
33614 bool function_descriptor;
33615 };
33616
33617 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33618
33619 static bool
33620 rs6000_declare_alias (struct symtab_node *n, void *d)
33621 {
33622 struct declare_alias_data *data = (struct declare_alias_data *)d;
33623 /* Main symbol is output specially, because varasm machinery does part of
33624 the job for us - we do not need to declare .globl/lglobs and such. */
33625 if (!n->alias || n->weakref)
33626 return false;
33627
33628 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33629 return false;
33630
33631 /* Prevent assemble_alias from trying to use .set pseudo operation
33632 that does not behave as expected by the middle-end. */
33633 TREE_ASM_WRITTEN (n->decl) = true;
33634
33635 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33636 char *buffer = (char *) alloca (strlen (name) + 2);
33637 char *p;
33638 int dollar_inside = 0;
33639
33640 strcpy (buffer, name);
33641 p = strchr (buffer, '$');
33642 while (p) {
33643 *p = '_';
33644 dollar_inside++;
33645 p = strchr (p + 1, '$');
33646 }
33647 if (TREE_PUBLIC (n->decl))
33648 {
33649 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33650 {
33651 if (dollar_inside) {
33652 if (data->function_descriptor)
33653 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33654 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33655 }
33656 if (data->function_descriptor)
33657 {
33658 fputs ("\t.globl .", data->file);
33659 RS6000_OUTPUT_BASENAME (data->file, buffer);
33660 putc ('\n', data->file);
33661 }
33662 fputs ("\t.globl ", data->file);
33663 RS6000_OUTPUT_BASENAME (data->file, buffer);
33664 putc ('\n', data->file);
33665 }
33666 #ifdef ASM_WEAKEN_DECL
33667 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33668 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33669 #endif
33670 }
33671 else
33672 {
33673 if (dollar_inside)
33674 {
33675 if (data->function_descriptor)
33676 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33677 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33678 }
33679 if (data->function_descriptor)
33680 {
33681 fputs ("\t.lglobl .", data->file);
33682 RS6000_OUTPUT_BASENAME (data->file, buffer);
33683 putc ('\n', data->file);
33684 }
33685 fputs ("\t.lglobl ", data->file);
33686 RS6000_OUTPUT_BASENAME (data->file, buffer);
33687 putc ('\n', data->file);
33688 }
33689 if (data->function_descriptor)
33690 fputs (".", data->file);
33691 RS6000_OUTPUT_BASENAME (data->file, buffer);
33692 fputs (":\n", data->file);
33693 return false;
33694 }
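/* E.g., a public (non-weak) alias named "a$b" is emitted as

	.rename a_b,"a$b"
	.globl a_b
   a_b:

   rewriting '$' to '_' and mapping the name back via .rename.  */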
33695
33696
33697 #ifdef HAVE_GAS_HIDDEN
33698 /* Helper function to calculate visibility of a DECL
33699 and return the value as a const string. */
33700
33701 static const char *
33702 rs6000_xcoff_visibility (tree decl)
33703 {
33704 static const char * const visibility_types[] = {
33705 "", ",protected", ",hidden", ",internal"
33706 };
33707
33708 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33709 return visibility_types[vis];
33710 }
33711 #endif
33712
33713
33714 /* This macro produces the initial definition of a function name.
33715 On the RS/6000, we need to place an extra '.' in the function name and
33716 output the function descriptor.
33717 Dollar signs are converted to underscores.
33718
33719 The csect for the function will have already been created when
33720 text_section was selected. We do have to go back to that csect, however.
33721
33722 The third and fourth parameters to the .function pseudo-op (16 and 044)
33723 are placeholders which no longer have any use.
33724
33725 Because AIX assembler's .set command has unexpected semantics, we output
33726 all aliases as alternative labels in front of the definition. */
33727
33728 void
33729 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33730 {
33731 char *buffer = (char *) alloca (strlen (name) + 1);
33732 char *p;
33733 int dollar_inside = 0;
33734 struct declare_alias_data data = {file, false};
33735
33736 strcpy (buffer, name);
33737 p = strchr (buffer, '$');
33738 while (p) {
33739 *p = '_';
33740 dollar_inside++;
33741 p = strchr (p + 1, '$');
33742 }
33743 if (TREE_PUBLIC (decl))
33744 {
33745 if (!RS6000_WEAK || !DECL_WEAK (decl))
33746 {
33747 if (dollar_inside) {
33748 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33749 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33750 }
33751 fputs ("\t.globl .", file);
33752 RS6000_OUTPUT_BASENAME (file, buffer);
33753 #ifdef HAVE_GAS_HIDDEN
33754 fputs (rs6000_xcoff_visibility (decl), file);
33755 #endif
33756 putc ('\n', file);
33757 }
33758 }
33759 else
33760 {
33761 if (dollar_inside) {
33762 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33763 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33764 }
33765 fputs ("\t.lglobl .", file);
33766 RS6000_OUTPUT_BASENAME (file, buffer);
33767 putc ('\n', file);
33768 }
33769 fputs ("\t.csect ", file);
33770 RS6000_OUTPUT_BASENAME (file, buffer);
33771 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33772 RS6000_OUTPUT_BASENAME (file, buffer);
33773 fputs (":\n", file);
33774 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33775 &data, true);
33776 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33777 RS6000_OUTPUT_BASENAME (file, buffer);
33778 fputs (", TOC[tc0], 0\n", file);
33779 in_section = NULL;
33780 switch_to_section (function_section (decl));
33781 putc ('.', file);
33782 RS6000_OUTPUT_BASENAME (file, buffer);
33783 fputs (":\n", file);
33784 data.function_descriptor = true;
33785 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33786 &data, true);
33787 if (!DECL_IGNORED_P (decl))
33788 {
33789 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33790 xcoffout_declare_function (file, decl, buffer);
33791 else if (write_symbols == DWARF2_DEBUG)
33792 {
33793 name = (*targetm.strip_name_encoding) (name);
33794 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33795 }
33796 }
33797 return;
33798 }
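/* For a public 32-bit function foo this emits roughly (the text csect
   name is illustrative):

	.globl .foo
	.csect foo[DS]
   foo:
	.long .foo, TOC[tc0], 0
	.csect .text[PR]
   .foo:

   i.e. the function descriptor in foo[DS] followed by the code entry
   label.  */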
33799
33800
33801 /* Output assembly language to globalize a symbol from a DECL,
33802 possibly with visibility. */
33803
33804 void
33805 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33806 {
33807 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33808 fputs (GLOBAL_ASM_OP, stream);
33809 RS6000_OUTPUT_BASENAME (stream, name);
33810 #ifdef HAVE_GAS_HIDDEN
33811 fputs (rs6000_xcoff_visibility (decl), stream);
33812 #endif
33813 putc ('\n', stream);
33814 }
33815
33816 /* Output assembly language to define a symbol as COMMON from a DECL,
33817 possibly with visibility. */
33818
33819 void
33820 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33821 tree decl ATTRIBUTE_UNUSED,
33822 const char *name,
33823 unsigned HOST_WIDE_INT size,
33824 unsigned HOST_WIDE_INT align)
33825 {
33826 unsigned HOST_WIDE_INT align2 = 2;
33827
33828 if (align > 32)
33829 align2 = floor_log2 (align / BITS_PER_UNIT);
33830 else if (size > 4)
33831 align2 = 3;
33832
33833 fputs (COMMON_ASM_OP, stream);
33834 RS6000_OUTPUT_BASENAME (stream, name);
33835
33836 fprintf (stream,
33837 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33838 size, align2);
33839
33840 #ifdef HAVE_GAS_HIDDEN
33841 if (decl != NULL)
33842 fputs (rs6000_xcoff_visibility (decl), stream);
33843 #endif
33844 putc ('\n', stream);
33845 }
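/* E.g., with alignment of 32 bits or less, a 16-byte common symbol
   foo is emitted as ".comm foo,16,3" (any size > 4 selects 2^3
   alignment), assuming COMMON_ASM_OP is "\t.comm\t".  */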
33846
33847 /* This macro produces the initial definition of an object (variable) name.
33848 Because AIX assembler's .set command has unexpected semantics, we output
33849 all aliases as alternative labels in front of the definition. */
33850
33851 void
33852 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33853 {
33854 struct declare_alias_data data = {file, false};
33855 RS6000_OUTPUT_BASENAME (file, name);
33856 fputs (":\n", file);
33857 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33858 &data, true);
33859 }
33860
33861 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33862
33863 void
33864 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33865 {
33866 fputs (integer_asm_op (size, FALSE), file);
33867 assemble_name (file, label);
33868 fputs ("-$", file);
33869 }
33870
33871 /* Output a symbol offset relative to the dbase for the current object.
33872 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33873 signed offsets.
33874
33875 __gcc_unwind_dbase is embedded in all executables/libraries through
33876 libgcc/config/rs6000/crtdbase.S. */
33877
33878 void
33879 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
33880 {
33881 fputs (integer_asm_op (size, FALSE), file);
33882 assemble_name (file, label);
33883 fputs("-__gcc_unwind_dbase", file);
33884 }
33885
33886 #ifdef HAVE_AS_TLS
33887 static void
33888 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
33889 {
33890 rtx symbol;
33891 int flags;
33892 const char *symname;
33893
33894 default_encode_section_info (decl, rtl, first);
33895
33896 /* Careful not to prod global register variables. */
33897 if (!MEM_P (rtl))
33898 return;
33899 symbol = XEXP (rtl, 0);
33900 if (GET_CODE (symbol) != SYMBOL_REF)
33901 return;
33902
33903 flags = SYMBOL_REF_FLAGS (symbol);
33904
33905 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33906 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
33907
33908 SYMBOL_REF_FLAGS (symbol) = flags;
33909
33910 /* Append mapping class to extern decls. */
33911 symname = XSTR (symbol, 0);
33912 if (decl /* sync condition with assemble_external () */
33913 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
33914 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
33915 || TREE_CODE (decl) == FUNCTION_DECL)
33916 && symname[strlen (symname) - 1] != ']')
33917 {
33918 char *newname = (char *) alloca (strlen (symname) + 5);
33919 strcpy (newname, symname);
33920 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
33921 ? "[DS]" : "[UA]"));
33922 XSTR (symbol, 0) = ggc_strdup (newname);
33923 }
33924 }
33925 #endif /* HAVE_AS_TLS */
33926 #endif /* TARGET_XCOFF */
33927
33928 void
33929 rs6000_asm_weaken_decl (FILE *stream, tree decl,
33930 const char *name, const char *val)
33931 {
33932 fputs ("\t.weak\t", stream);
33933 RS6000_OUTPUT_BASENAME (stream, name);
33934 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33935 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33936 {
33937 if (TARGET_XCOFF)
33938 fputs ("[DS]", stream);
33939 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33940 if (TARGET_XCOFF)
33941 fputs (rs6000_xcoff_visibility (decl), stream);
33942 #endif
33943 fputs ("\n\t.weak\t.", stream);
33944 RS6000_OUTPUT_BASENAME (stream, name);
33945 }
33946 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
33947 if (TARGET_XCOFF)
33948 fputs (rs6000_xcoff_visibility (decl), stream);
33949 #endif
33950 fputc ('\n', stream);
33951 if (val)
33952 {
33953 #ifdef ASM_OUTPUT_DEF
33954 ASM_OUTPUT_DEF (stream, name, val);
33955 #endif
33956 if (decl && TREE_CODE (decl) == FUNCTION_DECL
33957 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
33958 {
33959 fputs ("\t.set\t.", stream);
33960 RS6000_OUTPUT_BASENAME (stream, name);
33961 fputs (",.", stream);
33962 RS6000_OUTPUT_BASENAME (stream, val);
33963 fputc ('\n', stream);
33964 }
33965 }
33966 }
33967
33968
33969 /* Return true if INSN should not be copied. */
33970
33971 static bool
33972 rs6000_cannot_copy_insn_p (rtx_insn *insn)
33973 {
33974 return recog_memoized (insn) >= 0
33975 && get_attr_cannot_copy (insn);
33976 }
33977
33978 /* Compute a (partial) cost for rtx X. Return true if the complete
33979 cost has been computed, and false if subexpressions should be
33980 scanned. In either case, *TOTAL contains the cost result. */
33981
33982 static bool
33983 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
33984 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
33985 {
33986 int code = GET_CODE (x);
33987
33988 switch (code)
33989 {
33990 /* On the RS/6000, if it is valid in the insn, it is free. */
33991 case CONST_INT:
33992 if (((outer_code == SET
33993 || outer_code == PLUS
33994 || outer_code == MINUS)
33995 && (satisfies_constraint_I (x)
33996 || satisfies_constraint_L (x)))
33997 || (outer_code == AND
33998 && (satisfies_constraint_K (x)
33999 || (mode == SImode
34000 ? satisfies_constraint_L (x)
34001 : satisfies_constraint_J (x))))
34002 || ((outer_code == IOR || outer_code == XOR)
34003 && (satisfies_constraint_K (x)
34004 || (mode == SImode
34005 ? satisfies_constraint_L (x)
34006 : satisfies_constraint_J (x))))
34007 || outer_code == ASHIFT
34008 || outer_code == ASHIFTRT
34009 || outer_code == LSHIFTRT
34010 || outer_code == ROTATE
34011 || outer_code == ROTATERT
34012 || outer_code == ZERO_EXTRACT
34013 || (outer_code == MULT
34014 && satisfies_constraint_I (x))
34015 || ((outer_code == DIV || outer_code == UDIV
34016 || outer_code == MOD || outer_code == UMOD)
34017 && exact_log2 (INTVAL (x)) >= 0)
34018 || (outer_code == COMPARE
34019 && (satisfies_constraint_I (x)
34020 || satisfies_constraint_K (x)))
34021 || ((outer_code == EQ || outer_code == NE)
34022 && (satisfies_constraint_I (x)
34023 || satisfies_constraint_K (x)
34024 || (mode == SImode
34025 ? satisfies_constraint_L (x)
34026 : satisfies_constraint_J (x))))
34027 || (outer_code == GTU
34028 && satisfies_constraint_I (x))
34029 || (outer_code == LTU
34030 && satisfies_constraint_P (x)))
34031 {
34032 *total = 0;
34033 return true;
34034 }
34035 else if ((outer_code == PLUS
34036 && reg_or_add_cint_operand (x, VOIDmode))
34037 || (outer_code == MINUS
34038 && reg_or_sub_cint_operand (x, VOIDmode))
34039 || ((outer_code == SET
34040 || outer_code == IOR
34041 || outer_code == XOR)
34042 && (INTVAL (x)
34043 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34044 {
34045 *total = COSTS_N_INSNS (1);
34046 return true;
34047 }
34048 /* FALLTHRU */
34049
34050 case CONST_DOUBLE:
34051 case CONST_WIDE_INT:
34052 case CONST:
34053 case HIGH:
34054 case SYMBOL_REF:
34055 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34056 return true;
34057
34058 case MEM:
34059 /* When optimizing for size, MEM should be slightly more expensive
34060 than generating the address, e.g., (plus (reg) (const)).
34061 L1 cache latency is about two instructions. */
34062 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34063 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34064 *total += COSTS_N_INSNS (100);
34065 return true;
34066
34067 case LABEL_REF:
34068 *total = 0;
34069 return true;
34070
34071 case PLUS:
34072 case MINUS:
34073 if (FLOAT_MODE_P (mode))
34074 *total = rs6000_cost->fp;
34075 else
34076 *total = COSTS_N_INSNS (1);
34077 return false;
34078
34079 case MULT:
34080 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34081 && satisfies_constraint_I (XEXP (x, 1)))
34082 {
34083 if (INTVAL (XEXP (x, 1)) >= -256
34084 && INTVAL (XEXP (x, 1)) <= 255)
34085 *total = rs6000_cost->mulsi_const9;
34086 else
34087 *total = rs6000_cost->mulsi_const;
34088 }
34089 else if (mode == SFmode)
34090 *total = rs6000_cost->fp;
34091 else if (FLOAT_MODE_P (mode))
34092 *total = rs6000_cost->dmul;
34093 else if (mode == DImode)
34094 *total = rs6000_cost->muldi;
34095 else
34096 *total = rs6000_cost->mulsi;
34097 return false;
34098
34099 case FMA:
34100 if (mode == SFmode)
34101 *total = rs6000_cost->fp;
34102 else
34103 *total = rs6000_cost->dmul;
34104 break;
34105
34106 case DIV:
34107 case MOD:
34108 if (FLOAT_MODE_P (mode))
34109 {
34110 *total = mode == DFmode ? rs6000_cost->ddiv
34111 : rs6000_cost->sdiv;
34112 return false;
34113 }
34114 /* FALLTHRU */
34115
34116 case UDIV:
34117 case UMOD:
34118 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34119 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34120 {
34121 if (code == DIV || code == MOD)
34122 /* Shift, addze */
34123 *total = COSTS_N_INSNS (2);
34124 else
34125 /* Shift */
34126 *total = COSTS_N_INSNS (1);
34127 }
34128 else
34129 {
34130 if (GET_MODE (XEXP (x, 1)) == DImode)
34131 *total = rs6000_cost->divdi;
34132 else
34133 *total = rs6000_cost->divsi;
34134 }
34135 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34136 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34137 *total += COSTS_N_INSNS (2);
34138 return false;
34139
34140 case CTZ:
34141 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34142 return false;
34143
34144 case FFS:
34145 *total = COSTS_N_INSNS (4);
34146 return false;
34147
34148 case POPCOUNT:
34149 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34150 return false;
34151
34152 case PARITY:
34153 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34154 return false;
34155
34156 case NOT:
34157 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34158 *total = 0;
34159 else
34160 *total = COSTS_N_INSNS (1);
34161 return false;
34162
34163 case AND:
34164 if (CONST_INT_P (XEXP (x, 1)))
34165 {
34166 rtx left = XEXP (x, 0);
34167 rtx_code left_code = GET_CODE (left);
34168
34169 /* rotate-and-mask: 1 insn. */
34170 if ((left_code == ROTATE
34171 || left_code == ASHIFT
34172 || left_code == LSHIFTRT)
34173 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34174 {
34175 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34176 if (!CONST_INT_P (XEXP (left, 1)))
34177 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34178 *total += COSTS_N_INSNS (1);
34179 return true;
34180 }
34181
34182 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34183 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34184 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34185 || (val & 0xffff) == val
34186 || (val & 0xffff0000) == val
34187 || ((val & 0xffff) == 0 && mode == SImode))
34188 {
34189 *total = rtx_cost (left, mode, AND, 0, speed);
34190 *total += COSTS_N_INSNS (1);
34191 return true;
34192 }
34193
34194 /* 2 insns. */
34195 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34196 {
34197 *total = rtx_cost (left, mode, AND, 0, speed);
34198 *total += COSTS_N_INSNS (2);
34199 return true;
34200 }
34201 }
34202
34203 *total = COSTS_N_INSNS (1);
34204 return false;
34205
34206 case IOR:
34207 /* FIXME */
34208 *total = COSTS_N_INSNS (1);
34209 return true;
34210
34211 case CLZ:
34212 case XOR:
34213 case ZERO_EXTRACT:
34214 *total = COSTS_N_INSNS (1);
34215 return false;
34216
34217 case ASHIFT:
34218 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34219 the sign extend and shift separately within the insn. */
34220 if (TARGET_EXTSWSLI && mode == DImode
34221 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34222 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34223 {
34224 *total = 0;
34225 return false;
34226 }
34227 /* fall through */
34228
34229 case ASHIFTRT:
34230 case LSHIFTRT:
34231 case ROTATE:
34232 case ROTATERT:
34233 /* Handle mul_highpart. */
34234 if (outer_code == TRUNCATE
34235 && GET_CODE (XEXP (x, 0)) == MULT)
34236 {
34237 if (mode == DImode)
34238 *total = rs6000_cost->muldi;
34239 else
34240 *total = rs6000_cost->mulsi;
34241 return true;
34242 }
34243 else if (outer_code == AND)
34244 *total = 0;
34245 else
34246 *total = COSTS_N_INSNS (1);
34247 return false;
34248
34249 case SIGN_EXTEND:
34250 case ZERO_EXTEND:
34251 if (GET_CODE (XEXP (x, 0)) == MEM)
34252 *total = 0;
34253 else
34254 *total = COSTS_N_INSNS (1);
34255 return false;
34256
34257 case COMPARE:
34258 case NEG:
34259 case ABS:
34260 if (!FLOAT_MODE_P (mode))
34261 {
34262 *total = COSTS_N_INSNS (1);
34263 return false;
34264 }
34265 /* FALLTHRU */
34266
34267 case FLOAT:
34268 case UNSIGNED_FLOAT:
34269 case FIX:
34270 case UNSIGNED_FIX:
34271 case FLOAT_TRUNCATE:
34272 *total = rs6000_cost->fp;
34273 return false;
34274
34275 case FLOAT_EXTEND:
34276 if (mode == DFmode)
34277 *total = rs6000_cost->sfdf_convert;
34278 else
34279 *total = rs6000_cost->fp;
34280 return false;
34281
34282 case UNSPEC:
34283 switch (XINT (x, 1))
34284 {
34285 case UNSPEC_FRSP:
34286 *total = rs6000_cost->fp;
34287 return true;
34288
34289 default:
34290 break;
34291 }
34292 break;
34293
34294 case CALL:
34295 case IF_THEN_ELSE:
34296 if (!speed)
34297 {
34298 *total = COSTS_N_INSNS (1);
34299 return true;
34300 }
34301 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34302 {
34303 *total = rs6000_cost->fp;
34304 return false;
34305 }
34306 break;
34307
34308 case NE:
34309 case EQ:
34310 case GTU:
34311 case LTU:
34312 /* Carry bit requires mode == Pmode.
34313 NEG or PLUS already counted so only add one. */
34314 if (mode == Pmode
34315 && (outer_code == NEG || outer_code == PLUS))
34316 {
34317 *total = COSTS_N_INSNS (1);
34318 return true;
34319 }
34320 /* FALLTHRU */
34321
34322 case GT:
34323 case LT:
34324 case UNORDERED:
34325 if (outer_code == SET)
34326 {
34327 if (XEXP (x, 1) == const0_rtx)
34328 {
34329 *total = COSTS_N_INSNS (2);
34330 return true;
34331 }
34332 else
34333 {
34334 *total = COSTS_N_INSNS (3);
34335 return false;
34336 }
34337 }
34338 /* CC COMPARE. */
34339 if (outer_code == COMPARE)
34340 {
34341 *total = 0;
34342 return true;
34343 }
34344 break;
34345
34346 default:
34347 break;
34348 }
34349
34350 return false;
34351 }
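/* E.g., the CONST_INT in (plus (reg) (const_int 8)) satisfies
   constraint "I" (a signed 16-bit immediate) with outer_code PLUS,
   so the constant itself is costed at 0: it rides along in the add
   instruction for free.  */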
34352
34353 /* Debug form of r6000_rtx_costs that is selected if -mdebug=cost. */
34354
34355 static bool
34356 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34357 int opno, int *total, bool speed)
34358 {
34359 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34360
34361 fprintf (stderr,
34362 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34363 "opno = %d, total = %d, speed = %s, x:\n",
34364 ret ? "complete" : "scan inner",
34365 GET_MODE_NAME (mode),
34366 GET_RTX_NAME (outer_code),
34367 opno,
34368 *total,
34369 speed ? "true" : "false");
34370
34371 debug_rtx (x);
34372
34373 return ret;
34374 }
34375
34376 static int
34377 rs6000_insn_cost (rtx_insn *insn, bool speed)
34378 {
34379 if (recog_memoized (insn) < 0)
34380 return 0;
34381
34382 if (!speed)
34383 return get_attr_length (insn);
34384
34385 int cost = get_attr_cost (insn);
34386 if (cost > 0)
34387 return cost;
34388
34389 int n = get_attr_length (insn) / 4;
34390 enum attr_type type = get_attr_type (insn);
34391
34392 switch (type)
34393 {
34394 case TYPE_LOAD:
34395 case TYPE_FPLOAD:
34396 case TYPE_VECLOAD:
34397 cost = COSTS_N_INSNS (n + 1);
34398 break;
34399
34400 case TYPE_MUL:
34401 switch (get_attr_size (insn))
34402 {
34403 case SIZE_8:
34404 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34405 break;
34406 case SIZE_16:
34407 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34408 break;
34409 case SIZE_32:
34410 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34411 break;
34412 case SIZE_64:
34413 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34414 break;
34415 default:
34416 gcc_unreachable ();
34417 }
34418 break;
34419 case TYPE_DIV:
34420 switch (get_attr_size (insn))
34421 {
34422 case SIZE_32:
34423 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34424 break;
34425 case SIZE_64:
34426 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34427 break;
34428 default:
34429 gcc_unreachable ();
34430 }
34431 break;
34432
34433 case TYPE_FP:
34434 cost = n * rs6000_cost->fp;
34435 break;
34436 case TYPE_DMUL:
34437 cost = n * rs6000_cost->dmul;
34438 break;
34439 case TYPE_SDIV:
34440 cost = n * rs6000_cost->sdiv;
34441 break;
34442 case TYPE_DDIV:
34443 cost = n * rs6000_cost->ddiv;
34444 break;
34445
34446 case TYPE_SYNC:
34447 case TYPE_LOAD_L:
34448 case TYPE_MFCR:
34449 case TYPE_MFCRF:
34450 cost = COSTS_N_INSNS (n + 2);
34451 break;
34452
34453 default:
34454 cost = COSTS_N_INSNS (n);
34455 }
34456
34457 return cost;
34458 }
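/* E.g., a single 4-byte load insn (n == 1) of TYPE_LOAD is costed at
   COSTS_N_INSNS (2), reflecting the extra latency of a load.  */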
34459
34460 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34461
34462 static int
34463 rs6000_debug_address_cost (rtx x, machine_mode mode,
34464 addr_space_t as, bool speed)
34465 {
34466 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34467
34468 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34469 ret, speed ? "true" : "false");
34470 debug_rtx (x);
34471
34472 return ret;
34473 }
34474
34475
34476 /* A C expression returning the cost of moving data from a register of class
34477 CLASS1 to one of CLASS2. */
34478
34479 static int
34480 rs6000_register_move_cost (machine_mode mode,
34481 reg_class_t from, reg_class_t to)
34482 {
34483 int ret;
34484
34485 if (TARGET_DEBUG_COST)
34486 dbg_cost_ctrl++;
34487
34488 /* Moves from/to GENERAL_REGS. */
34489 if (reg_classes_intersect_p (to, GENERAL_REGS)
34490 || reg_classes_intersect_p (from, GENERAL_REGS))
34491 {
34492 reg_class_t rclass = from;
34493
34494 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34495 rclass = to;
34496
34497 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34498 ret = (rs6000_memory_move_cost (mode, rclass, false)
34499 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34500
34501 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34502 shift. */
34503 else if (rclass == CR_REGS)
34504 ret = 4;
34505
34506 /* For those processors that have slow LR/CTR moves, make them more
34507 expensive than memory in order to bias spills to memory. */
34508 else if ((rs6000_tune == PROCESSOR_POWER6
34509 || rs6000_tune == PROCESSOR_POWER7
34510 || rs6000_tune == PROCESSOR_POWER8
34511 || rs6000_tune == PROCESSOR_POWER9)
34512 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34513 ret = 6 * hard_regno_nregs (0, mode);
34514
34515 else
34516 /* A move will cost one instruction per GPR moved. */
34517 ret = 2 * hard_regno_nregs (0, mode);
34518 }
34519
34520 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34521 else if (VECTOR_MEM_VSX_P (mode)
34522 && reg_classes_intersect_p (to, VSX_REGS)
34523 && reg_classes_intersect_p (from, VSX_REGS))
34524 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34525
34526 /* Moving between two similar registers is just one instruction. */
34527 else if (reg_classes_intersect_p (to, from))
34528 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34529
34530 /* Everything else has to go through GENERAL_REGS. */
34531 else
34532 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34533 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34534
34535 if (TARGET_DEBUG_COST)
34536 {
34537 if (dbg_cost_ctrl == 1)
34538 fprintf (stderr,
34539 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34540 ret, GET_MODE_NAME (mode), reg_class_names[from],
34541 reg_class_names[to]);
34542 dbg_cost_ctrl--;
34543 }
34544
34545 return ret;
34546 }
34547
34548 /* A C expression returning the cost of moving data of MODE from a register to
34549 or from memory. */
34550
34551 static int
34552 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34553 bool in ATTRIBUTE_UNUSED)
34554 {
34555 int ret;
34556
34557 if (TARGET_DEBUG_COST)
34558 dbg_cost_ctrl++;
34559
34560 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34561 ret = 4 * hard_regno_nregs (0, mode);
34562 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34563 || reg_classes_intersect_p (rclass, VSX_REGS)))
34564 ret = 4 * hard_regno_nregs (32, mode);
34565 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34566 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34567 else
34568 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34569
34570 if (TARGET_DEBUG_COST)
34571 {
34572 if (dbg_cost_ctrl == 1)
34573 fprintf (stderr,
34574 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34575 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34576 dbg_cost_ctrl--;
34577 }
34578
34579 return ret;
34580 }
34581
34582 /* Return the decl of a target-specific builtin that implements the
34583 reciprocal of the function FNDECL, or NULL_TREE if none is available. */
34584
34585 static tree
34586 rs6000_builtin_reciprocal (tree fndecl)
34587 {
34588 switch (DECL_FUNCTION_CODE (fndecl))
34589 {
34590 case VSX_BUILTIN_XVSQRTDP:
34591 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34592 return NULL_TREE;
34593
34594 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34595
34596 case VSX_BUILTIN_XVSQRTSP:
34597 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34598 return NULL_TREE;
34599
34600 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34601
34602 default:
34603 return NULL_TREE;
34604 }
34605 }
34606
34607 /* Load up a constant. If the mode is a vector mode, splat the value across
34608 all of the vector elements. */
34609
34610 static rtx
34611 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34612 {
34613 rtx reg;
34614
34615 if (mode == SFmode || mode == DFmode)
34616 {
34617 rtx d = const_double_from_real_value (dconst, mode);
34618 reg = force_reg (mode, d);
34619 }
34620 else if (mode == V4SFmode)
34621 {
34622 rtx d = const_double_from_real_value (dconst, SFmode);
34623 rtvec v = gen_rtvec (4, d, d, d, d);
34624 reg = gen_reg_rtx (mode);
34625 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34626 }
34627 else if (mode == V2DFmode)
34628 {
34629 rtx d = const_double_from_real_value (dconst, DFmode);
34630 rtvec v = gen_rtvec (2, d, d);
34631 reg = gen_reg_rtx (mode);
34632 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34633 }
34634 else
34635 gcc_unreachable ();
34636
34637 return reg;
34638 }
34639
34640 /* Generate an FMA instruction. */
34641
34642 static void
34643 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34644 {
34645 machine_mode mode = GET_MODE (target);
34646 rtx dst;
34647
34648 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34649 gcc_assert (dst != NULL);
34650
34651 if (dst != target)
34652 emit_move_insn (target, dst);
34653 }
34654
34655 /* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a). */
34656
34657 static void
34658 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34659 {
34660 machine_mode mode = GET_MODE (dst);
34661 rtx r;
34662
34663 /* This is a tad more complicated, since the fnma_optab is for
34664 a different expression: fma(-m1, m2, a), which is the same
34665 thing except for the sign of an exact zero result.
34666
34667 Fortunately we know that if FMA is supported, then FNMSUB is
34668 also supported in the ISA. Just expand it directly. */
34669
34670 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34671
34672 r = gen_rtx_NEG (mode, a);
34673 r = gen_rtx_FMA (mode, m1, m2, r);
34674 r = gen_rtx_NEG (mode, r);
34675 emit_insn (gen_rtx_SET (dst, r));
34676 }
34677
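/* Editorial aside, not part of the original source: the signed-zero
   difference mentioned above in rs6000_emit_nmsub is easiest to see in
   scalar C.  A minimal sketch, assuming IEEE semantics and C99 fma()
   from <math.h>; the function names are hypothetical.  */
#if 0
/* fnma form: fma(-m1, m2, a).  When m1*m2 == a exactly, this computes
   a - m1*m2 = +0.0.  */
static double
fnma_form (double m1, double m2, double a)
{
  return fma (-m1, m2, a);
}

/* fnmsub form: -fma(m1, m2, -a).  When m1*m2 == a exactly, this computes
   -(m1*m2 - a) = -0.0, matching the PowerPC fnmsub instruction.  */
static double
fnmsub_form (double m1, double m2, double a)
{
  return -fma (m1, m2, -a);
}

/* E.g. m1 = 2.0, m2 = 3.0, a = 6.0: fnma_form returns +0.0 while
   fnmsub_form returns -0.0.  */
#endif
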
34678 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34679 add a reg_note saying that this was a division. Support both scalar and
34680 vector divide. Assumes no trapping math and finite arguments. */
34681
34682 void
34683 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34684 {
34685 machine_mode mode = GET_MODE (dst);
34686 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34687 int i;
34688
34689 /* Low precision estimates guarantee 5 bits of accuracy. High
34690 precision estimates guarantee 14 bits of accuracy. SFmode
34691 requires 23 bits of accuracy. DFmode requires 52 bits of
34692 accuracy. Each pass at least doubles the accuracy, leading
34693 to the following. */
34694 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34695 if (mode == DFmode || mode == V2DFmode)
34696 passes++;
34697
34698 enum insn_code code = optab_handler (smul_optab, mode);
34699 insn_gen_fn gen_mul = GEN_FCN (code);
34700
34701 gcc_assert (code != CODE_FOR_nothing);
34702
34703 one = rs6000_load_constant_and_splat (mode, dconst1);
34704
34705 /* x0 = 1./d estimate */
34706 x0 = gen_reg_rtx (mode);
34707 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34708 UNSPEC_FRES)));
34709
34710 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34711 if (passes > 1) {
34712
34713 /* e0 = 1. - d * x0 */
34714 e0 = gen_reg_rtx (mode);
34715 rs6000_emit_nmsub (e0, d, x0, one);
34716
34717 /* x1 = x0 + e0 * x0 */
34718 x1 = gen_reg_rtx (mode);
34719 rs6000_emit_madd (x1, e0, x0, x0);
34720
34721 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34722 ++i, xprev = xnext, eprev = enext) {
34723
34724 /* enext = eprev * eprev */
34725 enext = gen_reg_rtx (mode);
34726 emit_insn (gen_mul (enext, eprev, eprev));
34727
34728 /* xnext = xprev + enext * xprev */
34729 xnext = gen_reg_rtx (mode);
34730 rs6000_emit_madd (xnext, enext, xprev, xprev);
34731 }
34732
34733 } else
34734 xprev = x0;
34735
34736 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34737
34738 /* u = n * xprev */
34739 u = gen_reg_rtx (mode);
34740 emit_insn (gen_mul (u, n, xprev));
34741
34742 /* v = n - (d * u) */
34743 v = gen_reg_rtx (mode);
34744 rs6000_emit_nmsub (v, d, u, n);
34745
34746 /* dst = (v * xprev) + u */
34747 rs6000_emit_madd (dst, v, xprev, u);
34748
34749 if (note_p)
34750 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34751 }
34752
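/* Editorial aside, not part of the original source: a scalar C model of
   the sequence rs6000_emit_swdiv emits for the high-precision two-pass
   (DFmode) case.  1.0/d stands in for the fre estimate; assumes C99
   fma() from <math.h>.  With a 14-bit estimate, x1 carries roughly 28
   good bits and the final refined quotient roughly 56, covering
   DFmode's 52.  */
#if 0
static double
swdiv_sketch (double n, double d)
{
  double x0 = 1.0 / d;            /* stands in for the fre estimate */
  double e0 = fma (-d, x0, 1.0);  /* e0 = 1 - d*x0 (fnmsub) */
  double x1 = fma (e0, x0, x0);   /* x1 = x0 + e0*x0 (fmadd) */
  double u = n * x1;              /* u = n * xprev */
  double v = fma (-d, u, n);      /* v = n - d*u (fnmsub) */
  return fma (v, x1, u);          /* dst = u + v*xprev (fmadd) */
}
#endif
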
34753 /* Goldschmidt's Algorithm for single/double-precision floating point
34754 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34755
34756 void
34757 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34758 {
34759 machine_mode mode = GET_MODE (src);
34760 rtx e = gen_reg_rtx (mode);
34761 rtx g = gen_reg_rtx (mode);
34762 rtx h = gen_reg_rtx (mode);
34763
34764 /* Low precision estimates guarantee 5 bits of accuracy. High
34765 precision estimates guarantee 14 bits of accuracy. SFmode
34766 requires 23 bits of accuracy. DFmode requires 52 bits of
34767 accuracy. Each pass at least doubles the accuracy, leading
34768 to the following. */
34769 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34770 if (mode == DFmode || mode == V2DFmode)
34771 passes++;
34772
34773 int i;
34774 rtx mhalf;
34775 enum insn_code code = optab_handler (smul_optab, mode);
34776 insn_gen_fn gen_mul = GEN_FCN (code);
34777
34778 gcc_assert (code != CODE_FOR_nothing);
34779
34780 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34781
34782 /* e = rsqrt estimate */
34783 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34784 UNSPEC_RSQRT)));
34785
34786 /* If src == 0.0, replace the infinite rsqrt estimate with zero so that sqrt(0.0) does not produce a NaN. */
34787 if (!recip)
34788 {
34789 rtx zero = force_reg (mode, CONST0_RTX (mode));
34790
34791 if (mode == SFmode)
34792 {
34793 rtx target = emit_conditional_move (e, GT, src, zero, mode,
34794 e, zero, mode, 0);
34795 if (target != e)
34796 emit_move_insn (e, target);
34797 }
34798 else
34799 {
34800 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
34801 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
34802 }
34803 }
34804
34805 /* g = sqrt estimate. */
34806 emit_insn (gen_mul (g, e, src));
34807 /* h = 1/(2*sqrt) estimate. */
34808 emit_insn (gen_mul (h, e, mhalf));
34809
34810 if (recip)
34811 {
34812 if (passes == 1)
34813 {
34814 rtx t = gen_reg_rtx (mode);
34815 rs6000_emit_nmsub (t, g, h, mhalf);
34816 /* Apply the correction directly to the rsqrt (1/sqrt) estimate e. */
34817 rs6000_emit_madd (dst, e, t, e);
34818 }
34819 else
34820 {
34821 for (i = 0; i < passes; i++)
34822 {
34823 rtx t1 = gen_reg_rtx (mode);
34824 rtx g1 = gen_reg_rtx (mode);
34825 rtx h1 = gen_reg_rtx (mode);
34826
34827 rs6000_emit_nmsub (t1, g, h, mhalf);
34828 rs6000_emit_madd (g1, g, t1, g);
34829 rs6000_emit_madd (h1, h, t1, h);
34830
34831 g = g1;
34832 h = h1;
34833 }
34834 /* h approximates 1/(2*sqrt), so double it to form the 1/sqrt result. */
34835 emit_insn (gen_add3_insn (dst, h, h));
34836 }
34837 }
34838 else
34839 {
34840 rtx t = gen_reg_rtx (mode);
34841 rs6000_emit_nmsub (t, g, h, mhalf);
34842 rs6000_emit_madd (dst, g, t, g);
34843 }
34844
34845 return;
34846 }
34847
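/* Editorial aside, not part of the original source: one Goldschmidt
   iteration of the algorithm above, as scalar C.  1.0/sqrt(src) stands
   in for the frsqrte estimate; assumes C99 fma() and sqrt() from
   <math.h>.  The iteration refines g (toward sqrt) and h (toward
   1/(2*sqrt)) together.  */
#if 0
static double
swsqrt_sketch (double src, int recip)
{
  double e = 1.0 / sqrt (src);   /* stands in for the frsqrte estimate */
  double g = src * e;            /* g ~= sqrt(src) */
  double h = 0.5 * e;            /* h ~= 1/(2*sqrt(src)) */

  /* One iteration: t -> 0 as g*h -> 1/2.  */
  double t = fma (-g, h, 0.5);   /* t = 0.5 - g*h (fnmsub) */
  double g1 = fma (g, t, g);     /* g1 = g + g*t */
  double h1 = fma (h, t, h);     /* h1 = h + h*t */

  return recip ? h1 + h1 : g1;   /* 2*h gives 1/sqrt; g gives sqrt */
}
#endif
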
34848 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
34849 (Power7) targets. DST is the target, and SRC is the argument operand. */
34850
34851 void
34852 rs6000_emit_popcount (rtx dst, rtx src)
34853 {
34854 machine_mode mode = GET_MODE (dst);
34855 rtx tmp1, tmp2;
34856
34857 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
34858 if (TARGET_POPCNTD)
34859 {
34860 if (mode == SImode)
34861 emit_insn (gen_popcntdsi2 (dst, src));
34862 else
34863 emit_insn (gen_popcntddi2 (dst, src));
34864 return;
34865 }
34866
34867 tmp1 = gen_reg_rtx (mode);
34868
34869 if (mode == SImode)
34870 {
34871 emit_insn (gen_popcntbsi2 (tmp1, src));
34872 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
34873 NULL_RTX, 0);
34874 tmp2 = force_reg (SImode, tmp2);
34875 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
34876 }
34877 else
34878 {
34879 emit_insn (gen_popcntbdi2 (tmp1, src));
34880 tmp2 = expand_mult (DImode, tmp1,
34881 GEN_INT ((HOST_WIDE_INT)
34882 0x01010101 << 32 | 0x01010101),
34883 NULL_RTX, 0);
34884 tmp2 = force_reg (DImode, tmp2);
34885 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
34886 }
34887 }
34888
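/* Editorial aside, not part of the original source: why the multiply
   works.  popcntb leaves each byte holding its own bit count;
   multiplying by 0x01010101 sums all four byte counts into the most
   significant byte (the total is at most 32, so no byte overflows), and
   the shift by 24 extracts it.  A portable sketch, assuming uint32_t
   from <stdint.h>; the loop stands in for popcntb.  */
#if 0
static unsigned int
popcount32_sketch (uint32_t x)
{
  uint32_t b = 0;
  for (int i = 0; i < 4; i++)
    {
      uint32_t byte = (x >> (8 * i)) & 0xff;
      byte = byte - ((byte >> 1) & 0x55);            /* 2-bit sums */
      byte = (byte & 0x33) + ((byte >> 2) & 0x33);   /* 4-bit sums */
      byte = (byte + (byte >> 4)) & 0x0f;            /* per-byte count */
      b |= byte << (8 * i);
    }
  return (b * 0x01010101u) >> 24;  /* sum the bytes, take the top byte */
}
#endif
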
34889
34890 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
34891 target, and SRC is the argument operand. */
34892
34893 void
34894 rs6000_emit_parity (rtx dst, rtx src)
34895 {
34896 machine_mode mode = GET_MODE (dst);
34897 rtx tmp;
34898
34899 tmp = gen_reg_rtx (mode);
34900
34901 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
34902 if (TARGET_CMPB)
34903 {
34904 if (mode == SImode)
34905 {
34906 emit_insn (gen_popcntbsi2 (tmp, src));
34907 emit_insn (gen_paritysi2_cmpb (dst, tmp));
34908 }
34909 else
34910 {
34911 emit_insn (gen_popcntbdi2 (tmp, src));
34912 emit_insn (gen_paritydi2_cmpb (dst, tmp));
34913 }
34914 return;
34915 }
34916
34917 if (mode == SImode)
34918 {
34919 /* Is mult+shift >= shift+xor+shift+xor? */
34920 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
34921 {
34922 rtx tmp1, tmp2, tmp3, tmp4;
34923
34924 tmp1 = gen_reg_rtx (SImode);
34925 emit_insn (gen_popcntbsi2 (tmp1, src));
34926
34927 tmp2 = gen_reg_rtx (SImode);
34928 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
34929 tmp3 = gen_reg_rtx (SImode);
34930 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
34931
34932 tmp4 = gen_reg_rtx (SImode);
34933 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
34934 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
34935 }
34936 else
34937 rs6000_emit_popcount (tmp, src);
34938 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
34939 }
34940 else
34941 {
34942 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
34943 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
34944 {
34945 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
34946
34947 tmp1 = gen_reg_rtx (DImode);
34948 emit_insn (gen_popcntbdi2 (tmp1, src));
34949
34950 tmp2 = gen_reg_rtx (DImode);
34951 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
34952 tmp3 = gen_reg_rtx (DImode);
34953 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
34954
34955 tmp4 = gen_reg_rtx (DImode);
34956 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
34957 tmp5 = gen_reg_rtx (DImode);
34958 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
34959
34960 tmp6 = gen_reg_rtx (DImode);
34961 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
34962 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
34963 }
34964 else
34965 rs6000_emit_popcount (tmp, src);
34966 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
34967 }
34968 }
34969
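/* Editorial aside, not part of the original source: the shift+xor path
   works because XOR preserves the low-order parity bit at each fold.
   The emitted sequence starts from popcntb's per-byte counts and folds
   by 16 and 8; a plain C equivalent without popcntb folds all the way
   down.  Assumes uint32_t from <stdint.h>.  */
#if 0
static unsigned int
parity32_sketch (uint32_t x)
{
  x ^= x >> 16;   /* fold halves together */
  x ^= x >> 8;
  x ^= x >> 4;
  x ^= x >> 2;
  x ^= x >> 1;
  return x & 1;   /* 1 iff an odd number of bits were set */
}
#endif
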
34970 /* Expand an Altivec constant permutation for little endian mode.
34971 OP0 and OP1 are the input vectors and TARGET is the output vector.
34972 SEL specifies the constant permutation vector.
34973
34974 There are two issues: First, the two input operands must be
34975 swapped so that together they form a double-wide array in LE
34976 order. Second, the vperm instruction has surprising behavior
34977 in LE mode: it interprets the elements of the source vectors
34978 in BE mode ("left to right") and interprets the elements of
34979 the destination vector in LE mode ("right to left"). To
34980 correct for this, we must subtract each element of the permute
34981 control vector from 31.
34982
34983 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
34984 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
34985 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
34986 serve as the permute control vector. Then, in BE mode,
34987
34988 vperm 9,10,11,12
34989
34990 places the desired result in vr9. However, in LE mode the
34991 vector contents will be
34992
34993 vr10 = 00000003 00000002 00000001 00000000
34994 vr11 = 00000007 00000006 00000005 00000004
34995
34996 The result of the vperm using the same permute control vector is
34997
34998 vr9 = 05000000 07000000 01000000 03000000
34999
35000 That is, the leftmost 4 bytes of vr10 are interpreted as the
35001 source for the rightmost 4 bytes of vr9, and so on.
35002
35003 If we change the permute control vector to
35004
35005 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35006
35007 and issue
35008
35009 vperm 9,11,10,12
35010
35011 we get the desired
35012
35013 vr9 = 00000006 00000004 00000002 00000000. */
35014
35015 static void
35016 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35017 const vec_perm_indices &sel)
35018 {
35019 unsigned int i;
35020 rtx perm[16];
35021 rtx constv, unspec;
35022
35023 /* Unpack and adjust the constant selector. */
35024 for (i = 0; i < 16; ++i)
35025 {
35026 unsigned int elt = 31 - (sel[i] & 31);
35027 perm[i] = GEN_INT (elt);
35028 }
35029
35030 /* Expand to a permute, swapping the inputs and using the
35031 adjusted selector. */
35032 if (!REG_P (op0))
35033 op0 = force_reg (V16QImode, op0);
35034 if (!REG_P (op1))
35035 op1 = force_reg (V16QImode, op1);
35036
35037 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35038 constv = force_reg (V16QImode, constv);
35039 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35040 UNSPEC_VPERM);
35041 if (!REG_P (target))
35042 {
35043 rtx tmp = gen_reg_rtx (V16QImode);
35044 emit_move_insn (tmp, unspec);
35045 unspec = tmp;
35046 }
35047
35048 emit_move_insn (target, unspec);
35049 }
35050
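/* Editorial aside, not part of the original source: the whole LE
   adjustment above amounts to "swap the operands, then replace each
   selector element with 31 minus it".  A sketch of just the selector
   rewrite:  */
#if 0
static void
adjust_le_selector_sketch (unsigned char sel[16])
{
  for (int i = 0; i < 16; i++)
    sel[i] = 31 - (sel[i] & 31);
}

/* Applied to {0,1,2,3,8,9,...,27} this yields {31,30,29,28,23,22,...,4},
   the control vector from the worked example above.  */
#endif
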
35051 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35052 permute control vector. But here it's not a constant, so we must
35053 generate a vector NAND or NOR to do the adjustment. */
35054
35055 void
35056 altivec_expand_vec_perm_le (rtx operands[4])
35057 {
35058 rtx notx, iorx, unspec;
35059 rtx target = operands[0];
35060 rtx op0 = operands[1];
35061 rtx op1 = operands[2];
35062 rtx sel = operands[3];
35063 rtx tmp = target;
35064 rtx norreg = gen_reg_rtx (V16QImode);
35065 machine_mode mode = GET_MODE (target);
35066
35067 /* Get everything in regs so the pattern matches. */
35068 if (!REG_P (op0))
35069 op0 = force_reg (mode, op0);
35070 if (!REG_P (op1))
35071 op1 = force_reg (mode, op1);
35072 if (!REG_P (sel))
35073 sel = force_reg (V16QImode, sel);
35074 if (!REG_P (target))
35075 tmp = gen_reg_rtx (mode);
35076
35077 if (TARGET_P9_VECTOR)
35078 {
35079 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35080 UNSPEC_VPERMR);
35081 }
35082 else
35083 {
35084 /* Invert the selector with a VNAND if available, else a VNOR.
35085 The VNAND is preferred for future fusion opportunities. */
35086 notx = gen_rtx_NOT (V16QImode, sel);
35087 iorx = (TARGET_P8_VECTOR
35088 ? gen_rtx_IOR (V16QImode, notx, notx)
35089 : gen_rtx_AND (V16QImode, notx, notx));
35090 emit_insn (gen_rtx_SET (norreg, iorx));
35091
35092 /* Permute with operands reversed and adjusted selector. */
35093 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35094 UNSPEC_VPERM);
35095 }
35096
35097 /* Copy into target, possibly by way of a register. */
35098 if (!REG_P (target))
35099 {
35100 emit_move_insn (tmp, unspec);
35101 unspec = tmp;
35102 }
35103
35104 emit_move_insn (target, unspec);
35105 }
35106
35107 /* Expand an Altivec constant permutation. Return true if we match
35108 an efficient implementation; false to fall back to VPERM.
35109
35110 OP0 and OP1 are the input vectors and TARGET is the output vector.
35111 SEL specifies the constant permutation vector. */
35112
35113 static bool
35114 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35115 const vec_perm_indices &sel)
35116 {
35117 struct altivec_perm_insn {
35118 HOST_WIDE_INT mask;
35119 enum insn_code impl;
35120 unsigned char perm[16];
35121 };
35122 static const struct altivec_perm_insn patterns[] = {
35123 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35124 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35125 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35126 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35127 { OPTION_MASK_ALTIVEC,
35128 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35129 : CODE_FOR_altivec_vmrglb_direct),
35130 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35131 { OPTION_MASK_ALTIVEC,
35132 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35133 : CODE_FOR_altivec_vmrglh_direct),
35134 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35135 { OPTION_MASK_ALTIVEC,
35136 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35137 : CODE_FOR_altivec_vmrglw_direct),
35138 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35139 { OPTION_MASK_ALTIVEC,
35140 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35141 : CODE_FOR_altivec_vmrghb_direct),
35142 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35143 { OPTION_MASK_ALTIVEC,
35144 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35145 : CODE_FOR_altivec_vmrghh_direct),
35146 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35147 { OPTION_MASK_ALTIVEC,
35148 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35149 : CODE_FOR_altivec_vmrghw_direct),
35150 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35151 { OPTION_MASK_P8_VECTOR,
35152 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35153 : CODE_FOR_p8_vmrgow_v4sf_direct),
35154 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35155 { OPTION_MASK_P8_VECTOR,
35156 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35157 : CODE_FOR_p8_vmrgew_v4sf_direct),
35158 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35159 };
35160
35161 unsigned int i, j, elt, which;
35162 unsigned char perm[16];
35163 rtx x;
35164 bool one_vec;
35165
35166 /* Unpack the constant selector. */
35167 for (i = which = 0; i < 16; ++i)
35168 {
35169 elt = sel[i] & 31;
35170 which |= (elt < 16 ? 1 : 2);
35171 perm[i] = elt;
35172 }
35173
35174 /* Simplify the constant selector based on operands. */
35175 switch (which)
35176 {
35177 default:
35178 gcc_unreachable ();
35179
35180 case 3:
35181 one_vec = false;
35182 if (!rtx_equal_p (op0, op1))
35183 break;
35184 /* FALLTHRU */
35185
35186 case 2:
35187 for (i = 0; i < 16; ++i)
35188 perm[i] &= 15;
35189 op0 = op1;
35190 one_vec = true;
35191 break;
35192
35193 case 1:
35194 op1 = op0;
35195 one_vec = true;
35196 break;
35197 }
35198
35199 /* Look for splat patterns. */
35200 if (one_vec)
35201 {
35202 elt = perm[0];
35203
35204 for (i = 0; i < 16; ++i)
35205 if (perm[i] != elt)
35206 break;
35207 if (i == 16)
35208 {
35209 if (!BYTES_BIG_ENDIAN)
35210 elt = 15 - elt;
35211 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35212 return true;
35213 }
35214
35215 if (elt % 2 == 0)
35216 {
35217 for (i = 0; i < 16; i += 2)
35218 if (perm[i] != elt || perm[i + 1] != elt + 1)
35219 break;
35220 if (i == 16)
35221 {
35222 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35223 x = gen_reg_rtx (V8HImode);
35224 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35225 GEN_INT (field)));
35226 emit_move_insn (target, gen_lowpart (V16QImode, x));
35227 return true;
35228 }
35229 }
35230
35231 if (elt % 4 == 0)
35232 {
35233 for (i = 0; i < 16; i += 4)
35234 if (perm[i] != elt
35235 || perm[i + 1] != elt + 1
35236 || perm[i + 2] != elt + 2
35237 || perm[i + 3] != elt + 3)
35238 break;
35239 if (i == 16)
35240 {
35241 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35242 x = gen_reg_rtx (V4SImode);
35243 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35244 GEN_INT (field)));
35245 emit_move_insn (target, gen_lowpart (V16QImode, x));
35246 return true;
35247 }
35248 }
35249 }
35250
35251 /* Look for merge and pack patterns. */
35252 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35253 {
35254 bool swapped;
35255
35256 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35257 continue;
35258
35259 elt = patterns[j].perm[0];
35260 if (perm[0] == elt)
35261 swapped = false;
35262 else if (perm[0] == elt + 16)
35263 swapped = true;
35264 else
35265 continue;
35266 for (i = 1; i < 16; ++i)
35267 {
35268 elt = patterns[j].perm[i];
35269 if (swapped)
35270 elt = (elt >= 16 ? elt - 16 : elt + 16);
35271 else if (one_vec && elt >= 16)
35272 elt -= 16;
35273 if (perm[i] != elt)
35274 break;
35275 }
35276 if (i == 16)
35277 {
35278 enum insn_code icode = patterns[j].impl;
35279 machine_mode omode = insn_data[icode].operand[0].mode;
35280 machine_mode imode = insn_data[icode].operand[1].mode;
35281
35282 /* For little-endian, don't use vpkuwum and vpkuhum if the
35283 underlying vector type is not V4SI and V8HI, respectively.
35284 For example, using vpkuwum with a V8HI picks up the even
35285 halfwords in BE numbering, when what we need are the even
35286 halfwords in LE numbering. */
35287 if (!BYTES_BIG_ENDIAN
35288 && icode == CODE_FOR_altivec_vpkuwum_direct
35289 && ((GET_CODE (op0) == REG
35290 && GET_MODE (op0) != V4SImode)
35291 || (GET_CODE (op0) == SUBREG
35292 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35293 continue;
35294 if (!BYTES_BIG_ENDIAN
35295 && icode == CODE_FOR_altivec_vpkuhum_direct
35296 && ((GET_CODE (op0) == REG
35297 && GET_MODE (op0) != V8HImode)
35298 || (GET_CODE (op0) == SUBREG
35299 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35300 continue;
35301
35302 /* For little-endian, the two input operands must be swapped
35303 (or swapped back) to ensure proper right-to-left numbering
35304 from 0 to 2N-1. */
35305 if (swapped ^ !BYTES_BIG_ENDIAN)
35306 std::swap (op0, op1);
35307 if (imode != V16QImode)
35308 {
35309 op0 = gen_lowpart (imode, op0);
35310 op1 = gen_lowpart (imode, op1);
35311 }
35312 if (omode == V16QImode)
35313 x = target;
35314 else
35315 x = gen_reg_rtx (omode);
35316 emit_insn (GEN_FCN (icode) (x, op0, op1));
35317 if (omode != V16QImode)
35318 emit_move_insn (target, gen_lowpart (V16QImode, x));
35319 return true;
35320 }
35321 }
35322
35323 if (!BYTES_BIG_ENDIAN)
35324 {
35325 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35326 return true;
35327 }
35328
35329 return false;
35330 }
35331
35332 /* Expand a VSX Permute Doubleword constant permutation.
35333 Return true if we match an efficient implementation. */
35334
35335 static bool
35336 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35337 unsigned char perm0, unsigned char perm1)
35338 {
35339 rtx x;
35340
35341 /* If both selectors come from the same operand, fold to single op. */
35342 if ((perm0 & 2) == (perm1 & 2))
35343 {
35344 if (perm0 & 2)
35345 op0 = op1;
35346 else
35347 op1 = op0;
35348 }
35349 /* If both operands are equal, fold to simpler permutation. */
35350 if (rtx_equal_p (op0, op1))
35351 {
35352 perm0 = perm0 & 1;
35353 perm1 = (perm1 & 1) + 2;
35354 }
35355 /* If the first selector comes from the second operand, swap. */
35356 else if (perm0 & 2)
35357 {
35358 if (perm1 & 2)
35359 return false;
35360 perm0 -= 2;
35361 perm1 += 2;
35362 std::swap (op0, op1);
35363 }
35364 /* If the second selector does not come from the second operand, fail. */
35365 else if ((perm1 & 2) == 0)
35366 return false;
35367
35368 /* Success! */
35369 if (target != NULL)
35370 {
35371 machine_mode vmode, dmode;
35372 rtvec v;
35373
35374 vmode = GET_MODE (target);
35375 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35376 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35377 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35378 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35379 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35380 emit_insn (gen_rtx_SET (target, x));
35381 }
35382 return true;
35383 }
35384
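/* Editorial aside, not part of the original source: a scalar model of
   the doubleword permute.  After the folding above, perm0 and perm1
   each pick one of the four doublewords of the concatenation
   {op0, op1} (0-1 from op0, 2-3 from op1), which is what the generated
   VEC_CONCAT/VEC_SELECT (xxpermdi) expresses.  */
#if 0
struct v2di_sketch { long long dw[2]; };

static struct v2di_sketch
permdi_sketch (struct v2di_sketch op0, struct v2di_sketch op1,
               unsigned char perm0, unsigned char perm1)
{
  long long cat[4] = { op0.dw[0], op0.dw[1], op1.dw[0], op1.dw[1] };
  struct v2di_sketch r = { { cat[perm0 & 3], cat[perm1 & 3] } };
  return r;
}
#endif
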
35385 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35386
35387 static bool
35388 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35389 rtx op1, const vec_perm_indices &sel)
35390 {
35391 bool testing_p = !target;
35392
35393 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35394 if (TARGET_ALTIVEC && testing_p)
35395 return true;
35396
35397 /* Check for ps_merge* or xxpermdi insns. */
35398 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35399 {
35400 if (testing_p)
35401 {
35402 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35403 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35404 }
35405 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35406 return true;
35407 }
35408
35409 if (TARGET_ALTIVEC)
35410 {
35411 /* Force the target-independent code to lower to V16QImode. */
35412 if (vmode != V16QImode)
35413 return false;
35414 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35415 return true;
35416 }
35417
35418 return false;
35419 }
35420
35421 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35422 OP0 and OP1 are the input vectors and TARGET is the output vector.
35423 PERM specifies the constant permutation vector. */
35424
35425 static void
35426 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35427 machine_mode vmode, const vec_perm_builder &perm)
35428 {
35429 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35430 if (x != target)
35431 emit_move_insn (target, x);
35432 }
35433
35434 /* Expand an extract even operation. */
35435
35436 void
35437 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35438 {
35439 machine_mode vmode = GET_MODE (target);
35440 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35441 vec_perm_builder perm (nelt, nelt, 1);
35442
35443 for (i = 0; i < nelt; i++)
35444 perm.quick_push (i * 2);
35445
35446 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35447 }
35448
35449 /* Expand a vector interleave operation. */
35450
35451 void
35452 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35453 {
35454 machine_mode vmode = GET_MODE (target);
35455 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35456 vec_perm_builder perm (nelt, nelt, 1);
35457
35458 high = (highp ? 0 : nelt / 2);
35459 for (i = 0; i < nelt / 2; i++)
35460 {
35461 perm.quick_push (i + high);
35462 perm.quick_push (i + nelt + high);
35463 }
35464
35465 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35466 }
35467
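/* Editorial aside, not part of the original source: the selector built
   by rs6000_expand_interleave, spelled out.  For nelt == 4, highp
   selects the low-numbered (BE "high") half: highp gives {0,4,1,5} and
   !highp gives {2,6,3,7}.  A sketch of the construction:  */
#if 0
static void
build_interleave_sel_sketch (unsigned sel[], unsigned nelt, int highp)
{
  unsigned high = highp ? 0 : nelt / 2;
  for (unsigned i = 0; i < nelt / 2; i++)
    {
      sel[2 * i] = i + high;             /* element of op0 */
      sel[2 * i + 1] = i + nelt + high;  /* matching element of op1 */
    }
}
#endif
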
35468 /* Scale a V2DF vector SRC by two to the power SCALE and place the result in TGT. */
35469 void
35470 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35471 {
35472 HOST_WIDE_INT hwi_scale (scale);
35473 REAL_VALUE_TYPE r_pow;
35474 rtvec v = rtvec_alloc (2);
35475 rtx elt;
35476 rtx scale_vec = gen_reg_rtx (V2DFmode);
35477 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35478 elt = const_double_from_real_value (r_pow, DFmode);
35479 RTVEC_ELT (v, 0) = elt;
35480 RTVEC_ELT (v, 1) = elt;
35481 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35482 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35483 }
35484
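/* Editorial aside, not part of the original source: a scalar model of
   the scaling above.  Because the splatted constant is an exact power
   of two, the multiply only adjusts exponents.  Assumes ldexp() from
   <math.h>.  */
#if 0
static void
scale_v2df_sketch (double tgt[2], const double src[2], int scale)
{
  double factor = ldexp (1.0, scale);  /* 2**scale, exact */
  tgt[0] = src[0] * factor;
  tgt[1] = src[1] * factor;
}
#endif
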
35485 /* Return an RTX representing where to find the function value of a
35486 function returning MODE. */
35487 static rtx
35488 rs6000_complex_function_value (machine_mode mode)
35489 {
35490 unsigned int regno;
35491 rtx r1, r2;
35492 machine_mode inner = GET_MODE_INNER (mode);
35493 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35494
35495 if (TARGET_FLOAT128_TYPE
35496 && (mode == KCmode
35497 || (mode == TCmode && TARGET_IEEEQUAD)))
35498 regno = ALTIVEC_ARG_RETURN;
35499
35500 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35501 regno = FP_ARG_RETURN;
35502
35503 else
35504 {
35505 regno = GP_ARG_RETURN;
35506
35507 /* 32-bit is OK since it'll go in r3/r4. */
35508 if (TARGET_32BIT && inner_bytes >= 4)
35509 return gen_rtx_REG (mode, regno);
35510 }
35511
35512 if (inner_bytes >= 8)
35513 return gen_rtx_REG (mode, regno);
35514
35515 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35516 const0_rtx);
35517 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35518 GEN_INT (inner_bytes));
35519 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35520 }
35521
35522 /* Return an rtx describing a return value of MODE as a PARALLEL
35523 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35524 stride REG_STRIDE. */
35525
35526 static rtx
35527 rs6000_parallel_return (machine_mode mode,
35528 int n_elts, machine_mode elt_mode,
35529 unsigned int regno, unsigned int reg_stride)
35530 {
35531 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35532
35533 int i;
35534 for (i = 0; i < n_elts; i++)
35535 {
35536 rtx r = gen_rtx_REG (elt_mode, regno);
35537 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35538 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35539 regno += reg_stride;
35540 }
35541
35542 return par;
35543 }
35544
35545 /* Target hook for TARGET_FUNCTION_VALUE.
35546
35547 An integer value is in r3 and a floating-point value is in fp1,
35548 unless -msoft-float. */
35549
35550 static rtx
35551 rs6000_function_value (const_tree valtype,
35552 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35553 bool outgoing ATTRIBUTE_UNUSED)
35554 {
35555 machine_mode mode;
35556 unsigned int regno;
35557 machine_mode elt_mode;
35558 int n_elts;
35559
35560 /* Special handling for structs in darwin64. */
35561 if (TARGET_MACHO
35562 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35563 {
35564 CUMULATIVE_ARGS valcum;
35565 rtx valret;
35566
35567 valcum.words = 0;
35568 valcum.fregno = FP_ARG_MIN_REG;
35569 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35570 /* Do a trial code generation as if this were going to be passed as
35571 an argument; if any part goes in memory, we return NULL. */
35572 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35573 if (valret)
35574 return valret;
35575 /* Otherwise fall through to standard ABI rules. */
35576 }
35577
35578 mode = TYPE_MODE (valtype);
35579
35580 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35581 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35582 {
35583 int first_reg, n_regs;
35584
35585 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35586 {
35587 /* _Decimal128 must use even/odd register pairs. */
35588 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35589 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35590 }
35591 else
35592 {
35593 first_reg = ALTIVEC_ARG_RETURN;
35594 n_regs = 1;
35595 }
35596
35597 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35598 }
35599
35600 /* Some return value types need to be split in the 32-bit ABI with -mpowerpc64. */
35601 if (TARGET_32BIT && TARGET_POWERPC64)
35602 switch (mode)
35603 {
35604 default:
35605 break;
35606 case E_DImode:
35607 case E_SCmode:
35608 case E_DCmode:
35609 case E_TCmode:
35610 int count = GET_MODE_SIZE (mode) / 4;
35611 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35612 }
35613
35614 if ((INTEGRAL_TYPE_P (valtype)
35615 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35616 || POINTER_TYPE_P (valtype))
35617 mode = TARGET_32BIT ? SImode : DImode;
35618
35619 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35620 /* _Decimal128 must use an even/odd register pair. */
35621 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35622 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35623 && !FLOAT128_VECTOR_P (mode))
35624 regno = FP_ARG_RETURN;
35625 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35626 && targetm.calls.split_complex_arg)
35627 return rs6000_complex_function_value (mode);
35628 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35629 return register is used in both cases, and we won't see V2DImode/V2DFmode
35630 for pure altivec, combine the two cases. */
35631 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35632 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35633 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35634 regno = ALTIVEC_ARG_RETURN;
35635 else
35636 regno = GP_ARG_RETURN;
35637
35638 return gen_rtx_REG (mode, regno);
35639 }
35640
35641 /* Define how to find the value returned by a library function
35642 assuming the value has mode MODE. */
35643 rtx
35644 rs6000_libcall_value (machine_mode mode)
35645 {
35646 unsigned int regno;
35647
35648 /* Long long return values need to be split in the 32-bit ABI with -mpowerpc64. */
35649 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35650 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35651
35652 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35653 /* _Decimal128 must use an even/odd register pair. */
35654 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35655 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35656 regno = FP_ARG_RETURN;
35657 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35658 return register is used in both cases, and we won't see V2DImode/V2DFmode
35659 for pure altivec, combine the two cases. */
35660 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35661 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35662 regno = ALTIVEC_ARG_RETURN;
35663 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35664 return rs6000_complex_function_value (mode);
35665 else
35666 regno = GP_ARG_RETURN;
35667
35668 return gen_rtx_REG (mode, regno);
35669 }
35670
35671 /* Compute register pressure classes. We implement the target hook to avoid
35672 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
35673 lead to incorrect estimates of the number of available registers and
35674 therefore increased register pressure and spilling. */
35675 static int
35676 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35677 {
35678 int n;
35679
35680 n = 0;
35681 pressure_classes[n++] = GENERAL_REGS;
35682 if (TARGET_VSX)
35683 pressure_classes[n++] = VSX_REGS;
35684 else
35685 {
35686 if (TARGET_ALTIVEC)
35687 pressure_classes[n++] = ALTIVEC_REGS;
35688 if (TARGET_HARD_FLOAT)
35689 pressure_classes[n++] = FLOAT_REGS;
35690 }
35691 pressure_classes[n++] = CR_REGS;
35692 pressure_classes[n++] = SPECIAL_REGS;
35693
35694 return n;
35695 }
35696
35697 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35698 Frame pointer elimination is automatically handled.
35699
35700 For the RS/6000, if frame pointer elimination is being done, we would like
35701 to convert ap into fp, not sp.
35702
35703 We need r30 if -mminimal-toc was specified, and there are constant pool
35704 references. */
35705
35706 static bool
35707 rs6000_can_eliminate (const int from, const int to)
35708 {
35709 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35710 ? ! frame_pointer_needed
35711 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35712 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35713 || constant_pool_empty_p ()
35714 : true);
35715 }
35716
35717 /* Define the offset between two registers, FROM to be eliminated and its
35718 replacement TO, at the start of a routine. */
35719 HOST_WIDE_INT
35720 rs6000_initial_elimination_offset (int from, int to)
35721 {
35722 rs6000_stack_t *info = rs6000_stack_info ();
35723 HOST_WIDE_INT offset;
35724
35725 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35726 offset = info->push_p ? 0 : -info->total_size;
35727 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35728 {
35729 offset = info->push_p ? 0 : -info->total_size;
35730 if (FRAME_GROWS_DOWNWARD)
35731 offset += info->fixed_size + info->vars_size + info->parm_size;
35732 }
35733 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35734 offset = FRAME_GROWS_DOWNWARD
35735 ? info->fixed_size + info->vars_size + info->parm_size
35736 : 0;
35737 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35738 offset = info->total_size;
35739 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35740 offset = info->push_p ? info->total_size : 0;
35741 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35742 offset = 0;
35743 else
35744 gcc_unreachable ();
35745
35746 return offset;
35747 }
35748
35749 /* Fill in the sizes of the registers used by the unwinder. */
35750
35751 static void
35752 rs6000_init_dwarf_reg_sizes_extra (tree address)
35753 {
35754 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35755 {
35756 int i;
35757 machine_mode mode = TYPE_MODE (char_type_node);
35758 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35759 rtx mem = gen_rtx_MEM (BLKmode, addr);
35760 rtx value = gen_int_mode (16, mode);
35761
35762 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35763 The unwinder still needs to know the size of Altivec registers. */
35764
35765 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35766 {
35767 int column = DWARF_REG_TO_UNWIND_COLUMN
35768 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35769 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35770
35771 emit_move_insn (adjust_address (mem, mode, offset), value);
35772 }
35773 }
35774 }
35775
35776 /* Map internal gcc register numbers to debug format register numbers.
35777 FORMAT specifies the type of debug register number to use:
35778 0 -- debug information, except for frame-related sections
35779 1 -- DWARF .debug_frame section
35780 2 -- DWARF .eh_frame section */
35781
35782 unsigned int
35783 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35784 {
35785 /* Except for the above, we use the internal number for non-DWARF
35786 debug information, and also for .eh_frame. */
35787 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
35788 return regno;
35789
35790 /* On some platforms, we use the standard DWARF register
35791 numbering for .debug_info and .debug_frame. */
35792 #ifdef RS6000_USE_DWARF_NUMBERING
35793 if (regno <= 63)
35794 return regno;
35795 if (regno == LR_REGNO)
35796 return 108;
35797 if (regno == CTR_REGNO)
35798 return 109;
35799 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
35800 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
35801 The actual code emitted saves the whole of CR, so we map CR2_REGNO
35802 to the DWARF reg for CR. */
35803 if (format == 1 && regno == CR2_REGNO)
35804 return 64;
35805 if (CR_REGNO_P (regno))
35806 return regno - CR0_REGNO + 86;
35807 if (regno == CA_REGNO)
35808 return 101; /* XER */
35809 if (ALTIVEC_REGNO_P (regno))
35810 return regno - FIRST_ALTIVEC_REGNO + 1124;
35811 if (regno == VRSAVE_REGNO)
35812 return 356;
35813 if (regno == VSCR_REGNO)
35814 return 67;
35815 #endif
35816 return regno;
35817 }
35818
35819 /* Target hook for eh_return_filter_mode. */
35820 static scalar_int_mode
35821 rs6000_eh_return_filter_mode (void)
35822 {
35823 return TARGET_32BIT ? SImode : word_mode;
35824 }
35825
35826 /* Target hook for translate_mode_attribute. */
35827 static machine_mode
35828 rs6000_translate_mode_attribute (machine_mode mode)
35829 {
35830 if ((FLOAT128_IEEE_P (mode)
35831 && ieee128_float_type_node == long_double_type_node)
35832 || (FLOAT128_IBM_P (mode)
35833 && ibm128_float_type_node == long_double_type_node))
35834 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
35835 return mode;
35836 }
35837
35838 /* Target hook for scalar_mode_supported_p. */
35839 static bool
35840 rs6000_scalar_mode_supported_p (scalar_mode mode)
35841 {
35842 /* -m32 does not support TImode. This is the default, from
35843 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
35844 same ABI as for -m32. But default_scalar_mode_supported_p allows
35845 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
35846 for -mpowerpc64. */
35847 if (TARGET_32BIT && mode == TImode)
35848 return false;
35849
35850 if (DECIMAL_FLOAT_MODE_P (mode))
35851 return default_decimal_float_supported_p ();
35852 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
35853 return true;
35854 else
35855 return default_scalar_mode_supported_p (mode);
35856 }
35857
35858 /* Target hook for vector_mode_supported_p. */
35859 static bool
35860 rs6000_vector_mode_supported_p (machine_mode mode)
35861 {
35862 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
35863 128-bit, the compiler might try to widen IEEE 128-bit to IBM
35864 double-double. */
35865 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
35866 return true;
35867
35868 else
35869 return false;
35870 }
35871
35872 /* Target hook for floatn_mode. */
35873 static opt_scalar_float_mode
35874 rs6000_floatn_mode (int n, bool extended)
35875 {
35876 if (extended)
35877 {
35878 switch (n)
35879 {
35880 case 32:
35881 return DFmode;
35882
35883 case 64:
35884 if (TARGET_FLOAT128_TYPE)
35885 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35886 else
35887 return opt_scalar_float_mode ();
35888
35889 case 128:
35890 return opt_scalar_float_mode ();
35891
35892 default:
35893 /* Those are the only valid _FloatNx types. */
35894 gcc_unreachable ();
35895 }
35896 }
35897 else
35898 {
35899 switch (n)
35900 {
35901 case 32:
35902 return SFmode;
35903
35904 case 64:
35905 return DFmode;
35906
35907 case 128:
35908 if (TARGET_FLOAT128_TYPE)
35909 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35910 else
35911 return opt_scalar_float_mode ();
35912
35913 default:
35914 return opt_scalar_float_mode ();
35915 }
35916 }
35918 }
35919
35920 /* Target hook for c_mode_for_suffix. */
35921 static machine_mode
35922 rs6000_c_mode_for_suffix (char suffix)
35923 {
35924 if (TARGET_FLOAT128_TYPE)
35925 {
35926 if (suffix == 'q' || suffix == 'Q')
35927 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
35928
35929 /* At the moment, we are not defining a suffix for IBM extended double.
35930 If/when the default for -mabi=ieeelongdouble is changed, and we want
35931 to support __ibm128 constants in legacy library code, we may need to
35932 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
35933 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
35934 __float80 constants. */
35935 }
35936
35937 return VOIDmode;
35938 }
35939
35940 /* Target hook for invalid_arg_for_unprototyped_fn. */
35941 static const char *
35942 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
35943 {
35944 return (!rs6000_darwin64_abi
35945 && typelist == 0
35946 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
35947 && (funcdecl == NULL_TREE
35948 || (TREE_CODE (funcdecl) == FUNCTION_DECL
35949 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
35950 ? N_("AltiVec argument passed to unprototyped function")
35951 : NULL;
35952 }
35953
35954 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
35955 setup by using the hidden function __stack_chk_fail_local instead of
35956 calling __stack_chk_fail directly. Otherwise it is better to call
35957 __stack_chk_fail directly. */
35958
35959 static tree ATTRIBUTE_UNUSED
35960 rs6000_stack_protect_fail (void)
35961 {
35962 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
35963 ? default_hidden_stack_protect_fail ()
35964 : default_external_stack_protect_fail ();
35965 }
35966
35967 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
35968
35969 #if TARGET_ELF
35970 static unsigned HOST_WIDE_INT
35971 rs6000_asan_shadow_offset (void)
35972 {
35973 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
35974 }
35975 #endif
35976 \f
35977 /* Mask options that we want to support inside of attribute((target)) and
35978 #pragma GCC target operations. Note, we do not include things like
35979 64/32-bit, endianness, hard/soft floating point, etc. that would have
35980 different calling sequences. */
35981
35982 struct rs6000_opt_mask {
35983 const char *name; /* option name */
35984 HOST_WIDE_INT mask; /* mask to set */
35985 bool invert; /* invert sense of mask */
35986 bool valid_target; /* option is a target option */
35987 };
35988
35989 static struct rs6000_opt_mask const rs6000_opt_masks[] =
35990 {
35991 { "altivec", OPTION_MASK_ALTIVEC, false, true },
35992 { "cmpb", OPTION_MASK_CMPB, false, true },
35993 { "crypto", OPTION_MASK_CRYPTO, false, true },
35994 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
35995 { "dlmzb", OPTION_MASK_DLMZB, false, true },
35996 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
35997 false, true },
35998 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
35999 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36000 { "fprnd", OPTION_MASK_FPRND, false, true },
36001 { "hard-dfp", OPTION_MASK_DFP, false, true },
36002 { "htm", OPTION_MASK_HTM, false, true },
36003 { "isel", OPTION_MASK_ISEL, false, true },
36004 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36005 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36006 { "modulo", OPTION_MASK_MODULO, false, true },
36007 { "mulhw", OPTION_MASK_MULHW, false, true },
36008 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36009 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36010 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36011 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36012 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36013 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36014 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
36015 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36016 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36017 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36018 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36019 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36020 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36021 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36022 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36023 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36024 { "string", 0, false, true },
36025 { "update", OPTION_MASK_NO_UPDATE, true , true },
36026 { "vsx", OPTION_MASK_VSX, false, true },
36027 #ifdef OPTION_MASK_64BIT
36028 #if TARGET_AIX_OS
36029 { "aix64", OPTION_MASK_64BIT, false, false },
36030 { "aix32", OPTION_MASK_64BIT, true, false },
36031 #else
36032 { "64", OPTION_MASK_64BIT, false, false },
36033 { "32", OPTION_MASK_64BIT, true, false },
36034 #endif
36035 #endif
36036 #ifdef OPTION_MASK_EABI
36037 { "eabi", OPTION_MASK_EABI, false, false },
36038 #endif
36039 #ifdef OPTION_MASK_LITTLE_ENDIAN
36040 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36041 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36042 #endif
36043 #ifdef OPTION_MASK_RELOCATABLE
36044 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36045 #endif
36046 #ifdef OPTION_MASK_STRICT_ALIGN
36047 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36048 #endif
36049 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36050 { "string", 0, false, false },
36051 };
36052
36053 /* Builtin mask mapping for printing the flags. */
36054 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36055 {
36056 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36057 { "vsx", RS6000_BTM_VSX, false, false },
36058 { "fre", RS6000_BTM_FRE, false, false },
36059 { "fres", RS6000_BTM_FRES, false, false },
36060 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36061 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36062 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36063 { "cell", RS6000_BTM_CELL, false, false },
36064 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36065 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36066 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36067 { "crypto", RS6000_BTM_CRYPTO, false, false },
36068 { "htm", RS6000_BTM_HTM, false, false },
36069 { "hard-dfp", RS6000_BTM_DFP, false, false },
36070 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36071 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36072 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36073 { "float128", RS6000_BTM_FLOAT128, false, false },
36074 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36075 };
36076
36077 /* Option variables that we want to support inside attribute((target)) and
36078 #pragma GCC target operations. */
36079
36080 struct rs6000_opt_var {
36081 const char *name; /* option name */
36082 size_t global_offset; /* offset of the option in global_options. */
36083 size_t target_offset; /* offset of the option in target options. */
36084 };
36085
36086 static struct rs6000_opt_var const rs6000_opt_vars[] =
36087 {
36088 { "friz",
36089 offsetof (struct gcc_options, x_TARGET_FRIZ),
36090 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36091 { "avoid-indexed-addresses",
36092 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36093 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36094 { "longcall",
36095 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36096 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36097 { "optimize-swaps",
36098 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36099 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36100 { "allow-movmisalign",
36101 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36102 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36103 { "sched-groups",
36104 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36105 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36106 { "always-hint",
36107 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36108 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36109 { "align-branch-targets",
36110 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36111 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36112 { "tls-markers",
36113 offsetof (struct gcc_options, x_tls_markers),
36114 offsetof (struct cl_target_option, x_tls_markers), },
36115 { "sched-prolog",
36116 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36117 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36118 { "sched-epilog",
36119 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36120 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36121 { "speculate-indirect-jumps",
36122 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36123 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36124 };
36125
36126 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36127 parsing. Return true if there were no errors. */
36128
36129 static bool
36130 rs6000_inner_target_options (tree args, bool attr_p)
36131 {
36132 bool ret = true;
36133
36134 if (args == NULL_TREE)
36135 ;
36136
36137 else if (TREE_CODE (args) == STRING_CST)
36138 {
36139 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36140 char *q;
36141
36142 while ((q = strtok (p, ",")) != NULL)
36143 {
36144 bool error_p = false;
36145 bool not_valid_p = false;
36146 const char *cpu_opt = NULL;
36147
36148 p = NULL;
36149 if (strncmp (q, "cpu=", 4) == 0)
36150 {
36151 int cpu_index = rs6000_cpu_name_lookup (q+4);
36152 if (cpu_index >= 0)
36153 rs6000_cpu_index = cpu_index;
36154 else
36155 {
36156 error_p = true;
36157 cpu_opt = q+4;
36158 }
36159 }
36160 else if (strncmp (q, "tune=", 5) == 0)
36161 {
36162 int tune_index = rs6000_cpu_name_lookup (q+5);
36163 if (tune_index >= 0)
36164 rs6000_tune_index = tune_index;
36165 else
36166 {
36167 error_p = true;
36168 cpu_opt = q+5;
36169 }
36170 }
36171 else
36172 {
36173 size_t i;
36174 bool invert = false;
36175 char *r = q;
36176
36177 error_p = true;
36178 if (strncmp (r, "no-", 3) == 0)
36179 {
36180 invert = true;
36181 r += 3;
36182 }
36183
36184 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36185 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36186 {
36187 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36188
36189 if (!rs6000_opt_masks[i].valid_target)
36190 not_valid_p = true;
36191 else
36192 {
36193 error_p = false;
36194 rs6000_isa_flags_explicit |= mask;
36195
36196 /* VSX needs altivec, so -mvsx automagically sets
36197 altivec and disables -mavoid-indexed-addresses. */
36198 if (!invert)
36199 {
36200 if (mask == OPTION_MASK_VSX)
36201 {
36202 mask |= OPTION_MASK_ALTIVEC;
36203 TARGET_AVOID_XFORM = 0;
36204 }
36205 }
36206
36207 if (rs6000_opt_masks[i].invert)
36208 invert = !invert;
36209
36210 if (invert)
36211 rs6000_isa_flags &= ~mask;
36212 else
36213 rs6000_isa_flags |= mask;
36214 }
36215 break;
36216 }
36217
36218 if (error_p && !not_valid_p)
36219 {
36220 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36221 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36222 {
36223 size_t j = rs6000_opt_vars[i].global_offset;
36224 *((int *) ((char *)&global_options + j)) = !invert;
36225 error_p = false;
36226 not_valid_p = false;
36227 break;
36228 }
36229 }
36230 }
36231
36232 if (error_p)
36233 {
36234 const char *eprefix, *esuffix;
36235
36236 ret = false;
36237 if (attr_p)
36238 {
36239 eprefix = "__attribute__((__target__(";
36240 esuffix = ")))";
36241 }
36242 else
36243 {
36244 eprefix = "#pragma GCC target ";
36245 esuffix = "";
36246 }
36247
36248 if (cpu_opt)
36249 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36250 q, esuffix);
36251 else if (not_valid_p)
36252 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36253 else
36254 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36255 }
36256 }
36257 }
36258
36259 else if (TREE_CODE (args) == TREE_LIST)
36260 {
36261 do
36262 {
36263 tree value = TREE_VALUE (args);
36264 if (value)
36265 {
36266 bool ret2 = rs6000_inner_target_options (value, attr_p);
36267 if (!ret2)
36268 ret = false;
36269 }
36270 args = TREE_CHAIN (args);
36271 }
36272 while (args != NULL_TREE);
36273 }
36274
36275 else
36276 {
36277 error ("attribute %<target%> argument not a string");
36278 return false;
36279 }
36280
36281 return ret;
36282 }
36283
36284 /* Print out the target options as a list for -mdebug=target. */
36285
36286 static void
36287 rs6000_debug_target_options (tree args, const char *prefix)
36288 {
36289 if (args == NULL_TREE)
36290 fprintf (stderr, "%s<NULL>", prefix);
36291
36292 else if (TREE_CODE (args) == STRING_CST)
36293 {
36294 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36295 char *q;
36296
36297 while ((q = strtok (p, ",")) != NULL)
36298 {
36299 p = NULL;
36300 fprintf (stderr, "%s\"%s\"", prefix, q);
36301 prefix = ", ";
36302 }
36303 }
36304
36305 else if (TREE_CODE (args) == TREE_LIST)
36306 {
36307 do
36308 {
36309 tree value = TREE_VALUE (args);
36310 if (value)
36311 {
36312 rs6000_debug_target_options (value, prefix);
36313 prefix = ", ";
36314 }
36315 args = TREE_CHAIN (args);
36316 }
36317 while (args != NULL_TREE);
36318 }
36319
36320 else
36321 gcc_unreachable ();
36322
36323 return;
36324 }
36325
36326 \f
36327 /* Hook to validate attribute((target("..."))). */
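/* For example (an illustrative use, not code from this file):

     __attribute__((__target__("cpu=power9,no-vsx")))
     void foo (void);

   reaches this hook with ARGS holding the string "cpu=power9,no-vsx". */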
36328
36329 static bool
36330 rs6000_valid_attribute_p (tree fndecl,
36331 tree ARG_UNUSED (name),
36332 tree args,
36333 int flags)
36334 {
36335 struct cl_target_option cur_target;
36336 bool ret;
36337 tree old_optimize;
36338 tree new_target, new_optimize;
36339 tree func_optimize;
36340
36341 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36342
36343 if (TARGET_DEBUG_TARGET)
36344 {
36345 tree tname = DECL_NAME (fndecl);
36346 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36347 if (tname)
36348 fprintf (stderr, "function: %.*s\n",
36349 (int) IDENTIFIER_LENGTH (tname),
36350 IDENTIFIER_POINTER (tname));
36351 else
36352 fprintf (stderr, "function: unknown\n");
36353
36354 fprintf (stderr, "args:");
36355 rs6000_debug_target_options (args, " ");
36356 fprintf (stderr, "\n");
36357
36358 if (flags)
36359 fprintf (stderr, "flags: 0x%x\n", flags);
36360
36361 fprintf (stderr, "--------------------\n");
36362 }
36363
36364 /* attribute((target("default"))) does nothing, beyond
36365 affecting multi-versioning. */
36366 if (TREE_VALUE (args)
36367 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36368 && TREE_CHAIN (args) == NULL_TREE
36369 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36370 return true;
36371
36372 old_optimize = build_optimization_node (&global_options);
36373 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36374
36375 /* If the function changed the optimization levels as well as setting target
36376 options, start with the optimizations specified. */
36377 if (func_optimize && func_optimize != old_optimize)
36378 cl_optimization_restore (&global_options,
36379 TREE_OPTIMIZATION (func_optimize));
36380
36381 /* The target attributes may also change some optimization flags, so update
36382 the optimization options if necessary. */
36383 cl_target_option_save (&cur_target, &global_options);
36384 rs6000_cpu_index = rs6000_tune_index = -1;
36385 ret = rs6000_inner_target_options (args, true);
36386
36387 /* Set up any additional state. */
36388 if (ret)
36389 {
36390 ret = rs6000_option_override_internal (false);
36391 new_target = build_target_option_node (&global_options);
36392 }
36393 else
36394 new_target = NULL;
36395
36396 new_optimize = build_optimization_node (&global_options);
36397
36398 if (!new_target)
36399 ret = false;
36400
36401 else if (fndecl)
36402 {
36403 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36404
36405 if (old_optimize != new_optimize)
36406 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36407 }
36408
36409 cl_target_option_restore (&global_options, &cur_target);
36410
36411 if (old_optimize != new_optimize)
36412 cl_optimization_restore (&global_options,
36413 TREE_OPTIMIZATION (old_optimize));
36414
36415 return ret;
36416 }
36417
36418 \f
36419 /* Hook to validate the current #pragma GCC target and set the state, and
36420 update the macros based on what was changed. If ARGS is NULL, then
36421 POP_TARGET is used to reset the options. */
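/* An illustrative use:

     #pragma GCC push_options
     #pragma GCC target ("cpu=power8,vsx")
     ...
     #pragma GCC pop_options

   The pop_options pragma reaches this function with ARGS == NULL and
   POP_TARGET set to the previously saved option node. */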
36422
36423 bool
36424 rs6000_pragma_target_parse (tree args, tree pop_target)
36425 {
36426 tree prev_tree = build_target_option_node (&global_options);
36427 tree cur_tree;
36428 struct cl_target_option *prev_opt, *cur_opt;
36429 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36430 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36431
36432 if (TARGET_DEBUG_TARGET)
36433 {
36434 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36435 fprintf (stderr, "args:");
36436 rs6000_debug_target_options (args, " ");
36437 fprintf (stderr, "\n");
36438
36439 if (pop_target)
36440 {
36441 fprintf (stderr, "pop_target:\n");
36442 debug_tree (pop_target);
36443 }
36444 else
36445 fprintf (stderr, "pop_target: <NULL>\n");
36446
36447 fprintf (stderr, "--------------------\n");
36448 }
36449
36450 if (! args)
36451 {
36452 cur_tree = ((pop_target)
36453 ? pop_target
36454 : target_option_default_node);
36455 cl_target_option_restore (&global_options,
36456 TREE_TARGET_OPTION (cur_tree));
36457 }
36458 else
36459 {
36460 rs6000_cpu_index = rs6000_tune_index = -1;
36461 if (!rs6000_inner_target_options (args, false)
36462 || !rs6000_option_override_internal (false)
36463 || (cur_tree = build_target_option_node (&global_options))
36464 == NULL_TREE)
36465 {
36466 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36467 fprintf (stderr, "invalid pragma\n");
36468
36469 return false;
36470 }
36471 }
36472
36473 target_option_current_node = cur_tree;
36474 rs6000_activate_target_options (target_option_current_node);
36475
36476 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36477 change the macros that are defined. */
36478 if (rs6000_target_modify_macros_ptr)
36479 {
36480 prev_opt = TREE_TARGET_OPTION (prev_tree);
36481 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36482 prev_flags = prev_opt->x_rs6000_isa_flags;
36483
36484 cur_opt = TREE_TARGET_OPTION (cur_tree);
36485 cur_flags = cur_opt->x_rs6000_isa_flags;
36486 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36487
36488 diff_bumask = (prev_bumask ^ cur_bumask);
36489 diff_flags = (prev_flags ^ cur_flags);
36490
36491 if ((diff_flags != 0) || (diff_bumask != 0))
36492 {
36493 /* Delete old macros. */
36494 rs6000_target_modify_macros_ptr (false,
36495 prev_flags & diff_flags,
36496 prev_bumask & diff_bumask);
36497
36498 /* Define new macros. */
36499 rs6000_target_modify_macros_ptr (true,
36500 cur_flags & diff_flags,
36501 cur_bumask & diff_bumask);
36502 }
36503 }
36504
36505 return true;
36506 }
36507
36508 \f
36509 /* Remember the last fndecl processed by rs6000_set_current_function. */
36510 static GTY(()) tree rs6000_previous_fndecl;
36511
36512 /* Restore target's globals from NEW_TREE and invalidate the
36513 rs6000_previous_fndecl cache. */
36514
36515 void
36516 rs6000_activate_target_options (tree new_tree)
36517 {
36518 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36519 if (TREE_TARGET_GLOBALS (new_tree))
36520 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36521 else if (new_tree == target_option_default_node)
36522 restore_target_globals (&default_target_globals);
36523 else
36524 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36525 rs6000_previous_fndecl = NULL_TREE;
36526 }
36527
36528 /* Establish appropriate back-end context for processing the function
36529 FNDECL. The argument might be NULL to indicate processing at top
36530 level, outside of any function scope. */
36531 static void
36532 rs6000_set_current_function (tree fndecl)
36533 {
36534 if (TARGET_DEBUG_TARGET)
36535 {
36536 fprintf (stderr, "\n==================== rs6000_set_current_function");
36537
36538 if (fndecl)
36539 fprintf (stderr, ", fndecl %s (%p)",
36540 (DECL_NAME (fndecl)
36541 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36542 : "<unknown>"), (void *)fndecl);
36543
36544 if (rs6000_previous_fndecl)
36545 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36546
36547 fprintf (stderr, "\n");
36548 }
36549
36550 /* Only change the context if the function changes. This hook is called
36551 several times in the course of compiling a function, and we don't want to
36552 slow things down too much or call target_reinit when it isn't safe. */
36553 if (fndecl == rs6000_previous_fndecl)
36554 return;
36555
36556 tree old_tree;
36557 if (rs6000_previous_fndecl == NULL_TREE)
36558 old_tree = target_option_current_node;
36559 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36560 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36561 else
36562 old_tree = target_option_default_node;
36563
36564 tree new_tree;
36565 if (fndecl == NULL_TREE)
36566 {
36567 if (old_tree != target_option_current_node)
36568 new_tree = target_option_current_node;
36569 else
36570 new_tree = NULL_TREE;
36571 }
36572 else
36573 {
36574 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36575 if (new_tree == NULL_TREE)
36576 new_tree = target_option_default_node;
36577 }
36578
36579 if (TARGET_DEBUG_TARGET)
36580 {
36581 if (new_tree)
36582 {
36583 fprintf (stderr, "\nnew fndecl target specific options:\n");
36584 debug_tree (new_tree);
36585 }
36586
36587 if (old_tree)
36588 {
36589 fprintf (stderr, "\nold fndecl target specific options:\n");
36590 debug_tree (old_tree);
36591 }
36592
36593 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36594 fprintf (stderr, "--------------------\n");
36595 }
36596
36597 if (new_tree && old_tree != new_tree)
36598 rs6000_activate_target_options (new_tree);
36599
36600 if (fndecl)
36601 rs6000_previous_fndecl = fndecl;
36602 }
36603
36604 \f
36605 /* Save the current options */
36606
36607 static void
36608 rs6000_function_specific_save (struct cl_target_option *ptr,
36609 struct gcc_options *opts)
36610 {
36611 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36612 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36613 }
36614
36615 /* Restore the current options */
36616
36617 static void
36618 rs6000_function_specific_restore (struct gcc_options *opts,
36619 struct cl_target_option *ptr)
36620
36621 {
36622 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36623 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36624 (void) rs6000_option_override_internal (false);
36625 }
36626
36627 /* Print the current options */
36628
36629 static void
36630 rs6000_function_specific_print (FILE *file, int indent,
36631 struct cl_target_option *ptr)
36632 {
36633 rs6000_print_isa_options (file, indent, "Isa options set",
36634 ptr->x_rs6000_isa_flags);
36635
36636 rs6000_print_isa_options (file, indent, "Isa options explicit",
36637 ptr->x_rs6000_isa_flags_explicit);
36638 }
36639
36640 /* Helper function to print the current isa or misc options on a line. */
36641
36642 static void
36643 rs6000_print_options_internal (FILE *file,
36644 int indent,
36645 const char *string,
36646 HOST_WIDE_INT flags,
36647 const char *prefix,
36648 const struct rs6000_opt_mask *opts,
36649 size_t num_elements)
36650 {
36651 size_t i;
36652 size_t start_column = 0;
36653 size_t cur_column;
36654 size_t max_column = 120;
36655 size_t prefix_len = strlen (prefix);
36656 size_t comma_len = 0;
36657 const char *comma = "";
36658
36659 if (indent)
36660 start_column += fprintf (file, "%*s", indent, "");
36661
36662 if (!flags)
36663 {
36664 fprintf (file, DEBUG_FMT_S, string, "<none>");
36665 return;
36666 }
36667
36668 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36669
36670 /* Print the various mask options. */
36671 cur_column = start_column;
36672 for (i = 0; i < num_elements; i++)
36673 {
36674 bool invert = opts[i].invert;
36675 const char *name = opts[i].name;
36676 const char *no_str = "";
36677 HOST_WIDE_INT mask = opts[i].mask;
36678 size_t len = comma_len + prefix_len + strlen (name);
36679
36680 if (!invert)
36681 {
36682 if ((flags & mask) == 0)
36683 {
36684 no_str = "no-";
36685 len += sizeof ("no-") - 1;
36686 }
36687
36688 flags &= ~mask;
36689 }
36690
36691 else
36692 {
36693 if ((flags & mask) != 0)
36694 {
36695 no_str = "no-";
36696 len += sizeof ("no-") - 1;
36697 }
36698
36699 flags |= mask;
36700 }
36701
36702 cur_column += len;
36703 if (cur_column > max_column)
36704 {
36705 fprintf (file, ", \\\n%*s", (int)start_column, "");
36706 cur_column = start_column + len;
36707 comma = "";
36708 }
36709
36710 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36711 comma = ", ";
36712 comma_len = sizeof (", ") - 1;
36713 }
36714
36715 fputs ("\n", file);
36716 }
36717
36718 /* Helper function to print the current isa options on a line. */
36719
36720 static void
36721 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36722 HOST_WIDE_INT flags)
36723 {
36724 rs6000_print_options_internal (file, indent, string, flags, "-m",
36725 &rs6000_opt_masks[0],
36726 ARRAY_SIZE (rs6000_opt_masks));
36727 }
36728
36729 static void
36730 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36731 HOST_WIDE_INT flags)
36732 {
36733 rs6000_print_options_internal (file, indent, string, flags, "",
36734 &rs6000_builtin_mask_names[0],
36735 ARRAY_SIZE (rs6000_builtin_mask_names));
36736 }
36737
36738 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36739 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36740 -mupper-regs-df, etc.).
36741
36742 If the user used -mno-power8-vector, we need to turn off all of the implicit
36743 ISA 2.07 and 3.0 options that relate to the vector unit.
36744
36745 If the user used -mno-power9-vector, we need to turn off all of the implicit
36746 ISA 3.0 options that relate to the vector unit.
36747
36748 This function does not handle explicit options such as the user specifying
36749 -mdirect-move. These are handled in rs6000_option_override_internal, and
36750 the appropriate error is given if needed.
36751
36752 We return a mask of all of the implicit options that should not be enabled
36753 by default. */
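/* For example (an illustrative scenario), with -mcpu=power9 -mno-vsx the
   implicit power8-vector and power9-vector options depend on VSX, so their
   masks are returned here and are not enabled by default; see the flags[]
   table below for the exact dependency sets. */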
36754
36755 static HOST_WIDE_INT
36756 rs6000_disable_incompatible_switches (void)
36757 {
36758 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
36759 size_t i, j;
36760
36761 static const struct {
36762 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
36763 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
36764 const char *const name; /* name of the switch. */
36765 } flags[] = {
36766 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
36767 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
36768 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
36769 };
36770
36771 for (i = 0; i < ARRAY_SIZE (flags); i++)
36772 {
36773 HOST_WIDE_INT no_flag = flags[i].no_flag;
36774
36775 if ((rs6000_isa_flags & no_flag) == 0
36776 && (rs6000_isa_flags_explicit & no_flag) != 0)
36777 {
36778 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
36779 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
36780 & rs6000_isa_flags
36781 & dep_flags);
36782
36783 if (set_flags)
36784 {
36785 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
36786 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
36787 {
36788 set_flags &= ~rs6000_opt_masks[j].mask;
36789 error ("%<-mno-%s%> turns off %<-m%s%>",
36790 flags[i].name,
36791 rs6000_opt_masks[j].name);
36792 }
36793
36794 gcc_assert (!set_flags);
36795 }
36796
36797 rs6000_isa_flags &= ~dep_flags;
36798 ignore_masks |= no_flag | dep_flags;
36799 }
36800 }
36801
36802 return ignore_masks;
36803 }
36804
36805 \f
36806 /* Helper function for printing the function name when debugging. */
36807
36808 static const char *
36809 get_decl_name (tree fn)
36810 {
36811 tree name;
36812
36813 if (!fn)
36814 return "<null>";
36815
36816 name = DECL_NAME (fn);
36817 if (!name)
36818 return "<no-name>";
36819
36820 return IDENTIFIER_POINTER (name);
36821 }
36822
36823 /* Return the clone id of the target we are compiling code for in a target
36824 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
36825 the priority list for the target clones (ordered from lowest to
36826 highest). */
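/* For example (illustrative only):

     __attribute__((target_clones("cpu=power9,cpu=power8,default")))
     double bar (double);

   creates one clone per entry; this function maps each clone's ISA flags
   to its priority in rs6000_clone_map. */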
36827
36828 static int
36829 rs6000_clone_priority (tree fndecl)
36830 {
36831 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36832 HOST_WIDE_INT isa_masks;
36833 int ret = CLONE_DEFAULT;
36834 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
36835 const char *attrs_str = NULL;
36836
36837 attrs = TREE_VALUE (TREE_VALUE (attrs));
36838 attrs_str = TREE_STRING_POINTER (attrs);
36839
36840 /* Return priority zero for default function. Return the ISA needed for the
36841 function if it is not the default. */
36842 if (strcmp (attrs_str, "default") != 0)
36843 {
36844 if (fn_opts == NULL_TREE)
36845 fn_opts = target_option_default_node;
36846
36847 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
36848 isa_masks = rs6000_isa_flags;
36849 else
36850 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
36851
36852 for (ret = CLONE_MAX - 1; ret != 0; ret--)
36853 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
36854 break;
36855 }
36856
36857 if (TARGET_DEBUG_TARGET)
36858 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
36859 get_decl_name (fndecl), ret);
36860
36861 return ret;
36862 }
36863
36864 /* This compares the priority of target features in function DECL1 and DECL2.
36865 It returns a positive value if DECL1 has higher priority, a negative value if
36866 DECL2 has higher priority, and 0 if they are the same. Note, priorities are
36867 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
36868
36869 static int
36870 rs6000_compare_version_priority (tree decl1, tree decl2)
36871 {
36872 int priority1 = rs6000_clone_priority (decl1);
36873 int priority2 = rs6000_clone_priority (decl2);
36874 int ret = priority1 - priority2;
36875
36876 if (TARGET_DEBUG_TARGET)
36877 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
36878 get_decl_name (decl1), get_decl_name (decl2), ret);
36879
36880 return ret;
36881 }
36882
36883 /* Make a dispatcher declaration for the multi-versioned function DECL.
36884 Calls to DECL function will be replaced with calls to the dispatcher
36885 by the front-end. Returns the decl of the dispatcher function. */
36886
36887 static tree
36888 rs6000_get_function_versions_dispatcher (void *decl)
36889 {
36890 tree fn = (tree) decl;
36891 struct cgraph_node *node = NULL;
36892 struct cgraph_node *default_node = NULL;
36893 struct cgraph_function_version_info *node_v = NULL;
36894 struct cgraph_function_version_info *first_v = NULL;
36895
36896 tree dispatch_decl = NULL;
36897
36898 struct cgraph_function_version_info *default_version_info = NULL;
36899 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
36900
36901 if (TARGET_DEBUG_TARGET)
36902 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
36903 get_decl_name (fn));
36904
36905 node = cgraph_node::get (fn);
36906 gcc_assert (node != NULL);
36907
36908 node_v = node->function_version ();
36909 gcc_assert (node_v != NULL);
36910
36911 if (node_v->dispatcher_resolver != NULL)
36912 return node_v->dispatcher_resolver;
36913
36914 /* Find the default version and make it the first node. */
36915 first_v = node_v;
36916 /* Go to the beginning of the chain. */
36917 while (first_v->prev != NULL)
36918 first_v = first_v->prev;
36919
36920 default_version_info = first_v;
36921 while (default_version_info != NULL)
36922 {
36923 const tree decl2 = default_version_info->this_node->decl;
36924 if (is_function_default_version (decl2))
36925 break;
36926 default_version_info = default_version_info->next;
36927 }
36928
36929 /* If there is no default node, just return NULL. */
36930 if (default_version_info == NULL)
36931 return NULL;
36932
36933 /* Make default info the first node. */
36934 if (first_v != default_version_info)
36935 {
36936 default_version_info->prev->next = default_version_info->next;
36937 if (default_version_info->next)
36938 default_version_info->next->prev = default_version_info->prev;
36939 first_v->prev = default_version_info;
36940 default_version_info->next = first_v;
36941 default_version_info->prev = NULL;
36942 }
36943
36944 default_node = default_version_info->this_node;
36945
36946 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
36947 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36948 "target_clones attribute needs GLIBC (2.23 and newer) that "
36949 "exports hardware capability bits");
36950 #else
36951
36952 if (targetm.has_ifunc_p ())
36953 {
36954 struct cgraph_function_version_info *it_v = NULL;
36955 struct cgraph_node *dispatcher_node = NULL;
36956 struct cgraph_function_version_info *dispatcher_version_info = NULL;
36957
36958 /* Right now, the dispatching is done via ifunc. */
36959 dispatch_decl = make_dispatcher_decl (default_node->decl);
36960
36961 dispatcher_node = cgraph_node::get_create (dispatch_decl);
36962 gcc_assert (dispatcher_node != NULL);
36963 dispatcher_node->dispatcher_function = 1;
36964 dispatcher_version_info
36965 = dispatcher_node->insert_new_function_version ();
36966 dispatcher_version_info->next = default_version_info;
36967 dispatcher_node->definition = 1;
36968
36969 /* Set the dispatcher for all the versions. */
36970 it_v = default_version_info;
36971 while (it_v != NULL)
36972 {
36973 it_v->dispatcher_resolver = dispatch_decl;
36974 it_v = it_v->next;
36975 }
36976 }
36977 else
36978 {
36979 error_at (DECL_SOURCE_LOCATION (default_node->decl),
36980 "multiversioning needs ifunc which is not supported "
36981 "on this target");
36982 }
36983 #endif
36984
36985 return dispatch_decl;
36986 }
36987
36988 /* Make the resolver function decl to dispatch the versions of a multi-
36989 versioned function, DEFAULT_DECL. Create an empty basic block in the
36990 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
36991 function. */
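/* Conceptually, the finished resolver behaves like this C sketch (names
   are illustrative; the real body is built as GIMPLE later):

     static void *
     foo_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
         return foo_power9;
       return foo_default;
     }

   and DISPATCH_DECL becomes an ifunc resolved by this function. */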
36992
36993 static tree
36994 make_resolver_func (const tree default_decl,
36995 const tree dispatch_decl,
36996 basic_block *empty_bb)
36997 {
36998 /* Make the resolver function static. The resolver function returns
36999 void *. */
37000 tree decl_name = clone_function_name (default_decl, "resolver");
37001 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37002 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37003 tree decl = build_fn_decl (resolver_name, type);
37004 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37005
37006 DECL_NAME (decl) = decl_name;
37007 TREE_USED (decl) = 1;
37008 DECL_ARTIFICIAL (decl) = 1;
37009 DECL_IGNORED_P (decl) = 0;
37010 TREE_PUBLIC (decl) = 0;
37011 DECL_UNINLINABLE (decl) = 1;
37012
37013 /* Resolver is not external, body is generated. */
37014 DECL_EXTERNAL (decl) = 0;
37015 DECL_EXTERNAL (dispatch_decl) = 0;
37016
37017 DECL_CONTEXT (decl) = NULL_TREE;
37018 DECL_INITIAL (decl) = make_node (BLOCK);
37019 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37020
37021 /* Build result decl and add to function_decl. */
37022 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37023 DECL_ARTIFICIAL (t) = 1;
37024 DECL_IGNORED_P (t) = 1;
37025 DECL_RESULT (decl) = t;
37026
37027 gimplify_function_tree (decl);
37028 push_cfun (DECL_STRUCT_FUNCTION (decl));
37029 *empty_bb = init_lowered_empty_function (decl, false,
37030 profile_count::uninitialized ());
37031
37032 cgraph_node::add_new_function (decl, true);
37033 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37034
37035 pop_cfun ();
37036
37037 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37038 DECL_ATTRIBUTES (dispatch_decl)
37039 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37040
37041 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37042
37043 return decl;
37044 }
37045
37046 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37047 return a pointer to VERSION_DECL if we are running on a machine that
37048 supports the index CLONE_ISA hardware architecture bits. This function will
37049 be called during version dispatch to decide which function version to
37050 execute. It returns the basic block at the end, to which more conditions
37051 can be added. */
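/* Each non-default condition added here corresponds to this sketch:

     cond = __builtin_cpu_supports ("<clone_isa name>");
     if (cond != 0)
       return (void *) &version_decl;
     ... otherwise fall through to the next condition ...

   while the default clone simply returns its address. */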
37052
37053 static basic_block
37054 add_condition_to_bb (tree function_decl, tree version_decl,
37055 int clone_isa, basic_block new_bb)
37056 {
37057 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37058
37059 gcc_assert (new_bb != NULL);
37060 gimple_seq gseq = bb_seq (new_bb);
37061
37062
37063 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37064 build_fold_addr_expr (version_decl));
37065 tree result_var = create_tmp_var (ptr_type_node);
37066 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37067 gimple *return_stmt = gimple_build_return (result_var);
37068
37069 if (clone_isa == CLONE_DEFAULT)
37070 {
37071 gimple_seq_add_stmt (&gseq, convert_stmt);
37072 gimple_seq_add_stmt (&gseq, return_stmt);
37073 set_bb_seq (new_bb, gseq);
37074 gimple_set_bb (convert_stmt, new_bb);
37075 gimple_set_bb (return_stmt, new_bb);
37076 pop_cfun ();
37077 return new_bb;
37078 }
37079
37080 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37081 tree cond_var = create_tmp_var (bool_int_type_node);
37082 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37083 const char *arg_str = rs6000_clone_map[clone_isa].name;
37084 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37085 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37086 gimple_call_set_lhs (call_cond_stmt, cond_var);
37087
37088 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37089 gimple_set_bb (call_cond_stmt, new_bb);
37090 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37091
37092 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37093 NULL_TREE, NULL_TREE);
37094 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37095 gimple_set_bb (if_else_stmt, new_bb);
37096 gimple_seq_add_stmt (&gseq, if_else_stmt);
37097
37098 gimple_seq_add_stmt (&gseq, convert_stmt);
37099 gimple_seq_add_stmt (&gseq, return_stmt);
37100 set_bb_seq (new_bb, gseq);
37101
37102 basic_block bb1 = new_bb;
37103 edge e12 = split_block (bb1, if_else_stmt);
37104 basic_block bb2 = e12->dest;
37105 e12->flags &= ~EDGE_FALLTHRU;
37106 e12->flags |= EDGE_TRUE_VALUE;
37107
37108 edge e23 = split_block (bb2, return_stmt);
37109 gimple_set_bb (convert_stmt, bb2);
37110 gimple_set_bb (return_stmt, bb2);
37111
37112 basic_block bb3 = e23->dest;
37113 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37114
37115 remove_edge (e23);
37116 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37117
37118 pop_cfun ();
37119 return bb3;
37120 }
37121
37122 /* This function generates the dispatch function for multi-versioned functions.
37123 DISPATCH_DECL is the function which will contain the dispatch logic.
37124 FNDECLS holds the function choices for dispatch; it is passed as a vector.
37125 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37126 code is generated. */
37127
37128 static int
37129 dispatch_function_versions (tree dispatch_decl,
37130 void *fndecls_p,
37131 basic_block *empty_bb)
37132 {
37133 int ix;
37134 tree ele;
37135 vec<tree> *fndecls;
37136 tree clones[CLONE_MAX];
37137
37138 if (TARGET_DEBUG_TARGET)
37139 fputs ("dispatch_function_versions, top\n", stderr);
37140
37141 gcc_assert (dispatch_decl != NULL
37142 && fndecls_p != NULL
37143 && empty_bb != NULL);
37144
37145 /* fndecls_p is actually a vector. */
37146 fndecls = static_cast<vec<tree> *> (fndecls_p);
37147
37148 /* At least one more version other than the default. */
37149 gcc_assert (fndecls->length () >= 2);
37150
37151 /* The first version in the vector is the default decl. */
37152 memset ((void *) clones, '\0', sizeof (clones));
37153 clones[CLONE_DEFAULT] = (*fndecls)[0];
37154
37155 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37156 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37157 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37158 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37159 to insert the code here to do the call. */
37160
37161 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37162 {
37163 int priority = rs6000_clone_priority (ele);
37164 if (!clones[priority])
37165 clones[priority] = ele;
37166 }
37167
37168 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37169 if (clones[ix])
37170 {
37171 if (TARGET_DEBUG_TARGET)
37172 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37173 ix, get_decl_name (clones[ix]));
37174
37175 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37176 *empty_bb);
37177 }
37178
37179 return 0;
37180 }
37181
37182 /* Generate the dispatching code body to dispatch multi-versioned function
37183 DECL. The target hook is called to process the "target" attributes and
37184 provide the code to dispatch the right function at run-time. NODE points
37185 to the dispatcher decl whose body will be created. */
37186
37187 static tree
37188 rs6000_generate_version_dispatcher_body (void *node_p)
37189 {
37190 tree resolver;
37191 basic_block empty_bb;
37192 struct cgraph_node *node = (cgraph_node *) node_p;
37193 struct cgraph_function_version_info *ninfo = node->function_version ();
37194
37195 if (ninfo->dispatcher_resolver)
37196 return ninfo->dispatcher_resolver;
37197
37198 /* node is going to be an alias, so remove the finalized bit. */
37199 node->definition = false;
37200
37201 /* The first version in the chain corresponds to the default version. */
37202 ninfo->dispatcher_resolver = resolver
37203 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37204
37205 if (TARGET_DEBUG_TARGET)
37206 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37207 get_decl_name (resolver));
37208
37209 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37210 auto_vec<tree, 2> fn_ver_vec;
37211
37212 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37213 vinfo;
37214 vinfo = vinfo->next)
37215 {
37216 struct cgraph_node *version = vinfo->this_node;
37217 /* Check for virtual functions here again, as by this time it should
37218 have been determined if this function needs a vtable index or
37219 not. This happens for methods in derived classes that override
37220 virtual methods in base classes but are not explicitly marked as
37221 virtual. */
37222 if (DECL_VINDEX (version->decl))
37223 sorry ("Virtual function multiversioning not supported");
37224
37225 fn_ver_vec.safe_push (version->decl);
37226 }
37227
37228 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37229 cgraph_edge::rebuild_edges ();
37230 pop_cfun ();
37231 return resolver;
37232 }
37233
37234 \f
37235 /* Hook to determine if one function can safely inline another. */
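/* For example (illustrative), given

     __attribute__((target("vsx")))     static int a (void);
     __attribute__((target("altivec"))) static int b (void);

   A may inline B, since VSX implies Altivec and B's flags are a subset of
   A's, but B may not inline A. */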
37236
37237 static bool
37238 rs6000_can_inline_p (tree caller, tree callee)
37239 {
37240 bool ret = false;
37241 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37242 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37243
37244 /* If callee has no option attributes, then it is ok to inline. */
37245 if (!callee_tree)
37246 ret = true;
37247
37248 /* If caller has no option attributes, but callee does then it is not ok to
37249 inline. */
37250 else if (!caller_tree)
37251 ret = false;
37252
37253 else
37254 {
37255 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37256 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37257
37258 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37259 can inline an altivec function but a non-vsx function can't inline a
37260 vsx function. */
37261 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37262 == callee_opts->x_rs6000_isa_flags)
37263 ret = true;
37264 }
37265
37266 if (TARGET_DEBUG_TARGET)
37267 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37268 get_decl_name (caller), get_decl_name (callee),
37269 (ret ? "can" : "cannot"));
37270
37271 return ret;
37272 }
37273 \f
37274 /* Allocate a stack temp and fix up the address so it meets the particular
37275 memory requirements (either offsettable or REG+REG addressing). */
37276
37277 rtx
37278 rs6000_allocate_stack_temp (machine_mode mode,
37279 bool offsettable_p,
37280 bool reg_reg_p)
37281 {
37282 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37283 rtx addr = XEXP (stack, 0);
37284 int strict_p = reload_completed;
37285
37286 if (!legitimate_indirect_address_p (addr, strict_p))
37287 {
37288 if (offsettable_p
37289 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37290 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37291
37292 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37293 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37294 }
37295
37296 return stack;
37297 }
37298
37299 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
37300 to such a form to deal with memory reference instructions like STFIWX that
37301 only take reg+reg addressing. */
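/* E.g. a D-form reference like 24(r9) is rewritten here by copying the
   full address into a new pseudo so the access can be emitted X-form,
   as in stfiwx fr0,0,r10 (an illustrative sketch). */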
37302
37303 rtx
37304 rs6000_address_for_fpconvert (rtx x)
37305 {
37306 rtx addr;
37307
37308 gcc_assert (MEM_P (x));
37309 addr = XEXP (x, 0);
37310 if (can_create_pseudo_p ()
37311 && ! legitimate_indirect_address_p (addr, reload_completed)
37312 && ! legitimate_indexed_address_p (addr, reload_completed))
37313 {
37314 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37315 {
37316 rtx reg = XEXP (addr, 0);
37317 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37318 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37319 gcc_assert (REG_P (reg));
37320 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37321 addr = reg;
37322 }
37323 else if (GET_CODE (addr) == PRE_MODIFY)
37324 {
37325 rtx reg = XEXP (addr, 0);
37326 rtx expr = XEXP (addr, 1);
37327 gcc_assert (REG_P (reg));
37328 gcc_assert (GET_CODE (expr) == PLUS);
37329 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37330 addr = reg;
37331 }
37332
37333 x = replace_equiv_address (x, copy_addr_to_reg (addr));
37334 }
37335
37336 return x;
37337 }
37338
37339 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37340
37341 On the RS/6000, all integer constants are acceptable, most won't be valid
37342 for particular insns, though. Only easy FP constants are acceptable. */
37343
37344 static bool
37345 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37346 {
37347 if (TARGET_ELF && tls_referenced_p (x))
37348 return false;
37349
37350 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
37351 || GET_MODE (x) == VOIDmode
37352 || (TARGET_POWERPC64 && mode == DImode)
37353 || easy_fp_constant (x, mode)
37354 || easy_vector_constant (x, mode));
37355 }
37356
37357 \f
37358 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37359
37360 static bool
37361 chain_already_loaded (rtx_insn *last)
37362 {
37363 for (; last != NULL; last = PREV_INSN (last))
37364 {
37365 if (NONJUMP_INSN_P (last))
37366 {
37367 rtx patt = PATTERN (last);
37368
37369 if (GET_CODE (patt) == SET)
37370 {
37371 rtx lhs = XEXP (patt, 0);
37372
37373 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37374 return true;
37375 }
37376 }
37377 }
37378 return false;
37379 }
37380
37381 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37382
37383 void
37384 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37385 {
37386 const bool direct_call_p
37387 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
37388 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37389 rtx toc_load = NULL_RTX;
37390 rtx toc_restore = NULL_RTX;
37391 rtx func_addr;
37392 rtx abi_reg = NULL_RTX;
37393 rtx call[4];
37394 int n_call;
37395 rtx insn;
37396
37397 /* Handle longcall attributes. */
37398 if (INTVAL (cookie) & CALL_LONG)
37399 func_desc = rs6000_longcall_ref (func_desc);
37400
37401 /* Handle indirect calls. */
37402 if (GET_CODE (func_desc) != SYMBOL_REF
37403 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
37404 {
37405 /* Save the TOC into its reserved slot before the call,
37406 and prepare to restore it after the call. */
37407 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37408 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37409 rtx stack_toc_mem = gen_frame_mem (Pmode,
37410 gen_rtx_PLUS (Pmode, stack_ptr,
37411 stack_toc_offset));
37412 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37413 gen_rtvec (1, stack_toc_offset),
37414 UNSPEC_TOCSLOT);
37415 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37416
37417 /* Can we optimize saving the TOC in the prologue or
37418 do we need to do it at every call? */
37419 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37420 cfun->machine->save_toc_in_prologue = true;
37421 else
37422 {
37423 MEM_VOLATILE_P (stack_toc_mem) = 1;
37424 emit_move_insn (stack_toc_mem, toc_reg);
37425 }
37426
37427 if (DEFAULT_ABI == ABI_ELFv2)
37428 {
37429 /* A function pointer in the ELFv2 ABI is just a plain address, but
37430 the ABI requires it to be loaded into r12 before the call. */
37431 func_addr = gen_rtx_REG (Pmode, 12);
37432 emit_move_insn (func_addr, func_desc);
37433 abi_reg = func_addr;
37434 }
37435 else
37436 {
37437 /* A function pointer under AIX is a pointer to a data area whose
37438 first word contains the actual address of the function, whose
37439 second word contains a pointer to its TOC, and whose third word
37440 contains a value to place in the static chain register (r11).
37441 Note that if we load the static chain, our "trampoline" need
37442 not have any executable code. */
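/* Viewed as a struct, an AIX function descriptor is effectively
   (an illustrative sketch):

     struct func_desc {
       void *code;          // actual function address
       void *toc;           // callee's TOC pointer
       void *static_chain;  // value loaded into r11, if needed
     };  */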
37443
37444 /* Load up address of the actual function. */
37445 func_desc = force_reg (Pmode, func_desc);
37446 func_addr = gen_reg_rtx (Pmode);
37447 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
37448
37449 /* Prepare to load the TOC of the called function. Note that the
37450 TOC load must happen immediately before the actual call so
37451 that unwinding the TOC registers works correctly. See the
37452 comment in frob_update_context. */
37453 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37454 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37455 gen_rtx_PLUS (Pmode, func_desc,
37456 func_toc_offset));
37457 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37458
37459 /* If we have a static chain, load it up. But, if the call was
37460 originally direct, the 3rd word has not been written since no
37461 trampoline has been built, so we ought not to load it, lest we
37462 override a static chain value. */
37463 if (!direct_call_p
37464 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37465 && !chain_already_loaded (get_current_sequence ()->next->last))
37466 {
37467 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37468 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37469 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37470 gen_rtx_PLUS (Pmode, func_desc,
37471 func_sc_offset));
37472 emit_move_insn (sc_reg, func_sc_mem);
37473 abi_reg = sc_reg;
37474 }
37475 }
37476 }
37477 else
37478 {
37479 /* Direct calls use the TOC: for local calls, the callee will
37480 assume the TOC register is set; for non-local calls, the
37481 PLT stub needs the TOC register. */
37482 abi_reg = toc_reg;
37483 func_addr = func_desc;
37484 }
37485
37486 /* Create the call. */
37487 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
37488 if (value != NULL_RTX)
37489 call[0] = gen_rtx_SET (value, call[0]);
37490 n_call = 1;
37491
37492 if (toc_load)
37493 call[n_call++] = toc_load;
37494 if (toc_restore)
37495 call[n_call++] = toc_restore;
37496
37497 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
37498
37499 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37500 insn = emit_call_insn (insn);
37501
37502 /* Mention all registers defined by the ABI to hold information
37503 as uses in CALL_INSN_FUNCTION_USAGE. */
37504 if (abi_reg)
37505 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37506 }
37507
37508 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37509
37510 void
37511 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
37512 {
37513 rtx call[2];
37514 rtx insn;
37515
37516 gcc_assert (INTVAL (cookie) == 0);
37517
37518 /* Create the call. */
37519 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
37520 if (value != NULL_RTX)
37521 call[0] = gen_rtx_SET (value, call[0]);
37522
37523 call[1] = simple_return_rtx;
37524
37525 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37526 insn = emit_call_insn (insn);
37527
37528 /* Note use of the TOC register. */
37529 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37530 }
37531
37532 /* Return whether we need to always update the saved TOC pointer when we update
37533 the stack pointer. */
37534
37535 static bool
37536 rs6000_save_toc_in_prologue_p (void)
37537 {
37538 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
37539 }
37540
37541 #ifdef HAVE_GAS_HIDDEN
37542 # define USE_HIDDEN_LINKONCE 1
37543 #else
37544 # define USE_HIDDEN_LINKONCE 0
37545 #endif
37546
37547 /* Fills in the label name that should be used for a 476 link stack thunk. */
37548
37549 void
37550 get_ppc476_thunk_name (char name[32])
37551 {
37552 gcc_assert (TARGET_LINK_STACK);
37553
37554 if (USE_HIDDEN_LINKONCE)
37555 sprintf (name, "__ppc476.get_thunk");
37556 else
37557 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
37558 }
37559
37560 /* This function emits the simple thunk routine that is used to preserve
37561 the link stack on the 476 cpu. */
37562
37563 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
37564 static void
37565 rs6000_code_end (void)
37566 {
37567 char name[32];
37568 tree decl;
37569
37570 if (!TARGET_LINK_STACK)
37571 return;
37572
37573 get_ppc476_thunk_name (name);
37574
37575 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
37576 build_function_type_list (void_type_node, NULL_TREE));
37577 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
37578 NULL_TREE, void_type_node);
37579 TREE_PUBLIC (decl) = 1;
37580 TREE_STATIC (decl) = 1;
37581
37582 #if RS6000_WEAK
37583 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
37584 {
37585 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
37586 targetm.asm_out.unique_section (decl, 0);
37587 switch_to_section (get_named_section (decl, NULL, 0));
37588 DECL_WEAK (decl) = 1;
37589 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
37590 targetm.asm_out.globalize_label (asm_out_file, name);
37591 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
37592 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
37593 }
37594 else
37595 #endif
37596 {
37597 switch_to_section (text_section);
37598 ASM_OUTPUT_LABEL (asm_out_file, name);
37599 }
37600
37601 DECL_INITIAL (decl) = make_node (BLOCK);
37602 current_function_decl = decl;
37603 allocate_struct_function (decl, false);
37604 init_function_start (decl);
37605 first_function_block_is_cold = false;
37606 /* Make sure unwind info is emitted for the thunk if needed. */
37607 final_start_function (emit_barrier (), asm_out_file, 1);
37608
37609 fputs ("\tblr\n", asm_out_file);
37610
37611 final_end_function ();
37612 init_insn_lengths ();
37613 free_after_compilation (cfun);
37614 set_cfun (NULL);
37615 current_function_decl = NULL;
37616 }
37617
37618 /* Add r30 to hard reg set if the prologue sets it up and it is not
37619 pic_offset_table_rtx. */
37620
37621 static void
37622 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
37623 {
37624 if (!TARGET_SINGLE_PIC_BASE
37625 && TARGET_TOC
37626 && TARGET_MINIMAL_TOC
37627 && !constant_pool_empty_p ())
37628 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
37629 if (cfun->machine->split_stack_argp_used)
37630 add_to_hard_reg_set (&set->set, Pmode, 12);
37631
37632 /* Make sure the hard reg set doesn't include r2, which was possibly added
37633 via PIC_OFFSET_TABLE_REGNUM. */
37634 if (TARGET_TOC)
37635 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
37636 }
37637
37638 \f
37639 /* Helper function for rs6000_split_logical to emit a logical instruction after
37640 splitting the operation into individual GPR registers.
37641
37642 DEST is the destination register.
37643 OP1 and OP2 are the input source registers.
37644 CODE is the base operation (AND, IOR, XOR, NOT).
37645 MODE is the machine mode.
37646 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37647 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37648 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37649
37650 static void
37651 rs6000_split_logical_inner (rtx dest,
37652 rtx op1,
37653 rtx op2,
37654 enum rtx_code code,
37655 machine_mode mode,
37656 bool complement_final_p,
37657 bool complement_op1_p,
37658 bool complement_op2_p)
37659 {
37660 rtx bool_rtx;
37661
37662 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
37663 if (op2 && GET_CODE (op2) == CONST_INT
37664 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
37665 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37666 {
37667 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
37668 HOST_WIDE_INT value = INTVAL (op2) & mask;
37669
37670 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
37671 if (code == AND)
37672 {
37673 if (value == 0)
37674 {
37675 emit_insn (gen_rtx_SET (dest, const0_rtx));
37676 return;
37677 }
37678
37679 else if (value == mask)
37680 {
37681 if (!rtx_equal_p (dest, op1))
37682 emit_insn (gen_rtx_SET (dest, op1));
37683 return;
37684 }
37685 }
37686
37687 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
37688 into separate ORI/ORIS or XORI/XORIS instructions. */
37689 else if (code == IOR || code == XOR)
37690 {
37691 if (value == 0)
37692 {
37693 if (!rtx_equal_p (dest, op1))
37694 emit_insn (gen_rtx_SET (dest, op1));
37695 return;
37696 }
37697 }
37698 }
37699
37700 if (code == AND && mode == SImode
37701 && !complement_final_p && !complement_op1_p && !complement_op2_p)
37702 {
37703 emit_insn (gen_andsi3 (dest, op1, op2));
37704 return;
37705 }
37706
37707 if (complement_op1_p)
37708 op1 = gen_rtx_NOT (mode, op1);
37709
37710 if (complement_op2_p)
37711 op2 = gen_rtx_NOT (mode, op2);
37712
37713 /* For canonical RTL, if only one arm is inverted it is the first. */
37714 if (!complement_op1_p && complement_op2_p)
37715 std::swap (op1, op2);
37716
37717 bool_rtx = ((code == NOT)
37718 ? gen_rtx_NOT (mode, op1)
37719 : gen_rtx_fmt_ee (code, mode, op1, op2));
37720
37721 if (complement_final_p)
37722 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
37723
37724 emit_insn (gen_rtx_SET (dest, bool_rtx));
37725 }
37726
37727 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
37728 operations are split immediately during RTL generation to allow for more
37729 optimizations of the AND/IOR/XOR.
37730
37731 OPERANDS is an array containing the destination and two input operands.
37732 CODE is the base operation (AND, IOR, XOR, NOT).
37734 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37735 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37736 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
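/* For example (a sketch), a 32-bit XOR of the low word with 0x12345678,
   which is not a single 16-bit logical constant, is split below into an
   XORIS/XORI style pair:

     tmp = op1 ^ 0x12340000;   // high 16 bits
     dst = tmp ^ 0x00005678;   // low 16 bits  */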
37739
37740 static void
37741 rs6000_split_logical_di (rtx operands[3],
37742 enum rtx_code code,
37743 bool complement_final_p,
37744 bool complement_op1_p,
37745 bool complement_op2_p)
37746 {
37747 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
37748 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
37749 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
37750 enum hi_lo { hi = 0, lo = 1 };
37751 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
37752 size_t i;
37753
37754 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
37755 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
37756 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
37757 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
37758
37759 if (code == NOT)
37760 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
37761 else
37762 {
37763 if (GET_CODE (operands[2]) != CONST_INT)
37764 {
37765 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
37766 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
37767 }
37768 else
37769 {
37770 HOST_WIDE_INT value = INTVAL (operands[2]);
37771 HOST_WIDE_INT value_hi_lo[2];
37772
37773 gcc_assert (!complement_final_p);
37774 gcc_assert (!complement_op1_p);
37775 gcc_assert (!complement_op2_p);
37776
37777 value_hi_lo[hi] = value >> 32;
37778 value_hi_lo[lo] = value & lower_32bits;
37779
37780 for (i = 0; i < 2; i++)
37781 {
37782 HOST_WIDE_INT sub_value = value_hi_lo[i];
37783
37784 if (sub_value & sign_bit)
37785 sub_value |= upper_32bits;
37786
37787 op2_hi_lo[i] = GEN_INT (sub_value);
37788
37789 /* If this is an AND instruction, check to see if we need to load
37790 the value in a register. */
37791 if (code == AND && sub_value != -1 && sub_value != 0
37792 && !and_operand (op2_hi_lo[i], SImode))
37793 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
37794 }
37795 }
37796 }
37797
37798 for (i = 0; i < 2; i++)
37799 {
37800 /* Split large IOR/XOR operations. */
37801 if ((code == IOR || code == XOR)
37802 && GET_CODE (op2_hi_lo[i]) == CONST_INT
37803 && !complement_final_p
37804 && !complement_op1_p
37805 && !complement_op2_p
37806 && !logical_const_operand (op2_hi_lo[i], SImode))
37807 {
37808 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
37809 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
37810 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
37811 rtx tmp = gen_reg_rtx (SImode);
37812
37813 /* Make sure the constant is sign extended. */
37814 if ((hi_16bits & sign_bit) != 0)
37815 hi_16bits |= upper_32bits;
37816
37817 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
37818 code, SImode, false, false, false);
37819
37820 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
37821 code, SImode, false, false, false);
37822 }
37823 else
37824 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
37825 code, SImode, complement_final_p,
37826 complement_op1_p, complement_op2_p);
37827 }
37828
37829 return;
37830 }
37831
37832 /* Split the insns that make up boolean operations operating on multiple GPR
37833 registers. The boolean MD patterns ensure that the inputs either are
37834 exactly the same as the output registers, or there is no overlap.
37835
37836 OPERANDS is an array containing the destination and two input operands.
37837 CODE is the base operation (AND, IOR, XOR, NOT).
37838 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
37839 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
37840 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
37841
37842 void
37843 rs6000_split_logical (rtx operands[3],
37844 enum rtx_code code,
37845 bool complement_final_p,
37846 bool complement_op1_p,
37847 bool complement_op2_p)
37848 {
37849 machine_mode mode = GET_MODE (operands[0]);
37850 machine_mode sub_mode;
37851 rtx op0, op1, op2;
37852 int sub_size, regno0, regno1, nregs, i;
37853
37854 /* If this is DImode, use the specialized version that can run before
37855 register allocation. */
37856 if (mode == DImode && !TARGET_POWERPC64)
37857 {
37858 rs6000_split_logical_di (operands, code, complement_final_p,
37859 complement_op1_p, complement_op2_p);
37860 return;
37861 }
37862
37863 op0 = operands[0];
37864 op1 = operands[1];
37865 op2 = (code == NOT) ? NULL_RTX : operands[2];
37866 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
37867 sub_size = GET_MODE_SIZE (sub_mode);
37868 regno0 = REGNO (op0);
37869 regno1 = REGNO (op1);
37870
37871 gcc_assert (reload_completed);
37872 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37873 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
37874
37875 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
37876 gcc_assert (nregs > 1);
37877
37878 if (op2 && REG_P (op2))
37879 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
37880
37881 for (i = 0; i < nregs; i++)
37882 {
37883 int offset = i * sub_size;
37884 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
37885 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
37886 rtx sub_op2 = ((code == NOT)
37887 ? NULL_RTX
37888 : simplify_subreg (sub_mode, op2, mode, offset));
37889
37890 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
37891 complement_final_p, complement_op1_p,
37892 complement_op2_p);
37893 }
37894
37895 return;
37896 }
37897
37898 \f
37899 /* Return true if the peephole2 can combine an addis instruction and a load
37900 with an offset based on the addis result, so that the two can be fused
37901 together on a power8. */
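/* The candidate sequence looks like this illustrative assembly:

     addis rX,rBASE,sym@ha    ; sets ADDIS_REG from ADDIS_VALUE
     lwz   rY,sym@l(rX)       ; D-form load using ADDIS_REG as base

   which a power8 can fuse when the two insns are adjacent. */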
37902
37903 bool
37904 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
37905 rtx addis_value, /* addis value. */
37906 rtx target, /* target register that is loaded. */
37907 rtx mem) /* bottom part of the memory addr. */
37908 {
37909 rtx addr;
37910 rtx base_reg;
37911
37912 /* Validate arguments. */
37913 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
37914 return false;
37915
37916 if (!base_reg_operand (target, GET_MODE (target)))
37917 return false;
37918
37919 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
37920 return false;
37921
37922 /* Allow sign/zero extension. */
37923 if (GET_CODE (mem) == ZERO_EXTEND
37924 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
37925 mem = XEXP (mem, 0);
37926
37927 if (!MEM_P (mem))
37928 return false;
37929
37930 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
37931 return false;
37932
37933 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
37934 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
37935 return false;
37936
37937 /* Validate that the register used to load the high value is either the
37938 register being loaded, or we can safely replace its use.
37939
37940 This function is only called from the peephole2 pass and we assume that
37941 there are 2 instructions in the peephole (addis and load), so we want to
37942 check if the target register was not used in the memory address and the
37943 register to hold the addis result is dead after the peephole. */
37944 if (REGNO (addis_reg) != REGNO (target))
37945 {
37946 if (reg_mentioned_p (target, mem))
37947 return false;
37948
37949 if (!peep2_reg_dead_p (2, addis_reg))
37950 return false;
37951
37952 /* If the target register being loaded is the stack pointer, we must
37953 avoid loading any other value into it, even temporarily. */
37954 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
37955 return false;
37956 }
37957
37958 base_reg = XEXP (addr, 0);
37959 return REGNO (addis_reg) == REGNO (base_reg);
37960 }
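
/* As an illustrative sketch (register numbers and the symbol are hypothetical),
   the peephole2 guarded by this predicate rewrites

	addis 9,2,sym@toc@ha
	lwz 10,sym@toc@l(9)

   into the power8-fusible pair

	addis 10,2,sym@toc@ha
	lwz 10,sym@toc@l(10)

   where the addis now targets the loaded register and r9 goes dead.  */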
37961
37962 /* During the peephole2 pass, adjust and expand the insns for a load fusion
37963 sequence. We adjust the addis register to use the target register. If the
37964 load sign extends, we change it to a zero extending load followed by an
37965 explicit sign extension, since the fusion only covers zero extending
37966 loads.
37967
37968 The operands are:
37969 operands[0] register set with addis (to be replaced with target)
37970 operands[1] value set via addis
37971 operands[2] target register being loaded
37972 operands[3] D-form memory reference using operands[0]. */
37973
37974 void
37975 expand_fusion_gpr_load (rtx *operands)
37976 {
37977 rtx addis_value = operands[1];
37978 rtx target = operands[2];
37979 rtx orig_mem = operands[3];
37980 rtx new_addr, new_mem, orig_addr, offset;
37981 enum rtx_code plus_or_lo_sum;
37982 machine_mode target_mode = GET_MODE (target);
37983 machine_mode extend_mode = target_mode;
37984 machine_mode ptr_mode = Pmode;
37985 enum rtx_code extend = UNKNOWN;
37986
37987 if (GET_CODE (orig_mem) == ZERO_EXTEND
37988 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
37989 {
37990 extend = GET_CODE (orig_mem);
37991 orig_mem = XEXP (orig_mem, 0);
37992 target_mode = GET_MODE (orig_mem);
37993 }
37994
37995 gcc_assert (MEM_P (orig_mem));
37996
37997 orig_addr = XEXP (orig_mem, 0);
37998 plus_or_lo_sum = GET_CODE (orig_addr);
37999 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38000
38001 offset = XEXP (orig_addr, 1);
38002 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38003 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38004
38005 if (extend != UNKNOWN)
38006 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38007
38008 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38009 UNSPEC_FUSION_GPR);
38010 emit_insn (gen_rtx_SET (target, new_mem));
38011
38012 if (extend == SIGN_EXTEND)
38013 {
38014 int sub_off = ((BYTES_BIG_ENDIAN)
38015 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38016 : 0);
38017 rtx sign_reg
38018 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38019
38020 emit_insn (gen_rtx_SET (target,
38021 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38022 }
38023
38024 return;
38025 }
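
/* A sketch of the sign extending case (registers and the symbol are
   hypothetical): a fused sign extending HImode load becomes a zero extending
   lhz plus an explicit extsh rather than a single lha, because the power8
   fusion only covers the zero extending loads:

	addis 10,2,sym@toc@ha
	lhz 10,sym@toc@l(10)
	extsh 10,10  */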
38026
38027 /* Emit the addis instruction that will be part of a fused instruction
38028 sequence. */
38029
38030 void
38031 emit_fusion_addis (rtx target, rtx addis_value)
38032 {
38033 rtx fuse_ops[10];
38034 const char *addis_str = NULL;
38035
38036 /* Emit the addis instruction. */
38037 fuse_ops[0] = target;
38038 if (satisfies_constraint_L (addis_value))
38039 {
38040 fuse_ops[1] = addis_value;
38041 addis_str = "lis %0,%v1";
38042 }
38043
38044 else if (GET_CODE (addis_value) == PLUS)
38045 {
38046 rtx op0 = XEXP (addis_value, 0);
38047 rtx op1 = XEXP (addis_value, 1);
38048
38049 if (REG_P (op0) && CONST_INT_P (op1)
38050 && satisfies_constraint_L (op1))
38051 {
38052 fuse_ops[1] = op0;
38053 fuse_ops[2] = op1;
38054 addis_str = "addis %0,%1,%v2";
38055 }
38056 }
38057
38058 else if (GET_CODE (addis_value) == HIGH)
38059 {
38060 rtx value = XEXP (addis_value, 0);
38061 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38062 {
38063 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38064 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38065 if (TARGET_ELF)
38066 addis_str = "addis %0,%2,%1@toc@ha";
38067
38068 else if (TARGET_XCOFF)
38069 addis_str = "addis %0,%1@u(%2)";
38070
38071 else
38072 gcc_unreachable ();
38073 }
38074
38075 else if (GET_CODE (value) == PLUS)
38076 {
38077 rtx op0 = XEXP (value, 0);
38078 rtx op1 = XEXP (value, 1);
38079
38080 if (GET_CODE (op0) == UNSPEC
38081 && XINT (op0, 1) == UNSPEC_TOCREL
38082 && CONST_INT_P (op1))
38083 {
38084 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38085 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38086 fuse_ops[3] = op1;
38087 if (TARGET_ELF)
38088 addis_str = "addis %0,%2,%1+%3@toc@ha";
38089
38090 else if (TARGET_XCOFF)
38091 addis_str = "addis %0,%1+%3@u(%2)";
38092
38093 else
38094 gcc_unreachable ();
38095 }
38096 }
38097
38098 else if (satisfies_constraint_L (value))
38099 {
38100 fuse_ops[1] = value;
38101 addis_str = "lis %0,%v1";
38102 }
38103
38104 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38105 {
38106 fuse_ops[1] = value;
38107 addis_str = "lis %0,%1@ha";
38108 }
38109 }
38110
38111 if (!addis_str)
38112 fatal_insn ("Could not generate addis value for fusion", addis_value);
38113
38114 output_asm_insn (addis_str, fuse_ops);
38115 }
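
/* For the satisfies_constraint_L cases above: "L" accepts a signed 16-bit
   constant shifted left by 16 bits, and lis (load immediate shifted) places
   its operand in the upper halfword of the target register.  A sketch with
   illustrative values:

	lis 9,0x1234	# r9 = 0x12340000  */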
38116
38117 /* Emit a D-form load or store instruction that is the second instruction
38118 of a fusion sequence. */
38119
38120 void
38121 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
38122 const char *insn_str)
38123 {
38124 rtx fuse_ops[10];
38125 char insn_template[80];
38126
38127 fuse_ops[0] = load_store_reg;
38128 fuse_ops[1] = addis_reg;
38129
38130 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38131 {
38132 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38133 fuse_ops[2] = offset;
38134 output_asm_insn (insn_template, fuse_ops);
38135 }
38136
38137 else if (GET_CODE (offset) == UNSPEC
38138 && XINT (offset, 1) == UNSPEC_TOCREL)
38139 {
38140 if (TARGET_ELF)
38141 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38142
38143 else if (TARGET_XCOFF)
38144 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38145
38146 else
38147 gcc_unreachable ();
38148
38149 fuse_ops[2] = XVECEXP (offset, 0, 0);
38150 output_asm_insn (insn_template, fuse_ops);
38151 }
38152
38153 else if (GET_CODE (offset) == PLUS
38154 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38155 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38156 && CONST_INT_P (XEXP (offset, 1)))
38157 {
38158 rtx tocrel_unspec = XEXP (offset, 0);
38159 if (TARGET_ELF)
38160 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38161
38162 else if (TARGET_XCOFF)
38163 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38164
38165 else
38166 gcc_unreachable ();
38167
38168 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38169 fuse_ops[3] = XEXP (offset, 1);
38170 output_asm_insn (insn_template, fuse_ops);
38171 }
38172
38173 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38174 {
38175 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38176
38177 fuse_ops[2] = offset;
38178 output_asm_insn (insn_template, fuse_ops);
38179 }
38180
38181 else
38182 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38183
38184 return;
38185 }
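
/* For example (registers and the symbol are hypothetical), with insn_str "lwz"
   and a TOC-relative UNSPEC offset on an ELF target, the code above builds the
   template "lwz %0,%2@toc@l(%1)" and the output resembles

	lwz 10,sym@toc@l(10)  */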
38186
38187 /* Given an address, convert it into the addis and load offset parts. Addresses
38188 created during the peephole2 process look like:
38189 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38190 (unspec [(...)] UNSPEC_TOCREL)) */
38191
38192 static void
38193 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38194 {
38195 rtx hi, lo;
38196
38197 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38198 {
38199 hi = XEXP (addr, 0);
38200 lo = XEXP (addr, 1);
38201 }
38202 else
38203 gcc_unreachable ();
38204
38205 *p_hi = hi;
38206 *p_lo = lo;
38207 }
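
/* E.g., for the address

	(lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
		(unspec [(sym)] UNSPEC_TOCREL))

   *p_hi receives the HIGH term (the addis part) and *p_lo receives the low
   UNSPEC (the D-form offset).  */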
38208
38209 /* Return a string to fuse an addis instruction with a GPR load into the same
38210 register that the addis instruction set. The address that is used
38211 is the logical address that was formed during peephole2:
38212 (lo_sum (high) (low-part))
38213
38214 The code is complicated, so we call output_asm_insn directly, and just
38215 return "". */
38216
38217 const char *
38218 emit_fusion_gpr_load (rtx target, rtx mem)
38219 {
38220 rtx addis_value;
38221 rtx addr;
38222 rtx load_offset;
38223 const char *load_str = NULL;
38224 machine_mode mode;
38225
38226 if (GET_CODE (mem) == ZERO_EXTEND)
38227 mem = XEXP (mem, 0);
38228
38229 gcc_assert (REG_P (target) && MEM_P (mem));
38230
38231 addr = XEXP (mem, 0);
38232 fusion_split_address (addr, &addis_value, &load_offset);
38233
38234 /* Now emit the load instruction to the same register. */
38235 mode = GET_MODE (mem);
38236 switch (mode)
38237 {
38238 case E_QImode:
38239 load_str = "lbz";
38240 break;
38241
38242 case E_HImode:
38243 load_str = "lhz";
38244 break;
38245
38246 case E_SImode:
38247 case E_SFmode:
38248 load_str = "lwz";
38249 break;
38250
38251 case E_DImode:
38252 case E_DFmode:
38253 gcc_assert (TARGET_POWERPC64);
38254 load_str = "ld";
38255 break;
38256
38257 default:
38258 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38259 }
38260
38261 /* Emit the addis instruction. */
38262 emit_fusion_addis (target, addis_value);
38263
38264 /* Emit the D-form load instruction. */
38265 emit_fusion_load_store (target, target, load_offset, load_str);
38266
38267 return "";
38268 }
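
/* Usage sketch: for a QImode load the emitted pair is addis plus "lbz", for
   SImode or SFmode it is addis plus "lwz", and DImode or DFmode use "ld",
   which requires a 64-bit target as asserted above.  */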
38269 \f
38270
38271 /* Return true if the peephole2 pass can combine an addis instruction with a
38272 load or store so that the two can be fused together. This form of fusion
38273 was added in the ISA 3.0 (power9) hardware. */
38274
38275 bool
38276 fusion_p9_p (rtx addis_reg, /* register set via addis. */
38277 rtx addis_value, /* addis value. */
38278 rtx dest, /* destination (memory or register). */
38279 rtx src) /* source (register or memory). */
38280 {
38281 rtx addr, mem, offset;
38282 machine_mode mode = GET_MODE (src);
38283
38284 /* Validate arguments. */
38285 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38286 return false;
38287
38288 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38289 return false;
38290
38291 /* Ignore extend operations that are part of the load. */
38292 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
38293 src = XEXP (src, 0);
38294
38295 /* Test for memory<-register or register<-memory. */
38296 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
38297 {
38298 if (!MEM_P (dest))
38299 return false;
38300
38301 mem = dest;
38302 }
38303
38304 else if (MEM_P (src))
38305 {
38306 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
38307 return false;
38308
38309 mem = src;
38310 }
38311
38312 else
38313 return false;
38314
38315 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38316 if (GET_CODE (addr) == PLUS)
38317 {
38318 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38319 return false;
38320
38321 return satisfies_constraint_I (XEXP (addr, 1));
38322 }
38323
38324 else if (GET_CODE (addr) == LO_SUM)
38325 {
38326 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
38327 return false;
38328
38329 offset = XEXP (addr, 1);
38330 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
38331 return small_toc_ref (offset, GET_MODE (offset));
38332
38333 else if (TARGET_ELF && !TARGET_POWERPC64)
38334 return CONSTANT_P (offset);
38335 }
38336
38337 return false;
38338 }
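
/* Two address shapes pass the checks above (a sketch; "sym" is hypothetical):
   a D-form offset from the addis register,

	(plus (reg addis) (const_int 8))

   where the constant satisfies the "I" (signed 16-bit) constraint, and a
   TOC-style low part,

	(lo_sum (reg addis) (unspec [(sym)] UNSPEC_TOCREL))

   subject to the small_toc_ref / CONSTANT_P tests for the target.  */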
38339
38340 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38341 load sequence.
38342
38343 The operands are:
38344 operands[0] register set with addis
38345 operands[1] value set via addis
38346 operands[2] target register being loaded
38347 operands[3] D-form memory reference using operands[0].
38348
38349 This is similar to the fusion introduced with power8, except that it applies
38350 to both loads and stores and does not require the result register to be the
38351 same as the base register. At the moment, we only do this if the register
38352 set with addis is dead. */
38353
38354 void
38355 expand_fusion_p9_load (rtx *operands)
38356 {
38357 rtx tmp_reg = operands[0];
38358 rtx addis_value = operands[1];
38359 rtx target = operands[2];
38360 rtx orig_mem = operands[3];
38361 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
38362 enum rtx_code plus_or_lo_sum;
38363 machine_mode target_mode = GET_MODE (target);
38364 machine_mode extend_mode = target_mode;
38365 machine_mode ptr_mode = Pmode;
38366 enum rtx_code extend = UNKNOWN;
38367
38368 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
38369 {
38370 extend = GET_CODE (orig_mem);
38371 orig_mem = XEXP (orig_mem, 0);
38372 target_mode = GET_MODE (orig_mem);
38373 }
38374
38375 gcc_assert (MEM_P (orig_mem));
38376
38377 orig_addr = XEXP (orig_mem, 0);
38378 plus_or_lo_sum = GET_CODE (orig_addr);
38379 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38380
38381 offset = XEXP (orig_addr, 1);
38382 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38383 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38384
38385 if (extend != UNKNOWN)
38386 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
38387
38388 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38389 UNSPEC_FUSION_P9);
38390
38391 set = gen_rtx_SET (target, new_mem);
38392 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38393 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38394 emit_insn (insn);
38395
38396 return;
38397 }
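
/* A sketch of the RTL this emits (modes, registers, and the address are
   illustrative):

	(parallel [(set (reg:DI 10)
			(unspec:DI [(mem:DI (lo_sum (high ...) (...)))]
				   UNSPEC_FUSION_P9))
		   (clobber (reg:DI 9))])

   The clobbered scratch register is what allows the loaded register to differ
   from the addis base register; the store expander below emits the analogous
   PARALLEL with the UNSPEC wrapping the stored source.  */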
38398
38399 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
38400 store sequence.
38401
38402 The operands are:
38403 operands[0] register set with addis
38404 operands[1] value set via addis
38405 operands[2] target D-form memory being stored to
38406 operands[3] register being stored
38407
38408 This is similar to the fusion introduced with power8, except that it applies
38409 to both loads and stores and does not require the result register to be the
38410 same as the base register. At the moment, we only do this if the register
38411 set with addis is dead. */
38412
38413 void
38414 expand_fusion_p9_store (rtx *operands)
38415 {
38416 rtx tmp_reg = operands[0];
38417 rtx addis_value = operands[1];
38418 rtx orig_mem = operands[2];
38419 rtx src = operands[3];
38420 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
38421 enum rtx_code plus_or_lo_sum;
38422 machine_mode target_mode = GET_MODE (orig_mem);
38423 machine_mode ptr_mode = Pmode;
38424
38425 gcc_assert (MEM_P (orig_mem));
38426
38427 orig_addr = XEXP (orig_mem, 0);
38428 plus_or_lo_sum = GET_CODE (orig_addr);
38429 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38430
38431 offset = XEXP (orig_addr, 1);
38432 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38433 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38434
38435 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
38436 UNSPEC_FUSION_P9);
38437
38438 set = gen_rtx_SET (new_mem, new_src);
38439 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
38440 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
38441 emit_insn (insn);
38442
38443 return;
38444 }
38445
38446 /* Return a string to fuse an addis instruction with a load using extended
38447 fusion. The address that is used is the logical address that was formed
38448 during peephole2: (lo_sum (high) (low-part))
38449
38450 The code is complicated, so we call output_asm_insn directly, and just
38451 return "". */
38452
38453 const char *
38454 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
38455 {
38456 machine_mode mode = GET_MODE (reg);
38457 rtx hi;
38458 rtx lo;
38459 rtx addr;
38460 const char *load_string;
38461 int r;
38462
38463 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
38464 {
38465 mem = XEXP (mem, 0);
38466 mode = GET_MODE (mem);
38467 }
38468
38469 if (GET_CODE (reg) == SUBREG)
38470 {
38471 gcc_assert (SUBREG_BYTE (reg) == 0);
38472 reg = SUBREG_REG (reg);
38473 }
38474
38475 if (!REG_P (reg))
38476 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
38477
38478 r = REGNO (reg);
38479 if (FP_REGNO_P (r))
38480 {
38481 if (mode == SFmode)
38482 load_string = "lfs";
38483 else if (mode == DFmode || mode == DImode)
38484 load_string = "lfd";
38485 else
38486 gcc_unreachable ();
38487 }
38488 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38489 {
38490 if (mode == SFmode)
38491 load_string = "lxssp";
38492 else if (mode == DFmode || mode == DImode)
38493 load_string = "lxsd";
38494 else
38495 gcc_unreachable ();
38496 }
38497 else if (INT_REGNO_P (r))
38498 {
38499 switch (mode)
38500 {
38501 case E_QImode:
38502 load_string = "lbz";
38503 break;
38504 case E_HImode:
38505 load_string = "lhz";
38506 break;
38507 case E_SImode:
38508 case E_SFmode:
38509 load_string = "lwz";
38510 break;
38511 case E_DImode:
38512 case E_DFmode:
38513 if (!TARGET_POWERPC64)
38514 gcc_unreachable ();
38515 load_string = "ld";
38516 break;
38517 default:
38518 gcc_unreachable ();
38519 }
38520 }
38521 else
38522 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
38523
38524 if (!MEM_P (mem))
38525 fatal_insn ("emit_fusion_p9_load not MEM", mem);
38526
38527 addr = XEXP (mem, 0);
38528 fusion_split_address (addr, &hi, &lo);
38529
38530 /* Emit the addis instruction. */
38531 emit_fusion_addis (tmp_reg, hi);
38532
38533 /* Emit the D-form load instruction. */
38534 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
38535
38536 return "";
38537 }
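
/* A sketch of the final output for a DFmode load into an FPR (registers and
   the symbol are hypothetical):

	addis 9,2,sym@toc@ha
	lfd 1,sym@toc@l(9)

   with r9 being the scratch register from the peephole; the store variant
   below emits an analogous addis plus stfd/stw/std sequence.  */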
38538
38539 /* Return a string to fuse an addis instruction with a store using extended
38540 fusion. The address that is used is the logical address that was formed
38541 during peephole2: (lo_sum (high) (low-part))
38542
38543 The code is complicated, so we call output_asm_insn directly, and just
38544 return "". */
38545
38546 const char *
38547 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
38548 {
38549 machine_mode mode = GET_MODE (reg);
38550 rtx hi;
38551 rtx lo;
38552 rtx addr;
38553 const char *store_string;
38554 int r;
38555
38556 if (GET_CODE (reg) == SUBREG)
38557 {
38558 gcc_assert (SUBREG_BYTE (reg) == 0);
38559 reg = SUBREG_REG (reg);
38560 }
38561
38562 if (!REG_P (reg))
38563 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
38564
38565 r = REGNO (reg);
38566 if (FP_REGNO_P (r))
38567 {
38568 if (mode == SFmode)
38569 store_string = "stfs";
38570 else if (mode == DFmode)
38571 store_string = "stfd";
38572 else
38573 gcc_unreachable ();
38574 }
38575 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
38576 {
38577 if (mode == SFmode)
38578 store_string = "stxssp";
38579 else if (mode == DFmode || mode == DImode)
38580 store_string = "stxsd";
38581 else
38582 gcc_unreachable ();
38583 }
38584 else if (INT_REGNO_P (r))
38585 {
38586 switch (mode)
38587 {
38588 case E_QImode:
38589 store_string = "stb";
38590 break;
38591 case E_HImode:
38592 store_string = "sth";
38593 break;
38594 case E_SImode:
38595 case E_SFmode:
38596 store_string = "stw";
38597 break;
38598 case E_DImode:
38599 case E_DFmode:
38600 if (!TARGET_POWERPC64)
38601 gcc_unreachable ();
38602 store_string = "std";
38603 break;
38604 default:
38605 gcc_unreachable ();
38606 }
38607 }
38608 else
38609 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
38610
38611 if (!MEM_P (mem))
38612 fatal_insn ("emit_fusion_p9_store not MEM", mem);
38613
38614 addr = XEXP (mem, 0);
38615 fusion_split_address (addr, &hi, &lo);
38616
38617 /* Emit the addis instruction. */
38618 emit_fusion_addis (tmp_reg, hi);
38619
38620 /* Emit the D-form store instruction. */
38621 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
38622
38623 return "";
38624 }
38625
38626 #ifdef RS6000_GLIBC_ATOMIC_FENV
38627 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38628 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38629 #endif
38630
38631 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38632
38633 static void
38634 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38635 {
38636 if (!TARGET_HARD_FLOAT)
38637 {
38638 #ifdef RS6000_GLIBC_ATOMIC_FENV
38639 if (atomic_hold_decl == NULL_TREE)
38640 {
38641 atomic_hold_decl
38642 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38643 get_identifier ("__atomic_feholdexcept"),
38644 build_function_type_list (void_type_node,
38645 double_ptr_type_node,
38646 NULL_TREE));
38647 TREE_PUBLIC (atomic_hold_decl) = 1;
38648 DECL_EXTERNAL (atomic_hold_decl) = 1;
38649 }
38650
38651 if (atomic_clear_decl == NULL_TREE)
38652 {
38653 atomic_clear_decl
38654 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38655 get_identifier ("__atomic_feclearexcept"),
38656 build_function_type_list (void_type_node,
38657 NULL_TREE));
38658 TREE_PUBLIC (atomic_clear_decl) = 1;
38659 DECL_EXTERNAL (atomic_clear_decl) = 1;
38660 }
38661
38662 tree const_double = build_qualified_type (double_type_node,
38663 TYPE_QUAL_CONST);
38664 tree const_double_ptr = build_pointer_type (const_double);
38665 if (atomic_update_decl == NULL_TREE)
38666 {
38667 atomic_update_decl
38668 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38669 get_identifier ("__atomic_feupdateenv"),
38670 build_function_type_list (void_type_node,
38671 const_double_ptr,
38672 NULL_TREE));
38673 TREE_PUBLIC (atomic_update_decl) = 1;
38674 DECL_EXTERNAL (atomic_update_decl) = 1;
38675 }
38676
38677 tree fenv_var = create_tmp_var_raw (double_type_node);
38678 TREE_ADDRESSABLE (fenv_var) = 1;
38679 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38680
38681 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38682 *clear = build_call_expr (atomic_clear_decl, 0);
38683 *update = build_call_expr (atomic_update_decl, 1,
38684 fold_convert (const_double_ptr, fenv_addr));
38685 #endif
38686 return;
38687 }
38688
38689 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38690 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38691 tree call_mffs = build_call_expr (mffs, 0);
38692
38693 /* Generates the equivalent of feholdexcept (&fenv_var):
38694
38695 fenv_var = __builtin_mffs ();
38696 double fenv_hold;
38697 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
38698 __builtin_mtfsf (0xff, fenv_hold); */
38699
38700 /* Mask to clear everything except for the rounding modes and non-IEEE
38701 arithmetic flag. */
38702 const unsigned HOST_WIDE_INT hold_exception_mask =
38703 HOST_WIDE_INT_C (0xffffffff00000007);
38704
38705 tree fenv_var = create_tmp_var_raw (double_type_node);
38706
38707 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38708
38709 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38710 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38711 build_int_cst (uint64_type_node,
38712 hold_exception_mask));
38713
38714 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38715 fenv_llu_and);
38716
38717 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38718 build_int_cst (unsigned_type_node, 0xff),
38719 fenv_hold_mtfsf);
38720
38721 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38722
38723 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38724
38725 double fenv_clear = __builtin_mffs ();
38726 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38727 __builtin_mtfsf (0xff, fenv_clear); */
38728
38729 /* Mask that clears the entire lower word of the FPSCR, i.e. all of the
38730 exception and enable bits, the non-IEEE flag, and the rounding modes. */
38731 const unsigned HOST_WIDE_INT clear_exception_mask =
38732 HOST_WIDE_INT_C (0xffffffff00000000);
38733
38734 tree fenv_clear = create_tmp_var_raw (double_type_node);
38735
38736 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38737
38738 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38739 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38740 fenv_clean_llu,
38741 build_int_cst (uint64_type_node,
38742 clear_exception_mask));
38743
38744 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38745 fenv_clear_llu_and);
38746
38747 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38748 build_int_cst (unsigned_type_node, 0xff),
38749 fenv_clear_mtfsf);
38750
38751 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38752
38753 /* Generates the equivalent of feupdateenv (&fenv_var)
38754
38755 double old_fenv = __builtin_mffs ();
38756 double fenv_update;
38757 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38758 (*(uint64_t*)&fenv_var & 0x1ff80fff);
38759 __builtin_mtfsf (0xff, fenv_update); */
38760
38761 const unsigned HOST_WIDE_INT update_exception_mask =
38762 HOST_WIDE_INT_C (0xffffffff1fffff00);
38763 const unsigned HOST_WIDE_INT new_exception_mask =
38764 HOST_WIDE_INT_C (0x1ff80fff);
38765
38766 tree old_fenv = create_tmp_var_raw (double_type_node);
38767 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38768
38769 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38770 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38771 build_int_cst (uint64_type_node,
38772 update_exception_mask));
38773
38774 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38775 build_int_cst (uint64_type_node,
38776 new_exception_mask));
38777
38778 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38779 old_llu_and, new_llu_and);
38780
38781 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38782 new_llu_mask);
38783
38784 tree update_mtfsf = build_call_expr (mtfsf, 2,
38785 build_int_cst (unsigned_type_node, 0xff),
38786 fenv_update_mtfsf);
38787
38788 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38789 }
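
/* A sketch of how the middle end uses the three sequences built above for an
   atomic floating point compound assignment (the variable names are
   illustrative): for

	_Atomic double d; ... d += x;

   HOLD is emitted before the compare-and-swap loop, CLEAR on each failed
   iteration, and UPDATE once the store succeeds, so exception flags raised by
   discarded iterations are not observable.  */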
38790
38791 void
38792 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
38793 {
38794 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38795
38796 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38797 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38798
38799 /* The layout of the destination of the vmrgew instruction is:
38800 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38801 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38802 vmrgew instruction will be correct. */
38803 if (BYTES_BIG_ENDIAN)
38804 {
38805 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
38806 GEN_INT (0)));
38807 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
38808 GEN_INT (3)));
38809 }
38810 else
38811 {
38812 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
38813 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
38814 }
38815
38816 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38817 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38818
38819 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
38820 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
38821
38822 if (BYTES_BIG_ENDIAN)
38823 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38824 else
38825 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38826 }
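
/* A worked example of the element ordering (values are illustrative): for
   src1 = { a, b } and src2 = { c, d }, the xxpermdi/xvcvdpsp/vmrgew sequence
   above yields

	dst = { (float) a, (float) b, (float) c, (float) d }

   and the integer variants below are arranged to produce the same element
   order.  */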
38827
38828 void
38829 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
38830 {
38831 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38832
38833 rtx_tmp0 = gen_reg_rtx (V2DImode);
38834 rtx_tmp1 = gen_reg_rtx (V2DImode);
38835
38836 /* The layout of the destination of the vmrgew instruction is:
38837 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
38838 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
38839 vmrgew instruction will be correct. */
38840 if (BYTES_BIG_ENDIAN)
38841 {
38842 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
38843 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
38844 }
38845 else
38846 {
38847 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
38848 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
38849 }
38850
38851 rtx_tmp2 = gen_reg_rtx (V4SFmode);
38852 rtx_tmp3 = gen_reg_rtx (V4SFmode);
38853
38854 if (signed_convert)
38855 {
38856 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
38857 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
38858 }
38859 else
38860 {
38861 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
38862 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
38863 }
38864
38865 if (BYTES_BIG_ENDIAN)
38866 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
38867 else
38868 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
38869 }
38870
38871 void
38872 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
38873 rtx src2)
38874 {
38875 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38876
38877 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38878 rtx_tmp1 = gen_reg_rtx (V2DFmode);
38879
38880 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
38881 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
38882
38883 rtx_tmp2 = gen_reg_rtx (V4SImode);
38884 rtx_tmp3 = gen_reg_rtx (V4SImode);
38885
38886 if (signed_convert)
38887 {
38888 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
38889 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
38890 }
38891 else
38892 {
38893 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
38894 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
38895 }
38896
38897 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
38898 }
38899
38900 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
38901
38902 static bool
38903 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
38904 optimization_type opt_type)
38905 {
38906 switch (op)
38907 {
38908 case rsqrt_optab:
38909 return (opt_type == OPTIMIZE_FOR_SPEED
38910 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
38911
38912 default:
38913 return true;
38914 }
38915 }
38916
38917 /* Implement TARGET_CONSTANT_ALIGNMENT. */
38918
38919 static HOST_WIDE_INT
38920 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
38921 {
38922 if (TREE_CODE (exp) == STRING_CST
38923 && (STRICT_ALIGNMENT || !optimize_size))
38924 return MAX (align, BITS_PER_WORD);
38925 return align;
38926 }
38927
38928 /* Implement TARGET_STARTING_FRAME_OFFSET. */
38929
38930 static HOST_WIDE_INT
38931 rs6000_starting_frame_offset (void)
38932 {
38933 if (FRAME_GROWS_DOWNWARD)
38934 return 0;
38935 return RS6000_STARTING_FRAME_OFFSET;
38936 }
38937 \f
38938
38939 /* Create an alias for a mangled name where we have changed the mangling (in
38940 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
38941 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
38942
38943 #if TARGET_ELF && RS6000_WEAK
38944 static void
38945 rs6000_globalize_decl_name (FILE * stream, tree decl)
38946 {
38947 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
38948
38949 targetm.asm_out.globalize_label (stream, name);
38950
38951 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
38952 {
38953 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
38954 const char *old_name;
38955
38956 ieee128_mangling_gcc_8_1 = true;
38957 lang_hooks.set_decl_assembler_name (decl);
38958 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
38959 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
38960 ieee128_mangling_gcc_8_1 = false;
38961
38962 if (strcmp (name, old_name) != 0)
38963 {
38964 fprintf (stream, "\t.weak %s\n", old_name);
38965 fprintf (stream, "\t.set %s,%s\n", old_name, name);
38966 }
38967 }
38968 }
38969 #endif
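
/* A sketch of the emitted alias (the function name is hypothetical): when the
   GCC 8.1 mangling of a long double signature differs from the current one,
   the old name becomes a weak alias of the new name:

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128  */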
38970
38971 \f
38972 /* On 64-bit Linux and Freebsd systems, possibly switch the long double library
38973 function names from <foo>l to <foo>f128 if the default long double type is
38974 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
38975 include file switches the names on systems that support long double as IEEE
38976 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
38977 In the future, glibc will export names like __ieee128_sinf128 and we can
38978 switch to using those instead of using sinf128, which pollutes the user's
38979 namespace.
38980
38981 This also switches the names for the Fortran math functions (Fortran does
38982 not use math.h). However, Fortran needs other changes to the compiler and
38983 library before the real*16 type can be switched at compile time.
38984
38985 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
38986 only do this if the default is that long double is IBM extended double, and
38987 the user asked for IEEE 128-bit. */
38988
38989 static tree
38990 rs6000_mangle_decl_assembler_name (tree decl, tree id)
38991 {
38992 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
38993 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
38994 {
38995 size_t len = IDENTIFIER_LENGTH (id);
38996 const char *name = IDENTIFIER_POINTER (id);
38997
38998 if (name[len - 1] == 'l')
38999 {
39000 bool uses_ieee128_p = false;
39001 tree type = TREE_TYPE (decl);
39002 machine_mode ret_mode = TYPE_MODE (type);
39003
39004 /* See if the function returns an IEEE 128-bit floating point type or
39005 complex type. */
39006 if (ret_mode == TFmode || ret_mode == TCmode)
39007 uses_ieee128_p = true;
39008 else
39009 {
39010 function_args_iterator args_iter;
39011 tree arg;
39012
39013 /* See if the function passes an IEEE 128-bit floating point type
39014 or complex type. */
39015 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39016 {
39017 machine_mode arg_mode = TYPE_MODE (arg);
39018 if (arg_mode == TFmode || arg_mode == TCmode)
39019 {
39020 uses_ieee128_p = true;
39021 break;
39022 }
39023 }
39024 }
39025
39026 /* If we passed or returned an IEEE 128-bit floating point type,
39027 change the name. */
39028 if (uses_ieee128_p)
39029 {
39030 char *name2 = (char *) alloca (len + 4);
39031 memcpy (name2, name, len - 1);
39032 strcpy (name2 + len - 1, "f128");
39033 id = get_identifier (name2);
39034 }
39035 }
39036 }
39037
39038 return id;
39039 }
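
/* For example (a sketch): when the default long double is IBM extended double
   but -mabi=ieeelongdouble is given, __builtin_sinl, whose return type is then
   TFmode, is renamed so that its assembler name is "sinf128" instead of
   "sinl".  */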
39040
39041 \f
39042 struct gcc_target targetm = TARGET_INITIALIZER;
39043
39044 #include "gt-rs6000.h"