1 /* Subroutines used for code generation on IBM RS/6000.
2 Copyright (C) 1991-2019 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "memmodel.h"
30 #include "gimple.h"
31 #include "cfghooks.h"
32 #include "cfgloop.h"
33 #include "df.h"
34 #include "tm_p.h"
35 #include "stringpool.h"
36 #include "expmed.h"
37 #include "optabs.h"
38 #include "regs.h"
39 #include "ira.h"
40 #include "recog.h"
41 #include "cgraph.h"
42 #include "diagnostic-core.h"
43 #include "insn-attr.h"
44 #include "flags.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "attribs.h"
48 #include "stor-layout.h"
49 #include "calls.h"
50 #include "print-tree.h"
51 #include "varasm.h"
52 #include "explow.h"
53 #include "expr.h"
54 #include "output.h"
55 #include "dbxout.h"
56 #include "common/common-target.h"
57 #include "langhooks.h"
58 #include "reload.h"
59 #include "sched-int.h"
60 #include "gimplify.h"
61 #include "gimple-fold.h"
62 #include "gimple-iterator.h"
63 #include "gimple-ssa.h"
64 #include "gimple-walk.h"
65 #include "intl.h"
66 #include "params.h"
67 #include "tm-constrs.h"
68 #include "tree-vectorizer.h"
69 #include "target-globals.h"
70 #include "builtins.h"
71 #include "tree-vector-builder.h"
72 #include "context.h"
73 #include "tree-pass.h"
74 #include "except.h"
75 #if TARGET_XCOFF
76 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
77 #endif
78 #if TARGET_MACHO
79 #include "gstab.h" /* for N_SLINE */
80 #endif
81 #include "case-cfn-macros.h"
82 #include "ppc-auxv.h"
83 #include "tree-ssa-propagate.h"
84
85 /* This file should be included last. */
86 #include "target-def.h"
87
88 #ifndef TARGET_NO_PROTOTYPE
89 #define TARGET_NO_PROTOTYPE 0
90 #endif
91
92 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
93 systems will also set long double to be IEEE 128-bit. AIX and Darwin
94 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
95 those systems will not pick up this default. This needs to be after all
96 of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
97 properly defined. */
98 #ifndef TARGET_IEEEQUAD_DEFAULT
99 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
100 #define TARGET_IEEEQUAD_DEFAULT 1
101 #else
102 #define TARGET_IEEEQUAD_DEFAULT 0
103 #endif
104 #endif
105
106 static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
107
108 /* Structure used to define the rs6000 stack.  */
109 typedef struct rs6000_stack {
110 int reload_completed; /* stack info won't change from here on */
111 int first_gp_reg_save; /* first callee saved GP register used */
112 int first_fp_reg_save; /* first callee saved FP register used */
113 int first_altivec_reg_save; /* first callee saved AltiVec register used */
114 int lr_save_p; /* true if the link reg needs to be saved */
115 int cr_save_p; /* true if the CR reg needs to be saved */
116 unsigned int vrsave_mask; /* mask of vec registers to save */
117 int push_p; /* true if we need to allocate stack space */
118 int calls_p; /* true if the function makes any calls */
119 int world_save_p; /* true if we're saving *everything*:
120 r13-r31, cr, f14-f31, vrsave, v20-v31 */
121 enum rs6000_abi abi; /* which ABI to use */
122 int gp_save_offset; /* offset to save GP regs from initial SP */
123 int fp_save_offset; /* offset to save FP regs from initial SP */
124 int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
125 int lr_save_offset; /* offset to save LR from initial SP */
126 int cr_save_offset; /* offset to save CR from initial SP */
127 int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
128 int varargs_save_offset; /* offset to save the varargs registers */
129 int ehrd_offset; /* offset to EH return data */
130 int ehcr_offset; /* offset to EH CR field data */
131 int reg_size; /* register size (4 or 8) */
132 HOST_WIDE_INT vars_size; /* variable save area size */
133 int parm_size; /* outgoing parameter size */
134 int save_size; /* save area size */
135 int fixed_size; /* fixed size of stack frame */
136 int gp_size; /* size of saved GP registers */
137 int fp_size; /* size of saved FP registers */
138 int altivec_size; /* size of saved AltiVec registers */
139 int cr_size; /* size to hold CR if not in fixed area */
140 int vrsave_size; /* size to hold VRSAVE */
141 int altivec_padding_size; /* size of altivec alignment padding */
142 HOST_WIDE_INT total_size; /* total bytes allocated for stack */
143 int savres_strategy;
144 } rs6000_stack_t;
145
146 /* A C structure for machine-specific, per-function data.
147 This is added to the cfun structure. */
148 typedef struct GTY(()) machine_function
149 {
150 /* Flags if __builtin_return_address (n) with n >= 1 was used. */
151 int ra_needs_full_frame;
152 /* Flags if __builtin_return_address (0) was used. */
153 int ra_need_lr;
154 /* Cache lr_save_p after expansion of builtin_eh_return. */
155 int lr_save_state;
156 /* Whether we need to save the TOC to the reserved stack location in the
157 function prologue. */
158 bool save_toc_in_prologue;
159 /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
160 varargs save area. */
161 HOST_WIDE_INT varargs_save_offset;
162 /* Alternative internal arg pointer for -fsplit-stack. */
163 rtx split_stack_arg_pointer;
164 bool split_stack_argp_used;
165 /* Flag if r2 setup is needed with ELFv2 ABI. */
166 bool r2_setup_needed;
167 /* The number of components we use for separate shrink-wrapping. */
168 int n_components;
169 /* The components already handled by separate shrink-wrapping, which should
170 not be considered by the prologue and epilogue. */
171 bool gpr_is_wrapped_separately[32];
172 bool fpr_is_wrapped_separately[32];
173 bool lr_is_wrapped_separately;
174 bool toc_is_wrapped_separately;
175 } machine_function;
176
177 /* Support targetm.vectorize.builtin_mask_for_load. */
178 static GTY(()) tree altivec_builtin_mask_for_load;
179
180 /* Set to nonzero once AIX common-mode calls have been defined. */
181 static GTY(()) int common_mode_defined;
182
183 /* Label number of the label created for -mrelocatable; we call to it so
184    we can get the address of the GOT section.  */
185 static int rs6000_pic_labelno;
186
187 #ifdef USING_ELFOS_H
188 /* Counter for labels which are to be placed in .fixup. */
189 int fixuplabelno = 0;
190 #endif
191
192 /* Whether to use variant of AIX ABI for PowerPC64 Linux. */
193 int dot_symbols;
194
195 /* Specify the machine mode that pointers have. After generation of rtl, the
196 compiler makes no further distinction between pointers and any other objects
197 of this machine mode. */
198 scalar_int_mode rs6000_pmode;
199
200 #if TARGET_ELF
201 /* Note whether IEEE 128-bit floating point was passed or returned, either as
202 the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
203 floating point. We changed the default C++ mangling for these types and we
204 may want to generate a weak alias of the old mangling (U10__float128) to the
205 new mangling (u9__ieee128). */
206 static bool rs6000_passes_ieee128;
207 #endif
208
209 /* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
210    name used in current releases (i.e. u9__ieee128).  */
211 static bool ieee128_mangling_gcc_8_1;
212
213 /* Width in bits of a pointer. */
214 unsigned rs6000_pointer_size;
215
216 #ifdef HAVE_AS_GNU_ATTRIBUTE
217 # ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
218 # define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
219 # endif
220 /* Flag whether floating point values have been passed/returned.
221 Note that this doesn't say whether fprs are used, since the
222 Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
223 should be set for soft-float values passed in gprs and ieee128
224 values passed in vsx registers. */
225 static bool rs6000_passes_float;
226 static bool rs6000_passes_long_double;
227 /* Flag whether vector values have been passed/returned. */
228 static bool rs6000_passes_vector;
229 /* Flag whether small (<= 8 byte) structures have been returned. */
230 static bool rs6000_returns_struct;
231 #endif
232
233 /* Value is TRUE if register/mode pair is acceptable. */
234 static bool rs6000_hard_regno_mode_ok_p
235 [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
236
237 /* Maximum number of registers needed for a given register class and mode. */
238 unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];
239
240 /* How many registers are needed for a given register and mode. */
241 unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
242
243 /* Map register number to register class. */
244 enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];
245
246 static int dbg_cost_ctrl;
247
248 /* Built in types. */
249 tree rs6000_builtin_types[RS6000_BTI_MAX];
250 tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
251
252 /* Flag to say the TOC is initialized.  */
253 int toc_initialized, need_toc_init;
254 char toc_label_name[10];
255
256 /* Cached value of rs6000_variable_issue.  This is cached in the
257    rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
258 static short cached_can_issue_more;
259
260 static GTY(()) section *read_only_data_section;
261 static GTY(()) section *private_data_section;
262 static GTY(()) section *tls_data_section;
263 static GTY(()) section *tls_private_data_section;
264 static GTY(()) section *read_only_private_data_section;
265 static GTY(()) section *sdata2_section;
266 static GTY(()) section *toc_section;
267
268 struct builtin_description
269 {
270 const HOST_WIDE_INT mask;
271 const enum insn_code icode;
272 const char *const name;
273 const enum rs6000_builtins code;
274 };
275
276 /* Describe the vector unit used for modes. */
277 enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
278 enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
279
280 /* Register classes for various constraints that are based on the target
281 switches. */
282 enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
283
284 /* Describe the alignment of a vector. */
285 int rs6000_vector_align[NUM_MACHINE_MODES];
286
287 /* Map selected modes to types for builtins. */
288 static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];
289
290 /* What modes to automatically generate reciprocal divide estimate (fre) and
291 reciprocal sqrt (frsqrte) for. */
292 unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
293
294 /* Masks to determine which reciprocal estimate instructions to generate
295 automatically. */
296 enum rs6000_recip_mask {
297 RECIP_SF_DIV = 0x001, /* Use divide estimate */
298 RECIP_DF_DIV = 0x002,
299 RECIP_V4SF_DIV = 0x004,
300 RECIP_V2DF_DIV = 0x008,
301
302 RECIP_SF_RSQRT = 0x010, /* Use reciprocal sqrt estimate. */
303 RECIP_DF_RSQRT = 0x020,
304 RECIP_V4SF_RSQRT = 0x040,
305 RECIP_V2DF_RSQRT = 0x080,
306
307    /* Various combinations of flags for -mrecip=xxx.  */
308 RECIP_NONE = 0,
309 RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
310 | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
311 | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),
312
313 RECIP_HIGH_PRECISION = RECIP_ALL,
314
315 /* On low precision machines like the power5, don't enable double precision
316 reciprocal square root estimate, since it isn't accurate enough. */
317 RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
318 };
319
320 /* -mrecip options. */
321 static struct
322 {
323 const char *string; /* option name */
324 unsigned int mask; /* mask bits to set */
325 } recip_options[] = {
326 { "all", RECIP_ALL },
327 { "none", RECIP_NONE },
328 { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
329 | RECIP_V2DF_DIV) },
330 { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
331 { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
332 { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
333 | RECIP_V2DF_RSQRT) },
334 { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
335 { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
336 };
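
/* A hedged usage sketch (not part of the table above): the strings in
   recip_options are consumed by the -mrecip=<opt>[,<opt>...] command-line
   option, and the estimate instructions are only used under unsafe math
   optimizations, so a typical invocation would look like:

       gcc -O3 -ffast-math -mrecip=rsqrtf,divd foo.c

   enabling the single-precision reciprocal square root estimates (scalar
   and V4SF) plus the double-precision divide estimates (scalar and
   V2DF).  */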
337
338 /* Used by __builtin_cpu_is(), mapping from PLATFORM names to values. */
339 static const struct
340 {
341 const char *cpu;
342 unsigned int cpuid;
343 } cpu_is_info[] = {
344 { "power9", PPC_PLATFORM_POWER9 },
345 { "power8", PPC_PLATFORM_POWER8 },
346 { "power7", PPC_PLATFORM_POWER7 },
347 { "power6x", PPC_PLATFORM_POWER6X },
348 { "power6", PPC_PLATFORM_POWER6 },
349 { "power5+", PPC_PLATFORM_POWER5_PLUS },
350 { "power5", PPC_PLATFORM_POWER5 },
351 { "ppc970", PPC_PLATFORM_PPC970 },
352 { "power4", PPC_PLATFORM_POWER4 },
353 { "ppca2", PPC_PLATFORM_PPCA2 },
354 { "ppc476", PPC_PLATFORM_PPC476 },
355 { "ppc464", PPC_PLATFORM_PPC464 },
356 { "ppc440", PPC_PLATFORM_PPC440 },
357 { "ppc405", PPC_PLATFORM_PPC405 },
358 { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
359 };
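
/* A usage sketch for the table above: user code reaches it through
   __builtin_cpu_is, passing one of the platform strings, e.g.

       if (__builtin_cpu_is ("power9"))
         run_power9_kernel ();
       else
         run_generic_kernel ();

   where run_power9_kernel and run_generic_kernel are hypothetical user
   functions.  The builtin compares against the AT_PLATFORM value that
   newer LIBCs cache in the TCB (see tcb_verification_symbol below).  */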
360
361 /* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks. */
362 static const struct
363 {
364 const char *hwcap;
365 int mask;
366 unsigned int id;
367 } cpu_supports_info[] = {
368 /* AT_HWCAP masks. */
369 { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
370 { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
371 { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
372 { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
373 { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
374 { "booke", PPC_FEATURE_BOOKE, 0 },
375 { "cellbe", PPC_FEATURE_CELL_BE, 0 },
376 { "dfp", PPC_FEATURE_HAS_DFP, 0 },
377 { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
378 { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
379 { "fpu", PPC_FEATURE_HAS_FPU, 0 },
380 { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
381 { "mmu", PPC_FEATURE_HAS_MMU, 0 },
382 { "notb", PPC_FEATURE_NO_TB, 0 },
383 { "pa6t", PPC_FEATURE_PA6T, 0 },
384 { "power4", PPC_FEATURE_POWER4, 0 },
385 { "power5", PPC_FEATURE_POWER5, 0 },
386 { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
387 { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
388 { "ppc32", PPC_FEATURE_32, 0 },
389 { "ppc601", PPC_FEATURE_601_INSTR, 0 },
390 { "ppc64", PPC_FEATURE_64, 0 },
391 { "ppcle", PPC_FEATURE_PPC_LE, 0 },
392 { "smt", PPC_FEATURE_SMT, 0 },
393 { "spe", PPC_FEATURE_HAS_SPE, 0 },
394 { "true_le", PPC_FEATURE_TRUE_LE, 0 },
395 { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
396 { "vsx", PPC_FEATURE_HAS_VSX, 0 },
397
398 /* AT_HWCAP2 masks. */
399 { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
400 { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
401 { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
402 { "htm", PPC_FEATURE2_HAS_HTM, 1 },
403 { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
404 { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
405 { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
406 { "tar", PPC_FEATURE2_HAS_TAR, 1 },
407 { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
408 { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
409 { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
410 { "darn", PPC_FEATURE2_DARN, 1 },
411 { "scv", PPC_FEATURE2_SCV, 1 }
412 };
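
/* Likewise, a sketch for __builtin_cpu_supports, which tests the AT_HWCAP
   or AT_HWCAP2 bit named by the string:

       if (__builtin_cpu_supports ("vsx"))
         use_vsx_loop ();
       else
         use_scalar_loop ();

   (use_vsx_loop and use_scalar_loop are hypothetical user functions.)  */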
413
414 /* On PowerPC, we have a limited number of target clones that we care about,
415 which means we can use an array to hold the options, rather than having more
416 elaborate data structures to identify each possible variation. Order the
417 clones from the default to the highest ISA. */
418 enum {
419 CLONE_DEFAULT = 0, /* default clone. */
420 CLONE_ISA_2_05, /* ISA 2.05 (power6). */
421 CLONE_ISA_2_06, /* ISA 2.06 (power7). */
422 CLONE_ISA_2_07, /* ISA 2.07 (power8). */
423 CLONE_ISA_3_00, /* ISA 3.00 (power9). */
424 CLONE_MAX
425 };
426
427 /* Map compiler ISA bits into HWCAP names. */
428 struct clone_map {
429 HOST_WIDE_INT isa_mask; /* rs6000_isa mask */
430 const char *name; /* name to use in __builtin_cpu_supports. */
431 };
432
433 static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
434 { 0, "" }, /* Default options. */
435 { OPTION_MASK_CMPB, "arch_2_05" }, /* ISA 2.05 (power6). */
436 { OPTION_MASK_POPCNTD, "arch_2_06" }, /* ISA 2.06 (power7). */
437 { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8). */
438 { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9). */
439 };
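
/* A sketch of how the clones surface to users (the function is
   hypothetical; the attribute syntax follows the GCC manual):

       __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
       long mod_func (long a, long b) { return a % b; }

   The generated resolver picks a clone at load time by testing, via
   __builtin_cpu_supports, the HWCAP names recorded in rs6000_clone_map
   ("arch_3_00" for the power9 clone, "arch_2_07" for power8, ...).  */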
440
441
442 /* Newer LIBCs explicitly export this symbol to declare that they provide
443 the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. We emit a
444 reference to this symbol whenever we expand a CPU builtin, so that
445 we never link against an old LIBC. */
446 const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";
447
448 /* True if we have expanded a CPU builtin. */
449 bool cpu_builtin_p;
450
451 /* Pointer to function (in rs6000-c.c) that can define or undefine target
452 macros that have changed. Languages that don't support the preprocessor
453 don't link in rs6000-c.c, so we can't call it directly. */
454 void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
455
456 /* Simplify register classes into simpler classifications.  We assume
457 GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
458 check for standard register classes (gpr/floating/altivec/vsx) and
459 floating/vector classes (float/altivec/vsx). */
460
461 enum rs6000_reg_type {
462 NO_REG_TYPE,
463 PSEUDO_REG_TYPE,
464 GPR_REG_TYPE,
465 VSX_REG_TYPE,
466 ALTIVEC_REG_TYPE,
467 FPR_REG_TYPE,
468 SPR_REG_TYPE,
469 CR_REG_TYPE
470 };
471
472 /* Map register class to register type. */
473 static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
474
475 /* First/last register type for the 'normal' register types (i.e. general
476 purpose, floating point, altivec, and VSX registers). */
477 #define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)
478
479 #define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
480
481
482 /* Register classes we care about in secondary reload or in legitimate
483    address checking.  We only need to worry about GPR, FPR, and Altivec regs
484    here, along with an ANY field that is the OR of the 3 register classes.  */
485
486 enum rs6000_reload_reg_type {
487 RELOAD_REG_GPR, /* General purpose registers. */
488 RELOAD_REG_FPR, /* Traditional floating point regs. */
489 RELOAD_REG_VMX, /* Altivec (VMX) registers. */
490 RELOAD_REG_ANY, /* OR of GPR, FPR, Altivec masks. */
491 N_RELOAD_REG
492 };
493
494 /* For setting up register classes, loop through the 3 register classes mapping
495 into real registers, and skip the ANY class, which is just an OR of the
496 bits. */
497 #define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
498 #define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX
499
500 /* Map reload register type to a register in the register class. */
501 struct reload_reg_map_type {
502 const char *name; /* Register class name. */
503 int reg; /* Register in the register class. */
504 };
505
506 static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
507 { "Gpr", FIRST_GPR_REGNO }, /* RELOAD_REG_GPR. */
508 { "Fpr", FIRST_FPR_REGNO }, /* RELOAD_REG_FPR. */
509 { "VMX", FIRST_ALTIVEC_REGNO }, /* RELOAD_REG_VMX. */
510 { "Any", -1 }, /* RELOAD_REG_ANY. */
511 };
512
513 /* Mask bits for each register class, indexed per mode.  Historically the
514    compiler has been more restrictive about which types can do PRE_MODIFY
515    instead of PRE_INC and PRE_DEC, so keep track of separate bits for these two.  */
516 typedef unsigned char addr_mask_type;
517
518 #define RELOAD_REG_VALID 0x01 /* Mode valid in register.  */
519 #define RELOAD_REG_MULTIPLE 0x02 /* Mode takes multiple registers. */
520 #define RELOAD_REG_INDEXED 0x04 /* Reg+reg addressing. */
521 #define RELOAD_REG_OFFSET 0x08 /* Reg+offset addressing. */
522 #define RELOAD_REG_PRE_INCDEC 0x10 /* PRE_INC/PRE_DEC valid. */
523 #define RELOAD_REG_PRE_MODIFY 0x20 /* PRE_MODIFY valid. */
524 #define RELOAD_REG_AND_M16 0x40 /* AND -16 addressing. */
525 #define RELOAD_REG_QUAD_OFFSET 0x80 /* Quad offset is limited.  */
526
527 /* Per register type, the masks of valid addressing modes.  */
528 struct rs6000_reg_addr {
529 enum insn_code reload_load; /* INSN to reload for loading. */
530 enum insn_code reload_store; /* INSN to reload for storing. */
531 enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR. */
532 enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX. */
533 enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR. */
534 addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks. */
535 bool scalar_in_vmx_p; /* Scalar value can go in VMX. */
536 };
537
538 static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
539
540 /* Helper function to say whether a mode supports PRE_INC or PRE_DEC. */
541 static inline bool
542 mode_supports_pre_incdec_p (machine_mode mode)
543 {
544 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
545 != 0);
546 }
547
548 /* Helper function to say whether a mode supports PRE_MODIFY. */
549 static inline bool
550 mode_supports_pre_modify_p (machine_mode mode)
551 {
552 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
553 != 0);
554 }
555
556 /* Return true if we have D-form addressing in altivec registers. */
557 static inline bool
558 mode_supports_vmx_dform (machine_mode mode)
559 {
560 return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
561 }
562
563 /* Return true if we have D-form addressing in VSX registers. This addressing
564 is more limited than normal d-form addressing in that the offset must be
565 aligned on a 16-byte boundary. */
566 static inline bool
567 mode_supports_dq_form (machine_mode mode)
568 {
569 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
570 != 0);
571 }
572
573 /* Given that there exists at least one variable that is set (produced)
574 by OUT_INSN and read (consumed) by IN_INSN, return true iff
575 IN_INSN represents one or more memory store operations and none of
576 the variables set by OUT_INSN is used by IN_INSN as the address of a
577 store operation. If either IN_INSN or OUT_INSN does not represent
578 a "single" RTL SET expression (as loosely defined by the
579 implementation of the single_set function) or a PARALLEL with only
580 SETs, CLOBBERs, and USEs inside, this function returns false.
581
582 This rs6000-specific version of store_data_bypass_p checks for
583 certain conditions that result in assertion failures (and internal
584 compiler errors) in the generic store_data_bypass_p function and
585 returns false rather than calling store_data_bypass_p if one of the
586 problematic conditions is detected. */
587
588 int
589 rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
590 {
591 rtx out_set, in_set;
592 rtx out_pat, in_pat;
593 rtx out_exp, in_exp;
594 int i, j;
595
596 in_set = single_set (in_insn);
597 if (in_set)
598 {
599 if (MEM_P (SET_DEST (in_set)))
600 {
601 out_set = single_set (out_insn);
602 if (!out_set)
603 {
604 out_pat = PATTERN (out_insn);
605 if (GET_CODE (out_pat) == PARALLEL)
606 {
607 for (i = 0; i < XVECLEN (out_pat, 0); i++)
608 {
609 out_exp = XVECEXP (out_pat, 0, i);
610 if ((GET_CODE (out_exp) == CLOBBER)
611 || (GET_CODE (out_exp) == USE))
612 continue;
613 else if (GET_CODE (out_exp) != SET)
614 return false;
615 }
616 }
617 }
618 }
619 }
620 else
621 {
622 in_pat = PATTERN (in_insn);
623 if (GET_CODE (in_pat) != PARALLEL)
624 return false;
625
626 for (i = 0; i < XVECLEN (in_pat, 0); i++)
627 {
628 in_exp = XVECEXP (in_pat, 0, i);
629 if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
630 continue;
631 else if (GET_CODE (in_exp) != SET)
632 return false;
633
634 if (MEM_P (SET_DEST (in_exp)))
635 {
636 out_set = single_set (out_insn);
637 if (!out_set)
638 {
639 out_pat = PATTERN (out_insn);
640 if (GET_CODE (out_pat) != PARALLEL)
641 return false;
642 for (j = 0; j < XVECLEN (out_pat, 0); j++)
643 {
644 out_exp = XVECEXP (out_pat, 0, j);
645 if ((GET_CODE (out_exp) == CLOBBER)
646 || (GET_CODE (out_exp) == USE))
647 continue;
648 else if (GET_CODE (out_exp) != SET)
649 return false;
650 }
651 }
652 }
653 }
654 }
655 return store_data_bypass_p (out_insn, in_insn);
656 }
657
658 \f
659 /* Processor costs (relative to an add) */
660
661 const struct processor_costs *rs6000_cost;
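
/* A minimal illustrative sketch of how these tables are consumed (the
   real consumer is rs6000_rtx_costs further below; COSTS_N_INSNS (1)
   stands for one add, so e.g. rs64a_cost charges an SImode multiply as
   20 adds):

       if (GET_CODE (x) == MULT && mode == SImode)
         *total = rs6000_cost->mulsi;
       else
         *total = COSTS_N_INSNS (1);
 */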
662
663 /* Instruction size costs on 32-bit processors.  */
664 static const
665 struct processor_costs size32_cost = {
666 COSTS_N_INSNS (1), /* mulsi */
667 COSTS_N_INSNS (1), /* mulsi_const */
668 COSTS_N_INSNS (1), /* mulsi_const9 */
669 COSTS_N_INSNS (1), /* muldi */
670 COSTS_N_INSNS (1), /* divsi */
671 COSTS_N_INSNS (1), /* divdi */
672 COSTS_N_INSNS (1), /* fp */
673 COSTS_N_INSNS (1), /* dmul */
674 COSTS_N_INSNS (1), /* sdiv */
675 COSTS_N_INSNS (1), /* ddiv */
676 32, /* cache line size */
677 0, /* l1 cache */
678 0, /* l2 cache */
679 0, /* streams */
680 0, /* SF->DF convert */
681 };
682
683 /* Instruction size costs on 64-bit processors.  */
684 static const
685 struct processor_costs size64_cost = {
686 COSTS_N_INSNS (1), /* mulsi */
687 COSTS_N_INSNS (1), /* mulsi_const */
688 COSTS_N_INSNS (1), /* mulsi_const9 */
689 COSTS_N_INSNS (1), /* muldi */
690 COSTS_N_INSNS (1), /* divsi */
691 COSTS_N_INSNS (1), /* divdi */
692 COSTS_N_INSNS (1), /* fp */
693 COSTS_N_INSNS (1), /* dmul */
694 COSTS_N_INSNS (1), /* sdiv */
695 COSTS_N_INSNS (1), /* ddiv */
696 128, /* cache line size */
697 0, /* l1 cache */
698 0, /* l2 cache */
699 0, /* streams */
700 0, /* SF->DF convert */
701 };
702
703 /* Instruction costs on RS64A processors. */
704 static const
705 struct processor_costs rs64a_cost = {
706 COSTS_N_INSNS (20), /* mulsi */
707 COSTS_N_INSNS (12), /* mulsi_const */
708 COSTS_N_INSNS (8), /* mulsi_const9 */
709 COSTS_N_INSNS (34), /* muldi */
710 COSTS_N_INSNS (65), /* divsi */
711 COSTS_N_INSNS (67), /* divdi */
712 COSTS_N_INSNS (4), /* fp */
713 COSTS_N_INSNS (4), /* dmul */
714 COSTS_N_INSNS (31), /* sdiv */
715 COSTS_N_INSNS (31), /* ddiv */
716 128, /* cache line size */
717 128, /* l1 cache */
718 2048, /* l2 cache */
719 1, /* streams */
720 0, /* SF->DF convert */
721 };
722
723 /* Instruction costs on MPCCORE processors. */
724 static const
725 struct processor_costs mpccore_cost = {
726 COSTS_N_INSNS (2), /* mulsi */
727 COSTS_N_INSNS (2), /* mulsi_const */
728 COSTS_N_INSNS (2), /* mulsi_const9 */
729 COSTS_N_INSNS (2), /* muldi */
730 COSTS_N_INSNS (6), /* divsi */
731 COSTS_N_INSNS (6), /* divdi */
732 COSTS_N_INSNS (4), /* fp */
733 COSTS_N_INSNS (5), /* dmul */
734 COSTS_N_INSNS (10), /* sdiv */
735 COSTS_N_INSNS (17), /* ddiv */
736 32, /* cache line size */
737 4, /* l1 cache */
738 16, /* l2 cache */
739 1, /* streams */
740 0, /* SF->DF convert */
741 };
742
743 /* Instruction costs on PPC403 processors. */
744 static const
745 struct processor_costs ppc403_cost = {
746 COSTS_N_INSNS (4), /* mulsi */
747 COSTS_N_INSNS (4), /* mulsi_const */
748 COSTS_N_INSNS (4), /* mulsi_const9 */
749 COSTS_N_INSNS (4), /* muldi */
750 COSTS_N_INSNS (33), /* divsi */
751 COSTS_N_INSNS (33), /* divdi */
752 COSTS_N_INSNS (11), /* fp */
753 COSTS_N_INSNS (11), /* dmul */
754 COSTS_N_INSNS (11), /* sdiv */
755 COSTS_N_INSNS (11), /* ddiv */
756 32, /* cache line size */
757 4, /* l1 cache */
758 16, /* l2 cache */
759 1, /* streams */
760 0, /* SF->DF convert */
761 };
762
763 /* Instruction costs on PPC405 processors. */
764 static const
765 struct processor_costs ppc405_cost = {
766 COSTS_N_INSNS (5), /* mulsi */
767 COSTS_N_INSNS (4), /* mulsi_const */
768 COSTS_N_INSNS (3), /* mulsi_const9 */
769 COSTS_N_INSNS (5), /* muldi */
770 COSTS_N_INSNS (35), /* divsi */
771 COSTS_N_INSNS (35), /* divdi */
772 COSTS_N_INSNS (11), /* fp */
773 COSTS_N_INSNS (11), /* dmul */
774 COSTS_N_INSNS (11), /* sdiv */
775 COSTS_N_INSNS (11), /* ddiv */
776 32, /* cache line size */
777 16, /* l1 cache */
778 128, /* l2 cache */
779 1, /* streams */
780 0, /* SF->DF convert */
781 };
782
783 /* Instruction costs on PPC440 processors. */
784 static const
785 struct processor_costs ppc440_cost = {
786 COSTS_N_INSNS (3), /* mulsi */
787 COSTS_N_INSNS (2), /* mulsi_const */
788 COSTS_N_INSNS (2), /* mulsi_const9 */
789 COSTS_N_INSNS (3), /* muldi */
790 COSTS_N_INSNS (34), /* divsi */
791 COSTS_N_INSNS (34), /* divdi */
792 COSTS_N_INSNS (5), /* fp */
793 COSTS_N_INSNS (5), /* dmul */
794 COSTS_N_INSNS (19), /* sdiv */
795 COSTS_N_INSNS (33), /* ddiv */
796 32, /* cache line size */
797 32, /* l1 cache */
798 256, /* l2 cache */
799 1, /* streams */
800 0, /* SF->DF convert */
801 };
802
803 /* Instruction costs on PPC476 processors. */
804 static const
805 struct processor_costs ppc476_cost = {
806 COSTS_N_INSNS (4), /* mulsi */
807 COSTS_N_INSNS (4), /* mulsi_const */
808 COSTS_N_INSNS (4), /* mulsi_const9 */
809 COSTS_N_INSNS (4), /* muldi */
810 COSTS_N_INSNS (11), /* divsi */
811 COSTS_N_INSNS (11), /* divdi */
812 COSTS_N_INSNS (6), /* fp */
813 COSTS_N_INSNS (6), /* dmul */
814 COSTS_N_INSNS (19), /* sdiv */
815 COSTS_N_INSNS (33), /* ddiv */
816 32, /* l1 cache line size */
817 32, /* l1 cache */
818 512, /* l2 cache */
819 1, /* streams */
820 0, /* SF->DF convert */
821 };
822
823 /* Instruction costs on PPC601 processors. */
824 static const
825 struct processor_costs ppc601_cost = {
826 COSTS_N_INSNS (5), /* mulsi */
827 COSTS_N_INSNS (5), /* mulsi_const */
828 COSTS_N_INSNS (5), /* mulsi_const9 */
829 COSTS_N_INSNS (5), /* muldi */
830 COSTS_N_INSNS (36), /* divsi */
831 COSTS_N_INSNS (36), /* divdi */
832 COSTS_N_INSNS (4), /* fp */
833 COSTS_N_INSNS (5), /* dmul */
834 COSTS_N_INSNS (17), /* sdiv */
835 COSTS_N_INSNS (31), /* ddiv */
836 32, /* cache line size */
837 32, /* l1 cache */
838 256, /* l2 cache */
839 1, /* streams */
840 0, /* SF->DF convert */
841 };
842
843 /* Instruction costs on PPC603 processors. */
844 static const
845 struct processor_costs ppc603_cost = {
846 COSTS_N_INSNS (5), /* mulsi */
847 COSTS_N_INSNS (3), /* mulsi_const */
848 COSTS_N_INSNS (2), /* mulsi_const9 */
849 COSTS_N_INSNS (5), /* muldi */
850 COSTS_N_INSNS (37), /* divsi */
851 COSTS_N_INSNS (37), /* divdi */
852 COSTS_N_INSNS (3), /* fp */
853 COSTS_N_INSNS (4), /* dmul */
854 COSTS_N_INSNS (18), /* sdiv */
855 COSTS_N_INSNS (33), /* ddiv */
856 32, /* cache line size */
857 8, /* l1 cache */
858 64, /* l2 cache */
859 1, /* streams */
860 0, /* SF->DF convert */
861 };
862
863 /* Instruction costs on PPC604 processors. */
864 static const
865 struct processor_costs ppc604_cost = {
866 COSTS_N_INSNS (4), /* mulsi */
867 COSTS_N_INSNS (4), /* mulsi_const */
868 COSTS_N_INSNS (4), /* mulsi_const9 */
869 COSTS_N_INSNS (4), /* muldi */
870 COSTS_N_INSNS (20), /* divsi */
871 COSTS_N_INSNS (20), /* divdi */
872 COSTS_N_INSNS (3), /* fp */
873 COSTS_N_INSNS (3), /* dmul */
874 COSTS_N_INSNS (18), /* sdiv */
875 COSTS_N_INSNS (32), /* ddiv */
876 32, /* cache line size */
877 16, /* l1 cache */
878 512, /* l2 cache */
879 1, /* streams */
880 0, /* SF->DF convert */
881 };
882
883 /* Instruction costs on PPC604e processors. */
884 static const
885 struct processor_costs ppc604e_cost = {
886 COSTS_N_INSNS (2), /* mulsi */
887 COSTS_N_INSNS (2), /* mulsi_const */
888 COSTS_N_INSNS (2), /* mulsi_const9 */
889 COSTS_N_INSNS (2), /* muldi */
890 COSTS_N_INSNS (20), /* divsi */
891 COSTS_N_INSNS (20), /* divdi */
892 COSTS_N_INSNS (3), /* fp */
893 COSTS_N_INSNS (3), /* dmul */
894 COSTS_N_INSNS (18), /* sdiv */
895 COSTS_N_INSNS (32), /* ddiv */
896 32, /* cache line size */
897 32, /* l1 cache */
898 1024, /* l2 cache */
899 1, /* streams */
900 0, /* SF->DF convert */
901 };
902
903 /* Instruction costs on PPC620 processors. */
904 static const
905 struct processor_costs ppc620_cost = {
906 COSTS_N_INSNS (5), /* mulsi */
907 COSTS_N_INSNS (4), /* mulsi_const */
908 COSTS_N_INSNS (3), /* mulsi_const9 */
909 COSTS_N_INSNS (7), /* muldi */
910 COSTS_N_INSNS (21), /* divsi */
911 COSTS_N_INSNS (37), /* divdi */
912 COSTS_N_INSNS (3), /* fp */
913 COSTS_N_INSNS (3), /* dmul */
914 COSTS_N_INSNS (18), /* sdiv */
915 COSTS_N_INSNS (32), /* ddiv */
916 128, /* cache line size */
917 32, /* l1 cache */
918 1024, /* l2 cache */
919 1, /* streams */
920 0, /* SF->DF convert */
921 };
922
923 /* Instruction costs on PPC630 processors. */
924 static const
925 struct processor_costs ppc630_cost = {
926 COSTS_N_INSNS (5), /* mulsi */
927 COSTS_N_INSNS (4), /* mulsi_const */
928 COSTS_N_INSNS (3), /* mulsi_const9 */
929 COSTS_N_INSNS (7), /* muldi */
930 COSTS_N_INSNS (21), /* divsi */
931 COSTS_N_INSNS (37), /* divdi */
932 COSTS_N_INSNS (3), /* fp */
933 COSTS_N_INSNS (3), /* dmul */
934 COSTS_N_INSNS (17), /* sdiv */
935 COSTS_N_INSNS (21), /* ddiv */
936 128, /* cache line size */
937 64, /* l1 cache */
938 1024, /* l2 cache */
939 1, /* streams */
940 0, /* SF->DF convert */
941 };
942
943 /* Instruction costs on the Cell processor.  */
944 /* COSTS_N_INSNS (1) ~ one add. */
945 static const
946 struct processor_costs ppccell_cost = {
947 COSTS_N_INSNS (9/2)+2, /* mulsi */
948 COSTS_N_INSNS (6/2), /* mulsi_const */
949 COSTS_N_INSNS (6/2), /* mulsi_const9 */
950 COSTS_N_INSNS (15/2)+2, /* muldi */
951 COSTS_N_INSNS (38/2), /* divsi */
952 COSTS_N_INSNS (70/2), /* divdi */
953 COSTS_N_INSNS (10/2), /* fp */
954 COSTS_N_INSNS (10/2), /* dmul */
955 COSTS_N_INSNS (74/2), /* sdiv */
956 COSTS_N_INSNS (74/2), /* ddiv */
957 128, /* cache line size */
958 32, /* l1 cache */
959 512, /* l2 cache */
960 6, /* streams */
961 0, /* SF->DF convert */
962 };
963
964 /* Instruction costs on PPC750 and PPC7400 processors. */
965 static const
966 struct processor_costs ppc750_cost = {
967 COSTS_N_INSNS (5), /* mulsi */
968 COSTS_N_INSNS (3), /* mulsi_const */
969 COSTS_N_INSNS (2), /* mulsi_const9 */
970 COSTS_N_INSNS (5), /* muldi */
971 COSTS_N_INSNS (17), /* divsi */
972 COSTS_N_INSNS (17), /* divdi */
973 COSTS_N_INSNS (3), /* fp */
974 COSTS_N_INSNS (3), /* dmul */
975 COSTS_N_INSNS (17), /* sdiv */
976 COSTS_N_INSNS (31), /* ddiv */
977 32, /* cache line size */
978 32, /* l1 cache */
979 512, /* l2 cache */
980 1, /* streams */
981 0, /* SF->DF convert */
982 };
983
984 /* Instruction costs on PPC7450 processors. */
985 static const
986 struct processor_costs ppc7450_cost = {
987 COSTS_N_INSNS (4), /* mulsi */
988 COSTS_N_INSNS (3), /* mulsi_const */
989 COSTS_N_INSNS (3), /* mulsi_const9 */
990 COSTS_N_INSNS (4), /* muldi */
991 COSTS_N_INSNS (23), /* divsi */
992 COSTS_N_INSNS (23), /* divdi */
993 COSTS_N_INSNS (5), /* fp */
994 COSTS_N_INSNS (5), /* dmul */
995 COSTS_N_INSNS (21), /* sdiv */
996 COSTS_N_INSNS (35), /* ddiv */
997 32, /* cache line size */
998 32, /* l1 cache */
999 1024, /* l2 cache */
1000 1, /* streams */
1001 0, /* SF->DF convert */
1002 };
1003
1004 /* Instruction costs on PPC8540 processors. */
1005 static const
1006 struct processor_costs ppc8540_cost = {
1007 COSTS_N_INSNS (4), /* mulsi */
1008 COSTS_N_INSNS (4), /* mulsi_const */
1009 COSTS_N_INSNS (4), /* mulsi_const9 */
1010 COSTS_N_INSNS (4), /* muldi */
1011 COSTS_N_INSNS (19), /* divsi */
1012 COSTS_N_INSNS (19), /* divdi */
1013 COSTS_N_INSNS (4), /* fp */
1014 COSTS_N_INSNS (4), /* dmul */
1015 COSTS_N_INSNS (29), /* sdiv */
1016 COSTS_N_INSNS (29), /* ddiv */
1017 32, /* cache line size */
1018 32, /* l1 cache */
1019 256, /* l2 cache */
1020 1, /* prefetch streams */
1021 0, /* SF->DF convert */
1022 };
1023
1024 /* Instruction costs on E300C2 and E300C3 cores. */
1025 static const
1026 struct processor_costs ppce300c2c3_cost = {
1027 COSTS_N_INSNS (4), /* mulsi */
1028 COSTS_N_INSNS (4), /* mulsi_const */
1029 COSTS_N_INSNS (4), /* mulsi_const9 */
1030 COSTS_N_INSNS (4), /* muldi */
1031 COSTS_N_INSNS (19), /* divsi */
1032 COSTS_N_INSNS (19), /* divdi */
1033 COSTS_N_INSNS (3), /* fp */
1034 COSTS_N_INSNS (4), /* dmul */
1035 COSTS_N_INSNS (18), /* sdiv */
1036 COSTS_N_INSNS (33), /* ddiv */
1037 32, /* cache line size */
1038 16, /* l1 cache */
1039 16, /* l2 cache */
1040 1, /* prefetch streams */
1041 0, /* SF->DF convert */
1042 };
1043
1044 /* Instruction costs on PPCE500MC processors. */
1045 static const
1046 struct processor_costs ppce500mc_cost = {
1047 COSTS_N_INSNS (4), /* mulsi */
1048 COSTS_N_INSNS (4), /* mulsi_const */
1049 COSTS_N_INSNS (4), /* mulsi_const9 */
1050 COSTS_N_INSNS (4), /* muldi */
1051 COSTS_N_INSNS (14), /* divsi */
1052 COSTS_N_INSNS (14), /* divdi */
1053 COSTS_N_INSNS (8), /* fp */
1054 COSTS_N_INSNS (10), /* dmul */
1055 COSTS_N_INSNS (36), /* sdiv */
1056 COSTS_N_INSNS (66), /* ddiv */
1057 64, /* cache line size */
1058 32, /* l1 cache */
1059 128, /* l2 cache */
1060 1, /* prefetch streams */
1061 0, /* SF->DF convert */
1062 };
1063
1064 /* Instruction costs on PPCE500MC64 processors. */
1065 static const
1066 struct processor_costs ppce500mc64_cost = {
1067 COSTS_N_INSNS (4), /* mulsi */
1068 COSTS_N_INSNS (4), /* mulsi_const */
1069 COSTS_N_INSNS (4), /* mulsi_const9 */
1070 COSTS_N_INSNS (4), /* muldi */
1071 COSTS_N_INSNS (14), /* divsi */
1072 COSTS_N_INSNS (14), /* divdi */
1073 COSTS_N_INSNS (4), /* fp */
1074 COSTS_N_INSNS (10), /* dmul */
1075 COSTS_N_INSNS (36), /* sdiv */
1076 COSTS_N_INSNS (66), /* ddiv */
1077 64, /* cache line size */
1078 32, /* l1 cache */
1079 128, /* l2 cache */
1080 1, /* prefetch streams */
1081 0, /* SF->DF convert */
1082 };
1083
1084 /* Instruction costs on PPCE5500 processors. */
1085 static const
1086 struct processor_costs ppce5500_cost = {
1087 COSTS_N_INSNS (5), /* mulsi */
1088 COSTS_N_INSNS (5), /* mulsi_const */
1089 COSTS_N_INSNS (4), /* mulsi_const9 */
1090 COSTS_N_INSNS (5), /* muldi */
1091 COSTS_N_INSNS (14), /* divsi */
1092 COSTS_N_INSNS (14), /* divdi */
1093 COSTS_N_INSNS (7), /* fp */
1094 COSTS_N_INSNS (10), /* dmul */
1095 COSTS_N_INSNS (36), /* sdiv */
1096 COSTS_N_INSNS (66), /* ddiv */
1097 64, /* cache line size */
1098 32, /* l1 cache */
1099 128, /* l2 cache */
1100 1, /* prefetch streams */
1101 0, /* SF->DF convert */
1102 };
1103
1104 /* Instruction costs on PPCE6500 processors. */
1105 static const
1106 struct processor_costs ppce6500_cost = {
1107 COSTS_N_INSNS (5), /* mulsi */
1108 COSTS_N_INSNS (5), /* mulsi_const */
1109 COSTS_N_INSNS (4), /* mulsi_const9 */
1110 COSTS_N_INSNS (5), /* muldi */
1111 COSTS_N_INSNS (14), /* divsi */
1112 COSTS_N_INSNS (14), /* divdi */
1113 COSTS_N_INSNS (7), /* fp */
1114 COSTS_N_INSNS (10), /* dmul */
1115 COSTS_N_INSNS (36), /* sdiv */
1116 COSTS_N_INSNS (66), /* ddiv */
1117 64, /* cache line size */
1118 32, /* l1 cache */
1119 128, /* l2 cache */
1120 1, /* prefetch streams */
1121 0, /* SF->DF convert */
1122 };
1123
1124 /* Instruction costs on AppliedMicro Titan processors. */
1125 static const
1126 struct processor_costs titan_cost = {
1127 COSTS_N_INSNS (5), /* mulsi */
1128 COSTS_N_INSNS (5), /* mulsi_const */
1129 COSTS_N_INSNS (5), /* mulsi_const9 */
1130 COSTS_N_INSNS (5), /* muldi */
1131 COSTS_N_INSNS (18), /* divsi */
1132 COSTS_N_INSNS (18), /* divdi */
1133 COSTS_N_INSNS (10), /* fp */
1134 COSTS_N_INSNS (10), /* dmul */
1135 COSTS_N_INSNS (46), /* sdiv */
1136 COSTS_N_INSNS (72), /* ddiv */
1137 32, /* cache line size */
1138 32, /* l1 cache */
1139 512, /* l2 cache */
1140 1, /* prefetch streams */
1141 0, /* SF->DF convert */
1142 };
1143
1144 /* Instruction costs on POWER4 and POWER5 processors. */
1145 static const
1146 struct processor_costs power4_cost = {
1147 COSTS_N_INSNS (3), /* mulsi */
1148 COSTS_N_INSNS (2), /* mulsi_const */
1149 COSTS_N_INSNS (2), /* mulsi_const9 */
1150 COSTS_N_INSNS (4), /* muldi */
1151 COSTS_N_INSNS (18), /* divsi */
1152 COSTS_N_INSNS (34), /* divdi */
1153 COSTS_N_INSNS (3), /* fp */
1154 COSTS_N_INSNS (3), /* dmul */
1155 COSTS_N_INSNS (17), /* sdiv */
1156 COSTS_N_INSNS (17), /* ddiv */
1157 128, /* cache line size */
1158 32, /* l1 cache */
1159 1024, /* l2 cache */
1160 8, /* prefetch streams */
1161 0, /* SF->DF convert */
1162 };
1163
1164 /* Instruction costs on POWER6 processors. */
1165 static const
1166 struct processor_costs power6_cost = {
1167 COSTS_N_INSNS (8), /* mulsi */
1168 COSTS_N_INSNS (8), /* mulsi_const */
1169 COSTS_N_INSNS (8), /* mulsi_const9 */
1170 COSTS_N_INSNS (8), /* muldi */
1171 COSTS_N_INSNS (22), /* divsi */
1172 COSTS_N_INSNS (28), /* divdi */
1173 COSTS_N_INSNS (3), /* fp */
1174 COSTS_N_INSNS (3), /* dmul */
1175 COSTS_N_INSNS (13), /* sdiv */
1176 COSTS_N_INSNS (16), /* ddiv */
1177 128, /* cache line size */
1178 64, /* l1 cache */
1179 2048, /* l2 cache */
1180 16, /* prefetch streams */
1181 0, /* SF->DF convert */
1182 };
1183
1184 /* Instruction costs on POWER7 processors. */
1185 static const
1186 struct processor_costs power7_cost = {
1187 COSTS_N_INSNS (2), /* mulsi */
1188 COSTS_N_INSNS (2), /* mulsi_const */
1189 COSTS_N_INSNS (2), /* mulsi_const9 */
1190 COSTS_N_INSNS (2), /* muldi */
1191 COSTS_N_INSNS (18), /* divsi */
1192 COSTS_N_INSNS (34), /* divdi */
1193 COSTS_N_INSNS (3), /* fp */
1194 COSTS_N_INSNS (3), /* dmul */
1195 COSTS_N_INSNS (13), /* sdiv */
1196 COSTS_N_INSNS (16), /* ddiv */
1197 128, /* cache line size */
1198 32, /* l1 cache */
1199 256, /* l2 cache */
1200 12, /* prefetch streams */
1201 COSTS_N_INSNS (3), /* SF->DF convert */
1202 };
1203
1204 /* Instruction costs on POWER8 processors. */
1205 static const
1206 struct processor_costs power8_cost = {
1207 COSTS_N_INSNS (3), /* mulsi */
1208 COSTS_N_INSNS (3), /* mulsi_const */
1209 COSTS_N_INSNS (3), /* mulsi_const9 */
1210 COSTS_N_INSNS (3), /* muldi */
1211 COSTS_N_INSNS (19), /* divsi */
1212 COSTS_N_INSNS (35), /* divdi */
1213 COSTS_N_INSNS (3), /* fp */
1214 COSTS_N_INSNS (3), /* dmul */
1215 COSTS_N_INSNS (14), /* sdiv */
1216 COSTS_N_INSNS (17), /* ddiv */
1217 128, /* cache line size */
1218 32, /* l1 cache */
1219 256, /* l2 cache */
1220 12, /* prefetch streams */
1221 COSTS_N_INSNS (3), /* SF->DF convert */
1222 };
1223
1224 /* Instruction costs on POWER9 processors. */
1225 static const
1226 struct processor_costs power9_cost = {
1227 COSTS_N_INSNS (3), /* mulsi */
1228 COSTS_N_INSNS (3), /* mulsi_const */
1229 COSTS_N_INSNS (3), /* mulsi_const9 */
1230 COSTS_N_INSNS (3), /* muldi */
1231 COSTS_N_INSNS (8), /* divsi */
1232 COSTS_N_INSNS (12), /* divdi */
1233 COSTS_N_INSNS (3), /* fp */
1234 COSTS_N_INSNS (3), /* dmul */
1235 COSTS_N_INSNS (13), /* sdiv */
1236 COSTS_N_INSNS (18), /* ddiv */
1237 128, /* cache line size */
1238 32, /* l1 cache */
1239 512, /* l2 cache */
1240 8, /* prefetch streams */
1241 COSTS_N_INSNS (3), /* SF->DF convert */
1242 };
1243
1244 /* Instruction costs on POWER A2 processors. */
1245 static const
1246 struct processor_costs ppca2_cost = {
1247 COSTS_N_INSNS (16), /* mulsi */
1248 COSTS_N_INSNS (16), /* mulsi_const */
1249 COSTS_N_INSNS (16), /* mulsi_const9 */
1250 COSTS_N_INSNS (16), /* muldi */
1251 COSTS_N_INSNS (22), /* divsi */
1252 COSTS_N_INSNS (28), /* divdi */
1253 COSTS_N_INSNS (3), /* fp */
1254 COSTS_N_INSNS (3), /* dmul */
1255 COSTS_N_INSNS (59), /* sdiv */
1256 COSTS_N_INSNS (72), /* ddiv */
1257 64, /* cache line size */
1258 16, /* l1 cache */
1259 2048, /* l2 cache */
1260 16, /* prefetch streams */
1261 0, /* SF->DF convert */
1262 };
1263
1264 \f
1265 /* Table that classifies rs6000 builtin functions (pure, const, etc.). */
1266 #undef RS6000_BUILTIN_0
1267 #undef RS6000_BUILTIN_1
1268 #undef RS6000_BUILTIN_2
1269 #undef RS6000_BUILTIN_3
1270 #undef RS6000_BUILTIN_A
1271 #undef RS6000_BUILTIN_D
1272 #undef RS6000_BUILTIN_H
1273 #undef RS6000_BUILTIN_P
1274 #undef RS6000_BUILTIN_X
1275
1276 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
1277 { NAME, ICODE, MASK, ATTR },
1278
1279 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
1280 { NAME, ICODE, MASK, ATTR },
1281
1282 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
1283 { NAME, ICODE, MASK, ATTR },
1284
1285 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
1286 { NAME, ICODE, MASK, ATTR },
1287
1288 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
1289 { NAME, ICODE, MASK, ATTR },
1290
1291 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
1292 { NAME, ICODE, MASK, ATTR },
1293
1294 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
1295 { NAME, ICODE, MASK, ATTR },
1296
1297 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
1298 { NAME, ICODE, MASK, ATTR },
1299
1300 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
1301 { NAME, ICODE, MASK, ATTR },
1302
1303 struct rs6000_builtin_info_type {
1304 const char *name;
1305 const enum insn_code icode;
1306 const HOST_WIDE_INT mask;
1307 const unsigned attr;
1308 };
1309
1310 static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
1311 {
1312 #include "rs6000-builtin.def"
1313 };
1314
1315 #undef RS6000_BUILTIN_0
1316 #undef RS6000_BUILTIN_1
1317 #undef RS6000_BUILTIN_2
1318 #undef RS6000_BUILTIN_3
1319 #undef RS6000_BUILTIN_A
1320 #undef RS6000_BUILTIN_D
1321 #undef RS6000_BUILTIN_H
1322 #undef RS6000_BUILTIN_P
1323 #undef RS6000_BUILTIN_X
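
/* To illustrate the X-macro expansion above with a hypothetical entry: a
   line in rs6000-builtin.def such as

       RS6000_BUILTIN_2 (MY_BUILTIN, "__builtin_my_op", MY_MASK,
                         RS6000_BTC_CONST, CODE_FOR_my_op)

   contributes the initializer

       { "__builtin_my_op", CODE_FOR_my_op, MY_MASK, RS6000_BTC_CONST },

   to rs6000_builtin_info.  The same .def file is re-included elsewhere
   with different RS6000_BUILTIN_* definitions to build the enumeration
   of builtin codes and the decl table.  */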
1324
1325 /* Support for -mveclibabi=<xxx> to control which vector library to use. */
1326 static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
1327
1328 \f
1329 static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
1330 static struct machine_function * rs6000_init_machine_status (void);
1331 static int rs6000_ra_ever_killed (void);
1332 static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
1333 static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
1334 static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
1335 static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
1336 static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
1337 static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
1338 static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
1339 static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
1340 bool);
1341 static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
1342 unsigned int);
1343 static bool is_microcoded_insn (rtx_insn *);
1344 static bool is_nonpipeline_insn (rtx_insn *);
1345 static bool is_cracked_insn (rtx_insn *);
1346 static bool is_load_insn (rtx, rtx *);
1347 static bool is_store_insn (rtx, rtx *);
1348 static bool set_to_load_agen (rtx_insn *,rtx_insn *);
1349 static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
1350 static bool insn_must_be_first_in_group (rtx_insn *);
1351 static bool insn_must_be_last_in_group (rtx_insn *);
1352 static void altivec_init_builtins (void);
1353 static tree builtin_function_type (machine_mode, machine_mode,
1354 machine_mode, machine_mode,
1355 enum rs6000_builtins, const char *name);
1356 static void rs6000_common_init_builtins (void);
1357 static void htm_init_builtins (void);
1358 static rs6000_stack_t *rs6000_stack_info (void);
1359 static void is_altivec_return_reg (rtx, void *);
1360 int easy_vector_constant (rtx, machine_mode);
1361 static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
1362 static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
1363 static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
1364 bool, bool);
1365 #if TARGET_MACHO
1366 static void macho_branch_islands (void);
1367 static tree get_prev_label (tree);
1368 #endif
1369 static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
1370 int, int *);
1371 static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
1372 int, int, int *);
1373 static bool rs6000_mode_dependent_address (const_rtx);
1374 static bool rs6000_debug_mode_dependent_address (const_rtx);
1375 static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
1376 static enum reg_class rs6000_secondary_reload_class (enum reg_class,
1377 machine_mode, rtx);
1378 static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
1379 machine_mode,
1380 rtx);
1381 static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
1382 static enum reg_class rs6000_debug_preferred_reload_class (rtx,
1383 enum reg_class);
1384 static bool rs6000_debug_secondary_memory_needed (machine_mode,
1385 reg_class_t,
1386 reg_class_t);
1387 static bool rs6000_debug_can_change_mode_class (machine_mode,
1388 machine_mode,
1389 reg_class_t);
1390 static bool rs6000_save_toc_in_prologue_p (void);
1391 static rtx rs6000_internal_arg_pointer (void);
1392
1393 rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
1394 int, int *)
1395 = rs6000_legitimize_reload_address;
1396
1397 static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
1398 = rs6000_mode_dependent_address;
1399
1400 enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
1401 machine_mode, rtx)
1402 = rs6000_secondary_reload_class;
1403
1404 enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
1405 = rs6000_preferred_reload_class;
1406
1407 const int INSN_NOT_AVAILABLE = -1;
1408
1409 static void rs6000_print_isa_options (FILE *, int, const char *,
1410 HOST_WIDE_INT);
1411 static void rs6000_print_builtin_options (FILE *, int, const char *,
1412 HOST_WIDE_INT);
1413 static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);
1414
1415 static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
1416 static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
1417 enum rs6000_reg_type,
1418 machine_mode,
1419 secondary_reload_info *,
1420 bool);
1421 rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
1422 static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
1423 static tree rs6000_fold_builtin (tree, int, tree *, bool);
1424
1425 /* Hash table machinery for keeping track of TOC entries.  */
1426
1427 struct GTY((for_user)) toc_hash_struct
1428 {
1429 /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
1430 ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
1431 rtx key;
1432 machine_mode key_mode;
1433 int labelno;
1434 };
1435
1436 struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
1437 {
1438 static hashval_t hash (toc_hash_struct *);
1439 static bool equal (toc_hash_struct *, toc_hash_struct *);
1440 };
1441
1442 static GTY (()) hash_table<toc_hasher> *toc_hash_table;
1443
1444 /* Hash table to keep track of the argument types for builtin functions. */
1445
1446 struct GTY((for_user)) builtin_hash_struct
1447 {
1448 tree type;
1449 machine_mode mode[4]; /* return value + 3 arguments. */
1450 unsigned char uns_p[4]; /* and whether the types are unsigned. */
1451 };
1452
1453 struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
1454 {
1455 static hashval_t hash (builtin_hash_struct *);
1456 static bool equal (builtin_hash_struct *, builtin_hash_struct *);
1457 };
1458
1459 static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
1460
1461 \f
1462 /* Default register names. */
1463 char rs6000_reg_names[][8] =
1464 {
1465 "0", "1", "2", "3", "4", "5", "6", "7",
1466 "8", "9", "10", "11", "12", "13", "14", "15",
1467 "16", "17", "18", "19", "20", "21", "22", "23",
1468 "24", "25", "26", "27", "28", "29", "30", "31",
1469 "0", "1", "2", "3", "4", "5", "6", "7",
1470 "8", "9", "10", "11", "12", "13", "14", "15",
1471 "16", "17", "18", "19", "20", "21", "22", "23",
1472 "24", "25", "26", "27", "28", "29", "30", "31",
1473 "mq", "lr", "ctr","ap",
1474 "0", "1", "2", "3", "4", "5", "6", "7",
1475 "ca",
1476 /* AltiVec registers. */
1477 "0", "1", "2", "3", "4", "5", "6", "7",
1478 "8", "9", "10", "11", "12", "13", "14", "15",
1479 "16", "17", "18", "19", "20", "21", "22", "23",
1480 "24", "25", "26", "27", "28", "29", "30", "31",
1481 "vrsave", "vscr",
1482 /* Soft frame pointer. */
1483 "sfp",
1484 /* HTM SPR registers. */
1485 "tfhar", "tfiar", "texasr"
1486 };
1487
1488 #ifdef TARGET_REGNAMES
1489 static const char alt_reg_names[][8] =
1490 {
1491 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1492 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1493 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1494 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1495 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1496 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1497 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1498 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1499 "mq", "lr", "ctr", "ap",
1500 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1501 "ca",
1502 /* AltiVec registers. */
1503 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1504 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1505 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1506 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1507 "vrsave", "vscr",
1508 /* Soft frame pointer. */
1509 "sfp",
1510 /* HTM SPR registers. */
1511 "tfhar", "tfiar", "texasr"
1512 };
1513 #endif
1514
1515 /* Table of valid machine attributes. */
1516
1517 static const struct attribute_spec rs6000_attribute_table[] =
1518 {
1519 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
1520 affects_type_identity, handler, exclude } */
1521 { "altivec", 1, 1, false, true, false, false,
1522 rs6000_handle_altivec_attribute, NULL },
1523 { "longcall", 0, 0, false, true, true, false,
1524 rs6000_handle_longcall_attribute, NULL },
1525 { "shortcall", 0, 0, false, true, true, false,
1526 rs6000_handle_longcall_attribute, NULL },
1527 { "ms_struct", 0, 0, false, false, false, false,
1528 rs6000_handle_struct_attribute, NULL },
1529 { "gcc_struct", 0, 0, false, false, false, false,
1530 rs6000_handle_struct_attribute, NULL },
1531 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1532 SUBTARGET_ATTRIBUTE_TABLE,
1533 #endif
1534 { NULL, 0, 0, false, false, false, false, NULL, NULL }
1535 };
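
/* Usage sketches for two of the attributes above (the declarations are
   hypothetical):

       void far_away_fn (void) __attribute__ ((longcall));
       struct __attribute__ ((ms_struct)) s { char c; int i; };

   "longcall" forces the call through a pointer, for callees outside the
   +/-32MB reach of the bl instruction; "ms_struct" selects
   Microsoft-compatible record layout.  */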
1536 \f
1537 #ifndef TARGET_PROFILE_KERNEL
1538 #define TARGET_PROFILE_KERNEL 0
1539 #endif
1540
1541 /* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
1542 #define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
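
/* Worked example: for REGNO == FIRST_ALTIVEC_REGNO + 2 (i.e. %v2) the
   macro yields 0x80000000 >> 2 == 0x20000000, so

       info->vrsave_mask |= ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2);

   would set the VRSAVE bit for %v2 (bit %v0 being the MSB, as noted
   above; "info" is a hypothetical rs6000_stack_t pointer).  */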
1543 \f
1544 /* Initialize the GCC target structure. */
1545 #undef TARGET_ATTRIBUTE_TABLE
1546 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1547 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1548 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1549 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1550 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1551
1552 #undef TARGET_ASM_ALIGNED_DI_OP
1553 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1554
1555 /* Default unaligned ops are only provided for ELF. Find the ops needed
1556 for non-ELF systems. */
1557 #ifndef OBJECT_FORMAT_ELF
1558 #if TARGET_XCOFF
1559 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1560 64-bit targets. */
1561 #undef TARGET_ASM_UNALIGNED_HI_OP
1562 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1563 #undef TARGET_ASM_UNALIGNED_SI_OP
1564 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1565 #undef TARGET_ASM_UNALIGNED_DI_OP
1566 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1567 #else
1568 /* For Darwin. */
1569 #undef TARGET_ASM_UNALIGNED_HI_OP
1570 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1571 #undef TARGET_ASM_UNALIGNED_SI_OP
1572 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1573 #undef TARGET_ASM_UNALIGNED_DI_OP
1574 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1575 #undef TARGET_ASM_ALIGNED_DI_OP
1576 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1577 #endif
1578 #endif
1579
1580 /* This hook deals with fixups for relocatable code and DI-mode objects
1581 in 64-bit code. */
1582 #undef TARGET_ASM_INTEGER
1583 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1584
1585 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1586 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1587 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1588 #endif
1589
1590 #undef TARGET_SET_UP_BY_PROLOGUE
1591 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1592
1593 #undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
1594 #define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
1595 #undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
1596 #define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
1597 #undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
1598 #define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
1599 #undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
1600 #define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
1601 #undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
1602 #define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
1603 #undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
1604 #define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components
1605
1606 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1607 #define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry
1608
1609 #undef TARGET_INTERNAL_ARG_POINTER
1610 #define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer
1611
1612 #undef TARGET_HAVE_TLS
1613 #define TARGET_HAVE_TLS HAVE_AS_TLS
1614
1615 #undef TARGET_CANNOT_FORCE_CONST_MEM
1616 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1617
1618 #undef TARGET_DELEGITIMIZE_ADDRESS
1619 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1620
1621 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1622 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1623
1624 #undef TARGET_LEGITIMATE_COMBINED_INSN
1625 #define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn
1626
1627 #undef TARGET_ASM_FUNCTION_PROLOGUE
1628 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1629 #undef TARGET_ASM_FUNCTION_EPILOGUE
1630 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1631
1632 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1633 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1634
1635 #undef TARGET_LEGITIMIZE_ADDRESS
1636 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1637
1638 #undef TARGET_SCHED_VARIABLE_ISSUE
1639 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1640
1641 #undef TARGET_SCHED_ISSUE_RATE
1642 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1643 #undef TARGET_SCHED_ADJUST_COST
1644 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1645 #undef TARGET_SCHED_ADJUST_PRIORITY
1646 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1647 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1648 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1649 #undef TARGET_SCHED_INIT
1650 #define TARGET_SCHED_INIT rs6000_sched_init
1651 #undef TARGET_SCHED_FINISH
1652 #define TARGET_SCHED_FINISH rs6000_sched_finish
1653 #undef TARGET_SCHED_REORDER
1654 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1655 #undef TARGET_SCHED_REORDER2
1656 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1657
1658 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1659 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1660
1661 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1662 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1663
1664 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1665 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1666 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1667 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1668 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1669 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1670 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1671 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1672
1673 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1674 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1675
1676 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1677 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1678 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1679 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1680 rs6000_builtin_support_vector_misalignment
1681 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1682 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1683 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1684 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1685 rs6000_builtin_vectorization_cost
1686 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1687 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1688 rs6000_preferred_simd_mode
1689 #undef TARGET_VECTORIZE_INIT_COST
1690 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1691 #undef TARGET_VECTORIZE_ADD_STMT_COST
1692 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1693 #undef TARGET_VECTORIZE_FINISH_COST
1694 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1695 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1696 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1697
1698 #undef TARGET_INIT_BUILTINS
1699 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1700 #undef TARGET_BUILTIN_DECL
1701 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1702
1703 #undef TARGET_FOLD_BUILTIN
1704 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1705 #undef TARGET_GIMPLE_FOLD_BUILTIN
1706 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1707
1708 #undef TARGET_EXPAND_BUILTIN
1709 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1710
1711 #undef TARGET_MANGLE_TYPE
1712 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1713
1714 #undef TARGET_INIT_LIBFUNCS
1715 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1716
1717 #if TARGET_MACHO
1718 #undef TARGET_BINDS_LOCAL_P
1719 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1720 #endif
1721
1722 #undef TARGET_MS_BITFIELD_LAYOUT_P
1723 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1724
1725 #undef TARGET_ASM_OUTPUT_MI_THUNK
1726 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1727
1728 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1729 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1730
1731 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1732 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1733
1734 #undef TARGET_REGISTER_MOVE_COST
1735 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1736 #undef TARGET_MEMORY_MOVE_COST
1737 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1738 #undef TARGET_CANNOT_COPY_INSN_P
1739 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1740 #undef TARGET_RTX_COSTS
1741 #define TARGET_RTX_COSTS rs6000_rtx_costs
1742 #undef TARGET_ADDRESS_COST
1743 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1744 #undef TARGET_INSN_COST
1745 #define TARGET_INSN_COST rs6000_insn_cost
1746
1747 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1748 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1749
1750 #undef TARGET_PROMOTE_FUNCTION_MODE
1751 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1752
1753 #undef TARGET_RETURN_IN_MEMORY
1754 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1755
1756 #undef TARGET_RETURN_IN_MSB
1757 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1758
1759 #undef TARGET_SETUP_INCOMING_VARARGS
1760 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1761
1762 /* Always strict argument naming on rs6000. */
1763 #undef TARGET_STRICT_ARGUMENT_NAMING
1764 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1765 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1766 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_SPLIT_COMPLEX_ARG
1768 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1769 #undef TARGET_MUST_PASS_IN_STACK
1770 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1771 #undef TARGET_PASS_BY_REFERENCE
1772 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1773 #undef TARGET_ARG_PARTIAL_BYTES
1774 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1775 #undef TARGET_FUNCTION_ARG_ADVANCE
1776 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1777 #undef TARGET_FUNCTION_ARG
1778 #define TARGET_FUNCTION_ARG rs6000_function_arg
1779 #undef TARGET_FUNCTION_ARG_PADDING
1780 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1781 #undef TARGET_FUNCTION_ARG_BOUNDARY
1782 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1783
1784 #undef TARGET_BUILD_BUILTIN_VA_LIST
1785 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1786
1787 #undef TARGET_EXPAND_BUILTIN_VA_START
1788 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1789
1790 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1791 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1792
1793 #undef TARGET_EH_RETURN_FILTER_MODE
1794 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1795
1796 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1797 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1798
1799 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1800 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1801
1802 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1803 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1804
1805 #undef TARGET_FLOATN_MODE
1806 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1807
1808 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1809 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1810
1811 #undef TARGET_MD_ASM_ADJUST
1812 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1813
1814 #undef TARGET_OPTION_OVERRIDE
1815 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1816
1817 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1818 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1819 rs6000_builtin_vectorized_function
1820
1821 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1822 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1823 rs6000_builtin_md_vectorized_function
1824
1825 #undef TARGET_STACK_PROTECT_GUARD
1826 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1827
1828 #if !TARGET_MACHO
1829 #undef TARGET_STACK_PROTECT_FAIL
1830 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1831 #endif
1832
1833 #ifdef HAVE_AS_TLS
1834 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1835 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1836 #endif
1837
1838 /* Use a 32-bit anchor range. This leads to sequences like:
1839
1840 addis tmp,anchor,high
1841 add dest,tmp,low
1842
1843 where tmp itself acts as an anchor, and can be shared between
1844 accesses to the same 64k page. */
1845 #undef TARGET_MIN_ANCHOR_OFFSET
1846 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1847 #undef TARGET_MAX_ANCHOR_OFFSET
1848 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
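
/* Numeric sketch of the split above (offset value chosen for illustration):
   to add offset 0x1234a678 to the anchor, round the high part so that the
   low part fits in a signed 16-bit immediate:

     high = (0x1234a678 + 0x8000) & ~0xffff;    high == 0x12350000
     low  = 0x1234a678 - high;                  low  == -0x5988

   "addis tmp,anchor,0x1235" followed by "addi dest,tmp,-0x5988" then
   reconstructs the full offset.  */
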
1849 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1850 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1851 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1852 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1853
1854 #undef TARGET_BUILTIN_RECIPROCAL
1855 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1856
1857 #undef TARGET_SECONDARY_RELOAD
1858 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1859 #undef TARGET_SECONDARY_MEMORY_NEEDED
1860 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1862 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1863
1864 #undef TARGET_LEGITIMATE_ADDRESS_P
1865 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1866
1867 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1868 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1869
1870 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1871 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1872
1873 #undef TARGET_CAN_ELIMINATE
1874 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1875
1876 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1877 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1878
1879 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1880 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1881
1882 #undef TARGET_TRAMPOLINE_INIT
1883 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1884
1885 #undef TARGET_FUNCTION_VALUE
1886 #define TARGET_FUNCTION_VALUE rs6000_function_value
1887
1888 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1889 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1890
1891 #undef TARGET_OPTION_SAVE
1892 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1893
1894 #undef TARGET_OPTION_RESTORE
1895 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1896
1897 #undef TARGET_OPTION_PRINT
1898 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1899
1900 #undef TARGET_CAN_INLINE_P
1901 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1902
1903 #undef TARGET_SET_CURRENT_FUNCTION
1904 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1905
1906 #undef TARGET_LEGITIMATE_CONSTANT_P
1907 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1908
1909 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1910 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1911
1912 #undef TARGET_CAN_USE_DOLOOP_P
1913 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1914
1915 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1916 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1917
1918 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1919 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1920 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1921 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1922 #undef TARGET_UNWIND_WORD_MODE
1923 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1924
1925 #undef TARGET_OFFLOAD_OPTIONS
1926 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1927
1928 #undef TARGET_C_MODE_FOR_SUFFIX
1929 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1930
1931 #undef TARGET_INVALID_BINARY_OP
1932 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1933
1934 #undef TARGET_OPTAB_SUPPORTED_P
1935 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1936
1937 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1938 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1939
1940 #undef TARGET_COMPARE_VERSION_PRIORITY
1941 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1942
1943 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1944 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1945 rs6000_generate_version_dispatcher_body
1946
1947 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1948 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1949 rs6000_get_function_versions_dispatcher
1950
1951 #undef TARGET_OPTION_FUNCTION_VERSIONS
1952 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1953
1954 #undef TARGET_HARD_REGNO_NREGS
1955 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1956 #undef TARGET_HARD_REGNO_MODE_OK
1957 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1958
1959 #undef TARGET_MODES_TIEABLE_P
1960 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1961
1962 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1963 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1964 rs6000_hard_regno_call_part_clobbered
1965
1966 #undef TARGET_SLOW_UNALIGNED_ACCESS
1967 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1968
1969 #undef TARGET_CAN_CHANGE_MODE_CLASS
1970 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1971
1972 #undef TARGET_CONSTANT_ALIGNMENT
1973 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1974
1975 #undef TARGET_STARTING_FRAME_OFFSET
1976 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1977
1978 #if TARGET_ELF && RS6000_WEAK
1979 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1980 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1981 #endif
1982
1983 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1984 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1985
1986 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1987 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1988 \f
1989
1990 /* Processor table. */
1991 struct rs6000_ptt
1992 {
1993 const char *const name; /* Canonical processor name. */
1994 const enum processor_type processor; /* Processor type enum value. */
1995 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1996 };
1997
1998 static struct rs6000_ptt const processor_target_table[] =
1999 {
2000 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2001 #include "rs6000-cpus.def"
2002 #undef RS6000_CPU
2003 };
2004
2005 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2006 name is invalid. */
2007
2008 static int
2009 rs6000_cpu_name_lookup (const char *name)
2010 {
2011 size_t i;
2012
2013 if (name != NULL)
2014 {
2015 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2016 if (! strcmp (name, processor_target_table[i].name))
2017 return (int)i;
2018 }
2019
2020 return -1;
2021 }
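
/* Usage sketch (illustrative): for -mcpu=power9 the option handling ends up
   calling rs6000_cpu_name_lookup ("power9") and gets back the index of the
   matching processor_target_table entry; an unrecognized string, or a NULL
   name, yields -1 so the caller can reject the option.  */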
2022
2023 \f
2024 /* Return number of consecutive hard regs needed starting at reg REGNO
2025 to hold something of mode MODE.
2026 This is ordinarily the length in words of a value of mode MODE
2027 but can be less for certain modes in special long registers.
2028
2029 POWER and PowerPC GPRs hold 32 bits worth;
2030 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2031
2032 static int
2033 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2034 {
2035 unsigned HOST_WIDE_INT reg_size;
2036
2037 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2038 128-bit floating point that can go in vector registers, which has VSX
2039 memory addressing. */
2040 if (FP_REGNO_P (regno))
2041 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2042 ? UNITS_PER_VSX_WORD
2043 : UNITS_PER_FP_WORD);
2044
2045 else if (ALTIVEC_REGNO_P (regno))
2046 reg_size = UNITS_PER_ALTIVEC_WORD;
2047
2048 else
2049 reg_size = UNITS_PER_WORD;
2050
2051 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2052 }
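
/* Worked examples (illustrative): with 32-bit GPRs (UNITS_PER_WORD == 4),
   DFmode (8 bytes) needs (8 + 4 - 1) / 4 == 2 consecutive registers, while
   V4SImode (16 bytes) in an AltiVec register (UNITS_PER_ALTIVEC_WORD == 16)
   needs (16 + 16 - 1) / 16 == 1.  */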
2053
2054 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2055 MODE. */
2056 static int
2057 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2058 {
2059 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2060
2061 if (COMPLEX_MODE_P (mode))
2062 mode = GET_MODE_INNER (mode);
2063
2064 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2065 register pairs, and we use PTImode wherever we need to deal with such
2066 operations. Don't allow quad words in the argument or frame
2067 pointer registers, just registers 0..31. */
2068 if (mode == PTImode)
2069 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2070 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2071 && ((regno & 1) == 0));
2072
2073 /* VSX registers that overlap the FPR registers are larger than on non-VSX
2074 implementations. Don't allow an item to be split between a FP register
2075 and an Altivec register. Allow TImode in all VSX registers if the user
2076 asked for it. */
2077 if (TARGET_VSX && VSX_REGNO_P (regno)
2078 && (VECTOR_MEM_VSX_P (mode)
2079 || FLOAT128_VECTOR_P (mode)
2080 || reg_addr[mode].scalar_in_vmx_p
2081 || mode == TImode
2082 || (TARGET_VADDUQM && mode == V1TImode)))
2083 {
2084 if (FP_REGNO_P (regno))
2085 return FP_REGNO_P (last_regno);
2086
2087 if (ALTIVEC_REGNO_P (regno))
2088 {
2089 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2090 return 0;
2091
2092 return ALTIVEC_REGNO_P (last_regno);
2093 }
2094 }
2095
2096 /* The GPRs can hold any mode, but values bigger than one register
2097 cannot go past R31. */
2098 if (INT_REGNO_P (regno))
2099 return INT_REGNO_P (last_regno);
2100
2101 /* The float registers (except for VSX vector modes) can only hold floating
2102 modes and DImode. */
2103 if (FP_REGNO_P (regno))
2104 {
2105 if (FLOAT128_VECTOR_P (mode))
2106 return false;
2107
2108 if (SCALAR_FLOAT_MODE_P (mode)
2109 && (mode != TDmode || (regno % 2) == 0)
2110 && FP_REGNO_P (last_regno))
2111 return 1;
2112
2113 if (GET_MODE_CLASS (mode) == MODE_INT)
2114 {
2115 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2116 return 1;
2117
2118 if (TARGET_P8_VECTOR && (mode == SImode))
2119 return 1;
2120
2121 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2122 return 1;
2123 }
2124
2125 return 0;
2126 }
2127
2128 /* The CR register can only hold CC modes. */
2129 if (CR_REGNO_P (regno))
2130 return GET_MODE_CLASS (mode) == MODE_CC;
2131
2132 if (CA_REGNO_P (regno))
2133 return mode == Pmode || mode == SImode;
2134
2135 /* AltiVec only in AltiVec registers. */
2136 if (ALTIVEC_REGNO_P (regno))
2137 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2138 || mode == V1TImode);
2139
2140 /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2141 registers, and the value must be able to fit within the register set. */
2142
2143 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2144 }
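
/* Example of the PTImode restriction above (illustrative): on a 64-bit
   target, PTImode starting at r4 is accepted (even GPR, and last_regno r5
   is still a GPR), but r3 fails the (regno & 1) == 0 test, so quad word
   memory operations always get an even/odd GPR pair.  */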
2145
2146 /* Implement TARGET_HARD_REGNO_NREGS. */
2147
2148 static unsigned int
2149 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2150 {
2151 return rs6000_hard_regno_nregs[mode][regno];
2152 }
2153
2154 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2155
2156 static bool
2157 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2158 {
2159 return rs6000_hard_regno_mode_ok_p[mode][regno];
2160 }
2161
2162 /* Implement TARGET_MODES_TIEABLE_P.
2163
2164 PTImode cannot tie with other modes because PTImode is restricted to even
2165 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2166 57744).
2167
2168 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2169 128-bit floating point on VSX systems ties with other vectors. */
2170
2171 static bool
2172 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2173 {
2174 if (mode1 == PTImode)
2175 return mode2 == PTImode;
2176 if (mode2 == PTImode)
2177 return false;
2178
2179 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2180 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2182 return false;
2183
2184 if (SCALAR_FLOAT_MODE_P (mode1))
2185 return SCALAR_FLOAT_MODE_P (mode2);
2186 if (SCALAR_FLOAT_MODE_P (mode2))
2187 return false;
2188
2189 if (GET_MODE_CLASS (mode1) == MODE_CC)
2190 return GET_MODE_CLASS (mode2) == MODE_CC;
2191 if (GET_MODE_CLASS (mode2) == MODE_CC)
2192 return false;
2193
2194 return true;
2195 }
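
/* Illustrative results: rs6000_modes_tieable_p (TImode, PTImode) is false
   since PTImode ties only with itself; V2DFmode and V4SImode tie (both are
   AltiVec/VSX vector modes); DFmode and SFmode tie (both scalar float); and
   DFmode does not tie with DImode.  */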
2196
2197 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2198
2199 static bool
2200 rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
2201 {
2202 if (TARGET_32BIT
2203 && TARGET_POWERPC64
2204 && GET_MODE_SIZE (mode) > 4
2205 && INT_REGNO_P (regno))
2206 return true;
2207
2208 if (TARGET_VSX
2209 && FP_REGNO_P (regno)
2210 && GET_MODE_SIZE (mode) > 8
2211 && !FLOAT128_2REG_P (mode))
2212 return true;
2213
2214 return false;
2215 }
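
/* Example (illustrative): with -m32 -mpowerpc64, a DImode value in a GPR is
   considered part-clobbered across calls because the 32-bit ABI only saves
   the low 32 bits of the nonvolatile GPRs; similarly, under VSX a 16-byte
   value in a traditional FPR only has its low 8 bytes preserved.  */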
2216
2217 /* Print interesting facts about registers. */
2218 static void
2219 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2220 {
2221 int r, m;
2222
2223 for (r = first_regno; r <= last_regno; ++r)
2224 {
2225 const char *comma = "";
2226 int len;
2227
2228 if (first_regno == last_regno)
2229 fprintf (stderr, "%s:\t", reg_name);
2230 else
2231 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2232
2233 len = 8;
2234 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2235 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2236 {
2237 if (len > 70)
2238 {
2239 fprintf (stderr, ",\n\t");
2240 len = 8;
2241 comma = "";
2242 }
2243
2244 if (rs6000_hard_regno_nregs[m][r] > 1)
2245 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2246 rs6000_hard_regno_nregs[m][r]);
2247 else
2248 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2249
2250 comma = ", ";
2251 }
2252
2253 if (call_used_regs[r])
2254 {
2255 if (len > 70)
2256 {
2257 fprintf (stderr, ",\n\t");
2258 len = 8;
2259 comma = "";
2260 }
2261
2262 len += fprintf (stderr, "%s%s", comma, "call-used");
2263 comma = ", ";
2264 }
2265
2266 if (fixed_regs[r])
2267 {
2268 if (len > 70)
2269 {
2270 fprintf (stderr, ",\n\t");
2271 len = 8;
2272 comma = "";
2273 }
2274
2275 len += fprintf (stderr, "%s%s", comma, "fixed");
2276 comma = ", ";
2277 }
2278
2279 if (len > 70)
2280 {
2281 fprintf (stderr, ",\n\t");
2282 comma = "";
2283 }
2284
2285 len += fprintf (stderr, "%sreg-class = %s", comma,
2286 reg_class_names[(int)rs6000_regno_regclass[r]]);
2287 comma = ", ";
2288
2289 if (len > 70)
2290 {
2291 fprintf (stderr, ",\n\t");
2292 comma = "";
2293 }
2294
2295 fprintf (stderr, "%sregno = %d\n", comma, r);
2296 }
2297 }
2298
2299 static const char *
2300 rs6000_debug_vector_unit (enum rs6000_vector v)
2301 {
2302 const char *ret;
2303
2304 switch (v)
2305 {
2306 case VECTOR_NONE: ret = "none"; break;
2307 case VECTOR_ALTIVEC: ret = "altivec"; break;
2308 case VECTOR_VSX: ret = "vsx"; break;
2309 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2310 default: ret = "unknown"; break;
2311 }
2312
2313 return ret;
2314 }
2315
2316 /* Inner function printing just the address mask for a particular reload
2317 register class. */
2318 DEBUG_FUNCTION char *
2319 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2320 {
2321 static char ret[8];
2322 char *p = ret;
2323
2324 if ((mask & RELOAD_REG_VALID) != 0)
2325 *p++ = 'v';
2326 else if (keep_spaces)
2327 *p++ = ' ';
2328
2329 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2330 *p++ = 'm';
2331 else if (keep_spaces)
2332 *p++ = ' ';
2333
2334 if ((mask & RELOAD_REG_INDEXED) != 0)
2335 *p++ = 'i';
2336 else if (keep_spaces)
2337 *p++ = ' ';
2338
2339 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2340 *p++ = 'O';
2341 else if ((mask & RELOAD_REG_OFFSET) != 0)
2342 *p++ = 'o';
2343 else if (keep_spaces)
2344 *p++ = ' ';
2345
2346 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2347 *p++ = '+';
2348 else if (keep_spaces)
2349 *p++ = ' ';
2350
2351 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2352 *p++ = '+';
2353 else if (keep_spaces)
2354 *p++ = ' ';
2355
2356 if ((mask & RELOAD_REG_AND_M16) != 0)
2357 *p++ = '&';
2358 else if (keep_spaces)
2359 *p++ = ' ';
2360
2361 *p = '\0';
2362
2363 return ret;
2364 }
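
/* Sample output (illustrative): a mask of RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET prints as "v io   " (with the
   trailing blanks) when KEEP_SPACES is true, or as "vio" when it is false,
   so the columns in the -mdebug=reg dump line up.  */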
2365
2366 /* Print the address masks in a human readable fashion. */
2367 DEBUG_FUNCTION void
2368 rs6000_debug_print_mode (ssize_t m)
2369 {
2370 ssize_t rc;
2371 int spaces = 0;
2372
2373 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2374 for (rc = 0; rc < N_RELOAD_REG; rc++)
2375 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2376 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2377
2378 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2379 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2380 {
2381 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2382 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2383 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2384 spaces = 0;
2385 }
2386 else
2387 spaces += sizeof (" Reload=sl") - 1;
2388
2389 if (reg_addr[m].scalar_in_vmx_p)
2390 {
2391 fprintf (stderr, "%*s Upper=y", spaces, "");
2392 spaces = 0;
2393 }
2394 else
2395 spaces += sizeof (" Upper=y") - 1;
2396
2397 if (rs6000_vector_unit[m] != VECTOR_NONE
2398 || rs6000_vector_mem[m] != VECTOR_NONE)
2399 {
2400 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2401 spaces, "",
2402 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2403 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2404 }
2405
2406 fputs ("\n", stderr);
2407 }
2408
2409 #define DEBUG_FMT_ID "%-32s= "
2410 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2411 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2412 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
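
/* For illustration, DEBUG_FMT_D expands to "%-32s= %d\n", so
   fprintf (stderr, DEBUG_FMT_D, "tls_size", 16) prints the key left
   justified in a 32-column field followed by "= 16".  */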
2413
2414 /* Print various interesting information with -mdebug=reg. */
2415 static void
2416 rs6000_debug_reg_global (void)
2417 {
2418 static const char *const tf[2] = { "false", "true" };
2419 const char *nl = (const char *)0;
2420 int m;
2421 size_t m1, m2, v;
2422 char costly_num[20];
2423 char nop_num[20];
2424 char flags_buffer[40];
2425 const char *costly_str;
2426 const char *nop_str;
2427 const char *trace_str;
2428 const char *abi_str;
2429 const char *cmodel_str;
2430 struct cl_target_option cl_opts;
2431
2432 /* Modes we want tieable information on. */
2433 static const machine_mode print_tieable_modes[] = {
2434 QImode,
2435 HImode,
2436 SImode,
2437 DImode,
2438 TImode,
2439 PTImode,
2440 SFmode,
2441 DFmode,
2442 TFmode,
2443 IFmode,
2444 KFmode,
2445 SDmode,
2446 DDmode,
2447 TDmode,
2448 V16QImode,
2449 V8HImode,
2450 V4SImode,
2451 V2DImode,
2452 V1TImode,
2453 V32QImode,
2454 V16HImode,
2455 V8SImode,
2456 V4DImode,
2457 V2TImode,
2458 V4SFmode,
2459 V2DFmode,
2460 V8SFmode,
2461 V4DFmode,
2462 CCmode,
2463 CCUNSmode,
2464 CCEQmode,
2465 };
2466
2467 /* Virtual regs we are interested in. */
2468 static const struct {
2469 int regno; /* register number. */
2470 const char *name; /* register name. */
2471 } virtual_regs[] = {
2472 { STACK_POINTER_REGNUM, "stack pointer:" },
2473 { TOC_REGNUM, "toc: " },
2474 { STATIC_CHAIN_REGNUM, "static chain: " },
2475 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2476 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2477 { ARG_POINTER_REGNUM, "arg pointer: " },
2478 { FRAME_POINTER_REGNUM, "frame pointer:" },
2479 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2480 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2481 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2482 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2483 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2484 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2485 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2486 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2487 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2488 };
2489
2490 fputs ("\nHard register information:\n", stderr);
2491 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2492 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2493 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2494 LAST_ALTIVEC_REGNO,
2495 "vs");
2496 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2497 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2498 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2499 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2500 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2501 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2502
2503 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2504 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2505 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2506
2507 fprintf (stderr,
2508 "\n"
2509 "d reg_class = %s\n"
2510 "f reg_class = %s\n"
2511 "v reg_class = %s\n"
2512 "wa reg_class = %s\n"
2513 "wb reg_class = %s\n"
2514 "wd reg_class = %s\n"
2515 "we reg_class = %s\n"
2516 "wf reg_class = %s\n"
2517 "wg reg_class = %s\n"
2518 "wh reg_class = %s\n"
2519 "wi reg_class = %s\n"
2520 "wj reg_class = %s\n"
2521 "wk reg_class = %s\n"
2522 "wl reg_class = %s\n"
2523 "wm reg_class = %s\n"
2524 "wo reg_class = %s\n"
2525 "wp reg_class = %s\n"
2526 "wq reg_class = %s\n"
2527 "wr reg_class = %s\n"
2528 "ws reg_class = %s\n"
2529 "wt reg_class = %s\n"
2530 "wu reg_class = %s\n"
2531 "wv reg_class = %s\n"
2532 "ww reg_class = %s\n"
2533 "wx reg_class = %s\n"
2534 "wy reg_class = %s\n"
2535 "wz reg_class = %s\n"
2536 "wA reg_class = %s\n"
2537 "wH reg_class = %s\n"
2538 "wI reg_class = %s\n"
2539 "wJ reg_class = %s\n"
2540 "wK reg_class = %s\n"
2541 "\n",
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2574
2575 nl = "\n";
2576 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2577 rs6000_debug_print_mode (m);
2578
2579 fputs ("\n", stderr);
2580
2581 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2582 {
2583 machine_mode mode1 = print_tieable_modes[m1];
2584 bool first_time = true;
2585
2586 nl = (const char *)0;
2587 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2588 {
2589 machine_mode mode2 = print_tieable_modes[m2];
2590 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2591 {
2592 if (first_time)
2593 {
2594 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2595 nl = "\n";
2596 first_time = false;
2597 }
2598
2599 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2600 }
2601 }
2602
2603 if (!first_time)
2604 fputs ("\n", stderr);
2605 }
2606
2607 if (nl)
2608 fputs (nl, stderr);
2609
2610 if (rs6000_recip_control)
2611 {
2612 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2613
2614 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2615 if (rs6000_recip_bits[m])
2616 {
2617 fprintf (stderr,
2618 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2619 GET_MODE_NAME (m),
2620 (RS6000_RECIP_AUTO_RE_P (m)
2621 ? "auto"
2622 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2623 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2626 }
2627
2628 fputs ("\n", stderr);
2629 }
2630
2631 if (rs6000_cpu_index >= 0)
2632 {
2633 const char *name = processor_target_table[rs6000_cpu_index].name;
2634 HOST_WIDE_INT flags
2635 = processor_target_table[rs6000_cpu_index].target_enable;
2636
2637 sprintf (flags_buffer, "-mcpu=%s flags", name);
2638 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2639 }
2640 else
2641 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2642
2643 if (rs6000_tune_index >= 0)
2644 {
2645 const char *name = processor_target_table[rs6000_tune_index].name;
2646 HOST_WIDE_INT flags
2647 = processor_target_table[rs6000_tune_index].target_enable;
2648
2649 sprintf (flags_buffer, "-mtune=%s flags", name);
2650 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2651 }
2652 else
2653 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2654
2655 cl_target_option_save (&cl_opts, &global_options);
2656 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2657 rs6000_isa_flags);
2658
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2660 rs6000_isa_flags_explicit);
2661
2662 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2663 rs6000_builtin_mask);
2664
2665 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2666
2667 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2668 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2669
2670 switch (rs6000_sched_costly_dep)
2671 {
2672 case max_dep_latency:
2673 costly_str = "max_dep_latency";
2674 break;
2675
2676 case no_dep_costly:
2677 costly_str = "no_dep_costly";
2678 break;
2679
2680 case all_deps_costly:
2681 costly_str = "all_deps_costly";
2682 break;
2683
2684 case true_store_to_load_dep_costly:
2685 costly_str = "true_store_to_load_dep_costly";
2686 break;
2687
2688 case store_to_load_dep_costly:
2689 costly_str = "store_to_load_dep_costly";
2690 break;
2691
2692 default:
2693 costly_str = costly_num;
2694 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2695 break;
2696 }
2697
2698 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2699
2700 switch (rs6000_sched_insert_nops)
2701 {
2702 case sched_finish_regroup_exact:
2703 nop_str = "sched_finish_regroup_exact";
2704 break;
2705
2706 case sched_finish_pad_groups:
2707 nop_str = "sched_finish_pad_groups";
2708 break;
2709
2710 case sched_finish_none:
2711 nop_str = "sched_finish_none";
2712 break;
2713
2714 default:
2715 nop_str = nop_num;
2716 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2717 break;
2718 }
2719
2720 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2721
2722 switch (rs6000_sdata)
2723 {
2724 default:
2725 case SDATA_NONE:
2726 break;
2727
2728 case SDATA_DATA:
2729 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2730 break;
2731
2732 case SDATA_SYSV:
2733 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2734 break;
2735
2736 case SDATA_EABI:
2737 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2738 break;
2739
2740 }
2741
2742 switch (rs6000_traceback)
2743 {
2744 case traceback_default: trace_str = "default"; break;
2745 case traceback_none: trace_str = "none"; break;
2746 case traceback_part: trace_str = "part"; break;
2747 case traceback_full: trace_str = "full"; break;
2748 default: trace_str = "unknown"; break;
2749 }
2750
2751 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2752
2753 switch (rs6000_current_cmodel)
2754 {
2755 case CMODEL_SMALL: cmodel_str = "small"; break;
2756 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2757 case CMODEL_LARGE: cmodel_str = "large"; break;
2758 default: cmodel_str = "unknown"; break;
2759 }
2760
2761 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2762
2763 switch (rs6000_current_abi)
2764 {
2765 case ABI_NONE: abi_str = "none"; break;
2766 case ABI_AIX: abi_str = "aix"; break;
2767 case ABI_ELFv2: abi_str = "ELFv2"; break;
2768 case ABI_V4: abi_str = "V4"; break;
2769 case ABI_DARWIN: abi_str = "darwin"; break;
2770 default: abi_str = "unknown"; break;
2771 }
2772
2773 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2774
2775 if (rs6000_altivec_abi)
2776 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2777
2778 if (rs6000_darwin64_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2780
2781 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2782 (TARGET_SOFT_FLOAT ? "true" : "false"));
2783
2784 if (TARGET_LINK_STACK)
2785 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2786
2787 if (TARGET_P8_FUSION)
2788 {
2789 char options[80];
2790
2791 strcpy (options, "power8");
2792 if (TARGET_P8_FUSION_SIGN)
2793 strcat (options, ", sign");
2794
2795 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2796 }
2797
2798 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2799 TARGET_SECURE_PLT ? "secure" : "bss");
2800 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2801 aix_struct_return ? "aix" : "sysv");
2802 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2803 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2804 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2805 tf[!!rs6000_align_branch_targets]);
2806 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2807 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2808 rs6000_long_double_type_size);
2809 if (rs6000_long_double_type_size > 64)
2810 {
2811 fprintf (stderr, DEBUG_FMT_S, "long double type",
2812 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2813 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2814 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2815 }
2816 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2817 (int)rs6000_sched_restricted_insns_priority);
2818 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2819 (int)END_BUILTINS);
2820 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2821 (int)RS6000_BUILTIN_COUNT);
2822
2823 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2824 (int)TARGET_FLOAT128_ENABLE_TYPE);
2825
2826 if (TARGET_VSX)
2827 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2828 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2829
2830 if (TARGET_DIRECT_MOVE_128)
2831 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2832 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2833 }
2834
2835 \f
2836 /* Update the addr mask bits in reg_addr to help secondary reload and the
2837 legitimate address support figure out the appropriate addressing to
2838 use. */
2839
2840 static void
2841 rs6000_setup_reg_addr_masks (void)
2842 {
2843 ssize_t rc, reg, m, nregs;
2844 addr_mask_type any_addr_mask, addr_mask;
2845
2846 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2847 {
2848 machine_mode m2 = (machine_mode) m;
2849 bool complex_p = false;
2850 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2851 size_t msize;
2852
2853 if (COMPLEX_MODE_P (m2))
2854 {
2855 complex_p = true;
2856 m2 = GET_MODE_INNER (m2);
2857 }
2858
2859 msize = GET_MODE_SIZE (m2);
2860
2861 /* SDmode is special in that we want to access it only via REG+REG
2862 addressing on power7 and above, since we want to use the LFIWZX and
2863 STFIWX instructions to load and store it. */
2864 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2865
2866 any_addr_mask = 0;
2867 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2868 {
2869 addr_mask = 0;
2870 reg = reload_reg_map[rc].reg;
2871
2872 /* Can mode values go in the GPR/FPR/Altivec registers? */
2873 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2874 {
2875 bool small_int_vsx_p = (small_int_p
2876 && (rc == RELOAD_REG_FPR
2877 || rc == RELOAD_REG_VMX));
2878
2879 nregs = rs6000_hard_regno_nregs[m][reg];
2880 addr_mask |= RELOAD_REG_VALID;
2881
2882 /* Indicate if the mode takes more than 1 physical register. If
2883 it takes a single register, indicate it can do REG+REG
2884 addressing. Small integers in VSX registers can only do
2885 REG+REG addressing. */
2886 if (small_int_vsx_p)
2887 addr_mask |= RELOAD_REG_INDEXED;
2888 else if (nregs > 1 || m == BLKmode || complex_p)
2889 addr_mask |= RELOAD_REG_MULTIPLE;
2890 else
2891 addr_mask |= RELOAD_REG_INDEXED;
2892
2893 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2894 addressing. If we allow scalars into Altivec registers,
2895 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2896
2897 For VSX systems, we don't allow update addressing for
2898 DFmode/SFmode if those registers can go in both the
2899 traditional floating point registers and Altivec registers.
2900 The load/store instructions for the Altivec registers do not
2901 have update forms. If we allowed update addressing, it seems
2902 to break IV-OPT code using floating point if the index type is
2903 int instead of long (PR target/81550 and target/84042). */
2904
2905 if (TARGET_UPDATE
2906 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2907 && msize <= 8
2908 && !VECTOR_MODE_P (m2)
2909 && !FLOAT128_VECTOR_P (m2)
2910 && !complex_p
2911 && (m != E_DFmode || !TARGET_VSX)
2912 && (m != E_SFmode || !TARGET_P8_VECTOR)
2913 && !small_int_vsx_p)
2914 {
2915 addr_mask |= RELOAD_REG_PRE_INCDEC;
2916
2917 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2918 we don't allow PRE_MODIFY for some multi-register
2919 operations. */
2920 switch (m)
2921 {
2922 default:
2923 addr_mask |= RELOAD_REG_PRE_MODIFY;
2924 break;
2925
2926 case E_DImode:
2927 if (TARGET_POWERPC64)
2928 addr_mask |= RELOAD_REG_PRE_MODIFY;
2929 break;
2930
2931 case E_DFmode:
2932 case E_DDmode:
2933 if (TARGET_HARD_FLOAT)
2934 addr_mask |= RELOAD_REG_PRE_MODIFY;
2935 break;
2936 }
2937 }
2938 }
2939
2940 /* GPR and FPR registers can do REG+OFFSET addressing, except
2941 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2942 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2943 if ((addr_mask != 0) && !indexed_only_p
2944 && msize <= 8
2945 && (rc == RELOAD_REG_GPR
2946 || ((msize == 8 || m2 == SFmode)
2947 && (rc == RELOAD_REG_FPR
2948 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2949 addr_mask |= RELOAD_REG_OFFSET;
2950
2951 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2952 instructions are enabled. The offset for 128-bit VSX registers is
2953 only 12 bits. While GPRs can handle the full offset range, VSX
2954 registers can only handle the restricted range. */
2955 else if ((addr_mask != 0) && !indexed_only_p
2956 && msize == 16 && TARGET_P9_VECTOR
2957 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2958 || (m2 == TImode && TARGET_VSX)))
2959 {
2960 addr_mask |= RELOAD_REG_OFFSET;
2961 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2962 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2963 }
2964
2965 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2966 addressing on 128-bit types. */
2967 if (rc == RELOAD_REG_VMX && msize == 16
2968 && (addr_mask & RELOAD_REG_VALID) != 0)
2969 addr_mask |= RELOAD_REG_AND_M16;
2970
2971 reg_addr[m].addr_mask[rc] = addr_mask;
2972 any_addr_mask |= addr_mask;
2973 }
2974
2975 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2976 }
2977 }
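
/* Illustrative outcome, assuming a typical 64-bit -mcpu=power9 setup: after
   this routine, reg_addr[DImode].addr_mask[RELOAD_REG_GPR] has
   RELOAD_REG_VALID, RELOAD_REG_INDEXED, RELOAD_REG_OFFSET and both update
   bits set, while reg_addr[V2DFmode].addr_mask[RELOAD_REG_VMX] has
   RELOAD_REG_VALID, RELOAD_REG_INDEXED, RELOAD_REG_OFFSET,
   RELOAD_REG_QUAD_OFFSET and RELOAD_REG_AND_M16 set.  */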
2978
2979 \f
2980 /* Initialize the various global tables that are based on register size. */
2981 static void
2982 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2983 {
2984 ssize_t r, m, c;
2985 int align64;
2986 int align32;
2987
2988 /* Precalculate REGNO_REG_CLASS. */
2989 rs6000_regno_regclass[0] = GENERAL_REGS;
2990 for (r = 1; r < 32; ++r)
2991 rs6000_regno_regclass[r] = BASE_REGS;
2992
2993 for (r = 32; r < 64; ++r)
2994 rs6000_regno_regclass[r] = FLOAT_REGS;
2995
2996 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2997 rs6000_regno_regclass[r] = NO_REGS;
2998
2999 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3000 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3001
3002 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3003 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3004 rs6000_regno_regclass[r] = CR_REGS;
3005
3006 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3007 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3008 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3009 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3010 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3011 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3012 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3013 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3014 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3015 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3016
3017 /* Precalculate register class to simpler reload register class. We don't
3018 need all of the register classes that are combinations of different
3019 classes, just the simple ones that have constraint letters. */
3020 for (c = 0; c < N_REG_CLASSES; c++)
3021 reg_class_to_reg_type[c] = NO_REG_TYPE;
3022
3023 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3025 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3026 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3029 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3033
3034 if (TARGET_VSX)
3035 {
3036 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3037 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3038 }
3039 else
3040 {
3041 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3042 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3043 }
3044
3045 /* Precalculate the valid memory formats as well as the vector information;
3046 this must be set up before the rs6000_hard_regno_nregs_internal calls
3047 below. */
3048 gcc_assert ((int)VECTOR_NONE == 0);
3049 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3050 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3051
3052 gcc_assert ((int)CODE_FOR_nothing == 0);
3053 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3054
3055 gcc_assert ((int)NO_REGS == 0);
3056 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3057
3058 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
3059 controls whether the compiler may assume native alignment or must still use 128-bit alignment. */
3060 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3061 {
3062 align64 = 64;
3063 align32 = 32;
3064 }
3065 else
3066 {
3067 align64 = 128;
3068 align32 = 128;
3069 }
3070
3071 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3072 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3073 if (TARGET_FLOAT128_TYPE)
3074 {
3075 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3076 rs6000_vector_align[KFmode] = 128;
3077
3078 if (FLOAT128_IEEE_P (TFmode))
3079 {
3080 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3081 rs6000_vector_align[TFmode] = 128;
3082 }
3083 }
3084
3085 /* V2DF mode, VSX only. */
3086 if (TARGET_VSX)
3087 {
3088 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3089 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3090 rs6000_vector_align[V2DFmode] = align64;
3091 }
3092
3093 /* V4SF mode, either VSX or Altivec. */
3094 if (TARGET_VSX)
3095 {
3096 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3097 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3098 rs6000_vector_align[V4SFmode] = align32;
3099 }
3100 else if (TARGET_ALTIVEC)
3101 {
3102 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3103 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3104 rs6000_vector_align[V4SFmode] = align32;
3105 }
3106
3107 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3108 and stores. */
3109 if (TARGET_ALTIVEC)
3110 {
3111 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3112 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3113 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3114 rs6000_vector_align[V4SImode] = align32;
3115 rs6000_vector_align[V8HImode] = align32;
3116 rs6000_vector_align[V16QImode] = align32;
3117
3118 if (TARGET_VSX)
3119 {
3120 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3121 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3122 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3123 }
3124 else
3125 {
3126 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3127 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3128 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3129 }
3130 }
3131
3132 /* V2DImode: full arithmetic support depends on the ISA 2.07 vector unit. Allow
3133 it under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3134 if (TARGET_VSX)
3135 {
3136 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3137 rs6000_vector_unit[V2DImode]
3138 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3139 rs6000_vector_align[V2DImode] = align64;
3140
3141 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3142 rs6000_vector_unit[V1TImode]
3143 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3144 rs6000_vector_align[V1TImode] = 128;
3145 }
3146
3147 /* DFmode, see if we want to use the VSX unit. Memory is handled
3148 differently, so don't set rs6000_vector_mem. */
3149 if (TARGET_VSX)
3150 {
3151 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3152 rs6000_vector_align[DFmode] = 64;
3153 }
3154
3155 /* SFmode, see if we want to use the VSX unit. */
3156 if (TARGET_P8_VECTOR)
3157 {
3158 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3159 rs6000_vector_align[SFmode] = 32;
3160 }
3161
3162 /* Allow TImode in VSX register and set the VSX memory macros. */
3163 if (TARGET_VSX)
3164 {
3165 rs6000_vector_mem[TImode] = VECTOR_VSX;
3166 rs6000_vector_align[TImode] = align64;
3167 }
3168
3169 /* Register class constraints for the constraints that depend on compile
3170 switches. When the VSX code was added, different constraints were added
3171 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3172 of the VSX registers are used. The register classes for scalar floating
3173 point types are set based on whether we allow that type into the upper
3174 (Altivec) registers. GCC has register classes to target the Altivec
3175 registers for load/store operations, to select using a VSX memory
3176 operation instead of the traditional floating point operation. The
3177 constraints are:
3178
3179 d - Register class to use with traditional DFmode instructions.
3180 f - Register class to use with traditional SFmode instructions.
3181 v - Altivec register.
3182 wa - Any VSX register.
3183 wc - Reserved to represent individual CR bits (used in LLVM).
3184 wd - Preferred register class for V2DFmode.
3185 wf - Preferred register class for V4SFmode.
3186 wg - Float register for power6x move insns.
3187 wh - FP register for direct move instructions.
3188 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3189 wj - FP or VSX register to hold 64-bit integers for direct moves.
3190 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3191 wl - Float register if we can do 32-bit signed int loads.
3192 wm - VSX register for ISA 2.07 direct move operations.
3193 wn - always NO_REGS.
3194 wr - GPR if 64-bit mode is permitted.
3195 ws - Register class to do ISA 2.06 DF operations.
3196 wt - VSX register for TImode in VSX registers.
3197 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3198 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3199 ww - Register class to do SF conversions in with VSX operations.
3200 wx - Float register if we can do 32-bit int stores.
3201 wy - Register class to do ISA 2.07 SF operations.
3202 wz - Float register if we can do 32-bit unsigned int loads.
3203 wH - Altivec register if SImode is allowed in VSX registers.
3204 wI - VSX register if SImode is allowed in VSX registers.
3205 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3206 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3207
3208 if (TARGET_HARD_FLOAT)
3209 {
3210 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3211 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3212 }
3213
3214 if (TARGET_VSX)
3215 {
3216 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3217 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3218 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3219 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3220 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3222 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3223 }
3224
3225 /* Add conditional constraints based on various options, to allow us to
3226 collapse multiple insn patterns. */
3227 if (TARGET_ALTIVEC)
3228 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3229
3230 if (TARGET_MFPGPR) /* DFmode */
3231 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3232
3233 if (TARGET_LFIWAX)
3234 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3235
3236 if (TARGET_DIRECT_MOVE)
3237 {
3238 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3239 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3240 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3241 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3242 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3243 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3244 }
3245
3246 if (TARGET_POWERPC64)
3247 {
3248 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3249 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3250 }
3251
3252 if (TARGET_P8_VECTOR) /* SFmode */
3253 {
3254 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3255 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3256 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3257 }
3258 else if (TARGET_VSX)
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3260
3261 if (TARGET_STFIWX)
3262 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3263
3264 if (TARGET_LFIWZX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_FLOAT128_TYPE)
3268 {
3269 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3270 if (FLOAT128_IEEE_P (TFmode))
3271 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3272 }
3273
3274 if (TARGET_P9_VECTOR)
3275 {
3276 /* Support for new D-form instructions. */
3277 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3278
3279 /* Support for ISA 3.0 (power9) vectors. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3281 }
3282
3283 /* Support for new direct moves (ISA 3.0 + 64bit). */
3284 if (TARGET_DIRECT_MOVE_128)
3285 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3286
3287 /* Support small integers in VSX registers. */
3288 if (TARGET_P8_VECTOR)
3289 {
3290 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3291 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3292 if (TARGET_P9_VECTOR)
3293 {
3294 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3295 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3296 }
3297 }
3298
3299 /* Set up the reload helper and direct move functions. */
3300 if (TARGET_VSX || TARGET_ALTIVEC)
3301 {
3302 if (TARGET_64BIT)
3303 {
3304 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3305 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3306 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3307 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3308 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3309 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3310 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3311 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3312 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3313 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3314 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3315 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3316 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3317 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3318 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3319 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3320 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3321 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3322 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3323 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3324
3325 if (FLOAT128_VECTOR_P (KFmode))
3326 {
3327 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3328 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3329 }
3330
3331 if (FLOAT128_VECTOR_P (TFmode))
3332 {
3333 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3334 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3335 }
3336
3337 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3338 available. */
3339 if (TARGET_NO_SDMODE_STACK)
3340 {
3341 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3342 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3343 }
3344
3345 if (TARGET_VSX)
3346 {
3347 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3348 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3349 }
3350
3351 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3352 {
3353 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3354 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3355 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3356 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3357 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3358 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3359 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3360 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3361 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3362
3363 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3364 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3365 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3366 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3367 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3368 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3369 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3370 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3371 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3372
3373 if (FLOAT128_VECTOR_P (KFmode))
3374 {
3375 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3376 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3377 }
3378
3379 if (FLOAT128_VECTOR_P (TFmode))
3380 {
3381 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3382 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3383 }
3384 }
3385 }
3386 else
3387 {
3388 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3389 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3390 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3391 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3392 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3393 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3394 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3395 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3396 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3397 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3398 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3399 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3400 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3401 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3402 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3403 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3404 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3405 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3406 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3407 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3408
3409 if (FLOAT128_VECTOR_P (KFmode))
3410 {
3411 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3412 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3413 }
3414
3415 if (FLOAT128_IEEE_P (TFmode))
3416 {
3417 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3418 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3419 }
3420
3421 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3422 available. */
3423 if (TARGET_NO_SDMODE_STACK)
3424 {
3425 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3426 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3427 }
3428
3429 if (TARGET_VSX)
3430 {
3431 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3432 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3433 }
3434
3435 if (TARGET_DIRECT_MOVE)
3436 {
3437 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3438 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3439 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3440 }
3441 }
3442
3443 reg_addr[DFmode].scalar_in_vmx_p = true;
3444 reg_addr[DImode].scalar_in_vmx_p = true;
3445
3446 if (TARGET_P8_VECTOR)
3447 {
3448 reg_addr[SFmode].scalar_in_vmx_p = true;
3449 reg_addr[SImode].scalar_in_vmx_p = true;
3450
3451 if (TARGET_P9_VECTOR)
3452 {
3453 reg_addr[HImode].scalar_in_vmx_p = true;
3454 reg_addr[QImode].scalar_in_vmx_p = true;
3455 }
3456 }
3457 }
3458
3459 /* Precalculate HARD_REGNO_NREGS. */
3460 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3461 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3462 rs6000_hard_regno_nregs[m][r]
3463 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3464
3465 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3466 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3467 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3468 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3469 rs6000_hard_regno_mode_ok_p[m][r] = true;
3470
3471 /* Precalculate CLASS_MAX_NREGS sizes. */
3472 for (c = 0; c < LIM_REG_CLASSES; ++c)
3473 {
3474 int reg_size;
3475
3476 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3477 reg_size = UNITS_PER_VSX_WORD;
3478
3479 else if (c == ALTIVEC_REGS)
3480 reg_size = UNITS_PER_ALTIVEC_WORD;
3481
3482 else if (c == FLOAT_REGS)
3483 reg_size = UNITS_PER_FP_WORD;
3484
3485 else
3486 reg_size = UNITS_PER_WORD;
3487
3488 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3489 {
3490 machine_mode m2 = (machine_mode)m;
3491 int reg_size2 = reg_size;
3492
3493 /* TDmode and IBM 128-bit floating point always take 2 registers, even
3494 in VSX. */
3495 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3496 reg_size2 = UNITS_PER_FP_WORD;
3497
3498 rs6000_class_max_nregs[m][c]
3499 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3500 }
3501 }
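/* A worked example of the computation above: V2DFmode is 16 bytes, so in a
   VSX_REGS class register (UNITS_PER_VSX_WORD == 16) it needs
   (16 + 16 - 1) / 16 == 1 register, whereas IBM extended double (also 16
   bytes, but FLOAT128_2REG_P) drops reg_size2 to UNITS_PER_FP_WORD == 8 and
   so takes (16 + 8 - 1) / 8 == 2 registers.  */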
3502
3503 /* Calculate the modes for which to automatically generate code using the
3504 reciprocal divide and reciprocal square root estimate instructions. In the
3505 future, possibly generate these instructions automatically even if the user
3506 did not specify -mrecip. The double precision reciprocal sqrt estimate on
3507 older machines is not accurate enough. */
3508 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3509 if (TARGET_FRES)
3510 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3511 if (TARGET_FRE)
3512 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3513 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3514 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3515 if (VECTOR_UNIT_VSX_P (V2DFmode))
3516 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3517
3518 if (TARGET_FRSQRTES)
3519 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3520 if (TARGET_FRSQRTE)
3521 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3522 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3523 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3524 if (VECTOR_UNIT_VSX_P (V2DFmode))
3525 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
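/* A sketch of how these bits are eventually used (see rs6000_emit_swdiv and
   rs6000_emit_swsqrt elsewhere in this file): a/b becomes a * (1/b), where
   1/b starts from the fre/fres estimate x0 and is refined with
   Newton-Raphson steps of the form

	x1 = x0 + x0 * (1.0 - b * x0);

   and rsqrt similarly refines the frsqrte/frsqrtes estimate.  */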
3526
3527 if (rs6000_recip_control)
3528 {
3529 if (!flag_finite_math_only)
3530 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3531 "-ffast-math");
3532 if (flag_trapping_math)
3533 warning (0, "%qs requires %qs or %qs", "-mrecip",
3534 "-fno-trapping-math", "-ffast-math");
3535 if (!flag_reciprocal_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3537 "-ffast-math");
3538 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3539 {
3540 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3541 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3542 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3543
3544 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3545 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3546 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3547
3548 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3549 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3550 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3551
3552 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3553 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3554 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3555
3556 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3557 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3558 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3559
3560 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3561 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3562 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3563
3564 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3565 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3566 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3567
3568 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3569 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3570 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3571 }
3572 }
3573
3574 /* Update the addr mask bits in reg_addr to help secondary reload and the
3575 legitimate address support figure out the appropriate addressing to
3576 use. */
3577 rs6000_setup_reg_addr_masks ();
3578
3579 if (global_init_p || TARGET_DEBUG_TARGET)
3580 {
3581 if (TARGET_DEBUG_REG)
3582 rs6000_debug_reg_global ();
3583
3584 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3585 fprintf (stderr,
3586 "SImode variable mult cost = %d\n"
3587 "SImode constant mult cost = %d\n"
3588 "SImode short constant mult cost = %d\n"
3589 "DImode multiplication cost = %d\n"
3590 "SImode division cost = %d\n"
3591 "DImode division cost = %d\n"
3592 "Simple fp operation cost = %d\n"
3593 "DFmode multiplication cost = %d\n"
3594 "SFmode division cost = %d\n"
3595 "DFmode division cost = %d\n"
3596 "cache line size = %d\n"
3597 "l1 cache size = %d\n"
3598 "l2 cache size = %d\n"
3599 "simultaneous prefetches = %d\n"
3600 "\n",
3601 rs6000_cost->mulsi,
3602 rs6000_cost->mulsi_const,
3603 rs6000_cost->mulsi_const9,
3604 rs6000_cost->muldi,
3605 rs6000_cost->divsi,
3606 rs6000_cost->divdi,
3607 rs6000_cost->fp,
3608 rs6000_cost->dmul,
3609 rs6000_cost->sdiv,
3610 rs6000_cost->ddiv,
3611 rs6000_cost->cache_line_size,
3612 rs6000_cost->l1_cache_size,
3613 rs6000_cost->l2_cache_size,
3614 rs6000_cost->simultaneous_prefetches);
3615 }
3616 }
3617
3618 #if TARGET_MACHO
3619 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3620
3621 static void
3622 darwin_rs6000_override_options (void)
3623 {
3624 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3625 off. */
3626 rs6000_altivec_abi = 1;
3627 TARGET_ALTIVEC_VRSAVE = 1;
3628 rs6000_current_abi = ABI_DARWIN;
3629
3630 if (DEFAULT_ABI == ABI_DARWIN
3631 && TARGET_64BIT)
3632 darwin_one_byte_bool = 1;
3633
3634 if (TARGET_64BIT && ! TARGET_POWERPC64)
3635 {
3636 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3637 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3638 }
3639 if (flag_mkernel)
3640 {
3641 rs6000_default_long_calls = 1;
3642 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3643 }
3644
3645 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3646 Altivec. */
3647 if (!flag_mkernel && !flag_apple_kext
3648 && TARGET_64BIT
3649 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3650 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3651
3652 /* Unless the user (not the configurer) has explicitly overridden
3653 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3654 G4 unless targeting the kernel. */
3655 if (!flag_mkernel
3656 && !flag_apple_kext
3657 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3658 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3659 && ! global_options_set.x_rs6000_cpu_index)
3660 {
3661 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3662 }
3663 }
3664 #endif
3665
3666 /* If not otherwise specified by a target, make 'long double' equivalent to
3667 'double'. */
3668
3669 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3670 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3671 #endif
3672
3673 /* Return the builtin mask of the various options in use that could affect
3674 which builtins are enabled. In the past we used target_flags, but we've run
3675 out of bits, and some options are no longer in target_flags. */
3676
3677 HOST_WIDE_INT
3678 rs6000_builtin_mask_calculate (void)
3679 {
3680 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3681 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3682 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3683 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3684 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3685 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3686 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3687 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3688 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3689 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3690 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3691 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3692 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3693 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3694 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3695 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3696 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3697 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3698 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3699 | ((TARGET_LONG_DOUBLE_128
3700 && TARGET_HARD_FLOAT
3701 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3702 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3703 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3704 }
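/* Illustrative sketch (not actual code from this file): a builtin whose
   required mask is, say, RS6000_BTM_ALTIVEC | RS6000_BTM_VSX is accepted
   only when all of its bits are present in the computed mask, roughly:

	HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
	if ((builtin_mask & mask) != builtin_mask)
	  ... reject or fold away the builtin ...

   where builtin_mask stands for the RS6000_BTM_* requirement recorded for
   the builtin.  */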
3705
3706 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3707 to clobber the XER[CA] bit because clobbering that bit without telling
3708 the compiler worked just fine with versions of GCC before GCC 5, and
3709 breaking a lot of older code in ways that are hard to track down is
3710 not such a great idea. */
3711
3712 static rtx_insn *
3713 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3714 vec<const char *> &/*constraints*/,
3715 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3716 {
3717 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3718 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3719 return NULL;
3720 }
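/* For example, old user code such as

	long t, x = 1;
	__asm__ ("addic %0,%1,1" : "=r" (t) : "r" (x));

   silently modifies XER[CA]; the implicit clobber added above keeps such asm
   correct without requiring the statement to declare the clobber itself.  */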
3721
3722 /* Override command line options.
3723
3724 Combine build-specific configuration information with options
3725 specified on the command line to set various state variables which
3726 influence code generation, optimization, and expansion of built-in
3727 functions. Assure that command-line configuration preferences are
3728 compatible with each other and with the build configuration; issue
3729 warnings while adjusting configuration or error messages while
3730 rejecting configuration.
3731
3732 Upon entry to this function:
3733
3734 This function is called once at the beginning of
3735 compilation, and then again at the start and end of compiling
3736 each section of code that has a different configuration, as
3737 indicated, for example, by adding the
3738
3739 __attribute__((__target__("cpu=power9")))
3740
3741 qualifier to a function definition or, for example, by bracketing
3742 code between
3743
3744 #pragma GCC target("altivec")
3745
3746 and
3747
3748 #pragma GCC reset_options
3749
3750 directives. Parameter global_init_p is true for the initial
3751 invocation, which initializes global variables, and false for all
3752 subsequent invocations.
3753
3754
3755 Various global state information is assumed to be valid. This
3756 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3757 default CPU specified at build configure time, TARGET_DEFAULT,
3758 representing the default set of option flags for the default
3759 target, and global_options_set.x_rs6000_isa_flags, representing
3760 which options were requested on the command line.
3761
3762 Upon return from this function:
3763
3764 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3765 was set by name on the command line. Additionally, if certain
3766 attributes are automatically enabled or disabled by this function
3767 in order to assure compatibility between options and
3768 configuration, the flags associated with those attributes are
3769 also set. By setting these "explicit bits", we avoid the risk
3770 that other code might accidentally overwrite these particular
3771 attributes with "default values".
3772
3773 The various bits of rs6000_isa_flags are set to indicate the
3774 target options that have been selected for the most current
3775 compilation efforts. This has the effect of also turning on the
3776 associated TARGET_XXX values since these are macros which are
3777 generally defined to test the corresponding bit of the
3778 rs6000_isa_flags variable.
3779
3780 The variable rs6000_builtin_mask is set to represent the target
3781 options for the most current compilation efforts, consistent with
3782 the current contents of rs6000_isa_flags. This variable controls
3783 expansion of built-in functions.
3784
3785 Various other global variables and fields of global structures
3786 (over 50 in all) are initialized to reflect the desired options
3787 for the most current compilation efforts. */
3788
3789 static bool
3790 rs6000_option_override_internal (bool global_init_p)
3791 {
3792 bool ret = true;
3793
3794 HOST_WIDE_INT set_masks;
3795 HOST_WIDE_INT ignore_masks;
3796 int cpu_index = -1;
3797 int tune_index;
3798 struct cl_target_option *main_target_opt
3799 = ((global_init_p || target_option_default_node == NULL)
3800 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3801
3802 /* Print defaults. */
3803 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3804 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3805
3806 /* Remember the explicit arguments. */
3807 if (global_init_p)
3808 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3809
3810 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3811 library functions, so warn about it. The flag may be useful for
3812 performance studies from time to time though, so don't disable it
3813 entirely. */
3814 if (global_options_set.x_rs6000_alignment_flags
3815 && rs6000_alignment_flags == MASK_ALIGN_POWER
3816 && DEFAULT_ABI == ABI_DARWIN
3817 && TARGET_64BIT)
3818 warning (0, "%qs is not supported for 64-bit Darwin;"
3819 " it is incompatible with the installed C and C++ libraries",
3820 "-malign-power");
3821
3822 /* Numerous experiments show that IRA-based loop pressure
3823 calculation works better for RTL loop invariant motion on targets
3824 with enough (>= 32) registers. It is an expensive optimization,
3825 so it is enabled only when optimizing for peak performance. */
3826 if (optimize >= 3 && global_init_p
3827 && !global_options_set.x_flag_ira_loop_pressure)
3828 flag_ira_loop_pressure = 1;
3829
3830 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3831 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3832 options were already specified. */
3833 if (flag_sanitize & SANITIZE_USER_ADDRESS
3834 && !global_options_set.x_flag_asynchronous_unwind_tables)
3835 flag_asynchronous_unwind_tables = 1;
3836
3837 /* Set the pointer size. */
3838 if (TARGET_64BIT)
3839 {
3840 rs6000_pmode = DImode;
3841 rs6000_pointer_size = 64;
3842 }
3843 else
3844 {
3845 rs6000_pmode = SImode;
3846 rs6000_pointer_size = 32;
3847 }
3848
3849 /* Some OSs don't support saving the high part of 64-bit registers on context
3850 switch. Other OSs don't support saving Altivec registers. On those OSs,
3851 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3852 if the user wants either, the user must explicitly specify them and we
3853 won't interfere with the user's specification. */
3854
3855 set_masks = POWERPC_MASKS;
3856 #ifdef OS_MISSING_POWERPC64
3857 if (OS_MISSING_POWERPC64)
3858 set_masks &= ~OPTION_MASK_POWERPC64;
3859 #endif
3860 #ifdef OS_MISSING_ALTIVEC
3861 if (OS_MISSING_ALTIVEC)
3862 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3863 | OTHER_VSX_VECTOR_MASKS);
3864 #endif
3865
3866 /* Don't let the processor default override options given explicitly. */
3867 set_masks &= ~rs6000_isa_flags_explicit;
3868
3869 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3870 the cpu in a target attribute or pragma, but did not specify a tuning
3871 option, use the cpu for the tuning option rather than the option specified
3872 with -mtune on the command line. Process a '--with-cpu' configure-time
3873 request as an implicit -mcpu. */
3874 if (rs6000_cpu_index >= 0)
3875 cpu_index = rs6000_cpu_index;
3876 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3877 cpu_index = main_target_opt->x_rs6000_cpu_index;
3878 else if (OPTION_TARGET_CPU_DEFAULT)
3879 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3880
3881 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3882 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3883 with those from the cpu, except for options that were explicitly set. If
3884 we don't have a cpu, do not override the target bits set in
3885 TARGET_DEFAULT. */
3886 if (cpu_index >= 0)
3887 {
3888 rs6000_cpu_index = cpu_index;
3889 rs6000_isa_flags &= ~set_masks;
3890 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3891 & set_masks);
3892 }
3893 else
3894 {
3895 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3896 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3897 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3898 to using rs6000_isa_flags, we need to do the initialization here.
3899
3900 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3901 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3902 HOST_WIDE_INT flags;
3903 if (TARGET_DEFAULT)
3904 flags = TARGET_DEFAULT;
3905 else
3906 {
3907 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3908 const char *default_cpu = (!TARGET_POWERPC64
3909 ? "powerpc"
3910 : (BYTES_BIG_ENDIAN
3911 ? "powerpc64"
3912 : "powerpc64le"));
3913 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3914 flags = processor_target_table[default_cpu_index].target_enable;
3915 }
3916 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3917 }
3918
3919 if (rs6000_tune_index >= 0)
3920 tune_index = rs6000_tune_index;
3921 else if (cpu_index >= 0)
3922 rs6000_tune_index = tune_index = cpu_index;
3923 else
3924 {
3925 size_t i;
3926 enum processor_type tune_proc
3927 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3928
3929 tune_index = -1;
3930 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3931 if (processor_target_table[i].processor == tune_proc)
3932 {
3933 tune_index = i;
3934 break;
3935 }
3936 }
3937
3938 if (cpu_index >= 0)
3939 rs6000_cpu = processor_target_table[cpu_index].processor;
3940 else
3941 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3942
3943 gcc_assert (tune_index >= 0);
3944 rs6000_tune = processor_target_table[tune_index].processor;
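/* A sketch of the resolution above: plain -mcpu=power9 gives
   cpu_index == tune_index (both power9); -mcpu=power8 -mtune=power9 keeps
   the power8 ISA flags but tunes the schedule for power9; with neither
   option, tuning falls back to PROCESSOR_DEFAULT or PROCESSOR_DEFAULT64.  */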
3945
3946 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3947 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3948 || rs6000_cpu == PROCESSOR_PPCE5500)
3949 {
3950 if (TARGET_ALTIVEC)
3951 error ("AltiVec not supported in this target");
3952 }
3953
3954 /* If we are optimizing big endian systems for space, use the load/store
3955 multiple instructions. */
3956 if (BYTES_BIG_ENDIAN && optimize_size)
3957 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3958
3959 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3960 because the hardware doesn't support the instructions used in little
3961 endian mode, and they cause an alignment trap. The 750 does not cause an
3962 alignment trap (except when the target address is unaligned). */
3963
3964 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3965 {
3966 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3967 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3968 warning (0, "%qs is not supported on little endian systems",
3969 "-mmultiple");
3970 }
3971
3972 /* If little-endian, default to -mstrict-align on older processors.
3973 Testing for htm matches power8 and later. */
3974 if (!BYTES_BIG_ENDIAN
3975 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3976 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3977
3978 if (!rs6000_fold_gimple)
3979 fprintf (stderr,
3980 "gimple folding of rs6000 builtins has been disabled.\n");
3981
3982 /* Add some warnings for VSX. */
3983 if (TARGET_VSX)
3984 {
3985 const char *msg = NULL;
3986 if (!TARGET_HARD_FLOAT)
3987 {
3988 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3989 msg = N_("-mvsx requires hardware floating point");
3990 else
3991 {
3992 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3993 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3994 }
3995 }
3996 else if (TARGET_AVOID_XFORM > 0)
3997 msg = N_("-mvsx needs indexed addressing");
3998 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3999 & OPTION_MASK_ALTIVEC))
4000 {
4001 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4002 msg = N_("-mvsx and -mno-altivec are incompatible");
4003 else
4004 msg = N_("-mno-altivec disables vsx");
4005 }
4006
4007 if (msg)
4008 {
4009 warning (0, msg);
4010 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4011 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4012 }
4013 }
4014
4015 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4016 the -mcpu setting to enable options that conflict. */
4017 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4018 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4019 | OPTION_MASK_ALTIVEC
4020 | OPTION_MASK_VSX)) != 0)
4021 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4022 | OPTION_MASK_DIRECT_MOVE)
4023 & ~rs6000_isa_flags_explicit);
4024
4025 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4026 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4027
4028 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4029 off all of the options that depend on those flags. */
4030 ignore_masks = rs6000_disable_incompatible_switches ();
4031
4032 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4033 unless the user explicitly used the -mno-<option> to disable the code. */
4034 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4035 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4036 else if (TARGET_P9_MINMAX)
4037 {
4038 if (cpu_index >= 0)
4039 {
4040 if (cpu_index == PROCESSOR_POWER9)
4041 {
4042 /* legacy behavior: allow -mcpu=power9 with certain
4043 capabilities explicitly disabled. */
4044 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4045 }
4046 else
4047 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4048 "for <xxx> less than power9", "-mcpu");
4049 }
4050 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4051 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4052 & rs6000_isa_flags_explicit))
4053 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4054 were explicitly cleared. */
4055 error ("%qs incompatible with explicitly disabled options",
4056 "-mpower9-minmax");
4057 else
4058 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4059 }
4060 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4061 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4062 else if (TARGET_VSX)
4063 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4064 else if (TARGET_POPCNTD)
4065 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4066 else if (TARGET_DFP)
4067 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4068 else if (TARGET_CMPB)
4069 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4070 else if (TARGET_FPRND)
4071 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4072 else if (TARGET_POPCNTB)
4073 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4074 else if (TARGET_ALTIVEC)
4075 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4076
4077 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4078 {
4079 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4080 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4081 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4082 }
4083
4084 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4085 {
4086 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4087 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4088 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4089 }
4090
4091 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4092 {
4093 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4094 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4095 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4096 }
4097
4098 if (TARGET_P8_VECTOR && !TARGET_VSX)
4099 {
4100 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4101 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4102 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4103 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4104 {
4105 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4106 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4107 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4108 }
4109 else
4110 {
4111 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4112 not explicit. */
4113 rs6000_isa_flags |= OPTION_MASK_VSX;
4114 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4115 }
4116 }
4117
4118 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4119 {
4120 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4121 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4122 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4123 }
4124
4125 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4126 silently turn off quad memory mode. */
4127 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4128 {
4129 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4130 warning (0, N_("-mquad-memory requires 64-bit mode"));
4131
4132 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4133 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4134
4135 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4136 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4137 }
4138
4139 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4140 the words are reversed, but atomic operations can still be done by
4141 swapping the words. */
4142 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4143 {
4144 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4145 warning (0, N_("-mquad-memory is not available in little endian "
4146 "mode"));
4147
4148 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4149 }
4150
4151 /* Assume that if the user asked for normal quad memory instructions, they
4152 want the atomic versions as well, unless they explicitly told us not to use
4153 quad word atomic instructions. */
4154 if (TARGET_QUAD_MEMORY
4155 && !TARGET_QUAD_MEMORY_ATOMIC
4156 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4157 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4158
4159 /* If we can shrink-wrap the TOC register save separately, then use
4160 -msave-toc-indirect unless explicitly disabled. */
4161 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4162 && flag_shrink_wrap_separate
4163 && optimize_function_for_speed_p (cfun))
4164 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4165
4166 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4167 generating power8 instructions. Power9 does not optimize power8 fusion
4168 cases. */
4169 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4170 {
4171 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4172 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4173 else
4174 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4175 }
4176
4177 /* Setting additional fusion flags turns on base fusion. */
4178 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4179 {
4180 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4181 {
4182 if (TARGET_P8_FUSION_SIGN)
4183 error ("%qs requires %qs", "-mpower8-fusion-sign",
4184 "-mpower8-fusion");
4185
4186 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4187 }
4188 else
4189 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4190 }
4191
4192 /* Power8 does not fuse sign-extended loads with the addis. If we are
4193 optimizing at high levels for speed, convert a sign-extended load into a
4194 zero-extending load and an explicit sign extension. */
4195 if (TARGET_P8_FUSION
4196 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4197 && optimize_function_for_speed_p (cfun)
4198 && optimize >= 3)
4199 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
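/* An illustrative (assumed) instruction sequence for the transformation
   just described: instead of the non-fusing sign-extending load

	addis rX,r2,var@toc@ha
	lwa   rY,var@toc@l(rX)

   emit a zero-extending load, which does fuse with the addis, plus an
   explicit sign extension:

	addis rX,r2,var@toc@ha
	lwz   rY,var@toc@l(rX)
	extsw rY,rY  */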
4200
4201 /* ISA 3.0 vector instructions include ISA 2.07. */
4202 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4203 {
4204 /* We prefer to not mention undocumented options in
4205 error messages. However, if users have managed to select
4206 power9-vector without selecting power8-vector, they
4207 already know about undocumented flags. */
4208 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
4209 (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4210 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4211 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4212 {
4213 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4214 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4215 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4216 }
4217 else
4218 {
4219 /* OPTION_MASK_P9_VECTOR is explicit and
4220 OPTION_MASK_P8_VECTOR is not explicit. */
4221 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4222 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4223 }
4224 }
4225
4226 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4227 support. If we only have ISA 2.06 support, and the user did not specify
4228 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4229 but we don't enable the full vectorization support. */
4230 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4231 TARGET_ALLOW_MOVMISALIGN = 1;
4232
4233 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4234 {
4235 if (TARGET_ALLOW_MOVMISALIGN > 0
4236 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4237 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4238
4239 TARGET_ALLOW_MOVMISALIGN = 0;
4240 }
4241
4242 /* Determine when unaligned vector accesses are permitted, and when
4243 they are preferred over masked Altivec loads. Note that if
4244 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4245 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4246 not true. */
4247 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4248 {
4249 if (!TARGET_VSX)
4250 {
4251 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4252 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4253
4254 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4255 }
4256
4257 else if (!TARGET_ALLOW_MOVMISALIGN)
4258 {
4259 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4260 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4261 "-mallow-movmisalign");
4262
4263 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4264 }
4265 }
4266
4267 /* Use long double size to select the appropriate long double. We use
4268 TYPE_PRECISION to differentiate the 3 different long double types. We map
4269 128 into the precision used for TFmode. */
4270 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4271 ? 64
4272 : FLOAT_PRECISION_TFmode);
4273
4274 /* Set long double size before the IEEE 128-bit tests. */
4275 if (!global_options_set.x_rs6000_long_double_type_size)
4276 {
4277 if (main_target_opt != NULL
4278 && (main_target_opt->x_rs6000_long_double_type_size
4279 != default_long_double_size))
4280 error ("target attribute or pragma changes long double size");
4281 else
4282 rs6000_long_double_type_size = default_long_double_size;
4283 }
4284 else if (rs6000_long_double_type_size == 128)
4285 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4286 else if (global_options_set.x_rs6000_ieeequad)
4287 {
4288 if (global_options.x_rs6000_ieeequad)
4289 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4290 else
4291 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4292 }
4293
4294 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4295 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4296 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4297 those systems will not pick up this default. Warn if the user changes the
4298 default unless -Wno-psabi. */
4299 if (!global_options_set.x_rs6000_ieeequad)
4300 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4301
4302 else
4303 {
4304 if (global_options.x_rs6000_ieeequad
4305 && (!TARGET_POPCNTD || !TARGET_VSX))
4306 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4307
4308 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4309 {
4310 static bool warned_change_long_double;
4311 if (!warned_change_long_double)
4312 {
4313 warned_change_long_double = true;
4314 if (TARGET_IEEEQUAD)
4315 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4316 else
4317 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4318 }
4319 }
4320 }
4321
4322 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4323 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4324 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4325 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4326 the keyword and the type. */
4327 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4328
4329 /* IEEE 128-bit floating point requires VSX support. */
4330 if (TARGET_FLOAT128_KEYWORD)
4331 {
4332 if (!TARGET_VSX)
4333 {
4334 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4335 error ("%qs requires VSX support", "-mfloat128");
4336
4337 TARGET_FLOAT128_TYPE = 0;
4338 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4339 | OPTION_MASK_FLOAT128_HW);
4340 }
4341 else if (!TARGET_FLOAT128_TYPE)
4342 {
4343 TARGET_FLOAT128_TYPE = 1;
4344 warning (0, "The -mfloat128 option may not be fully supported");
4345 }
4346 }
4347
4348 /* Enable the __float128 keyword under Linux by default. */
4349 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4350 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4351 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
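/* With the keyword enabled, user code can write, e.g.,

	__float128 qmul (__float128 a, __float128 b) { return a * b; }

   which uses the IEEE 128-bit software routines, or the ISA 3.0 hardware
   instructions when -mfloat128-hardware is in effect (see below).  */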
4352
4353 /* If we are supporting the float128 type and have full ISA 3.0 support,
4354 enable -mfloat128-hardware by default. However, don't enable it if
4355 -mfloat128-hardware was explicitly disabled. 64-bit mode is needed
4356 because sometimes the compiler wants to put things in an integer
4357 container, and if we don't have __int128 support, it is impossible. */
4358 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4359 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4360 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4361 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4362
4363 if (TARGET_FLOAT128_HW
4364 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4365 {
4366 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4367 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4368
4369 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4370 }
4371
4372 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4373 {
4374 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4375 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4376
4377 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4378 }
4379
4380 /* Print the options after updating the defaults. */
4381 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4382 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4383
4384 /* E500mc does "better" if we inline more aggressively. Respect the
4385 user's opinion, though. */
4386 if (rs6000_block_move_inline_limit == 0
4387 && (rs6000_tune == PROCESSOR_PPCE500MC
4388 || rs6000_tune == PROCESSOR_PPCE500MC64
4389 || rs6000_tune == PROCESSOR_PPCE5500
4390 || rs6000_tune == PROCESSOR_PPCE6500))
4391 rs6000_block_move_inline_limit = 128;
4392
4393 /* store_one_arg depends on expand_block_move to handle at least the
4394 size of reg_parm_stack_space. */
4395 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4396 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4397
4398 if (global_init_p)
4399 {
4400 /* If the appropriate debug option is enabled, replace the target hooks
4401 with debug versions that call the real version and then print
4402 debugging information. */
4403 if (TARGET_DEBUG_COST)
4404 {
4405 targetm.rtx_costs = rs6000_debug_rtx_costs;
4406 targetm.address_cost = rs6000_debug_address_cost;
4407 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4408 }
4409
4410 if (TARGET_DEBUG_ADDR)
4411 {
4412 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4413 targetm.legitimize_address = rs6000_debug_legitimize_address;
4414 rs6000_secondary_reload_class_ptr
4415 = rs6000_debug_secondary_reload_class;
4416 targetm.secondary_memory_needed
4417 = rs6000_debug_secondary_memory_needed;
4418 targetm.can_change_mode_class
4419 = rs6000_debug_can_change_mode_class;
4420 rs6000_preferred_reload_class_ptr
4421 = rs6000_debug_preferred_reload_class;
4422 rs6000_legitimize_reload_address_ptr
4423 = rs6000_debug_legitimize_reload_address;
4424 rs6000_mode_dependent_address_ptr
4425 = rs6000_debug_mode_dependent_address;
4426 }
4427
4428 if (rs6000_veclibabi_name)
4429 {
4430 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4431 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4432 else
4433 {
4434 error ("unknown vectorization library ABI type (%qs) for "
4435 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4436 ret = false;
4437 }
4438 }
4439 }
4440
4441 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4442 target attribute or pragma which automatically enables both options,
4443 unless the altivec ABI was set. This is set by default for 64-bit, but
4444 not for 32-bit. */
4445 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4446 {
4447 TARGET_FLOAT128_TYPE = 0;
4448 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4449 | OPTION_MASK_FLOAT128_KEYWORD)
4450 & ~rs6000_isa_flags_explicit);
4451 }
4452
4453 /* Enable Altivec ABI for AIX -maltivec. */
4454 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4455 {
4456 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4457 error ("target attribute or pragma changes AltiVec ABI");
4458 else
4459 rs6000_altivec_abi = 1;
4460 }
4461
4462 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4463 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4464 be explicitly overridden in either case. */
4465 if (TARGET_ELF)
4466 {
4467 if (!global_options_set.x_rs6000_altivec_abi
4468 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4469 {
4470 if (main_target_opt != NULL &&
4471 !main_target_opt->x_rs6000_altivec_abi)
4472 error ("target attribute or pragma changes AltiVec ABI");
4473 else
4474 rs6000_altivec_abi = 1;
4475 }
4476 }
4477
4478 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4479 So far, the only darwin64 targets are also MACH-O. */
4480 if (TARGET_MACHO
4481 && DEFAULT_ABI == ABI_DARWIN
4482 && TARGET_64BIT)
4483 {
4484 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4485 error ("target attribute or pragma changes darwin64 ABI");
4486 else
4487 {
4488 rs6000_darwin64_abi = 1;
4489 /* Default to natural alignment, for better performance. */
4490 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4491 }
4492 }
4493
4494 /* Place FP constants in the constant pool instead of the TOC
4495 if section anchors are enabled. */
4496 if (flag_section_anchors
4497 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4498 TARGET_NO_FP_IN_TOC = 1;
4499
4500 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4501 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4502
4503 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4504 SUBTARGET_OVERRIDE_OPTIONS;
4505 #endif
4506 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4507 SUBSUBTARGET_OVERRIDE_OPTIONS;
4508 #endif
4509 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4510 SUB3TARGET_OVERRIDE_OPTIONS;
4511 #endif
4512
4513 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4514 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4515
4516 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4517 && rs6000_tune != PROCESSOR_POWER5
4518 && rs6000_tune != PROCESSOR_POWER6
4519 && rs6000_tune != PROCESSOR_POWER7
4520 && rs6000_tune != PROCESSOR_POWER8
4521 && rs6000_tune != PROCESSOR_POWER9
4522 && rs6000_tune != PROCESSOR_PPCA2
4523 && rs6000_tune != PROCESSOR_CELL
4524 && rs6000_tune != PROCESSOR_PPC476);
4525 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4526 || rs6000_tune == PROCESSOR_POWER5
4527 || rs6000_tune == PROCESSOR_POWER7
4528 || rs6000_tune == PROCESSOR_POWER8);
4529 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4530 || rs6000_tune == PROCESSOR_POWER5
4531 || rs6000_tune == PROCESSOR_POWER6
4532 || rs6000_tune == PROCESSOR_POWER7
4533 || rs6000_tune == PROCESSOR_POWER8
4534 || rs6000_tune == PROCESSOR_POWER9
4535 || rs6000_tune == PROCESSOR_PPCE500MC
4536 || rs6000_tune == PROCESSOR_PPCE500MC64
4537 || rs6000_tune == PROCESSOR_PPCE5500
4538 || rs6000_tune == PROCESSOR_PPCE6500);
4539
4540 /* Allow debug switches to override the above settings. These are set to -1
4541 in rs6000.opt to indicate the user hasn't directly set the switch. */
4542 if (TARGET_ALWAYS_HINT >= 0)
4543 rs6000_always_hint = TARGET_ALWAYS_HINT;
4544
4545 if (TARGET_SCHED_GROUPS >= 0)
4546 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4547
4548 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4549 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4550
4551 rs6000_sched_restricted_insns_priority
4552 = (rs6000_sched_groups ? 1 : 0);
4553
4554 /* Handle -msched-costly-dep option. */
4555 rs6000_sched_costly_dep
4556 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4557
4558 if (rs6000_sched_costly_dep_str)
4559 {
4560 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4561 rs6000_sched_costly_dep = no_dep_costly;
4562 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4563 rs6000_sched_costly_dep = all_deps_costly;
4564 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4565 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4566 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4567 rs6000_sched_costly_dep = store_to_load_dep_costly;
4568 else
4569 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4570 atoi (rs6000_sched_costly_dep_str));
4571 }
4572
4573 /* Handle -minsert-sched-nops option. */
4574 rs6000_sched_insert_nops
4575 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4576
4577 if (rs6000_sched_insert_nops_str)
4578 {
4579 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4580 rs6000_sched_insert_nops = sched_finish_none;
4581 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4582 rs6000_sched_insert_nops = sched_finish_pad_groups;
4583 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4584 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4585 else
4586 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4587 atoi (rs6000_sched_insert_nops_str));
4588 }
4589
4590 /* Handle stack protector */
4591 if (!global_options_set.x_rs6000_stack_protector_guard)
4592 #ifdef TARGET_THREAD_SSP_OFFSET
4593 rs6000_stack_protector_guard = SSP_TLS;
4594 #else
4595 rs6000_stack_protector_guard = SSP_GLOBAL;
4596 #endif
4597
4598 #ifdef TARGET_THREAD_SSP_OFFSET
4599 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4600 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4601 #endif
4602
4603 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4604 {
4605 char *endp;
4606 const char *str = rs6000_stack_protector_guard_offset_str;
4607
4608 errno = 0;
4609 long offset = strtol (str, &endp, 0);
4610 if (!*str || *endp || errno)
4611 error ("%qs is not a valid number in %qs", str,
4612 "-mstack-protector-guard-offset=");
4613
4614 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4615 || (TARGET_64BIT && (offset & 3)))
4616 error ("%qs is not a valid offset in %qs", str,
4617 "-mstack-protector-guard-offset=");
4618
4619 rs6000_stack_protector_guard_offset = offset;
4620 }
4621
4622 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4623 {
4624 const char *str = rs6000_stack_protector_guard_reg_str;
4625 int reg = decode_reg_name (str);
4626
4627 if (!IN_RANGE (reg, 1, 31))
4628 error ("%qs is not a valid base register in %qs", str,
4629 "-mstack-protector-guard-reg=");
4630
4631 rs6000_stack_protector_guard_reg = reg;
4632 }
4633
4634 if (rs6000_stack_protector_guard == SSP_TLS
4635 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4636 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
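/* Typical resulting configuration (a sketch; the offset is the
   target-specific TARGET_THREAD_SSP_OFFSET): on 64-bit Linux the defaults
   above are equivalent to

	-mstack-protector-guard=tls
	-mstack-protector-guard-reg=r13
	-mstack-protector-guard-offset=<TARGET_THREAD_SSP_OFFSET>

   so the canary is loaded from a fixed offset off the thread pointer rather
   than from the global __stack_chk_guard.  */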
4637
4638 if (global_init_p)
4639 {
4640 #ifdef TARGET_REGNAMES
4641 /* If the user desires alternate register names, copy in the
4642 alternate names now. */
4643 if (TARGET_REGNAMES)
4644 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4645 #endif
4646
4647 /* Set aix_struct_return last, after the ABI is determined.
4648 If -maix-struct-return or -msvr4-struct-return was explicitly
4649 used, don't override with the ABI default. */
4650 if (!global_options_set.x_aix_struct_return)
4651 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4652
4653 #if 0
4654 /* IBM XL compiler defaults to unsigned bitfields. */
4655 if (TARGET_XL_COMPAT)
4656 flag_signed_bitfields = 0;
4657 #endif
4658
4659 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4660 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4661
4662 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4663
4664 /* We can only guarantee the availability of DI pseudo-ops when
4665 assembling for 64-bit targets. */
4666 if (!TARGET_64BIT)
4667 {
4668 targetm.asm_out.aligned_op.di = NULL;
4669 targetm.asm_out.unaligned_op.di = NULL;
4670 }
4671
4672
4673 /* Set branch target alignment, if not optimizing for size. */
4674 if (!optimize_size)
4675 {
4676 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4677 8-byte aligned to avoid misprediction by the branch predictor. */
4678 if (rs6000_tune == PROCESSOR_TITAN
4679 || rs6000_tune == PROCESSOR_CELL)
4680 {
4681 if (flag_align_functions && !str_align_functions)
4682 str_align_functions = "8";
4683 if (flag_align_jumps && !str_align_jumps)
4684 str_align_jumps = "8";
4685 if (flag_align_loops && !str_align_loops)
4686 str_align_loops = "8";
4687 }
4688 if (rs6000_align_branch_targets)
4689 {
4690 if (flag_align_functions && !str_align_functions)
4691 str_align_functions = "16";
4692 if (flag_align_jumps && !str_align_jumps)
4693 str_align_jumps = "16";
4694 if (flag_align_loops && !str_align_loops)
4695 {
4696 can_override_loop_align = 1;
4697 str_align_loops = "16";
4698 }
4699 }
4700
4701 if (flag_align_jumps && !str_align_jumps)
4702 str_align_jumps = "16";
4703 if (flag_align_loops && !str_align_loops)
4704 str_align_loops = "16";
4705 }
4706
4707 /* Arrange to save and restore machine status around nested functions. */
4708 init_machine_status = rs6000_init_machine_status;
4709
4710 /* We should always be splitting complex arguments, but we can't break
4711 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4712 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4713 targetm.calls.split_complex_arg = NULL;
4714
4715 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4716 if (DEFAULT_ABI == ABI_AIX)
4717 targetm.calls.custom_function_descriptors = 0;
4718 }
4719
4720 /* Initialize rs6000_cost with the appropriate target costs. */
4721 if (optimize_size)
4722 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4723 else
4724 switch (rs6000_tune)
4725 {
4726 case PROCESSOR_RS64A:
4727 rs6000_cost = &rs64a_cost;
4728 break;
4729
4730 case PROCESSOR_MPCCORE:
4731 rs6000_cost = &mpccore_cost;
4732 break;
4733
4734 case PROCESSOR_PPC403:
4735 rs6000_cost = &ppc403_cost;
4736 break;
4737
4738 case PROCESSOR_PPC405:
4739 rs6000_cost = &ppc405_cost;
4740 break;
4741
4742 case PROCESSOR_PPC440:
4743 rs6000_cost = &ppc440_cost;
4744 break;
4745
4746 case PROCESSOR_PPC476:
4747 rs6000_cost = &ppc476_cost;
4748 break;
4749
4750 case PROCESSOR_PPC601:
4751 rs6000_cost = &ppc601_cost;
4752 break;
4753
4754 case PROCESSOR_PPC603:
4755 rs6000_cost = &ppc603_cost;
4756 break;
4757
4758 case PROCESSOR_PPC604:
4759 rs6000_cost = &ppc604_cost;
4760 break;
4761
4762 case PROCESSOR_PPC604e:
4763 rs6000_cost = &ppc604e_cost;
4764 break;
4765
4766 case PROCESSOR_PPC620:
4767 rs6000_cost = &ppc620_cost;
4768 break;
4769
4770 case PROCESSOR_PPC630:
4771 rs6000_cost = &ppc630_cost;
4772 break;
4773
4774 case PROCESSOR_CELL:
4775 rs6000_cost = &ppccell_cost;
4776 break;
4777
4778 case PROCESSOR_PPC750:
4779 case PROCESSOR_PPC7400:
4780 rs6000_cost = &ppc750_cost;
4781 break;
4782
4783 case PROCESSOR_PPC7450:
4784 rs6000_cost = &ppc7450_cost;
4785 break;
4786
4787 case PROCESSOR_PPC8540:
4788 case PROCESSOR_PPC8548:
4789 rs6000_cost = &ppc8540_cost;
4790 break;
4791
4792 case PROCESSOR_PPCE300C2:
4793 case PROCESSOR_PPCE300C3:
4794 rs6000_cost = &ppce300c2c3_cost;
4795 break;
4796
4797 case PROCESSOR_PPCE500MC:
4798 rs6000_cost = &ppce500mc_cost;
4799 break;
4800
4801 case PROCESSOR_PPCE500MC64:
4802 rs6000_cost = &ppce500mc64_cost;
4803 break;
4804
4805 case PROCESSOR_PPCE5500:
4806 rs6000_cost = &ppce5500_cost;
4807 break;
4808
4809 case PROCESSOR_PPCE6500:
4810 rs6000_cost = &ppce6500_cost;
4811 break;
4812
4813 case PROCESSOR_TITAN:
4814 rs6000_cost = &titan_cost;
4815 break;
4816
4817 case PROCESSOR_POWER4:
4818 case PROCESSOR_POWER5:
4819 rs6000_cost = &power4_cost;
4820 break;
4821
4822 case PROCESSOR_POWER6:
4823 rs6000_cost = &power6_cost;
4824 break;
4825
4826 case PROCESSOR_POWER7:
4827 rs6000_cost = &power7_cost;
4828 break;
4829
4830 case PROCESSOR_POWER8:
4831 rs6000_cost = &power8_cost;
4832 break;
4833
4834 case PROCESSOR_POWER9:
4835 rs6000_cost = &power9_cost;
4836 break;
4837
4838 case PROCESSOR_PPCA2:
4839 rs6000_cost = &ppca2_cost;
4840 break;
4841
4842 default:
4843 gcc_unreachable ();
4844 }
4845
4846 if (global_init_p)
4847 {
4848 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4849 rs6000_cost->simultaneous_prefetches,
4850 global_options.x_param_values,
4851 global_options_set.x_param_values);
4852 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4853 global_options.x_param_values,
4854 global_options_set.x_param_values);
4855 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4856 rs6000_cost->cache_line_size,
4857 global_options.x_param_values,
4858 global_options_set.x_param_values);
4859 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4860 global_options.x_param_values,
4861 global_options_set.x_param_values);
4862
4863 /* Increase loop peeling limits based on performance analysis. */
4864 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4865 global_options.x_param_values,
4866 global_options_set.x_param_values);
4867 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4868 global_options.x_param_values,
4869 global_options_set.x_param_values);
4870
4871 /* Use the 'model' -fsched-pressure algorithm by default. */
4872 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4873 SCHED_PRESSURE_MODEL,
4874 global_options.x_param_values,
4875 global_options_set.x_param_values);
4876
4877 /* If using typedef char *va_list, signal that
4878 __builtin_va_start (&ap, 0) can be optimized to
4879 ap = __builtin_next_arg (0). */
4880 if (DEFAULT_ABI != ABI_V4)
4881 targetm.expand_builtin_va_start = NULL;
4882 }
4883
4884 /* If not explicitly specified via option, decide whether to generate indexed
4885 load/store instructions. A value of -1 indicates that the
4886 initial value of this variable has not been overwritten. During
4887 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4888 if (TARGET_AVOID_XFORM == -1)
4889 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4890 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4891 need indexed accesses and the type used is the scalar type of the element
4892 being loaded or stored. */
4893 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4894 && !TARGET_ALTIVEC);
4895
4896 /* Set the -mrecip options. */
4897 if (rs6000_recip_name)
4898 {
4899 char *p = ASTRDUP (rs6000_recip_name);
4900 char *q;
4901 unsigned int mask, i;
4902 bool invert;
4903
4904 while ((q = strtok (p, ",")) != NULL)
4905 {
4906 p = NULL;
4907 if (*q == '!')
4908 {
4909 invert = true;
4910 q++;
4911 }
4912 else
4913 invert = false;
4914
4915 if (!strcmp (q, "default"))
4916 mask = ((TARGET_RECIP_PRECISION)
4917 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4918 else
4919 {
4920 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4921 if (!strcmp (q, recip_options[i].string))
4922 {
4923 mask = recip_options[i].mask;
4924 break;
4925 }
4926
4927 if (i == ARRAY_SIZE (recip_options))
4928 {
4929 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4930 invert = false;
4931 mask = 0;
4932 ret = false;
4933 }
4934 }
4935
4936 if (invert)
4937 rs6000_recip_control &= ~mask;
4938 else
4939 rs6000_recip_control |= mask;
4940 }
4941 }
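
/* Illustrative sketch (not part of GCC proper): the loop above tokenizes the
   -mrecip= string with strtok, treating a leading '!' as "clear this mask".
   The fragment below mirrors that logic in a self-contained program, using
   hypothetical option names and mask values for demonstration only.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  static const struct { const char *string; unsigned int mask; } opts[] = {
    { "div", 0x1 }, { "rsqrt", 0x2 }	/* hypothetical masks */
  };
  char buf[] = "div,!rsqrt";		/* stands in for rs6000_recip_name */
  unsigned int control = 0;
  char *p = buf, *q;

  while ((q = strtok (p, ",")) != NULL)
    {
      int invert = 0;
      p = NULL;				/* keep scanning the same string */
      if (*q == '!')
	{
	  invert = 1;
	  q++;
	}
      for (size_t i = 0; i < 2; i++)
	if (!strcmp (q, opts[i].string))
	  {
	    if (invert)
	      control &= ~opts[i].mask;
	    else
	      control |= opts[i].mask;
	    break;
	  }
    }
  printf ("control = %#x\n", control);	/* prints "control = 0x1" */
  return 0;
}
#endif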
4942
4943 /* Set the builtin mask based on the various options used that could affect
4944    which builtins are available.  In the past we used target_flags, but we've
4945    run out of bits, and some options are no longer in target_flags.  */
4946 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4947 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4948 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4949 rs6000_builtin_mask);
4950
4951 /* Initialize all of the registers. */
4952 rs6000_init_hard_regno_mode_ok (global_init_p);
4953
4954 /* Save the initial options in case the user uses function-specific options.  */
4955 if (global_init_p)
4956 target_option_default_node = target_option_current_node
4957 = build_target_option_node (&global_options);
4958
4959 /* If not explicitly specified via option, decide whether to generate the
4960    extra blr's required to preserve the link stack on some cpus (e.g., the 476).  */
4961 if (TARGET_LINK_STACK == -1)
4962 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4963
4964 /* Deprecate use of -mno-speculate-indirect-jumps. */
4965 if (!rs6000_speculate_indirect_jumps)
4966 warning (0, "%qs is deprecated and not recommended in any circumstances",
4967 "-mno-speculate-indirect-jumps");
4968
4969 return ret;
4970 }
4971
4972 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4973 define the target cpu type. */
4974
4975 static void
4976 rs6000_option_override (void)
4977 {
4978 (void) rs6000_option_override_internal (true);
4979 }
4980
4981 \f
4982 /* Implement targetm.vectorize.builtin_mask_for_load. */
4983 static tree
4984 rs6000_builtin_mask_for_load (void)
4985 {
4986 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4987 if ((TARGET_ALTIVEC && !TARGET_VSX)
4988 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4989 return altivec_builtin_mask_for_load;
4990 else
4991 return 0;
4992 }
4993
4994 /* Implement LOOP_ALIGN. */
4995 align_flags
4996 rs6000_loop_align (rtx label)
4997 {
4998 basic_block bb;
4999 int ninsns;
5000
5001 /* Don't override loop alignment if -falign-loops was specified. */
5002 if (!can_override_loop_align)
5003 return align_loops;
5004
5005 bb = BLOCK_FOR_INSN (label);
5006   ninsns = num_loop_insns (bb->loop_father);
5007
5008   /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default.  */
5009 if (ninsns > 4 && ninsns <= 8
5010 && (rs6000_tune == PROCESSOR_POWER4
5011 || rs6000_tune == PROCESSOR_POWER5
5012 || rs6000_tune == PROCESSOR_POWER6
5013 || rs6000_tune == PROCESSOR_POWER7
5014 || rs6000_tune == PROCESSOR_POWER8))
5015 return align_flags (5);
5016 else
5017 return align_loops;
5018 }
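
/* A hedged note on the return value above: align_flags stores a log2
   alignment, so align_flags (5) requests 1 << 5 == 32-byte alignment,
   matching the icache sector size mentioned in the comment.  */
#if 0
#include <stdio.h>
int
main (void)
{
  int log2_align = 5;
  printf ("%d bytes\n", 1 << log2_align);	/* prints "32 bytes" */
  return 0;
}
#endif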
5019
5020 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5021    after applying some number of iterations.  This routine does not determine
5022    how many iterations are required to reach the desired alignment.  */
5023
5024 static bool
5025 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5026 {
5027 if (is_packed)
5028 return false;
5029
5030 if (TARGET_32BIT)
5031 {
5032 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5033 return true;
5034
5035 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5036 return true;
5037
5038 return false;
5039 }
5040 else
5041 {
5042 if (TARGET_MACHO)
5043 return false;
5044
5045       /* Assume that all other types are naturally aligned.  CHECKME!  */
5046 return true;
5047 }
5048 }
5049
5050 /* Return true if the vector misalignment factor is supported by the
5051 target. */
5052 static bool
5053 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5054 const_tree type,
5055 int misalignment,
5056 bool is_packed)
5057 {
5058 if (TARGET_VSX)
5059 {
5060 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5061 return true;
5062
5063       /* Return false if the movmisalign pattern is not supported for this mode.  */
5064 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5065 return false;
5066
5067 if (misalignment == -1)
5068 {
5069 /* Misalignment factor is unknown at compile time but we know
5070 it's word aligned. */
5071 if (rs6000_vector_alignment_reachable (type, is_packed))
5072 {
5073 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5074
5075 if (element_size == 64 || element_size == 32)
5076 return true;
5077 }
5078
5079 return false;
5080 }
5081
5082       /* VSX supports word-aligned vectors.  */
5083 if (misalignment % 4 == 0)
5084 return true;
5085 }
5086 return false;
5087 }
5088
5089 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5090 static int
5091 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5092 tree vectype, int misalign)
5093 {
5094 unsigned elements;
5095 tree elem_type;
5096
5097 switch (type_of_cost)
5098 {
5099 case scalar_stmt:
5100 case scalar_load:
5101 case scalar_store:
5102 case vector_stmt:
5103 case vector_load:
5104 case vector_store:
5105 case vec_to_scalar:
5106 case scalar_to_vec:
5107 case cond_branch_not_taken:
5108 return 1;
5109
5110 case vec_perm:
5111 if (TARGET_VSX)
5112 return 3;
5113 else
5114 return 1;
5115
5116 case vec_promote_demote:
5117 if (TARGET_VSX)
5118 return 4;
5119 else
5120 return 1;
5121
5122 case cond_branch_taken:
5123 return 3;
5124
5125 case unaligned_load:
5126 case vector_gather_load:
5127 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5128 return 1;
5129
5130 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5131 {
5132 elements = TYPE_VECTOR_SUBPARTS (vectype);
5133 if (elements == 2)
5134 /* Double word aligned. */
5135 return 2;
5136
5137 if (elements == 4)
5138 {
5139 switch (misalign)
5140 {
5141 case 8:
5142 /* Double word aligned. */
5143 return 2;
5144
5145 case -1:
5146 /* Unknown misalignment. */
5147 case 4:
5148 case 12:
5149 /* Word aligned. */
5150 return 22;
5151
5152 default:
5153 gcc_unreachable ();
5154 }
5155 }
5156 }
5157
5158 if (TARGET_ALTIVEC)
5159 /* Misaligned loads are not supported. */
5160 gcc_unreachable ();
5161
5162 return 2;
5163
5164 case unaligned_store:
5165 case vector_scatter_store:
5166 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5167 return 1;
5168
5169 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5170 {
5171 elements = TYPE_VECTOR_SUBPARTS (vectype);
5172 if (elements == 2)
5173 /* Double word aligned. */
5174 return 2;
5175
5176 if (elements == 4)
5177 {
5178 switch (misalign)
5179 {
5180 case 8:
5181 /* Double word aligned. */
5182 return 2;
5183
5184 case -1:
5185 /* Unknown misalignment. */
5186 case 4:
5187 case 12:
5188 /* Word aligned. */
5189 return 23;
5190
5191 default:
5192 gcc_unreachable ();
5193 }
5194 }
5195 }
5196
5197 if (TARGET_ALTIVEC)
5198 /* Misaligned stores are not supported. */
5199 gcc_unreachable ();
5200
5201 return 2;
5202
5203 case vec_construct:
5204 /* This is a rough approximation assuming non-constant elements
5205 constructed into a vector via element insertion. FIXME:
5206 vec_construct is not granular enough for uniformly good
5207 decisions. If the initialization is a splat, this is
5208 cheaper than we estimate. Improve this someday. */
5209 elem_type = TREE_TYPE (vectype);
5210       /* 32-bit floats loaded into registers are stored as double
5211 	 precision, so we need 2 permutes, 2 converts, and 1 merge
5212 	 to construct a vector of short floats from them.  */
5213 if (SCALAR_FLOAT_TYPE_P (elem_type)
5214 && TYPE_PRECISION (elem_type) == 32)
5215 return 5;
5216 /* On POWER9, integer vector types are built up in GPRs and then
5217 use a direct move (2 cycles). For POWER8 this is even worse,
5218 as we need two direct moves and a merge, and the direct moves
5219 are five cycles. */
5220 else if (INTEGRAL_TYPE_P (elem_type))
5221 {
5222 if (TARGET_P9_VECTOR)
5223 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5224 else
5225 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5226 }
5227 else
5228 /* V2DFmode doesn't need a direct move. */
5229 return 2;
5230
5231 default:
5232 gcc_unreachable ();
5233 }
5234 }
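
/* Worked example of the vec_construct costs above: building a V4SI from
   four distinct GPR values costs TYPE_VECTOR_SUBPARTS - 1 + 2 == 5 on
   POWER9, but TYPE_VECTOR_SUBPARTS - 1 + 5 == 8 on earlier VSX targets,
   reflecting the slower GPR-to-vector direct moves before POWER9.  */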
5235
5236 /* Implement targetm.vectorize.preferred_simd_mode. */
5237
5238 static machine_mode
5239 rs6000_preferred_simd_mode (scalar_mode mode)
5240 {
5241 if (TARGET_VSX)
5242 switch (mode)
5243 {
5244 case E_DFmode:
5245 return V2DFmode;
5246 default:;
5247 }
5248 if (TARGET_ALTIVEC || TARGET_VSX)
5249 switch (mode)
5250 {
5251 case E_SFmode:
5252 return V4SFmode;
5253 case E_TImode:
5254 return V1TImode;
5255 case E_DImode:
5256 return V2DImode;
5257 case E_SImode:
5258 return V4SImode;
5259 case E_HImode:
5260 return V8HImode;
5261 case E_QImode:
5262 return V16QImode;
5263 default:;
5264 }
5265 return word_mode;
5266 }
5267
5268 typedef struct _rs6000_cost_data
5269 {
5270 struct loop *loop_info;
5271 unsigned cost[3];
5272 } rs6000_cost_data;
5273
5274 /* Test for likely overcommitment of vector hardware resources. If a
5275 loop iteration is relatively large, and too large a percentage of
5276 instructions in the loop are vectorized, the cost model may not
5277 adequately reflect delays from unavailable vector resources.
5278 Penalize the loop body cost for this case. */
5279
5280 static void
5281 rs6000_density_test (rs6000_cost_data *data)
5282 {
5283 const int DENSITY_PCT_THRESHOLD = 85;
5284 const int DENSITY_SIZE_THRESHOLD = 70;
5285 const int DENSITY_PENALTY = 10;
5286 struct loop *loop = data->loop_info;
5287 basic_block *bbs = get_loop_body (loop);
5288 int nbbs = loop->num_nodes;
5289 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5290 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5291 int i, density_pct;
5292
5293 for (i = 0; i < nbbs; i++)
5294 {
5295 basic_block bb = bbs[i];
5296 gimple_stmt_iterator gsi;
5297
5298 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5299 {
5300 gimple *stmt = gsi_stmt (gsi);
5301 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5302
5303 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5304 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5305 not_vec_cost++;
5306 }
5307 }
5308
5309 free (bbs);
5310 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5311
5312 if (density_pct > DENSITY_PCT_THRESHOLD
5313 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5314 {
5315 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5316 if (dump_enabled_p ())
5317 dump_printf_loc (MSG_NOTE, vect_location,
5318 "density %d%%, cost %d exceeds threshold, penalizing "
5319 "loop body cost by %d%%", density_pct,
5320 vec_cost + not_vec_cost, DENSITY_PENALTY);
5321 }
5322 }
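
/* Worked example of the density penalty, using assumed costs: with
   vec_cost == 90 and not_vec_cost == 10, the density is 90% (above the
   85% threshold) and the loop size is 100 (above 70), so the body cost
   is scaled by (100 + 10) / 100 from 90 to 99.  */
#if 0
#include <stdio.h>
int
main (void)
{
  int vec_cost = 90, not_vec_cost = 10;		/* assumed inputs */
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    vec_cost = vec_cost * (100 + 10) / 100;
  printf ("density %d%%, body cost %d\n", density_pct, vec_cost);
  return 0;
}
#endif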
5323
5324 /* Implement targetm.vectorize.init_cost. */
5325
5326 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5327 instruction is needed by the vectorization. */
5328 static bool rs6000_vect_nonmem;
5329
5330 static void *
5331 rs6000_init_cost (struct loop *loop_info)
5332 {
5333 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5334 data->loop_info = loop_info;
5335 data->cost[vect_prologue] = 0;
5336 data->cost[vect_body] = 0;
5337 data->cost[vect_epilogue] = 0;
5338 rs6000_vect_nonmem = false;
5339 return data;
5340 }
5341
5342 /* Implement targetm.vectorize.add_stmt_cost. */
5343
5344 static unsigned
5345 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5346 struct _stmt_vec_info *stmt_info, int misalign,
5347 enum vect_cost_model_location where)
5348 {
5349 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5350 unsigned retval = 0;
5351
5352 if (flag_vect_cost_model)
5353 {
5354 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5355 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5356 misalign);
5357 /* Statements in an inner loop relative to the loop being
5358 vectorized are weighted more heavily. The value here is
5359 arbitrary and could potentially be improved with analysis. */
5360 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5361 count *= 50; /* FIXME. */
5362
5363 retval = (unsigned) (count * stmt_cost);
5364 cost_data->cost[where] += retval;
5365
5366 /* Check whether we're doing something other than just a copy loop.
5367 Not all such loops may be profitably vectorized; see
5368 rs6000_finish_cost. */
5369 if ((kind == vec_to_scalar || kind == vec_perm
5370 || kind == vec_promote_demote || kind == vec_construct
5371 || kind == scalar_to_vec)
5372 || (where == vect_body && kind == vector_stmt))
5373 rs6000_vect_nonmem = true;
5374 }
5375
5376 return retval;
5377 }
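
/* Worked example of the inner-loop weighting above, with assumed values:
   a vector_stmt of cost 1 occurring once (count == 1) in a loop nested
   inside the loop being vectorized contributes 1 * 50 * 1 == 50 to the
   body cost, instead of 1 for a statement in the outermost loop.  */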
5378
5379 /* Implement targetm.vectorize.finish_cost. */
5380
5381 static void
5382 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5383 unsigned *body_cost, unsigned *epilogue_cost)
5384 {
5385 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5386
5387 if (cost_data->loop_info)
5388 rs6000_density_test (cost_data);
5389
5390 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5391 that require versioning for any reason. The vectorization is at
5392 best a wash inside the loop, and the versioning checks make
5393 profitability highly unlikely and potentially quite harmful. */
5394 if (cost_data->loop_info)
5395 {
5396 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5397 if (!rs6000_vect_nonmem
5398 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5399 && LOOP_REQUIRES_VERSIONING (vec_info))
5400 cost_data->cost[vect_body] += 10000;
5401 }
5402
5403 *prologue_cost = cost_data->cost[vect_prologue];
5404 *body_cost = cost_data->cost[vect_body];
5405 *epilogue_cost = cost_data->cost[vect_epilogue];
5406 }
5407
5408 /* Implement targetm.vectorize.destroy_cost_data. */
5409
5410 static void
5411 rs6000_destroy_cost_data (void *data)
5412 {
5413 free (data);
5414 }
5415
5416 /* Handler for the Mathematical Acceleration Subsystem (MASS) interface to a
5417    library with vectorized intrinsics.  */
5418
5419 static tree
5420 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5421 tree type_in)
5422 {
5423 char name[32];
5424 const char *suffix = NULL;
5425 tree fntype, new_fndecl, bdecl = NULL_TREE;
5426 int n_args = 1;
5427 const char *bname;
5428 machine_mode el_mode, in_mode;
5429 int n, in_n;
5430
5431   /* Libmass is suitable only for unsafe math, since it does not correctly
5432      support parts of IEEE (such as denormals) with the required precision.
5433      Only support it if we have VSX, to use the simd d2 or f4 functions.
5434 XXX: Add variable length support. */
5435 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5436 return NULL_TREE;
5437
5438 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5439 n = TYPE_VECTOR_SUBPARTS (type_out);
5440 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5441 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5442 if (el_mode != in_mode
5443 || n != in_n)
5444 return NULL_TREE;
5445
5446 switch (fn)
5447 {
5448 CASE_CFN_ATAN2:
5449 CASE_CFN_HYPOT:
5450 CASE_CFN_POW:
5451 n_args = 2;
5452 gcc_fallthrough ();
5453
5454 CASE_CFN_ACOS:
5455 CASE_CFN_ACOSH:
5456 CASE_CFN_ASIN:
5457 CASE_CFN_ASINH:
5458 CASE_CFN_ATAN:
5459 CASE_CFN_ATANH:
5460 CASE_CFN_CBRT:
5461 CASE_CFN_COS:
5462 CASE_CFN_COSH:
5463 CASE_CFN_ERF:
5464 CASE_CFN_ERFC:
5465 CASE_CFN_EXP2:
5466 CASE_CFN_EXP:
5467 CASE_CFN_EXPM1:
5468 CASE_CFN_LGAMMA:
5469 CASE_CFN_LOG10:
5470 CASE_CFN_LOG1P:
5471 CASE_CFN_LOG2:
5472 CASE_CFN_LOG:
5473 CASE_CFN_SIN:
5474 CASE_CFN_SINH:
5475 CASE_CFN_SQRT:
5476 CASE_CFN_TAN:
5477 CASE_CFN_TANH:
5478 if (el_mode == DFmode && n == 2)
5479 {
5480 bdecl = mathfn_built_in (double_type_node, fn);
5481 suffix = "d2"; /* pow -> powd2 */
5482 }
5483 else if (el_mode == SFmode && n == 4)
5484 {
5485 bdecl = mathfn_built_in (float_type_node, fn);
5486 suffix = "4"; /* powf -> powf4 */
5487 }
5488 else
5489 return NULL_TREE;
5490 if (!bdecl)
5491 return NULL_TREE;
5492 break;
5493
5494 default:
5495 return NULL_TREE;
5496 }
5497
5498 gcc_assert (suffix != NULL);
5499 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5500 if (!bname)
5501 return NULL_TREE;
5502
5503 strcpy (name, bname + sizeof ("__builtin_") - 1);
5504 strcat (name, suffix);
5505
5506 if (n_args == 1)
5507 fntype = build_function_type_list (type_out, type_in, NULL);
5508 else if (n_args == 2)
5509 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5510 else
5511 gcc_unreachable ();
5512
5513 /* Build a function declaration for the vectorized function. */
5514 new_fndecl = build_decl (BUILTINS_LOCATION,
5515 FUNCTION_DECL, get_identifier (name), fntype);
5516 TREE_PUBLIC (new_fndecl) = 1;
5517 DECL_EXTERNAL (new_fndecl) = 1;
5518 DECL_IS_NOVOPS (new_fndecl) = 1;
5519 TREE_READONLY (new_fndecl) = 1;
5520
5521 return new_fndecl;
5522 }
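
/* Sketch of the MASS name mangling above: the "__builtin_" prefix is
   stripped and the vector suffix appended, so "__builtin_pow" becomes
   "powd2" (V2DF) and "__builtin_powf" becomes "powf4" (V4SF).  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char name[32];
  const char *bname = "__builtin_pow";
  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, "d2");
  printf ("%s\n", name);	/* prints "powd2" */
  return 0;
}
#endif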
5523
5524 /* Returns a function decl for a vectorized version of the builtin function
5525    with builtin function code FN and the result vector type TYPE_OUT, or
5526    NULL_TREE if it is not available.  */
5527
5528 static tree
5529 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5530 tree type_in)
5531 {
5532 machine_mode in_mode, out_mode;
5533 int in_n, out_n;
5534
5535 if (TARGET_DEBUG_BUILTIN)
5536 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5537 combined_fn_name (combined_fn (fn)),
5538 GET_MODE_NAME (TYPE_MODE (type_out)),
5539 GET_MODE_NAME (TYPE_MODE (type_in)));
5540
5541 if (TREE_CODE (type_out) != VECTOR_TYPE
5542 || TREE_CODE (type_in) != VECTOR_TYPE)
5543 return NULL_TREE;
5544
5545 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5546 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5547 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5548 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5549
5550 switch (fn)
5551 {
5552 CASE_CFN_COPYSIGN:
5553 if (VECTOR_UNIT_VSX_P (V2DFmode)
5554 && out_mode == DFmode && out_n == 2
5555 && in_mode == DFmode && in_n == 2)
5556 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5557 if (VECTOR_UNIT_VSX_P (V4SFmode)
5558 && out_mode == SFmode && out_n == 4
5559 && in_mode == SFmode && in_n == 4)
5560 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5561 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5562 && out_mode == SFmode && out_n == 4
5563 && in_mode == SFmode && in_n == 4)
5564 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5565 break;
5566 CASE_CFN_CEIL:
5567 if (VECTOR_UNIT_VSX_P (V2DFmode)
5568 && out_mode == DFmode && out_n == 2
5569 && in_mode == DFmode && in_n == 2)
5570 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5571 if (VECTOR_UNIT_VSX_P (V4SFmode)
5572 && out_mode == SFmode && out_n == 4
5573 && in_mode == SFmode && in_n == 4)
5574 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5575 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5576 && out_mode == SFmode && out_n == 4
5577 && in_mode == SFmode && in_n == 4)
5578 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5579 break;
5580 CASE_CFN_FLOOR:
5581 if (VECTOR_UNIT_VSX_P (V2DFmode)
5582 && out_mode == DFmode && out_n == 2
5583 && in_mode == DFmode && in_n == 2)
5584 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5585 if (VECTOR_UNIT_VSX_P (V4SFmode)
5586 && out_mode == SFmode && out_n == 4
5587 && in_mode == SFmode && in_n == 4)
5588 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5589 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5590 && out_mode == SFmode && out_n == 4
5591 && in_mode == SFmode && in_n == 4)
5592 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5593 break;
5594 CASE_CFN_FMA:
5595 if (VECTOR_UNIT_VSX_P (V2DFmode)
5596 && out_mode == DFmode && out_n == 2
5597 && in_mode == DFmode && in_n == 2)
5598 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5599 if (VECTOR_UNIT_VSX_P (V4SFmode)
5600 && out_mode == SFmode && out_n == 4
5601 && in_mode == SFmode && in_n == 4)
5602 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5603 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5604 && out_mode == SFmode && out_n == 4
5605 && in_mode == SFmode && in_n == 4)
5606 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5607 break;
5608 CASE_CFN_TRUNC:
5609 if (VECTOR_UNIT_VSX_P (V2DFmode)
5610 && out_mode == DFmode && out_n == 2
5611 && in_mode == DFmode && in_n == 2)
5612 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5613 if (VECTOR_UNIT_VSX_P (V4SFmode)
5614 && out_mode == SFmode && out_n == 4
5615 && in_mode == SFmode && in_n == 4)
5616 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5617 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5618 && out_mode == SFmode && out_n == 4
5619 && in_mode == SFmode && in_n == 4)
5620 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5621 break;
5622 CASE_CFN_NEARBYINT:
5623 if (VECTOR_UNIT_VSX_P (V2DFmode)
5624 && flag_unsafe_math_optimizations
5625 && out_mode == DFmode && out_n == 2
5626 && in_mode == DFmode && in_n == 2)
5627 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5628 if (VECTOR_UNIT_VSX_P (V4SFmode)
5629 && flag_unsafe_math_optimizations
5630 && out_mode == SFmode && out_n == 4
5631 && in_mode == SFmode && in_n == 4)
5632 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5633 break;
5634 CASE_CFN_RINT:
5635 if (VECTOR_UNIT_VSX_P (V2DFmode)
5636 && !flag_trapping_math
5637 && out_mode == DFmode && out_n == 2
5638 && in_mode == DFmode && in_n == 2)
5639 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5640 if (VECTOR_UNIT_VSX_P (V4SFmode)
5641 && !flag_trapping_math
5642 && out_mode == SFmode && out_n == 4
5643 && in_mode == SFmode && in_n == 4)
5644 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5645 break;
5646 default:
5647 break;
5648 }
5649
5650 /* Generate calls to libmass if appropriate. */
5651 if (rs6000_veclib_handler)
5652 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5653
5654 return NULL_TREE;
5655 }
5656
5657 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5658
5659 static tree
5660 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5661 tree type_in)
5662 {
5663 machine_mode in_mode, out_mode;
5664 int in_n, out_n;
5665
5666 if (TARGET_DEBUG_BUILTIN)
5667 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5668 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5669 GET_MODE_NAME (TYPE_MODE (type_out)),
5670 GET_MODE_NAME (TYPE_MODE (type_in)));
5671
5672 if (TREE_CODE (type_out) != VECTOR_TYPE
5673 || TREE_CODE (type_in) != VECTOR_TYPE)
5674 return NULL_TREE;
5675
5676 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5677 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5678 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5679 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5680
5681 enum rs6000_builtins fn
5682 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5683 switch (fn)
5684 {
5685 case RS6000_BUILTIN_RSQRTF:
5686 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5687 && out_mode == SFmode && out_n == 4
5688 && in_mode == SFmode && in_n == 4)
5689 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5690 break;
5691 case RS6000_BUILTIN_RSQRT:
5692 if (VECTOR_UNIT_VSX_P (V2DFmode)
5693 && out_mode == DFmode && out_n == 2
5694 && in_mode == DFmode && in_n == 2)
5695 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5696 break;
5697 case RS6000_BUILTIN_RECIPF:
5698 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5699 && out_mode == SFmode && out_n == 4
5700 && in_mode == SFmode && in_n == 4)
5701 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5702 break;
5703 case RS6000_BUILTIN_RECIP:
5704 if (VECTOR_UNIT_VSX_P (V2DFmode)
5705 && out_mode == DFmode && out_n == 2
5706 && in_mode == DFmode && in_n == 2)
5707 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5708 break;
5709 default:
5710 break;
5711 }
5712 return NULL_TREE;
5713 }
5714 \f
5715 /* Default CPU string for rs6000*_file_start functions. */
5716 static const char *rs6000_default_cpu;
5717
5718 /* Do anything needed at the start of the asm file. */
5719
5720 static void
5721 rs6000_file_start (void)
5722 {
5723 char buffer[80];
5724 const char *start = buffer;
5725 FILE *file = asm_out_file;
5726
5727 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5728
5729 default_file_start ();
5730
5731 if (flag_verbose_asm)
5732 {
5733 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5734
5735 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5736 {
5737 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5738 start = "";
5739 }
5740
5741 if (global_options_set.x_rs6000_cpu_index)
5742 {
5743 fprintf (file, "%s -mcpu=%s", start,
5744 processor_target_table[rs6000_cpu_index].name);
5745 start = "";
5746 }
5747
5748 if (global_options_set.x_rs6000_tune_index)
5749 {
5750 fprintf (file, "%s -mtune=%s", start,
5751 processor_target_table[rs6000_tune_index].name);
5752 start = "";
5753 }
5754
5755 if (PPC405_ERRATUM77)
5756 {
5757 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5758 start = "";
5759 }
5760
5761 #ifdef USING_ELFOS_H
5762 switch (rs6000_sdata)
5763 {
5764 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5765 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5766 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5767 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5768 }
5769
5770 if (rs6000_sdata && g_switch_value)
5771 {
5772 fprintf (file, "%s -G %d", start,
5773 g_switch_value);
5774 start = "";
5775 }
5776 #endif
5777
5778 if (*start == '\0')
5779 putc ('\n', file);
5780 }
5781
5782 #ifdef USING_ELFOS_H
5783 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5784 && !global_options_set.x_rs6000_cpu_index)
5785 {
5786 fputs ("\t.machine ", asm_out_file);
5787 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5788 fputs ("power9\n", asm_out_file);
5789 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5790 fputs ("power8\n", asm_out_file);
5791 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5792 fputs ("power7\n", asm_out_file);
5793 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5794 fputs ("power6\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5796 fputs ("power5\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5798 fputs ("power4\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5800 fputs ("ppc64\n", asm_out_file);
5801 else
5802 fputs ("ppc\n", asm_out_file);
5803 }
5804 #endif
5805
5806 if (DEFAULT_ABI == ABI_ELFv2)
5807 fprintf (file, "\t.abiversion 2\n");
5808 }
5809
5810 \f
5811 /* Return nonzero if this function is known to have a null epilogue. */
5812
5813 int
5814 direct_return (void)
5815 {
5816 if (reload_completed)
5817 {
5818 rs6000_stack_t *info = rs6000_stack_info ();
5819
5820 if (info->first_gp_reg_save == 32
5821 && info->first_fp_reg_save == 64
5822 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5823 && ! info->lr_save_p
5824 && ! info->cr_save_p
5825 && info->vrsave_size == 0
5826 && ! info->push_p)
5827 return 1;
5828 }
5829
5830 return 0;
5831 }
5832
5833 /* Helper for num_insns_constant. Calculate number of instructions to
5834 load VALUE to a single gpr using combinations of addi, addis, ori,
5835 oris and sldi instructions. */
5836
5837 static int
5838 num_insns_constant_gpr (HOST_WIDE_INT value)
5839 {
5840   /* Signed constant loadable with addi.  */
5841 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5842 return 1;
5843
5844   /* Constant loadable with addis.  */
5845 else if ((value & 0xffff) == 0
5846 && (value >> 31 == -1 || value >> 31 == 0))
5847 return 1;
5848
5849 else if (TARGET_POWERPC64)
5850 {
5851 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5852 HOST_WIDE_INT high = value >> 31;
5853
5854 if (high == 0 || high == -1)
5855 return 2;
5856
5857 high >>= 1;
5858
5859 if (low == 0)
5860 return num_insns_constant_gpr (high) + 1;
5861 else if (high == 0)
5862 return num_insns_constant_gpr (low) + 1;
5863 else
5864 return (num_insns_constant_gpr (high)
5865 + num_insns_constant_gpr (low) + 1);
5866 }
5867
5868 else
5869 return 2;
5870 }
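
/* Worked example (assuming TARGET_POWERPC64): for the 64-bit constant
   0x123400005678, neither the addi nor the addis case applies, so the
   value splits into high == 0x1234 and low == 0x5678, each loadable in
   one instruction, plus one instruction to combine them, e.g.

	li    r9,0x1234
	sldi  r9,r9,32
	ori   r9,r9,0x5678

   for a result of 3.  */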
5871
5872 /* Helper for num_insns_constant. Allow constants formed by the
5873 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5874 and handle modes that require multiple gprs. */
5875
5876 static int
5877 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5878 {
5879 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5880 int total = 0;
5881 while (nregs-- > 0)
5882 {
5883 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5884 int insns = num_insns_constant_gpr (low);
5885 if (insns > 2
5886 /* We won't get more than 2 from num_insns_constant_gpr
5887 except when TARGET_POWERPC64 and mode is DImode or
5888 wider, so the register mode must be DImode. */
5889 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5890 insns = 2;
5891 total += insns;
5892 value >>= BITS_PER_WORD;
5893 }
5894 return total;
5895 }
5896
5897 /* Return the number of instructions it takes to form a constant in as
5898    many GPRs as are needed for MODE.  */
5899
5900 int
5901 num_insns_constant (rtx op, machine_mode mode)
5902 {
5903 HOST_WIDE_INT val;
5904
5905 switch (GET_CODE (op))
5906 {
5907 case CONST_INT:
5908 val = INTVAL (op);
5909 break;
5910
5911 case CONST_WIDE_INT:
5912 {
5913 int insns = 0;
5914 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5915 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5916 DImode);
5917 return insns;
5918 }
5919
5920 case CONST_DOUBLE:
5921 {
5922 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5923
5924 if (mode == SFmode || mode == SDmode)
5925 {
5926 long l;
5927
5928 if (mode == SDmode)
5929 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5930 else
5931 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5932 /* See the first define_split in rs6000.md handling a
5933 const_double_operand. */
5934 val = l;
5935 mode = SImode;
5936 }
5937 else if (mode == DFmode || mode == DDmode)
5938 {
5939 long l[2];
5940
5941 if (mode == DDmode)
5942 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5943 else
5944 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5945
5946 /* See the second (32-bit) and third (64-bit) define_split
5947 in rs6000.md handling a const_double_operand. */
5948 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5949 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5950 mode = DImode;
5951 }
5952 else if (mode == TFmode || mode == TDmode
5953 || mode == KFmode || mode == IFmode)
5954 {
5955 long l[4];
5956 int insns;
5957
5958 if (mode == TDmode)
5959 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5960 else
5961 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5962
5963 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5964 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5965 insns = num_insns_constant_multi (val, DImode);
5966 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5967 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5968 insns += num_insns_constant_multi (val, DImode);
5969 return insns;
5970 }
5971 else
5972 gcc_unreachable ();
5973 }
5974 break;
5975
5976 default:
5977 gcc_unreachable ();
5978 }
5979
5980 return num_insns_constant_multi (val, mode);
5981 }
5982
5983 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5984 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5985 corresponding element of the vector, but for V4SFmode, the
5986 corresponding "float" is interpreted as an SImode integer. */
5987
5988 HOST_WIDE_INT
5989 const_vector_elt_as_int (rtx op, unsigned int elt)
5990 {
5991 rtx tmp;
5992
5993 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5994 gcc_assert (GET_MODE (op) != V2DImode
5995 && GET_MODE (op) != V2DFmode);
5996
5997 tmp = CONST_VECTOR_ELT (op, elt);
5998 if (GET_MODE (op) == V4SFmode)
5999 tmp = gen_lowpart (SImode, tmp);
6000 return INTVAL (tmp);
6001 }
6002
6003 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6004 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6005 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6006 all items are set to the same value and contain COPIES replicas of the
6007    vsplt's operand; if STEP > 1, every STEP-th element is set to the vsplt's
6008    operand and the others are set to the value of the operand's msb.  */
6009
6010 static bool
6011 vspltis_constant (rtx op, unsigned step, unsigned copies)
6012 {
6013 machine_mode mode = GET_MODE (op);
6014 machine_mode inner = GET_MODE_INNER (mode);
6015
6016 unsigned i;
6017 unsigned nunits;
6018 unsigned bitsize;
6019 unsigned mask;
6020
6021 HOST_WIDE_INT val;
6022 HOST_WIDE_INT splat_val;
6023 HOST_WIDE_INT msb_val;
6024
6025 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6026 return false;
6027
6028 nunits = GET_MODE_NUNITS (mode);
6029 bitsize = GET_MODE_BITSIZE (inner);
6030 mask = GET_MODE_MASK (inner);
6031
6032 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6033 splat_val = val;
6034 msb_val = val >= 0 ? 0 : -1;
6035
6036 /* Construct the value to be splatted, if possible. If not, return 0. */
6037 for (i = 2; i <= copies; i *= 2)
6038 {
6039 HOST_WIDE_INT small_val;
6040 bitsize /= 2;
6041 small_val = splat_val >> bitsize;
6042 mask >>= bitsize;
6043 if (splat_val != ((HOST_WIDE_INT)
6044 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6045 | (small_val & mask)))
6046 return false;
6047 splat_val = small_val;
6048 }
6049
6050 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6051 if (EASY_VECTOR_15 (splat_val))
6052 ;
6053
6054 /* Also check if we can splat, and then add the result to itself. Do so if
6055    the value is positive, or if the splat instruction is using OP's mode;
6056 for splat_val < 0, the splat and the add should use the same mode. */
6057 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6058 && (splat_val >= 0 || (step == 1 && copies == 1)))
6059 ;
6060
6061   /* Also check if we are loading up the most significant bit, which can be done
6062      by loading up -1 and shifting the value left by -1.  */
6063 else if (EASY_VECTOR_MSB (splat_val, inner))
6064 ;
6065
6066 else
6067 return false;
6068
6069 /* Check if VAL is present in every STEP-th element, and the
6070 other elements are filled with its most significant bit. */
6071 for (i = 1; i < nunits; ++i)
6072 {
6073 HOST_WIDE_INT desired_val;
6074 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6075 if ((i & (step - 1)) == 0)
6076 desired_val = val;
6077 else
6078 desired_val = msb_val;
6079
6080 if (desired_val != const_vector_elt_as_int (op, elt))
6081 return false;
6082 }
6083
6084 return true;
6085 }
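
/* Worked example for the STEP/COPIES logic above: the V4SI constant
   { 0x00050005, 0x00050005, 0x00050005, 0x00050005 } is not a vspltisw
   splat, but with step == 1 and copies == 2 each 32-bit element is two
   replicas of the 16-bit value 5, so a single "vspltish v0,5" (operating
   on the same register viewed as V8HI) materializes it.  */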
6086
6087 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6088 instruction, filling in the bottom elements with 0 or -1.
6089
6090 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6091 for the number of zeroes to shift in, or negative for the number of 0xff
6092 bytes to shift in.
6093
6094 OP is a CONST_VECTOR. */
6095
6096 int
6097 vspltis_shifted (rtx op)
6098 {
6099 machine_mode mode = GET_MODE (op);
6100 machine_mode inner = GET_MODE_INNER (mode);
6101
6102 unsigned i, j;
6103 unsigned nunits;
6104 unsigned mask;
6105
6106 HOST_WIDE_INT val;
6107
6108 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6109     return 0;
6110
6111 /* We need to create pseudo registers to do the shift, so don't recognize
6112 shift vector constants after reload. */
6113 if (!can_create_pseudo_p ())
6114     return 0;
6115
6116 nunits = GET_MODE_NUNITS (mode);
6117 mask = GET_MODE_MASK (inner);
6118
6119 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6120
6121 /* Check if the value can really be the operand of a vspltis[bhw]. */
6122 if (EASY_VECTOR_15 (val))
6123 ;
6124
6125   /* Also check if we are loading up the most significant bit, which can be done
6126      by loading up -1 and shifting the value left by -1.  */
6127 else if (EASY_VECTOR_MSB (val, inner))
6128 ;
6129
6130 else
6131 return 0;
6132
6133   /* Check if VAL is present in every element until we find elements
6134      that are 0 or all 1 bits.  */
6135 for (i = 1; i < nunits; ++i)
6136 {
6137 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6138 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6139
6140 /* If the value isn't the splat value, check for the remaining elements
6141 being 0/-1. */
6142 if (val != elt_val)
6143 {
6144 if (elt_val == 0)
6145 {
6146 for (j = i+1; j < nunits; ++j)
6147 {
6148 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6149 if (const_vector_elt_as_int (op, elt2) != 0)
6150 return 0;
6151 }
6152
6153 return (nunits - i) * GET_MODE_SIZE (inner);
6154 }
6155
6156 else if ((elt_val & mask) == mask)
6157 {
6158 for (j = i+1; j < nunits; ++j)
6159 {
6160 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6161 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6162 return 0;
6163 }
6164
6165 return -((nunits - i) * GET_MODE_SIZE (inner));
6166 }
6167
6168 else
6169 return 0;
6170 }
6171 }
6172
6173   /* If all elements are equal, we don't need to do VSLDOI.  */
6174 return 0;
6175 }
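
/* Worked example for vspltis_shifted: on a big-endian target, the V16QI
   constant { 5, 0, 0, ..., 0 } has splat value 5 followed by fifteen zero
   bytes, so the routine returns (16 - 1) * 1 == 15, meaning a vspltisb
   of 5 followed by a VSLDOI that shifts in 15 zero bytes recreates it.  */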
6176
6177
6178 /* Return true if OP is of the given MODE and can be synthesized
6179 with a vspltisb, vspltish or vspltisw. */
6180
6181 bool
6182 easy_altivec_constant (rtx op, machine_mode mode)
6183 {
6184 unsigned step, copies;
6185
6186 if (mode == VOIDmode)
6187 mode = GET_MODE (op);
6188 else if (mode != GET_MODE (op))
6189 return false;
6190
6191 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6192 constants. */
6193 if (mode == V2DFmode)
6194 return zero_constant (op, mode);
6195
6196 else if (mode == V2DImode)
6197 {
6198 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6199 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6200 return false;
6201
6202 if (zero_constant (op, mode))
6203 return true;
6204
6205 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6206 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6207 return true;
6208
6209 return false;
6210 }
6211
6212 /* V1TImode is a special container for TImode. Ignore for now. */
6213 else if (mode == V1TImode)
6214 return false;
6215
6216 /* Start with a vspltisw. */
6217 step = GET_MODE_NUNITS (mode) / 4;
6218 copies = 1;
6219
6220 if (vspltis_constant (op, step, copies))
6221 return true;
6222
6223 /* Then try with a vspltish. */
6224 if (step == 1)
6225 copies <<= 1;
6226 else
6227 step >>= 1;
6228
6229 if (vspltis_constant (op, step, copies))
6230 return true;
6231
6232 /* And finally a vspltisb. */
6233 if (step == 1)
6234 copies <<= 1;
6235 else
6236 step >>= 1;
6237
6238 if (vspltis_constant (op, step, copies))
6239 return true;
6240
6241 if (vspltis_shifted (op) != 0)
6242 return true;
6243
6244 return false;
6245 }
6246
6247 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6248 result is OP. Abort if it is not possible. */
6249
6250 rtx
6251 gen_easy_altivec_constant (rtx op)
6252 {
6253 machine_mode mode = GET_MODE (op);
6254 int nunits = GET_MODE_NUNITS (mode);
6255 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6256 unsigned step = nunits / 4;
6257 unsigned copies = 1;
6258
6259 /* Start with a vspltisw. */
6260 if (vspltis_constant (op, step, copies))
6261 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6262
6263 /* Then try with a vspltish. */
6264 if (step == 1)
6265 copies <<= 1;
6266 else
6267 step >>= 1;
6268
6269 if (vspltis_constant (op, step, copies))
6270 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6271
6272 /* And finally a vspltisb. */
6273 if (step == 1)
6274 copies <<= 1;
6275 else
6276 step >>= 1;
6277
6278 if (vspltis_constant (op, step, copies))
6279 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6280
6281 gcc_unreachable ();
6282 }
6283
6284 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6285 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6286
6287    Store the number of instructions needed (1 or 2) at the address pointed
6288    to by NUM_INSNS_PTR.
6289 
6290    Store the constant that is being split at CONSTANT_PTR.  */
6291
6292 bool
6293 xxspltib_constant_p (rtx op,
6294 machine_mode mode,
6295 int *num_insns_ptr,
6296 int *constant_ptr)
6297 {
6298 size_t nunits = GET_MODE_NUNITS (mode);
6299 size_t i;
6300 HOST_WIDE_INT value;
6301 rtx element;
6302
6303   /* Set the returned values to out-of-bounds values.  */
6304 *num_insns_ptr = -1;
6305 *constant_ptr = 256;
6306
6307 if (!TARGET_P9_VECTOR)
6308 return false;
6309
6310 if (mode == VOIDmode)
6311 mode = GET_MODE (op);
6312
6313 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6314 return false;
6315
6316 /* Handle (vec_duplicate <constant>). */
6317 if (GET_CODE (op) == VEC_DUPLICATE)
6318 {
6319 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6320 && mode != V2DImode)
6321 return false;
6322
6323 element = XEXP (op, 0);
6324 if (!CONST_INT_P (element))
6325 return false;
6326
6327 value = INTVAL (element);
6328 if (!IN_RANGE (value, -128, 127))
6329 return false;
6330 }
6331
6332 /* Handle (const_vector [...]). */
6333 else if (GET_CODE (op) == CONST_VECTOR)
6334 {
6335 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6336 && mode != V2DImode)
6337 return false;
6338
6339 element = CONST_VECTOR_ELT (op, 0);
6340 if (!CONST_INT_P (element))
6341 return false;
6342
6343 value = INTVAL (element);
6344 if (!IN_RANGE (value, -128, 127))
6345 return false;
6346
6347 for (i = 1; i < nunits; i++)
6348 {
6349 element = CONST_VECTOR_ELT (op, i);
6350 if (!CONST_INT_P (element))
6351 return false;
6352
6353 if (value != INTVAL (element))
6354 return false;
6355 }
6356 }
6357
6358 /* Handle integer constants being loaded into the upper part of the VSX
6359 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6360      can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
6361 else if (CONST_INT_P (op))
6362 {
6363 if (!SCALAR_INT_MODE_P (mode))
6364 return false;
6365
6366 value = INTVAL (op);
6367 if (!IN_RANGE (value, -128, 127))
6368 return false;
6369
6370 if (!IN_RANGE (value, -1, 0))
6371 {
6372 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6373 return false;
6374
6375 if (EASY_VECTOR_15 (value))
6376 return false;
6377 }
6378 }
6379
6380 else
6381 return false;
6382
6383 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6384 sign extend. Special case 0/-1 to allow getting any VSX register instead
6385 of an Altivec register. */
6386 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6387 && EASY_VECTOR_15 (value))
6388 return false;
6389
6390 /* Return # of instructions and the constant byte for XXSPLTIB. */
6391 if (mode == V16QImode)
6392 *num_insns_ptr = 1;
6393
6394 else if (IN_RANGE (value, -1, 0))
6395 *num_insns_ptr = 1;
6396
6397 else
6398 *num_insns_ptr = 2;
6399
6400 *constant_ptr = (int) value;
6401 return true;
6402 }
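
/* Worked examples for the xxspltib tests above: a V16QImode splat of 0x40
   needs one instruction (xxspltib), a V8HImode splat of 0x40 needs two
   (xxspltib plus a sign extend such as vupkhsb), while a V8HImode splat
   of 5 returns false because EASY_VECTOR_15 holds and a single vspltish
   is preferred.  */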
6403
6404 const char *
6405 output_vec_const_move (rtx *operands)
6406 {
6407 int shift;
6408 machine_mode mode;
6409 rtx dest, vec;
6410
6411 dest = operands[0];
6412 vec = operands[1];
6413 mode = GET_MODE (dest);
6414
6415 if (TARGET_VSX)
6416 {
6417 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6418 int xxspltib_value = 256;
6419 int num_insns = -1;
6420
6421 if (zero_constant (vec, mode))
6422 {
6423 if (TARGET_P9_VECTOR)
6424 return "xxspltib %x0,0";
6425
6426 else if (dest_vmx_p)
6427 return "vspltisw %0,0";
6428
6429 else
6430 return "xxlxor %x0,%x0,%x0";
6431 }
6432
6433 if (all_ones_constant (vec, mode))
6434 {
6435 if (TARGET_P9_VECTOR)
6436 return "xxspltib %x0,255";
6437
6438 else if (dest_vmx_p)
6439 return "vspltisw %0,-1";
6440
6441 else if (TARGET_P8_VECTOR)
6442 return "xxlorc %x0,%x0,%x0";
6443
6444 else
6445 gcc_unreachable ();
6446 }
6447
6448 if (TARGET_P9_VECTOR
6449 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6450 {
6451 if (num_insns == 1)
6452 {
6453 operands[2] = GEN_INT (xxspltib_value & 0xff);
6454 return "xxspltib %x0,%2";
6455 }
6456
6457 return "#";
6458 }
6459 }
6460
6461 if (TARGET_ALTIVEC)
6462 {
6463 rtx splat_vec;
6464
6465 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6466 if (zero_constant (vec, mode))
6467 return "vspltisw %0,0";
6468
6469 if (all_ones_constant (vec, mode))
6470 return "vspltisw %0,-1";
6471
6472 /* Do we need to construct a value using VSLDOI? */
6473 shift = vspltis_shifted (vec);
6474 if (shift != 0)
6475 return "#";
6476
6477 splat_vec = gen_easy_altivec_constant (vec);
6478 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6479 operands[1] = XEXP (splat_vec, 0);
6480 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6481 return "#";
6482
6483 switch (GET_MODE (splat_vec))
6484 {
6485 case E_V4SImode:
6486 return "vspltisw %0,%1";
6487
6488 case E_V8HImode:
6489 return "vspltish %0,%1";
6490
6491 case E_V16QImode:
6492 return "vspltisb %0,%1";
6493
6494 default:
6495 gcc_unreachable ();
6496 }
6497 }
6498
6499 gcc_unreachable ();
6500 }
6501
6502 /* Initialize vector TARGET to VALS. */
6503
6504 void
6505 rs6000_expand_vector_init (rtx target, rtx vals)
6506 {
6507 machine_mode mode = GET_MODE (target);
6508 machine_mode inner_mode = GET_MODE_INNER (mode);
6509 int n_elts = GET_MODE_NUNITS (mode);
6510 int n_var = 0, one_var = -1;
6511 bool all_same = true, all_const_zero = true;
6512 rtx x, mem;
6513 int i;
6514
6515 for (i = 0; i < n_elts; ++i)
6516 {
6517 x = XVECEXP (vals, 0, i);
6518 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6519 ++n_var, one_var = i;
6520 else if (x != CONST0_RTX (inner_mode))
6521 all_const_zero = false;
6522
6523 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6524 all_same = false;
6525 }
6526
6527 if (n_var == 0)
6528 {
6529 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6530 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6531 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6532 {
6533 /* Zero register. */
6534 emit_move_insn (target, CONST0_RTX (mode));
6535 return;
6536 }
6537 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6538 {
6539 /* Splat immediate. */
6540 emit_insn (gen_rtx_SET (target, const_vec));
6541 return;
6542 }
6543 else
6544 {
6545 /* Load from constant pool. */
6546 emit_move_insn (target, const_vec);
6547 return;
6548 }
6549 }
6550
6551 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6552 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6553 {
6554 rtx op[2];
6555 size_t i;
6556 size_t num_elements = all_same ? 1 : 2;
6557 for (i = 0; i < num_elements; i++)
6558 {
6559 op[i] = XVECEXP (vals, 0, i);
6560 /* Just in case there is a SUBREG with a smaller mode, do a
6561 conversion. */
6562 if (GET_MODE (op[i]) != inner_mode)
6563 {
6564 rtx tmp = gen_reg_rtx (inner_mode);
6565 convert_move (tmp, op[i], 0);
6566 op[i] = tmp;
6567 }
6568 /* Allow load with splat double word. */
6569 else if (MEM_P (op[i]))
6570 {
6571 if (!all_same)
6572 op[i] = force_reg (inner_mode, op[i]);
6573 }
6574 else if (!REG_P (op[i]))
6575 op[i] = force_reg (inner_mode, op[i]);
6576 }
6577
6578 if (all_same)
6579 {
6580 if (mode == V2DFmode)
6581 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6582 else
6583 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6584 }
6585 else
6586 {
6587 if (mode == V2DFmode)
6588 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6589 else
6590 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6591 }
6592 return;
6593 }
6594
6595 /* Special case initializing vector int if we are on 64-bit systems with
6596 direct move or we have the ISA 3.0 instructions. */
6597 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6598 && TARGET_DIRECT_MOVE_64BIT)
6599 {
6600 if (all_same)
6601 {
6602 rtx element0 = XVECEXP (vals, 0, 0);
6603 if (MEM_P (element0))
6604 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6605 else
6606 element0 = force_reg (SImode, element0);
6607
6608 if (TARGET_P9_VECTOR)
6609 emit_insn (gen_vsx_splat_v4si (target, element0));
6610 else
6611 {
6612 rtx tmp = gen_reg_rtx (DImode);
6613 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6614 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6615 }
6616 return;
6617 }
6618 else
6619 {
6620 rtx elements[4];
6621 size_t i;
6622
6623 for (i = 0; i < 4; i++)
6624 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6625
6626 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6627 elements[2], elements[3]));
6628 return;
6629 }
6630 }
6631
6632   /* With single-precision floating point on VSX, single precision is
6633      internally represented as double precision, so either make 2 V2DF
6634      vectors and convert those to single precision, or do one conversion
6635      and splat the result to the other elements.  */
6636 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6637 {
6638 if (all_same)
6639 {
6640 rtx element0 = XVECEXP (vals, 0, 0);
6641
6642 if (TARGET_P9_VECTOR)
6643 {
6644 if (MEM_P (element0))
6645 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6646
6647 emit_insn (gen_vsx_splat_v4sf (target, element0));
6648 }
6649
6650 else
6651 {
6652 rtx freg = gen_reg_rtx (V4SFmode);
6653 rtx sreg = force_reg (SFmode, element0);
6654 rtx cvt = (TARGET_XSCVDPSPN
6655 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6656 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6657
6658 emit_insn (cvt);
6659 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6660 const0_rtx));
6661 }
6662 }
6663 else
6664 {
6665 rtx dbl_even = gen_reg_rtx (V2DFmode);
6666 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6667 rtx flt_even = gen_reg_rtx (V4SFmode);
6668 rtx flt_odd = gen_reg_rtx (V4SFmode);
6669 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6670 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6671 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6672 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6673
6674 /* Use VMRGEW if we can instead of doing a permute. */
6675 if (TARGET_P8_VECTOR)
6676 {
6677 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6678 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6679 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6680 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6681 if (BYTES_BIG_ENDIAN)
6682 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6683 else
6684 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6685 }
6686 else
6687 {
6688 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6689 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6690 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6691 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6692 rs6000_expand_extract_even (target, flt_even, flt_odd);
6693 }
6694 }
6695 return;
6696 }
6697
6698 /* Special case initializing vector short/char that are splats if we are on
6699 64-bit systems with direct move. */
6700 if (all_same && TARGET_DIRECT_MOVE_64BIT
6701 && (mode == V16QImode || mode == V8HImode))
6702 {
6703 rtx op0 = XVECEXP (vals, 0, 0);
6704 rtx di_tmp = gen_reg_rtx (DImode);
6705
6706 if (!REG_P (op0))
6707 op0 = force_reg (GET_MODE_INNER (mode), op0);
6708
6709 if (mode == V16QImode)
6710 {
6711 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6712 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6713 return;
6714 }
6715
6716 if (mode == V8HImode)
6717 {
6718 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6719 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6720 return;
6721 }
6722 }
6723
6724 /* Store value to stack temp. Load vector element. Splat. However, splat
6725 of 64-bit items is not supported on Altivec. */
6726 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6727 {
6728 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6729 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6730 XVECEXP (vals, 0, 0));
6731 x = gen_rtx_UNSPEC (VOIDmode,
6732 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6733 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6734 gen_rtvec (2,
6735 gen_rtx_SET (target, mem),
6736 x)));
6737 x = gen_rtx_VEC_SELECT (inner_mode, target,
6738 gen_rtx_PARALLEL (VOIDmode,
6739 gen_rtvec (1, const0_rtx)));
6740 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6741 return;
6742 }
6743
6744 /* One field is non-constant. Load constant then overwrite
6745 varying field. */
6746 if (n_var == 1)
6747 {
6748 rtx copy = copy_rtx (vals);
6749
6750 /* Load constant part of vector, substitute neighboring value for
6751 varying element. */
6752 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6753 rs6000_expand_vector_init (target, copy);
6754
6755 /* Insert variable. */
6756 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6757 return;
6758 }
6759
6760 /* Construct the vector in memory one field at a time
6761 and load the whole vector. */
6762 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6763 for (i = 0; i < n_elts; i++)
6764 emit_move_insn (adjust_address_nv (mem, inner_mode,
6765 i * GET_MODE_SIZE (inner_mode)),
6766 XVECEXP (vals, 0, i));
6767 emit_move_insn (target, mem);
6768 }
6769
6770 /* Set field ELT of TARGET to VAL. */
6771
6772 void
6773 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6774 {
6775 machine_mode mode = GET_MODE (target);
6776 machine_mode inner_mode = GET_MODE_INNER (mode);
6777 rtx reg = gen_reg_rtx (mode);
6778 rtx mask, mem, x;
6779 int width = GET_MODE_SIZE (inner_mode);
6780 int i;
6781
6782 val = force_reg (GET_MODE (val), val);
6783
6784 if (VECTOR_MEM_VSX_P (mode))
6785 {
6786 rtx insn = NULL_RTX;
6787 rtx elt_rtx = GEN_INT (elt);
6788
6789 if (mode == V2DFmode)
6790 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6791
6792 else if (mode == V2DImode)
6793 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6794
6795 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6796 {
6797 if (mode == V4SImode)
6798 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6799 else if (mode == V8HImode)
6800 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6801 else if (mode == V16QImode)
6802 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6803 else if (mode == V4SFmode)
6804 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6805 }
6806
6807 if (insn)
6808 {
6809 emit_insn (insn);
6810 return;
6811 }
6812 }
6813
6814 /* Simplify setting single element vectors like V1TImode. */
6815 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6816 {
6817 emit_move_insn (target, gen_lowpart (mode, val));
6818 return;
6819 }
6820
6821 /* Load single variable value. */
6822 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6823 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6824 x = gen_rtx_UNSPEC (VOIDmode,
6825 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6826 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6827 gen_rtvec (2,
6828 gen_rtx_SET (reg, mem),
6829 x)));
6830
6831 /* Linear sequence. */
6832 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6833 for (i = 0; i < 16; ++i)
6834 XVECEXP (mask, 0, i) = GEN_INT (i);
6835
6836 /* Set permute mask to insert element into target. */
6837 for (i = 0; i < width; ++i)
6838 XVECEXP (mask, 0, elt*width + i)
6839 = GEN_INT (i + 0x10);
6840 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6841
6842 if (BYTES_BIG_ENDIAN)
6843 x = gen_rtx_UNSPEC (mode,
6844 gen_rtvec (3, target, reg,
6845 force_reg (V16QImode, x)),
6846 UNSPEC_VPERM);
6847 else
6848 {
6849 if (TARGET_P9_VECTOR)
6850 x = gen_rtx_UNSPEC (mode,
6851 gen_rtvec (3, reg, target,
6852 force_reg (V16QImode, x)),
6853 UNSPEC_VPERMR);
6854 else
6855 {
6856 /* Invert selector. We prefer to generate VNAND on P8 so
6857 that future fusion opportunities can kick in, but must
6858 generate VNOR elsewhere. */
6859 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6860 rtx iorx = (TARGET_P8_VECTOR
6861 ? gen_rtx_IOR (V16QImode, notx, notx)
6862 : gen_rtx_AND (V16QImode, notx, notx));
6863 rtx tmp = gen_reg_rtx (V16QImode);
6864 emit_insn (gen_rtx_SET (tmp, iorx));
6865
6866 /* Permute with operands reversed and adjusted selector. */
6867 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6868 UNSPEC_VPERM);
6869 }
6870 }
6871
6872 emit_insn (gen_rtx_SET (target, x));
6873 }
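/* To illustrate the permute-based fallback above: setting element 1 of
   a V4SImode vector (WIDTH = 4) builds the selector

	{ 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, ..., 15 }

   so bytes 4..7 of the result are taken from bytes 0x10..0x13, i.e. the
   first four bytes of the second vperm input, which holds VAL. On
   little endian the operand order is swapped and the selector is
   inverted (via vpermr on ISA 3.0, or an explicit NOT otherwise).  */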
6874
6875 /* Extract field ELT from VEC into TARGET. */
6876
6877 void
6878 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6879 {
6880 machine_mode mode = GET_MODE (vec);
6881 machine_mode inner_mode = GET_MODE_INNER (mode);
6882 rtx mem;
6883
6884 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6885 {
6886 switch (mode)
6887 {
6888 default:
6889 break;
6890 case E_V1TImode:
6891 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6892 emit_move_insn (target, gen_lowpart (TImode, vec));
6893 return;
6894 case E_V2DFmode:
6895 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6896 return;
6897 case E_V2DImode:
6898 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6899 return;
6900 case E_V4SFmode:
6901 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6902 return;
6903 case E_V16QImode:
6904 if (TARGET_DIRECT_MOVE_64BIT)
6905 {
6906 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6907 return;
6908 }
6909 else
6910 break;
6911 case E_V8HImode:
6912 if (TARGET_DIRECT_MOVE_64BIT)
6913 {
6914 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6915 return;
6916 }
6917 else
6918 break;
6919 case E_V4SImode:
6920 if (TARGET_DIRECT_MOVE_64BIT)
6921 {
6922 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6923 return;
6924 }
6925 break;
6926 }
6927 }
6928 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6929 && TARGET_DIRECT_MOVE_64BIT)
6930 {
6931 if (GET_MODE (elt) != DImode)
6932 {
6933 rtx tmp = gen_reg_rtx (DImode);
6934 convert_move (tmp, elt, 0);
6935 elt = tmp;
6936 }
6937 else if (!REG_P (elt))
6938 elt = force_reg (DImode, elt);
6939
6940 switch (mode)
6941 {
6942 case E_V2DFmode:
6943 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6944 return;
6945
6946 case E_V2DImode:
6947 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6948 return;
6949
6950 case E_V4SFmode:
6951 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6952 return;
6953
6954 case E_V4SImode:
6955 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6956 return;
6957
6958 case E_V8HImode:
6959 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6960 return;
6961
6962 case E_V16QImode:
6963 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6964 return;
6965
6966 default:
6967 gcc_unreachable ();
6968 }
6969 }
6970
6971 gcc_assert (CONST_INT_P (elt));
6972
6973 /* Allocate mode-sized buffer. */
6974 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6975
6976 emit_move_insn (mem, vec);
6977
6978 /* Add offset to field within buffer matching vector element. */
6979 mem = adjust_address_nv (mem, inner_mode,
6980 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6981
6982 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6983 }
6984
6985 /* Helper function to return the register number of an RTX. */
6986 static inline int
6987 regno_or_subregno (rtx op)
6988 {
6989 if (REG_P (op))
6990 return REGNO (op);
6991 else if (SUBREG_P (op))
6992 return subreg_regno (op);
6993 else
6994 gcc_unreachable ();
6995 }
6996
6997 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6998 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6999 temporary (BASE_TMP) to fix up the address. Return the new memory address
7000 that is valid for reads or writes to a given register (SCALAR_REG). */
7001
7002 rtx
7003 rs6000_adjust_vec_address (rtx scalar_reg,
7004 rtx mem,
7005 rtx element,
7006 rtx base_tmp,
7007 machine_mode scalar_mode)
7008 {
7009 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7010 rtx addr = XEXP (mem, 0);
7011 rtx element_offset;
7012 rtx new_addr;
7013 bool valid_addr_p;
7014
7015 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7016 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7017
7018 /* Calculate what we need to add to the address to get the element
7019 address. */
7020 if (CONST_INT_P (element))
7021 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7022 else
7023 {
7024 int byte_shift = exact_log2 (scalar_size);
7025 gcc_assert (byte_shift >= 0);
7026
7027 if (byte_shift == 0)
7028 element_offset = element;
7029
7030 else
7031 {
7032 if (TARGET_POWERPC64)
7033 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7034 else
7035 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7036
7037 element_offset = base_tmp;
7038 }
7039 }
7040
7041 /* Create the new address pointing to the element within the vector. If we
7042 are adding 0, we don't have to change the address. */
7043 if (element_offset == const0_rtx)
7044 new_addr = addr;
7045
7046 /* A simple indirect address can be converted into a reg + offset
7047 address. */
7048 else if (REG_P (addr) || SUBREG_P (addr))
7049 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7050
7051 /* Optimize D-FORM addresses with a constant offset and a constant element
7052 number, folding the element offset into the address directly. */
7053 else if (GET_CODE (addr) == PLUS)
7054 {
7055 rtx op0 = XEXP (addr, 0);
7056 rtx op1 = XEXP (addr, 1);
7057 rtx insn;
7058
7059 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7060 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7061 {
7062 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7063 rtx offset_rtx = GEN_INT (offset);
7064
7065 if (IN_RANGE (offset, -32768, 32767)
7066 && (scalar_size < 8 || (offset & 0x3) == 0))
7067 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7068 else
7069 {
7070 emit_move_insn (base_tmp, offset_rtx);
7071 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7072 }
7073 }
7074 else
7075 {
7076 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7077 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7078
7079 /* Note that ADDI requires the register being added to be a base
7080 register. If the register is R0, load it into the temporary
7081 and do the add there. */
7082 if (op1_reg_p
7083 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7084 {
7085 insn = gen_add3_insn (base_tmp, op1, element_offset);
7086 gcc_assert (insn != NULL_RTX);
7087 emit_insn (insn);
7088 }
7089
7090 else if (ele_reg_p
7091 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7092 {
7093 insn = gen_add3_insn (base_tmp, element_offset, op1);
7094 gcc_assert (insn != NULL_RTX);
7095 emit_insn (insn);
7096 }
7097
7098 else
7099 {
7100 emit_move_insn (base_tmp, op1);
7101 emit_insn (gen_add2_insn (base_tmp, element_offset));
7102 }
7103
7104 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7105 }
7106 }
7107
7108 else
7109 {
7110 emit_move_insn (base_tmp, addr);
7111 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7112 }
7113
7114 /* If we have a PLUS, we need to see whether the particular register class
7115 allows for D-FORM or X-FORM addressing. */
7116 if (GET_CODE (new_addr) == PLUS)
7117 {
7118 rtx op1 = XEXP (new_addr, 1);
7119 addr_mask_type addr_mask;
7120 int scalar_regno = regno_or_subregno (scalar_reg);
7121
7122 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7123 if (INT_REGNO_P (scalar_regno))
7124 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7125
7126 else if (FP_REGNO_P (scalar_regno))
7127 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7128
7129 else if (ALTIVEC_REGNO_P (scalar_regno))
7130 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7131
7132 else
7133 gcc_unreachable ();
7134
7135 if (REG_P (op1) || SUBREG_P (op1))
7136 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7137 else
7138 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7139 }
7140
7141 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7142 valid_addr_p = true;
7143
7144 else
7145 valid_addr_p = false;
7146
7147 if (!valid_addr_p)
7148 {
7149 emit_move_insn (base_tmp, new_addr);
7150 new_addr = base_tmp;
7151 }
7152
7153 return change_address (mem, scalar_mode, new_addr);
7154 }
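/* A worked example: accessing constant element 3 of a V4SImode vector
   whose address is (plus (reg rB) (const_int 16)), with SImode scalars,
   gives ELEMENT_OFFSET = 3 * 4 = 12, which is folded into the existing
   offset to produce (plus (reg rB) (const_int 28)); that is still a
   valid D-form offset, so BASE_TMP is not needed. A variable element
   number is instead shifted left by log2 (4) = 2 into BASE_TMP, giving
   an X-form (reg + reg) address.  */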
7155
7156 /* Split a variable vec_extract operation into the component instructions. */
7157
7158 void
7159 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7160 rtx tmp_altivec)
7161 {
7162 machine_mode mode = GET_MODE (src);
7163 machine_mode scalar_mode = GET_MODE (dest);
7164 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7165 int byte_shift = exact_log2 (scalar_size);
7166
7167 gcc_assert (byte_shift >= 0);
7168
7169 /* If we are given a memory address, optimize to load just the element. We
7170 don't have to adjust the vector element number on little endian
7171 systems. */
7172 if (MEM_P (src))
7173 {
7174 gcc_assert (REG_P (tmp_gpr));
7175 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7176 tmp_gpr, scalar_mode));
7177 return;
7178 }
7179
7180 else if (REG_P (src) || SUBREG_P (src))
7181 {
7182 int bit_shift = byte_shift + 3;
7183 rtx element2;
7184 int dest_regno = regno_or_subregno (dest);
7185 int src_regno = regno_or_subregno (src);
7186 int element_regno = regno_or_subregno (element);
7187
7188 gcc_assert (REG_P (tmp_gpr));
7189
7190 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7191 a general purpose register. */
7192 if (TARGET_P9_VECTOR
7193 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7194 && INT_REGNO_P (dest_regno)
7195 && ALTIVEC_REGNO_P (src_regno)
7196 && INT_REGNO_P (element_regno))
7197 {
7198 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7199 rtx element_si = gen_rtx_REG (SImode, element_regno);
7200
7201 if (mode == V16QImode)
7202 emit_insn (BYTES_BIG_ENDIAN
7203 ? gen_vextublx (dest_si, element_si, src)
7204 : gen_vextubrx (dest_si, element_si, src));
7205
7206 else if (mode == V8HImode)
7207 {
7208 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7209 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7210 emit_insn (BYTES_BIG_ENDIAN
7211 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7212 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7213 }
7214
7215
7216 else
7217 {
7218 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7219 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7220 emit_insn (BYTES_BIG_ENDIAN
7221 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7222 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7223 }
7224
7225 return;
7226 }
7227
7228
7229 gcc_assert (REG_P (tmp_altivec));
7230
7231 /* For little endian, adjust the element ordering. For V2DI/V2DF we can
7232 use an XOR; otherwise we need to subtract. The shift amount is chosen
7233 so that VSLO will shift the element into the upper position (adding 3
7234 converts a byte shift into a bit shift). */
7235 if (scalar_size == 8)
7236 {
7237 if (!BYTES_BIG_ENDIAN)
7238 {
7239 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7240 element2 = tmp_gpr;
7241 }
7242 else
7243 element2 = element;
7244
7245 /* Generate RLDIC directly to shift left 6 bits and keep just the one
7246 resulting bit, which is the shift amount that VSLO expects. */
7247 emit_insn (gen_rtx_SET (tmp_gpr,
7248 gen_rtx_AND (DImode,
7249 gen_rtx_ASHIFT (DImode,
7250 element2,
7251 GEN_INT (6)),
7252 GEN_INT (64))));
7253 }
7254 else
7255 {
7256 if (!BYTES_BIG_ENDIAN)
7257 {
7258 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7259
7260 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7261 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7262 element2 = tmp_gpr;
7263 }
7264 else
7265 element2 = element;
7266
7267 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7268 }
7269
7270 /* Get the value into the lower byte of the Altivec register where VSLO
7271 expects it. */
7272 if (TARGET_P9_VECTOR)
7273 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7274 else if (can_create_pseudo_p ())
7275 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7276 else
7277 {
7278 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7279 emit_move_insn (tmp_di, tmp_gpr);
7280 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7281 }
7282
7283 /* Do the VSLO to get the value into the final location. */
7284 switch (mode)
7285 {
7286 case E_V2DFmode:
7287 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7288 return;
7289
7290 case E_V2DImode:
7291 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7292 return;
7293
7294 case E_V4SFmode:
7295 {
7296 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7297 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7298 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7299 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7300 tmp_altivec));
7301
7302 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7303 return;
7304 }
7305
7306 case E_V4SImode:
7307 case E_V8HImode:
7308 case E_V16QImode:
7309 {
7310 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7311 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7312 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7313 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7314 tmp_altivec));
7315 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7316 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7317 GEN_INT (64 - (8 * scalar_size))));
7318 return;
7319 }
7320
7321 default:
7322 gcc_unreachable ();
7323 }
7324
7325 return;
7326 }
7327 else
7328 gcc_unreachable ();
7329 }
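/* A sketch of the register path for a variable V4SImode extract on a
   little-endian ISA 3.0 system (register numbers are illustrative):

	rldicl r0,r5,0,62	# element & (nunits - 1)
	subfic r0,r0,3		# flip element numbering for LE
	sldi r0,r0,5		# element -> bit shift count (byte * 8)
	mtvsrdd vs0,r0,r0	# move the shift count to a VSR
	vslo v0,v2,v0		# slide the element to the top
	mfvsrd r3,vs32		# move the doubleword to a GPR
	sradi r3,r3,32		# shift the element to the low bits  */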
7330
7331 /* Return the alignment of TYPE. The existing alignment is ALIGN. HOW
7332 selects whether the alignment is ABI-mandated, optional, or
7333 both ABI-mandated and optional alignment. */
7334
7335 unsigned int
7336 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7337 {
7338 if (how != align_opt)
7339 {
7340 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7341 align = 128;
7342 }
7343
7344 if (how != align_abi)
7345 {
7346 if (TREE_CODE (type) == ARRAY_TYPE
7347 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7348 {
7349 if (align < BITS_PER_WORD)
7350 align = BITS_PER_WORD;
7351 }
7352 }
7353
7354 return align;
7355 }
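/* For example, given

	static char buf[100];

   the optional-alignment path raises the QImode array's alignment to
   BITS_PER_WORD, while a vector type such as vector int is raised to
   128 bits whenever ABI alignment is requested.  */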
7356
7357 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7358 instructions simply ignore the low bits; VSX memory instructions
7359 are aligned to 4 or 8 bytes. */
7360
7361 static bool
7362 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7363 {
7364 return (STRICT_ALIGNMENT
7365 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7366 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7367 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7368 && (int) align < VECTOR_ALIGN (mode)))));
7369 }
7370
7371 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7372
7373 bool
7374 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7375 {
7376 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7377 {
7378 if (computed != 128)
7379 {
7380 static bool warned;
7381 if (!warned && warn_psabi)
7382 {
7383 warned = true;
7384 inform (input_location,
7385 "the layout of aggregates containing vectors with"
7386 " %d-byte alignment has changed in GCC 5",
7387 computed / BITS_PER_UNIT);
7388 }
7389 }
7390 /* In current GCC there is no special case. */
7391 return false;
7392 }
7393
7394 return false;
7395 }
7396
7397 /* AIX increases natural record alignment to doubleword if the first
7398 field is an FP double while the FP fields remain word aligned. */
7399
7400 unsigned int
7401 rs6000_special_round_type_align (tree type, unsigned int computed,
7402 unsigned int specified)
7403 {
7404 unsigned int align = MAX (computed, specified);
7405 tree field = TYPE_FIELDS (type);
7406
7407 /* Skip all non-FIELD_DECLs. */
7408 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7409 field = DECL_CHAIN (field);
7410
7411 if (field != NULL && field != type)
7412 {
7413 type = TREE_TYPE (field);
7414 while (TREE_CODE (type) == ARRAY_TYPE)
7415 type = TREE_TYPE (type);
7416
7417 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7418 align = MAX (align, 64);
7419 }
7420
7421 return align;
7422 }
7423
7424 /* Darwin increases record alignment to the natural alignment of
7425 the first field. */
7426
7427 unsigned int
7428 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7429 unsigned int specified)
7430 {
7431 unsigned int align = MAX (computed, specified);
7432
7433 if (TYPE_PACKED (type))
7434 return align;
7435
7436 /* Find the first field, looking down into aggregates. */
7437 do {
7438 tree field = TYPE_FIELDS (type);
7439 /* Skip all non-FIELD_DECLs. */
7440 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7441 field = DECL_CHAIN (field);
7442 if (! field)
7443 break;
7444 /* A packed field does not contribute any extra alignment. */
7445 if (DECL_PACKED (field))
7446 return align;
7447 type = TREE_TYPE (field);
7448 while (TREE_CODE (type) == ARRAY_TYPE)
7449 type = TREE_TYPE (type);
7450 } while (AGGREGATE_TYPE_P (type));
7451
7452 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7453 align = MAX (align, TYPE_ALIGN (type));
7454
7455 return align;
7456 }
7457
7458 /* Return 1 for an operand in small memory on V.4/eabi. */
7459
7460 int
7461 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7462 machine_mode mode ATTRIBUTE_UNUSED)
7463 {
7464 #if TARGET_ELF
7465 rtx sym_ref;
7466
7467 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7468 return 0;
7469
7470 if (DEFAULT_ABI != ABI_V4)
7471 return 0;
7472
7473 if (GET_CODE (op) == SYMBOL_REF)
7474 sym_ref = op;
7475
7476 else if (GET_CODE (op) != CONST
7477 || GET_CODE (XEXP (op, 0)) != PLUS
7478 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
7479 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
7480 return 0;
7481
7482 else
7483 {
7484 rtx sum = XEXP (op, 0);
7485 HOST_WIDE_INT summand;
7486
7487 /* We have to be careful here, because it is the referenced address
7488 that must be 32k from _SDA_BASE_, not just the symbol. */
7489 summand = INTVAL (XEXP (sum, 1));
7490 if (summand < 0 || summand > g_switch_value)
7491 return 0;
7492
7493 sym_ref = XEXP (sum, 0);
7494 }
7495
7496 return SYMBOL_REF_SMALL_P (sym_ref);
7497 #else
7498 return 0;
7499 #endif
7500 }
7501
7502 /* Return true if either operand is a general purpose register. */
7503
7504 bool
7505 gpr_or_gpr_p (rtx op0, rtx op1)
7506 {
7507 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7508 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7509 }
7510
7511 /* Return true if this is a move direct operation between GPR registers and
7512 floating point/VSX registers. */
7513
7514 bool
7515 direct_move_p (rtx op0, rtx op1)
7516 {
7517 int regno0, regno1;
7518
7519 if (!REG_P (op0) || !REG_P (op1))
7520 return false;
7521
7522 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7523 return false;
7524
7525 regno0 = REGNO (op0);
7526 regno1 = REGNO (op1);
7527 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
7528 return false;
7529
7530 if (INT_REGNO_P (regno0))
7531 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7532
7533 else if (INT_REGNO_P (regno1))
7534 {
7535 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7536 return true;
7537
7538 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7539 return true;
7540 }
7541
7542 return false;
7543 }
7544
7545 /* Return true if the OFFSET is valid for the quad address instructions that
7546 use d-form (register + offset) addressing. */
7547
7548 static inline bool
7549 quad_address_offset_p (HOST_WIDE_INT offset)
7550 {
7551 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7552 }
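/* E.g. offsets 0, 16, -32768 and 32752 are acceptable DQ-form offsets,
   while 8 (not a multiple of 16) and 32768 (out of range) are not.  */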
7553
7554 /* Return true if ADDR is an acceptable address for a quad memory
7555 operation of mode MODE (either LQ/STQ for general purpose registers,
7556 or LXV/STXV for vector registers under ISA 3.0). STRICT is true if
7557 the address must pass strict address checking, as with the other
7558 legitimate address predicates. */
7559
7560 bool
7561 quad_address_p (rtx addr, machine_mode mode, bool strict)
7562 {
7563 rtx op0, op1;
7564
7565 if (GET_MODE_SIZE (mode) != 16)
7566 return false;
7567
7568 if (legitimate_indirect_address_p (addr, strict))
7569 return true;
7570
7571 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7572 return false;
7573
7574 if (GET_CODE (addr) != PLUS)
7575 return false;
7576
7577 op0 = XEXP (addr, 0);
7578 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7579 return false;
7580
7581 op1 = XEXP (addr, 1);
7582 if (!CONST_INT_P (op1))
7583 return false;
7584
7585 return quad_address_offset_p (INTVAL (op1));
7586 }
7587
7588 /* Return true if this is a load or store quad operation. This function does
7589 not handle the atomic quad memory instructions. */
7590
7591 bool
7592 quad_load_store_p (rtx op0, rtx op1)
7593 {
7594 bool ret;
7595
7596 if (!TARGET_QUAD_MEMORY)
7597 ret = false;
7598
7599 else if (REG_P (op0) && MEM_P (op1))
7600 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7601 && quad_memory_operand (op1, GET_MODE (op1))
7602 && !reg_overlap_mentioned_p (op0, op1));
7603
7604 else if (MEM_P (op0) && REG_P (op1))
7605 ret = (quad_memory_operand (op0, GET_MODE (op0))
7606 && quad_int_reg_operand (op1, GET_MODE (op1)));
7607
7608 else
7609 ret = false;
7610
7611 if (TARGET_DEBUG_ADDR)
7612 {
7613 fprintf (stderr, "\n========== quad_load_store_p, return %s\n",
7614 ret ? "true" : "false");
7615 debug_rtx (gen_rtx_SET (op0, op1));
7616 }
7617
7618 return ret;
7619 }
7620
7621 /* Given an address, return a constant offset term if one exists. */
7622
7623 static rtx
7624 address_offset (rtx op)
7625 {
7626 if (GET_CODE (op) == PRE_INC
7627 || GET_CODE (op) == PRE_DEC)
7628 op = XEXP (op, 0);
7629 else if (GET_CODE (op) == PRE_MODIFY
7630 || GET_CODE (op) == LO_SUM)
7631 op = XEXP (op, 1);
7632
7633 if (GET_CODE (op) == CONST)
7634 op = XEXP (op, 0);
7635
7636 if (GET_CODE (op) == PLUS)
7637 op = XEXP (op, 1);
7638
7639 if (CONST_INT_P (op))
7640 return op;
7641
7642 return NULL_RTX;
7643 }
7644
7645 /* Return true if the MEM operand is a memory operand suitable for use
7646 with a (full width, possibly multiple) gpr load/store. On
7647 powerpc64 this means the offset must be divisible by 4.
7648 Implements 'Y' constraint.
7649
7650 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7651 a constraint function we know the operand has satisfied a suitable
7652 memory predicate. Also accept some odd rtl generated by reload
7653 (see rs6000_legitimize_reload_address for various forms). It is
7654 important that reload rtl be accepted by appropriate constraints
7655 but not by the operand predicate.
7656
7657 Offsetting a lo_sum should not be allowed, except where we know by
7658 alignment that a 32k boundary is not crossed, but see the ???
7659 comment in rs6000_legitimize_reload_address. Note that by
7660 "offsetting" here we mean a further offset to access parts of the
7661 MEM. It's fine to have a lo_sum where the inner address is offset
7662 from a sym, since the same sym+offset will appear in the high part
7663 of the address calculation. */
7664
7665 bool
7666 mem_operand_gpr (rtx op, machine_mode mode)
7667 {
7668 unsigned HOST_WIDE_INT offset;
7669 int extra;
7670 rtx addr = XEXP (op, 0);
7671
7672 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7673 if (TARGET_UPDATE
7674 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7675 && mode_supports_pre_incdec_p (mode)
7676 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7677 return true;
7678
7679 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7680 if (!rs6000_offsettable_memref_p (op, mode, false))
7681 return false;
7682
7683 op = address_offset (addr);
7684 if (op == NULL_RTX)
7685 return true;
7686
7687 offset = INTVAL (op);
7688 if (TARGET_POWERPC64 && (offset & 3) != 0)
7689 return false;
7690
7691 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7692 if (extra < 0)
7693 extra = 0;
7694
7695 if (GET_CODE (addr) == LO_SUM)
7696 /* For lo_sum addresses, we must allow any offset except one that
7697 causes a wrap, so test only the low 16 bits. */
7698 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7699
7700 return offset + 0x8000 < 0x10000u - extra;
7701 }
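/* A worked example of the final test: for TImode on powerpc64, EXTRA is
   GET_MODE_SIZE (TImode) - UNITS_PER_WORD = 8, so together with the
   multiple-of-4 requirement the largest accepted offset is 32756; at
   offset 32760 the second doubleword of the access would need
   displacement 32768, which no longer fits the signed 16-bit field.  */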
7702
7703 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7704 enforce an offset divisible by 4 even for 32-bit. */
7705
7706 bool
7707 mem_operand_ds_form (rtx op, machine_mode mode)
7708 {
7709 unsigned HOST_WIDE_INT offset;
7710 int extra;
7711 rtx addr = XEXP (op, 0);
7712
7713 if (!offsettable_address_p (false, mode, addr))
7714 return false;
7715
7716 op = address_offset (addr);
7717 if (op == NULL_RTX)
7718 return true;
7719
7720 offset = INTVAL (op);
7721 if ((offset & 3) != 0)
7722 return false;
7723
7724 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7725 if (extra < 0)
7726 extra = 0;
7727
7728 if (GET_CODE (addr) == LO_SUM)
7729 /* For lo_sum addresses, we must allow any offset except one that
7730 causes a wrap, so test only the low 16 bits. */
7731 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7732
7733 return offset + 0x8000 < 0x10000u - extra;
7734 }
7735 \f
7736 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7737
7738 static bool
7739 reg_offset_addressing_ok_p (machine_mode mode)
7740 {
7741 switch (mode)
7742 {
7743 case E_V16QImode:
7744 case E_V8HImode:
7745 case E_V4SFmode:
7746 case E_V4SImode:
7747 case E_V2DFmode:
7748 case E_V2DImode:
7749 case E_V1TImode:
7750 case E_TImode:
7751 case E_TFmode:
7752 case E_KFmode:
7753 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7754 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7755 a vector mode, if we want to use the VSX registers to move it around,
7756 we need to restrict ourselves to reg+reg addressing. Similarly for
7757 IEEE 128-bit floating point that is passed in a single vector
7758 register. */
7759 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7760 return mode_supports_dq_form (mode);
7761 break;
7762
7763 case E_SDmode:
7764 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7765 addressing for the LFIWZX and STFIWX instructions. */
7766 if (TARGET_NO_SDMODE_STACK)
7767 return false;
7768 break;
7769
7770 default:
7771 break;
7772 }
7773
7774 return true;
7775 }
7776
7777 static bool
7778 virtual_stack_registers_memory_p (rtx op)
7779 {
7780 int regnum;
7781
7782 if (GET_CODE (op) == REG)
7783 regnum = REGNO (op);
7784
7785 else if (GET_CODE (op) == PLUS
7786 && GET_CODE (XEXP (op, 0)) == REG
7787 && GET_CODE (XEXP (op, 1)) == CONST_INT)
7788 regnum = REGNO (XEXP (op, 0));
7789
7790 else
7791 return false;
7792
7793 return (regnum >= FIRST_VIRTUAL_REGISTER
7794 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7795 }
7796
7797 /* Return true if a MODE-sized memory access to OP plus OFFSET
7798 is known not to straddle a 32k boundary. This function is used
7799 to determine whether -mcmodel=medium code can use TOC pointer
7800 relative addressing for OP. This means the alignment of the TOC
7801 pointer must also be taken into account, and unfortunately that is
7802 only 8 bytes. */
7803
7804 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7805 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7806 #endif
7807
7808 static bool
7809 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7810 machine_mode mode)
7811 {
7812 tree decl;
7813 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7814
7815 if (GET_CODE (op) != SYMBOL_REF)
7816 return false;
7817
7818 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7819 SYMBOL_REF. */
7820 if (mode_supports_dq_form (mode))
7821 return false;
7822
7823 dsize = GET_MODE_SIZE (mode);
7824 decl = SYMBOL_REF_DECL (op);
7825 if (!decl)
7826 {
7827 if (dsize == 0)
7828 return false;
7829
7830 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7831 replacing memory addresses with an anchor plus offset. We
7832 could find the decl by rummaging around in the block->objects
7833 VEC for the given offset but that seems like too much work. */
7834 dalign = BITS_PER_UNIT;
7835 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7836 && SYMBOL_REF_ANCHOR_P (op)
7837 && SYMBOL_REF_BLOCK (op) != NULL)
7838 {
7839 struct object_block *block = SYMBOL_REF_BLOCK (op);
7840
7841 dalign = block->alignment;
7842 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7843 }
7844 else if (CONSTANT_POOL_ADDRESS_P (op))
7845 {
7846 /* It would be nice to have get_pool_align (). */
7847 machine_mode cmode = get_pool_mode (op);
7848
7849 dalign = GET_MODE_ALIGNMENT (cmode);
7850 }
7851 }
7852 else if (DECL_P (decl))
7853 {
7854 dalign = DECL_ALIGN (decl);
7855
7856 if (dsize == 0)
7857 {
7858 /* Allow BLKmode when the entire object is known to not
7859 cross a 32k boundary. */
7860 if (!DECL_SIZE_UNIT (decl))
7861 return false;
7862
7863 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7864 return false;
7865
7866 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7867 if (dsize > 32768)
7868 return false;
7869
7870 dalign /= BITS_PER_UNIT;
7871 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7872 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7873 return dalign >= dsize;
7874 }
7875 }
7876 else
7877 gcc_unreachable ();
7878
7879 /* Find how many bits of the alignment we know for this access. */
7880 dalign /= BITS_PER_UNIT;
7881 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7882 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7883 mask = dalign - 1;
7884 lsb = offset & -offset;
7885 mask &= lsb - 1;
7886 dalign = mask + 1;
7887
7888 return dalign >= dsize;
7889 }
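/* To illustrate the bit trickery above: with DALIGN = 8 (MASK = 7) and
   OFFSET = 4, LSB = 4 and MASK becomes 7 & 3 = 3, so the known
   alignment drops to 4 bytes and an 8-byte access (DSIZE = 8) is
   rejected; with OFFSET = 16, LSB = 16, MASK stays 7, the known
   alignment remains 8 and the access is accepted.  */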
7890
7891 static bool
7892 constant_pool_expr_p (rtx op)
7893 {
7894 rtx base, offset;
7895
7896 split_const (op, &base, &offset);
7897 return (GET_CODE (base) == SYMBOL_REF
7898 && CONSTANT_POOL_ADDRESS_P (base)
7899 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7900 }
7901
7902 /* These are only used to pass through from print_operand/print_operand_address
7903 to rs6000_output_addr_const_extra over the intervening function
7904 output_addr_const which is not target code. */
7905 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7906
7907 /* Return true if OP is a toc pointer relative address (the output
7908 of create_TOC_reference). If STRICT, do not match non-split
7909 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7910 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7911 TOCREL_OFFSET_RET respectively. */
7912
7913 bool
7914 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7915 const_rtx *tocrel_offset_ret)
7916 {
7917 if (!TARGET_TOC)
7918 return false;
7919
7920 if (TARGET_CMODEL != CMODEL_SMALL)
7921 {
7922 /* When strict ensure we have everything tidy. */
7923 if (strict
7924 && !(GET_CODE (op) == LO_SUM
7925 && REG_P (XEXP (op, 0))
7926 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7927 return false;
7928
7929 /* When not strict, allow non-split TOC addresses and also allow
7930 (lo_sum (high ..)) TOC addresses created during reload. */
7931 if (GET_CODE (op) == LO_SUM)
7932 op = XEXP (op, 1);
7933 }
7934
7935 const_rtx tocrel_base = op;
7936 const_rtx tocrel_offset = const0_rtx;
7937
7938 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7939 {
7940 tocrel_base = XEXP (op, 0);
7941 tocrel_offset = XEXP (op, 1);
7942 }
7943
7944 if (tocrel_base_ret)
7945 *tocrel_base_ret = tocrel_base;
7946 if (tocrel_offset_ret)
7947 *tocrel_offset_ret = tocrel_offset;
7948
7949 return (GET_CODE (tocrel_base) == UNSPEC
7950 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7951 && REG_P (XVECEXP (tocrel_base, 0, 1))
7952 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7953 }
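/* The matched form, as produced by create_TOC_reference, looks like

	(plus (unspec [(symbol_ref ("x")) (reg 2)] UNSPEC_TOCREL)
	      (const_int 8))

   where the const_int term is optional and is returned through
   TOCREL_OFFSET_RET; for -mcmodel=medium/large the whole expression
   may additionally be wrapped in a (lo_sum (reg) ...).  */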
7954
7955 /* Return true if X is a constant pool address, and also for cmodel=medium
7956 if X is a toc-relative address known to be offsettable within MODE. */
7957
7958 bool
7959 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7960 bool strict)
7961 {
7962 const_rtx tocrel_base, tocrel_offset;
7963 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7964 && (TARGET_CMODEL != CMODEL_MEDIUM
7965 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7966 || mode == QImode
7967 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7968 INTVAL (tocrel_offset), mode)));
7969 }
7970
7971 static bool
7972 legitimate_small_data_p (machine_mode mode, rtx x)
7973 {
7974 return (DEFAULT_ABI == ABI_V4
7975 && !flag_pic && !TARGET_TOC
7976 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
7977 && small_data_operand (x, mode));
7978 }
7979
7980 bool
7981 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7982 bool strict, bool worst_case)
7983 {
7984 unsigned HOST_WIDE_INT offset;
7985 unsigned int extra;
7986
7987 if (GET_CODE (x) != PLUS)
7988 return false;
7989 if (!REG_P (XEXP (x, 0)))
7990 return false;
7991 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7992 return false;
7993 if (mode_supports_dq_form (mode))
7994 return quad_address_p (x, mode, strict);
7995 if (!reg_offset_addressing_ok_p (mode))
7996 return virtual_stack_registers_memory_p (x);
7997 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7998 return true;
7999 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8000 return false;
8001
8002 offset = INTVAL (XEXP (x, 1));
8003 extra = 0;
8004 switch (mode)
8005 {
8006 case E_DFmode:
8007 case E_DDmode:
8008 case E_DImode:
8009 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8010 addressing. */
8011 if (VECTOR_MEM_VSX_P (mode))
8012 return false;
8013
8014 if (!worst_case)
8015 break;
8016 if (!TARGET_POWERPC64)
8017 extra = 4;
8018 else if (offset & 3)
8019 return false;
8020 break;
8021
8022 case E_TFmode:
8023 case E_IFmode:
8024 case E_KFmode:
8025 case E_TDmode:
8026 case E_TImode:
8027 case E_PTImode:
8028 extra = 8;
8029 if (!worst_case)
8030 break;
8031 if (!TARGET_POWERPC64)
8032 extra = 12;
8033 else if (offset & 3)
8034 return false;
8035 break;
8036
8037 default:
8038 break;
8039 }
8040
8041 offset += 0x8000;
8042 return offset < 0x10000 - extra;
8043 }
8044
8045 bool
8046 legitimate_indexed_address_p (rtx x, int strict)
8047 {
8048 rtx op0, op1;
8049
8050 if (GET_CODE (x) != PLUS)
8051 return false;
8052
8053 op0 = XEXP (x, 0);
8054 op1 = XEXP (x, 1);
8055
8056 return (REG_P (op0) && REG_P (op1)
8057 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8058 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8059 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8060 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8061 }
8062
8063 bool
8064 avoiding_indexed_address_p (machine_mode mode)
8065 {
8066 /* Avoid indexed addressing for modes that have non-indexed
8067 load/store instruction forms. */
8068 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8069 }
8070
8071 bool
8072 legitimate_indirect_address_p (rtx x, int strict)
8073 {
8074 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8075 }
8076
8077 bool
8078 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8079 {
8080 if (!TARGET_MACHO || !flag_pic
8081 || mode != SImode || GET_CODE (x) != MEM)
8082 return false;
8083 x = XEXP (x, 0);
8084
8085 if (GET_CODE (x) != LO_SUM)
8086 return false;
8087 if (GET_CODE (XEXP (x, 0)) != REG)
8088 return false;
8089 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8090 return false;
8091 x = XEXP (x, 1);
8092
8093 return CONSTANT_P (x);
8094 }
8095
8096 static bool
8097 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8098 {
8099 if (GET_CODE (x) != LO_SUM)
8100 return false;
8101 if (GET_CODE (XEXP (x, 0)) != REG)
8102 return false;
8103 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8104 return false;
8105 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
8106 if (mode_supports_dq_form (mode))
8107 return false;
8108 x = XEXP (x, 1);
8109
8110 if (TARGET_ELF || TARGET_MACHO)
8111 {
8112 bool large_toc_ok;
8113
8114 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8115 return false;
8116 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS; that hook is normally
8117 called via push_reload from the old reload pass code.
8118 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
8119 valid although this function says the opposite. In most cases LRA
8120 can generate correct code for address reloads through various
8121 transformations, but it cannot manage some LO_SUM cases. So we
8122 need code here, analogous to that in rs6000_legitimize_reload_address
8123 for LO_SUM, saying that such addresses are still valid. */
8124 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8125 && small_toc_ref (x, VOIDmode));
8126 if (TARGET_TOC && ! large_toc_ok)
8127 return false;
8128 if (GET_MODE_NUNITS (mode) != 1)
8129 return false;
8130 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8131 && !(/* ??? Assume floating point reg based on mode? */
8132 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8133 return false;
8134
8135 return CONSTANT_P (x) || large_toc_ok;
8136 }
8137
8138 return false;
8139 }
8140
8141
8142 /* Try machine-dependent ways of modifying an illegitimate address
8143 to be legitimate. If we find one, return the new, valid address.
8144 This is used from only one place: `memory_address' in explow.c.
8145
8146 OLDX is the address as it was before break_out_memory_refs was
8147 called. In some cases it is useful to look at this to decide what
8148 needs to be done.
8149
8150 It is always safe for this function to do nothing. It exists to
8151 recognize opportunities to optimize the output.
8152
8153 On RS/6000, first check for the sum of a register with a constant
8154 integer that is out of range. If so, generate code to add the
8155 constant with the low-order 16 bits masked to the register and force
8156 this result into another register (this can be done with `cau').
8157 Then generate an address of REG+(CONST&0xffff), allowing for the
8158 possibility of bit 16 being a one.
8159
8160 Then check for the sum of a register and something not constant, try to
8161 load the other things into a register and return the sum. */
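/* For example, legitimizing (plus (reg 3) (const_int 0x12345)) below
   splits the constant into high_int = 0x10000 and low_int = 0x2345
   (the sign-extended low 16 bits), so the high part is added with a
   single addis and 0x2345 remains as a valid 16-bit displacement.  */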
8162
8163 static rtx
8164 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8165 machine_mode mode)
8166 {
8167 unsigned int extra;
8168
8169 if (!reg_offset_addressing_ok_p (mode)
8170 || mode_supports_dq_form (mode))
8171 {
8172 if (virtual_stack_registers_memory_p (x))
8173 return x;
8174
8175 /* In theory we should not be seeing addresses of the form reg+0,
8176 but just in case it is generated, optimize it away. */
8177 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8178 return force_reg (Pmode, XEXP (x, 0));
8179
8180 /* For TImode with load/store quad, restrict addresses to just a single
8181 pointer, so it works with both GPRs and VSX registers. */
8182 /* Make sure both operands are registers. */
8183 else if (GET_CODE (x) == PLUS
8184 && (mode != TImode || !TARGET_VSX))
8185 return gen_rtx_PLUS (Pmode,
8186 force_reg (Pmode, XEXP (x, 0)),
8187 force_reg (Pmode, XEXP (x, 1)));
8188 else
8189 return force_reg (Pmode, x);
8190 }
8191 if (GET_CODE (x) == SYMBOL_REF)
8192 {
8193 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8194 if (model != 0)
8195 return rs6000_legitimize_tls_address (x, model);
8196 }
8197
8198 extra = 0;
8199 switch (mode)
8200 {
8201 case E_TFmode:
8202 case E_TDmode:
8203 case E_TImode:
8204 case E_PTImode:
8205 case E_IFmode:
8206 case E_KFmode:
8207 /* As in legitimate_offset_address_p we do not assume
8208 worst-case. The mode here is just a hint as to the registers
8209 used. A TImode is usually in gprs, but may actually be in
8210 fprs. Leave worst-case scenario for reload to handle via
8211 insn constraints. PTImode is only GPRs. */
8212 extra = 8;
8213 break;
8214 default:
8215 break;
8216 }
8217
8218 if (GET_CODE (x) == PLUS
8219 && GET_CODE (XEXP (x, 0)) == REG
8220 && GET_CODE (XEXP (x, 1)) == CONST_INT
8221 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8222 >= 0x10000 - extra))
8223 {
8224 HOST_WIDE_INT high_int, low_int;
8225 rtx sum;
8226 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8227 if (low_int >= 0x8000 - extra)
8228 low_int = 0;
8229 high_int = INTVAL (XEXP (x, 1)) - low_int;
8230 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8231 GEN_INT (high_int)), 0);
8232 return plus_constant (Pmode, sum, low_int);
8233 }
8234 else if (GET_CODE (x) == PLUS
8235 && GET_CODE (XEXP (x, 0)) == REG
8236 && GET_CODE (XEXP (x, 1)) != CONST_INT
8237 && GET_MODE_NUNITS (mode) == 1
8238 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8239 || (/* ??? Assume floating point reg based on mode? */
8240 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8241 && !avoiding_indexed_address_p (mode))
8242 {
8243 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8244 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8245 }
8246 else if ((TARGET_ELF
8247 #if TARGET_MACHO
8248 || !MACHO_DYNAMIC_NO_PIC_P
8249 #endif
8250 )
8251 && TARGET_32BIT
8252 && TARGET_NO_TOC
8253 && ! flag_pic
8254 && GET_CODE (x) != CONST_INT
8255 && GET_CODE (x) != CONST_WIDE_INT
8256 && GET_CODE (x) != CONST_DOUBLE
8257 && CONSTANT_P (x)
8258 && GET_MODE_NUNITS (mode) == 1
8259 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8260 || (/* ??? Assume floating point reg based on mode? */
8261 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8262 {
8263 rtx reg = gen_reg_rtx (Pmode);
8264 if (TARGET_ELF)
8265 emit_insn (gen_elf_high (reg, x));
8266 else
8267 emit_insn (gen_macho_high (reg, x));
8268 return gen_rtx_LO_SUM (Pmode, reg, x);
8269 }
8270 else if (TARGET_TOC
8271 && GET_CODE (x) == SYMBOL_REF
8272 && constant_pool_expr_p (x)
8273 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8274 return create_TOC_reference (x, NULL_RTX);
8275 else
8276 return x;
8277 }
8278
8279 /* Debug version of rs6000_legitimize_address. */
8280 static rtx
8281 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8282 {
8283 rtx ret;
8284 rtx_insn *insns;
8285
8286 start_sequence ();
8287 ret = rs6000_legitimize_address (x, oldx, mode);
8288 insns = get_insns ();
8289 end_sequence ();
8290
8291 if (ret != x)
8292 {
8293 fprintf (stderr,
8294 "\nrs6000_legitimize_address: mode %s, old code %s, "
8295 "new code %s, modified\n",
8296 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8297 GET_RTX_NAME (GET_CODE (ret)));
8298
8299 fprintf (stderr, "Original address:\n");
8300 debug_rtx (x);
8301
8302 fprintf (stderr, "oldx:\n");
8303 debug_rtx (oldx);
8304
8305 fprintf (stderr, "New address:\n");
8306 debug_rtx (ret);
8307
8308 if (insns)
8309 {
8310 fprintf (stderr, "Insns added:\n");
8311 debug_rtx_list (insns, 20);
8312 }
8313 }
8314 else
8315 {
8316 fprintf (stderr,
8317 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8318 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8319
8320 debug_rtx (x);
8321 }
8322
8323 if (insns)
8324 emit_insn (insns);
8325
8326 return ret;
8327 }
8328
8329 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8330 We need to emit DTP-relative relocations. */
8331
8332 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8333 static void
8334 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8335 {
8336 switch (size)
8337 {
8338 case 4:
8339 fputs ("\t.long\t", file);
8340 break;
8341 case 8:
8342 fputs (DOUBLE_INT_ASM_OP, file);
8343 break;
8344 default:
8345 gcc_unreachable ();
8346 }
8347 output_addr_const (file, x);
8348 if (TARGET_ELF)
8349 fputs ("@dtprel+0x8000", file);
8350 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8351 {
8352 switch (SYMBOL_REF_TLS_MODEL (x))
8353 {
8354 case 0:
8355 break;
8356 case TLS_MODEL_LOCAL_EXEC:
8357 fputs ("@le", file);
8358 break;
8359 case TLS_MODEL_INITIAL_EXEC:
8360 fputs ("@ie", file);
8361 break;
8362 case TLS_MODEL_GLOBAL_DYNAMIC:
8363 case TLS_MODEL_LOCAL_DYNAMIC:
8364 fputs ("@m", file);
8365 break;
8366 default:
8367 gcc_unreachable ();
8368 }
8369 }
8370 }
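/* E.g. on 64-bit ELF targets, where DOUBLE_INT_ASM_OP is "\t.quad\t",
   a SIZE of 8 for a thread-local symbol x emits

	.quad	x@dtprel+0x8000

   while XCOFF instead appends @le, @ie or @m according to the symbol's
   TLS model.  */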
8371
8372 /* Return true if X is a symbol that refers to real (rather than emulated)
8373 TLS. */
8374
8375 static bool
8376 rs6000_real_tls_symbol_ref_p (rtx x)
8377 {
8378 return (GET_CODE (x) == SYMBOL_REF
8379 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8380 }
8381
8382 /* In the name of slightly smaller debug output, and to cater to
8383 general assembler lossage, recognize various UNSPEC sequences
8384 and turn them back into a direct symbol reference. */
8385
8386 static rtx
8387 rs6000_delegitimize_address (rtx orig_x)
8388 {
8389 rtx x, y, offset;
8390
8391 orig_x = delegitimize_mem_from_attrs (orig_x);
8392 x = orig_x;
8393 if (MEM_P (x))
8394 x = XEXP (x, 0);
8395
8396 y = x;
8397 if (TARGET_CMODEL != CMODEL_SMALL
8398 && GET_CODE (y) == LO_SUM)
8399 y = XEXP (y, 1);
8400
8401 offset = NULL_RTX;
8402 if (GET_CODE (y) == PLUS
8403 && GET_MODE (y) == Pmode
8404 && CONST_INT_P (XEXP (y, 1)))
8405 {
8406 offset = XEXP (y, 1);
8407 y = XEXP (y, 0);
8408 }
8409
8410 if (GET_CODE (y) == UNSPEC
8411 && XINT (y, 1) == UNSPEC_TOCREL)
8412 {
8413 y = XVECEXP (y, 0, 0);
8414
8415 #ifdef HAVE_AS_TLS
8416 /* Do not associate thread-local symbols with the original
8417 constant pool symbol. */
8418 if (TARGET_XCOFF
8419 && GET_CODE (y) == SYMBOL_REF
8420 && CONSTANT_POOL_ADDRESS_P (y)
8421 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8422 return orig_x;
8423 #endif
8424
8425 if (offset != NULL_RTX)
8426 y = gen_rtx_PLUS (Pmode, y, offset);
8427 if (!MEM_P (orig_x))
8428 return y;
8429 else
8430 return replace_equiv_address_nv (orig_x, y);
8431 }
8432
8433 if (TARGET_MACHO
8434 && GET_CODE (orig_x) == LO_SUM
8435 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8436 {
8437 y = XEXP (XEXP (orig_x, 1), 0);
8438 if (GET_CODE (y) == UNSPEC
8439 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8440 return XVECEXP (y, 0, 0);
8441 }
8442
8443 return orig_x;
8444 }
8445
8446 /* Return true if X shouldn't be emitted into the debug info.
8447 The linker doesn't like .toc section references from
8448 .debug_* sections, so reject .toc section symbols. */
8449
8450 static bool
8451 rs6000_const_not_ok_for_debug_p (rtx x)
8452 {
8453 if (GET_CODE (x) == UNSPEC)
8454 return true;
8455 if (GET_CODE (x) == SYMBOL_REF
8456 && CONSTANT_POOL_ADDRESS_P (x))
8457 {
8458 rtx c = get_pool_constant (x);
8459 machine_mode cmode = get_pool_mode (x);
8460 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8461 return true;
8462 }
8463
8464 return false;
8465 }
8466
8467 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8468
8469 static bool
8470 rs6000_legitimate_combined_insn (rtx_insn *insn)
8471 {
8472 int icode = INSN_CODE (insn);
8473
8474 /* Reject creating doloop insns. Combine should not be allowed
8475 to create these for a number of reasons:
8476 1) In a nested loop, if combine creates one of these in an
8477 outer loop and the register allocator happens to allocate ctr
8478 to the outer loop insn, then the inner loop can't use ctr.
8479 Inner loops ought to be more highly optimized.
8480 2) Combine often wants to create one of these from what was
8481 originally a three insn sequence, first combining the three
8482 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8483 allocated ctr, the splitter takes us back to the three insn
8484 sequence. It's better to stop combine at the two insn
8485 sequence.
8486 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8487 insns, the register allocator sometimes uses floating point
8488 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8489 jump insn and output reloads are not implemented for jumps,
8490 the ctrsi/ctrdi splitters need to handle all possible cases.
8491 That's a pain, and it gets to be seriously difficult when a
8492 splitter that runs after reload needs memory to transfer from
8493 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8494 for the difficult case. It's better to not create problems
8495 in the first place. */
8496 if (icode != CODE_FOR_nothing
8497 && (icode == CODE_FOR_bdz_si
8498 || icode == CODE_FOR_bdz_di
8499 || icode == CODE_FOR_bdnz_si
8500 || icode == CODE_FOR_bdnz_di
8501 || icode == CODE_FOR_bdztf_si
8502 || icode == CODE_FOR_bdztf_di
8503 || icode == CODE_FOR_bdnztf_si
8504 || icode == CODE_FOR_bdnztf_di))
8505 return false;
8506
8507 return true;
8508 }
8509
8510 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8511
8512 static GTY(()) rtx rs6000_tls_symbol;
8513 static rtx
8514 rs6000_tls_get_addr (void)
8515 {
8516 if (!rs6000_tls_symbol)
8517 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8518
8519 return rs6000_tls_symbol;
8520 }
8521
8522 /* Construct the SYMBOL_REF for TLS GOT references. */
8523
8524 static GTY(()) rtx rs6000_got_symbol;
8525 static rtx
8526 rs6000_got_sym (void)
8527 {
8528 if (!rs6000_got_symbol)
8529 {
8530 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8531 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8532 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8533 }
8534
8535 return rs6000_got_symbol;
8536 }
8537
8538 /* AIX Thread-Local Address support. */
8539
8540 static rtx
8541 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8542 {
8543 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8544 const char *name;
8545 char *tlsname;
8546
8547 name = XSTR (addr, 0);
8548 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8549 or the symbol will be in TLS private data section. */
8550 if (name[strlen (name) - 1] != ']'
8551 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8552 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8553 {
8554 tlsname = XALLOCAVEC (char, strlen (name) + 5); /* Room for "[UL]"/"[TL]" plus NUL. */
8555 strcpy (tlsname, name);
8556 strcat (tlsname,
8557 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8558 tlsaddr = copy_rtx (addr);
8559 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8560 }
8561 else
8562 tlsaddr = addr;
8563
8564 /* Place addr into TOC constant pool. */
8565 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8566
8567 /* Output the TOC entry and create the MEM referencing the value. */
8568 if (constant_pool_expr_p (XEXP (sym, 0))
8569 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8570 {
8571 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8572 mem = gen_const_mem (Pmode, tocref);
8573 set_mem_alias_set (mem, get_TOC_alias_set ());
8574 }
8575 else
8576 return sym;
8577
8578 /* Use global-dynamic for local-dynamic. */
8579 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8580 || model == TLS_MODEL_LOCAL_DYNAMIC)
8581 {
8582 /* Create new TOC reference for @m symbol. */
8583 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8584 tlsname = XALLOCAVEC (char, strlen (name) + 2); /* "*LCM" is one longer than the prefix it replaces. */
8585 strcpy (tlsname, "*LCM");
8586 strcat (tlsname, name + 3);
8587 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8588 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8589 tocref = create_TOC_reference (modaddr, NULL_RTX);
8590 rtx modmem = gen_const_mem (Pmode, tocref);
8591 set_mem_alias_set (modmem, get_TOC_alias_set ());
8592
8593 rtx modreg = gen_reg_rtx (Pmode);
8594 emit_insn (gen_rtx_SET (modreg, modmem));
8595
8596 tmpreg = gen_reg_rtx (Pmode);
8597 emit_insn (gen_rtx_SET (tmpreg, mem));
8598
8599 dest = gen_reg_rtx (Pmode);
8600 if (TARGET_32BIT)
8601 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8602 else
8603 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8604 return dest;
8605 }
8606 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8607 else if (TARGET_32BIT)
8608 {
8609 tlsreg = gen_reg_rtx (SImode);
8610 emit_insn (gen_tls_get_tpointer (tlsreg));
8611 }
8612 else
8613 tlsreg = gen_rtx_REG (DImode, 13);
8614
8615 /* Load the TOC value into temporary register. */
8616 tmpreg = gen_reg_rtx (Pmode);
8617 emit_insn (gen_rtx_SET (tmpreg, mem));
8618 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8619 gen_rtx_MINUS (Pmode, addr, tlsreg));
8620
8621 /* Add TOC symbol value to TLS pointer. */
8622 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8623
8624 return dest;
8625 }
8626
8627 /* Mess with a call, to make it look like the tls_gdld insns when
8628 !TARGET_TLS_MARKERS. These insns have an extra unspec to
8629 differentiate them from standard calls, because they need to emit
8630 the arg setup insns as well as the actual call. That keeps the
8631 arg setup insns immediately adjacent to the branch and link. */
8632
8633 static void
8634 edit_tls_call_insn (rtx arg)
8635 {
8636 rtx call_insn = last_call_insn ();
8637 if (!TARGET_TLS_MARKERS)
8638 {
8639 rtx patt = PATTERN (call_insn);
8640 gcc_assert (GET_CODE (patt) == PARALLEL);
8641 rtvec orig = XVEC (patt, 0);
8642 rtvec v = rtvec_alloc (GET_NUM_ELEM (orig) + 1);
8643 gcc_assert (GET_NUM_ELEM (orig) > 0);
8644 /* The (set (..) (call (mem ..))). */
8645 RTVEC_ELT (v, 0) = RTVEC_ELT (orig, 0);
8646 /* The extra unspec. */
8647 RTVEC_ELT (v, 1) = arg;
8648 /* All other assorted call pattern pieces. */
8649 for (int i = 1; i < GET_NUM_ELEM (orig); i++)
8650 RTVEC_ELT (v, i + 1) = RTVEC_ELT (orig, i);
8651 XVEC (patt, 0) = v;
8652 }
8653 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
8654 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
8655 pic_offset_table_rtx);
8656 }
8657
8658 /* Passes the tls arg value for global dynamic and local dynamic
8659 emit_library_call_value in rs6000_legitimize_tls_address to
8660 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8661 marker relocs put on __tls_get_addr calls. */
8662 static rtx global_tlsarg;
8663
8664 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8665 this (thread-local) address. */
8666
8667 static rtx
8668 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8669 {
8670 rtx dest, insn;
8671
8672 if (TARGET_XCOFF)
8673 return rs6000_legitimize_tls_address_aix (addr, model);
8674
8675 dest = gen_reg_rtx (Pmode);
8676 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8677 {
8678 rtx tlsreg;
8679
8680 if (TARGET_64BIT)
8681 {
8682 tlsreg = gen_rtx_REG (Pmode, 13);
8683 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8684 }
8685 else
8686 {
8687 tlsreg = gen_rtx_REG (Pmode, 2);
8688 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8689 }
8690 emit_insn (insn);
8691 }
8692 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8693 {
8694 rtx tlsreg, tmp;
8695
8696 tmp = gen_reg_rtx (Pmode);
8697 if (TARGET_64BIT)
8698 {
8699 tlsreg = gen_rtx_REG (Pmode, 13);
8700 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8701 }
8702 else
8703 {
8704 tlsreg = gen_rtx_REG (Pmode, 2);
8705 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8706 }
8707 emit_insn (insn);
8708 if (TARGET_64BIT)
8709 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8710 else
8711 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8712 emit_insn (insn);
8713 }
8714 else
8715 {
8716 rtx got, tga, tmp1, tmp2;
8717
8718 /* We currently use relocations like @got@tlsgd for tls, which
8719 means the linker will handle allocation of tls entries, placing
8720 them in the .got section. So use a pointer to the .got section,
8721 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8722 or to secondary GOT sections used by 32-bit -fPIC. */
8723 if (TARGET_64BIT)
8724 got = gen_rtx_REG (Pmode, 2);
8725 else
8726 {
8727 if (flag_pic == 1)
8728 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8729 else
8730 {
8731 rtx gsym = rs6000_got_sym ();
8732 got = gen_reg_rtx (Pmode);
8733 if (flag_pic == 0)
8734 rs6000_emit_move (got, gsym, Pmode);
8735 else
8736 {
8737 rtx mem, lab;
8738
8739 tmp1 = gen_reg_rtx (Pmode);
8740 tmp2 = gen_reg_rtx (Pmode);
8741 mem = gen_const_mem (Pmode, tmp1);
8742 lab = gen_label_rtx ();
8743 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8744 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8745 if (TARGET_LINK_STACK)
8746 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8747 emit_move_insn (tmp2, mem);
8748 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8749 set_unique_reg_note (last, REG_EQUAL, gsym);
8750 }
8751 }
8752 }
8753
8754 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8755 {
8756 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8757 UNSPEC_TLSGD);
8758 global_tlsarg = arg;
8759 rtx argreg = const0_rtx;
8760 if (TARGET_TLS_MARKERS)
8761 {
8762 argreg = gen_rtx_REG (Pmode, 3);
8763 emit_insn (gen_rtx_SET (argreg, arg));
8764 }
8765
8766 tga = rs6000_tls_get_addr ();
8767 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8768 argreg, Pmode);
8769 global_tlsarg = NULL_RTX;
8770
8771 edit_tls_call_insn (arg);
8772 }
8773 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8774 {
8775 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got),
8776 UNSPEC_TLSLD);
8777 global_tlsarg = arg;
8778 rtx argreg = const0_rtx;
8779 if (TARGET_TLS_MARKERS)
8780 {
8781 argreg = gen_rtx_REG (Pmode, 3);
8782 emit_insn (gen_rtx_SET (argreg, arg));
8783 }
8784
8785 tga = rs6000_tls_get_addr ();
8786 tmp1 = gen_reg_rtx (Pmode);
8787 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8788 argreg, Pmode);
8789 global_tlsarg = NULL_RTX;
8790
8791 edit_tls_call_insn (arg);
8792
8793 if (rs6000_tls_size == 16)
8794 {
8795 if (TARGET_64BIT)
8796 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8797 else
8798 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8799 }
8800 else if (rs6000_tls_size == 32)
8801 {
8802 tmp2 = gen_reg_rtx (Pmode);
8803 if (TARGET_64BIT)
8804 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8805 else
8806 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8807 emit_insn (insn);
8808 if (TARGET_64BIT)
8809 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8810 else
8811 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8812 }
8813 else
8814 {
8815 tmp2 = gen_reg_rtx (Pmode);
8816 if (TARGET_64BIT)
8817 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8818 else
8819 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8820 emit_insn (insn);
8821 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8822 }
8823 emit_insn (insn);
8824 }
8825 else
8826 {
8827 /* IE, or 64-bit offset LE. */
8828 tmp2 = gen_reg_rtx (Pmode);
8829 if (TARGET_64BIT)
8830 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8831 else
8832 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8833 emit_insn (insn);
8834 if (TARGET_64BIT)
8835 insn = gen_tls_tls_64 (dest, tmp2, addr);
8836 else
8837 insn = gen_tls_tls_32 (dest, tmp2, addr);
8838 emit_insn (insn);
8839 }
8840 }
8841
8842 return dest;
8843 }
8844
8845 /* Only create the global variable for the stack protect guard if we are using
8846 the global flavor of that guard. */
8847 static tree
8848 rs6000_init_stack_protect_guard (void)
8849 {
8850 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8851 return default_stack_protect_guard ();
8852
8853 return NULL_TREE;
8854 }
8855
8856 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8857
8858 static bool
8859 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8860 {
8861 if (GET_CODE (x) == HIGH
8862 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8863 return true;
8864
8865 /* A TLS symbol in the TOC cannot contain a sum. */
8866 if (GET_CODE (x) == CONST
8867 && GET_CODE (XEXP (x, 0)) == PLUS
8868 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
8869 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8870 return true;
8871
8872 /* Do not place an ELF TLS symbol in the constant pool. */
8873 return TARGET_ELF && tls_referenced_p (x);
8874 }
8875
8876 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8877 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8878 can be addressed relative to the toc pointer. */
8879
8880 static bool
8881 use_toc_relative_ref (rtx sym, machine_mode mode)
8882 {
8883 return ((constant_pool_expr_p (sym)
8884 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8885 get_pool_mode (sym)))
8886 || (TARGET_CMODEL == CMODEL_MEDIUM
8887 && SYMBOL_REF_LOCAL_P (sym)
8888 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8889 }
8890
8891 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8892 replace the input X, or the original X if no replacement is called for.
8893 The output parameter *WIN is 1 if the calling macro should goto WIN,
8894 0 if it should not.
8895
8896 For RS/6000, we wish to handle large displacements off a base
8897 register by splitting the addend across an addis and the mem insn.
8898 This cuts the number of extra insns needed from 3 to 1.
8899
8900 On Darwin, we use this to generate code for floating point constants.
8901 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8902 The Darwin code is inside #if TARGET_MACHO because only then are the
8903 machopic_* functions defined. */
8904 static rtx
8905 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8906 int opnum, int type,
8907 int ind_levels ATTRIBUTE_UNUSED, int *win)
8908 {
8909 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8910 bool quad_offset_p = mode_supports_dq_form (mode);
8911
8912 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8913 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8914 if (reg_offset_p
8915 && opnum == 1
8916 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8917 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8918 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8919 && TARGET_P9_VECTOR)
8920 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8921 && TARGET_P9_VECTOR)))
8922 reg_offset_p = false;
8923
8924 /* We must recognize output that we have already generated ourselves. */
8925 if (GET_CODE (x) == PLUS
8926 && GET_CODE (XEXP (x, 0)) == PLUS
8927 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
8928 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
8929 && GET_CODE (XEXP (x, 1)) == CONST_INT)
8930 {
8931 if (TARGET_DEBUG_ADDR)
8932 {
8933 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8934 debug_rtx (x);
8935 }
8936 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8937 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8938 opnum, (enum reload_type) type);
8939 *win = 1;
8940 return x;
8941 }
8942
8943 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8944 if (GET_CODE (x) == LO_SUM
8945 && GET_CODE (XEXP (x, 0)) == HIGH)
8946 {
8947 if (TARGET_DEBUG_ADDR)
8948 {
8949 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8950 debug_rtx (x);
8951 }
8952 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8953 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8954 opnum, (enum reload_type) type);
8955 *win = 1;
8956 return x;
8957 }
8958
8959 #if TARGET_MACHO
8960 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8961 && GET_CODE (x) == LO_SUM
8962 && GET_CODE (XEXP (x, 0)) == PLUS
8963 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8964 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8965 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8966 && machopic_operand_p (XEXP (x, 1)))
8967 {
8968 /* Result of previous invocation of this function on Darwin
8969 floating point constant. */
8970 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8971 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8972 opnum, (enum reload_type) type);
8973 *win = 1;
8974 return x;
8975 }
8976 #endif
8977
8978 if (TARGET_CMODEL != CMODEL_SMALL
8979 && reg_offset_p
8980 && !quad_offset_p
8981 && small_toc_ref (x, VOIDmode))
8982 {
8983 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8984 x = gen_rtx_LO_SUM (Pmode, hi, x);
8985 if (TARGET_DEBUG_ADDR)
8986 {
8987 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8988 debug_rtx (x);
8989 }
8990 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8991 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8992 opnum, (enum reload_type) type);
8993 *win = 1;
8994 return x;
8995 }
8996
8997 if (GET_CODE (x) == PLUS
8998 && REG_P (XEXP (x, 0))
8999 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9000 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9001 && CONST_INT_P (XEXP (x, 1))
9002 && reg_offset_p
9003 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9004 {
9005 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9006 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9007 HOST_WIDE_INT high
9008 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
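 /* For example, val = 0x12345 splits into low = 0x2345 and
    high = 0x10000, with high + low == val. */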
9009
9010 /* Check for 32-bit overflow or quad addresses with one of the
9011 four least significant bits set. */
9012 if (high + low != val
9013 || (quad_offset_p && (low & 0xf)))
9014 {
9015 *win = 0;
9016 return x;
9017 }
9018
9019 /* Reload the high part into a base reg; leave the low part
9020 in the mem directly. */
9021
9022 x = gen_rtx_PLUS (GET_MODE (x),
9023 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9024 GEN_INT (high)),
9025 GEN_INT (low));
9026
9027 if (TARGET_DEBUG_ADDR)
9028 {
9029 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9030 debug_rtx (x);
9031 }
9032 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9033 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9034 opnum, (enum reload_type) type);
9035 *win = 1;
9036 return x;
9037 }
9038
9039 if (GET_CODE (x) == SYMBOL_REF
9040 && reg_offset_p
9041 && !quad_offset_p
9042 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9043 #if TARGET_MACHO
9044 && DEFAULT_ABI == ABI_DARWIN
9045 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9046 && machopic_symbol_defined_p (x)
9047 #else
9048 && DEFAULT_ABI == ABI_V4
9049 && !flag_pic
9050 #endif
9051 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9052 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9053 without fprs.
9054 ??? Assume floating point reg based on mode? This assumption is
9055 violated by e.g. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9056 where reload ends up doing a DFmode load of a constant from
9057 mem using two gprs. Unfortunately, at this point reload
9058 hasn't yet selected regs so poking around in reload data
9059 won't help and even if we could figure out the regs reliably,
9060 we'd still want to allow this transformation when the mem is
9061 naturally aligned. Since we say the address is good here, we
9062 can't disable offsets from LO_SUMs in mem_operand_gpr.
9063 FIXME: Allow offset from lo_sum for other modes too, when
9064 mem is sufficiently aligned.
9065
9066 Also disallow this if the type can go in VMX/Altivec registers, since
9067 those registers do not have d-form (reg+offset) address modes. */
9068 && !reg_addr[mode].scalar_in_vmx_p
9069 && mode != TFmode
9070 && mode != TDmode
9071 && mode != IFmode
9072 && mode != KFmode
9073 && (mode != TImode || !TARGET_VSX)
9074 && mode != PTImode
9075 && (mode != DImode || TARGET_POWERPC64)
9076 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9077 || TARGET_HARD_FLOAT))
9078 {
9079 #if TARGET_MACHO
9080 if (flag_pic)
9081 {
9082 rtx offset = machopic_gen_offset (x);
9083 x = gen_rtx_LO_SUM (GET_MODE (x),
9084 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9085 gen_rtx_HIGH (Pmode, offset)), offset);
9086 }
9087 else
9088 #endif
9089 x = gen_rtx_LO_SUM (GET_MODE (x),
9090 gen_rtx_HIGH (Pmode, x), x);
9091
9092 if (TARGET_DEBUG_ADDR)
9093 {
9094 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9095 debug_rtx (x);
9096 }
9097 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9098 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9099 opnum, (enum reload_type) type);
9100 *win = 1;
9101 return x;
9102 }
9103
9104 /* Reload an offset address wrapped by an AND that represents the
9105 masking of the lower bits. Strip the outer AND and let reload
9106 convert the offset address into an indirect address. For VSX,
9107 force reload to create the address with an AND in a separate
9108 register, because we can't guarantee an altivec register will
9109 be used. */
9110 if (VECTOR_MEM_ALTIVEC_P (mode)
9111 && GET_CODE (x) == AND
9112 && GET_CODE (XEXP (x, 0)) == PLUS
9113 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9114 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9115 && GET_CODE (XEXP (x, 1)) == CONST_INT
9116 && INTVAL (XEXP (x, 1)) == -16)
9117 {
9118 x = XEXP (x, 0);
9119 *win = 1;
9120 return x;
9121 }
9122
9123 if (TARGET_TOC
9124 && reg_offset_p
9125 && !quad_offset_p
9126 && GET_CODE (x) == SYMBOL_REF
9127 && use_toc_relative_ref (x, mode))
9128 {
9129 x = create_TOC_reference (x, NULL_RTX);
9130 if (TARGET_CMODEL != CMODEL_SMALL)
9131 {
9132 if (TARGET_DEBUG_ADDR)
9133 {
9134 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9135 debug_rtx (x);
9136 }
9137 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9138 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9139 opnum, (enum reload_type) type);
9140 }
9141 *win = 1;
9142 return x;
9143 }
9144 *win = 0;
9145 return x;
9146 }
9147
9148 /* Debug version of rs6000_legitimize_reload_address. */
9149 static rtx
9150 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9151 int opnum, int type,
9152 int ind_levels, int *win)
9153 {
9154 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9155 ind_levels, win);
9156 fprintf (stderr,
9157 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9158 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9159 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9160 debug_rtx (x);
9161
9162 if (x == ret)
9163 fprintf (stderr, "Same address returned\n");
9164 else if (!ret)
9165 fprintf (stderr, "NULL returned\n");
9166 else
9167 {
9168 fprintf (stderr, "New address:\n");
9169 debug_rtx (ret);
9170 }
9171
9172 return ret;
9173 }
9174
9175 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9176 that is a valid memory address for an instruction.
9177 The MODE argument is the machine mode for the MEM expression
9178 that wants to use this address.
9179
9180 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9181 refers to a constant pool entry of an address (or the sum of it
9182 plus a constant), a short (16-bit signed) constant plus a register,
9183 the sum of two registers, or a register indirect, possibly with an
9184 auto-increment. For DFmode, DDmode and DImode with a constant plus
9185 register, we must ensure that both words are addressable, or on
9186 PowerPC64 that the offset is word aligned.
9187
9188 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9189 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9190 because adjacent memory cells are accessed by adding word-sized offsets
9191 during assembly output. */
9192 static bool
9193 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9194 {
9195 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9196 bool quad_offset_p = mode_supports_dq_form (mode);
9197
9198 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9199 if (VECTOR_MEM_ALTIVEC_P (mode)
9200 && GET_CODE (x) == AND
9201 && GET_CODE (XEXP (x, 1)) == CONST_INT
9202 && INTVAL (XEXP (x, 1)) == -16)
9203 x = XEXP (x, 0);
9204
9205 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9206 return 0;
9207 if (legitimate_indirect_address_p (x, reg_ok_strict))
9208 return 1;
9209 if (TARGET_UPDATE
9210 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9211 && mode_supports_pre_incdec_p (mode)
9212 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9213 return 1;
9214 /* Handle restricted vector d-form offsets in ISA 3.0. */
9215 if (quad_offset_p)
9216 {
9217 if (quad_address_p (x, mode, reg_ok_strict))
9218 return 1;
9219 }
9220 else if (virtual_stack_registers_memory_p (x))
9221 return 1;
9222
9223 else if (reg_offset_p)
9224 {
9225 if (legitimate_small_data_p (mode, x))
9226 return 1;
9227 if (legitimate_constant_pool_address_p (x, mode,
9228 reg_ok_strict || lra_in_progress))
9229 return 1;
9230 }
9231
9232 /* For TImode, if we have TImode in VSX registers, only allow register
9233 indirect addresses. This will allow the values to go in either GPRs
9234 or VSX registers without reloading. The vector types would tend to
9235 go into VSX registers, so we allow REG+REG, while TImode seems
9236 somewhat split, in that some uses are GPR based, and some VSX based. */
9237 /* FIXME: We could loosen this by changing the following to
9238 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9239 but currently we cannot allow REG+REG addressing for TImode. See
9240 PR72827 for complete details on how this ends up hoodwinking DSE. */
9241 if (mode == TImode && TARGET_VSX)
9242 return 0;
9243 /* If not REG_OK_STRICT (before reload), let any stack offset pass. */
9244 if (! reg_ok_strict
9245 && reg_offset_p
9246 && GET_CODE (x) == PLUS
9247 && GET_CODE (XEXP (x, 0)) == REG
9248 && (XEXP (x, 0) == virtual_stack_vars_rtx
9249 || XEXP (x, 0) == arg_pointer_rtx)
9250 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9251 return 1;
9252 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9253 return 1;
9254 if (!FLOAT128_2REG_P (mode)
9255 && (TARGET_HARD_FLOAT
9256 || TARGET_POWERPC64
9257 || (mode != DFmode && mode != DDmode))
9258 && (TARGET_POWERPC64 || mode != DImode)
9259 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9260 && mode != PTImode
9261 && !avoiding_indexed_address_p (mode)
9262 && legitimate_indexed_address_p (x, reg_ok_strict))
9263 return 1;
9264 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9265 && mode_supports_pre_modify_p (mode)
9266 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9267 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9268 reg_ok_strict, false)
9269 || (!avoiding_indexed_address_p (mode)
9270 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9271 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9272 return 1;
9273 if (reg_offset_p && !quad_offset_p
9274 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9275 return 1;
9276 return 0;
9277 }
9278
9279 /* Debug version of rs6000_legitimate_address_p. */
9280 static bool
9281 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9282 bool reg_ok_strict)
9283 {
9284 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9285 fprintf (stderr,
9286 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9287 "strict = %d, reload = %s, code = %s\n",
9288 ret ? "true" : "false",
9289 GET_MODE_NAME (mode),
9290 reg_ok_strict,
9291 (reload_completed ? "after" : "before"),
9292 GET_RTX_NAME (GET_CODE (x)));
9293 debug_rtx (x);
9294
9295 return ret;
9296 }
9297
9298 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9299
9300 static bool
9301 rs6000_mode_dependent_address_p (const_rtx addr,
9302 addr_space_t as ATTRIBUTE_UNUSED)
9303 {
9304 return rs6000_mode_dependent_address_ptr (addr);
9305 }
9306
9307 /* Go to LABEL if ADDR (a legitimate address expression)
9308 has an effect that depends on the machine mode it is used for.
9309
9310 On the RS/6000 this is true of all integral offsets (since AltiVec
9311 and VSX modes don't allow them) and of pre-increment or decrement.
9312
9313 ??? Except that due to conceptual problems in offsettable_address_p
9314 we can't really report the problems of integral offsets. So leave
9315 this assuming that the adjustable offset must be valid for the
9316 sub-words of a TFmode operand, which is what we had before. */
9317
9318 static bool
9319 rs6000_mode_dependent_address (const_rtx addr)
9320 {
9321 switch (GET_CODE (addr))
9322 {
9323 case PLUS:
9324 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9325 is considered a legitimate address before reload, so there
9326 are no offset restrictions in that case. Note that this
9327 condition is safe in strict mode because any address involving
9328 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9329 been rejected as illegitimate. */
9330 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9331 && XEXP (addr, 0) != arg_pointer_rtx
9332 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9333 {
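 /* The check below flags an offset as mode-dependent when the
    largest possible operand (16 bytes, per the TFmode note above)
    would run past the signed 16-bit displacement range: its last
    word sits at val + 12 with 4-byte accesses (32-bit) or at
    val + 8 with 8-byte accesses (64-bit). */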
9334 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9335 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9336 }
9337 break;
9338
9339 case LO_SUM:
9340 /* Anything in the constant pool is sufficiently aligned that
9341 all bytes have the same high part address. */
9342 return !legitimate_constant_pool_address_p (addr, QImode, false);
9343
9344 /* Auto-increment cases are now treated generically in recog.c. */
9345 case PRE_MODIFY:
9346 return TARGET_UPDATE;
9347
9348 /* AND is only allowed in Altivec loads. */
9349 case AND:
9350 return true;
9351
9352 default:
9353 break;
9354 }
9355
9356 return false;
9357 }
9358
9359 /* Debug version of rs6000_mode_dependent_address. */
9360 static bool
9361 rs6000_debug_mode_dependent_address (const_rtx addr)
9362 {
9363 bool ret = rs6000_mode_dependent_address (addr);
9364
9365 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9366 ret ? "true" : "false");
9367 debug_rtx (addr);
9368
9369 return ret;
9370 }
9371
9372 /* Implement FIND_BASE_TERM. */
9373
9374 rtx
9375 rs6000_find_base_term (rtx op)
9376 {
9377 rtx base;
9378
9379 base = op;
9380 if (GET_CODE (base) == CONST)
9381 base = XEXP (base, 0);
9382 if (GET_CODE (base) == PLUS)
9383 base = XEXP (base, 0);
9384 if (GET_CODE (base) == UNSPEC)
9385 switch (XINT (base, 1))
9386 {
9387 case UNSPEC_TOCREL:
9388 case UNSPEC_MACHOPIC_OFFSET:
9389 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9390 for aliasing purposes. */
9391 return XVECEXP (base, 0, 0);
9392 }
9393
9394 return op;
9395 }
9396
9397 /* More elaborate version of recog's offsettable_memref_p predicate
9398 that works around the ??? note of rs6000_mode_dependent_address.
9399 In particular it accepts
9400
9401 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9402
9403 in 32-bit mode, that the recog predicate rejects. */
9404
9405 static bool
9406 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9407 {
9408 bool worst_case;
9409
9410 if (!MEM_P (op))
9411 return false;
9412
9413 /* First mimic offsettable_memref_p. */
9414 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9415 return true;
9416
9417 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9418 the latter predicate knows nothing about the mode of the memory
9419 reference and, therefore, assumes that it is the largest supported
9420 mode (TFmode). As a consequence, legitimate offsettable memory
9421 references are rejected. rs6000_legitimate_offset_address_p contains
9422 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9423 at least with a little bit of help here given that we know the
9424 actual registers used. */
9425 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9426 || GET_MODE_SIZE (reg_mode) == 4);
9427 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9428 strict, worst_case);
9429 }
9430
9431 /* Determine the reassociation width to be used in reassociate_bb.
9432 This takes into account how many parallel operations we
9433 can actually do of a given type, and also the latency.
9434 P8:
9435 int add/sub 6/cycle
9436 mul 2/cycle
9437 vect add/sub/mul 2/cycle
9438 fp add/sub/mul 2/cycle
9439 dfp 1/cycle
9440 */
9441
9442 static int
9443 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9444 machine_mode mode)
9445 {
9446 switch (rs6000_tune)
9447 {
9448 case PROCESSOR_POWER8:
9449 case PROCESSOR_POWER9:
9450 if (DECIMAL_FLOAT_MODE_P (mode))
9451 return 1;
9452 if (VECTOR_MODE_P (mode))
9453 return 4;
9454 if (INTEGRAL_MODE_P (mode))
9455 return 1;
9456 if (FLOAT_MODE_P (mode))
9457 return 4;
9458 break;
9459 default:
9460 break;
9461 }
9462 return 1;
9463 }
9464
9465 /* Change register usage conditional on target flags. */
9466 static void
9467 rs6000_conditional_register_usage (void)
9468 {
9469 int i;
9470
9471 if (TARGET_DEBUG_TARGET)
9472 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9473
9474 /* Set MQ register fixed (already call_used) so that it will not be
9475 allocated. */
9476 fixed_regs[64] = 1;
9477
9478 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9479 if (TARGET_64BIT)
9480 fixed_regs[13] = call_used_regs[13]
9481 = call_really_used_regs[13] = 1;
9482
9483 /* Conditionally disable FPRs. */
9484 if (TARGET_SOFT_FLOAT)
9485 for (i = 32; i < 64; i++)
9486 fixed_regs[i] = call_used_regs[i]
9487 = call_really_used_regs[i] = 1;
9488
9489 /* The TOC register is not killed across calls in a way that is
9490 visible to the compiler. */
9491 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9492 call_really_used_regs[2] = 0;
9493
9494 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9495 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9496
9497 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9498 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9499 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9500 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9501
9502 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9503 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9504 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9505 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9506
9507 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9508 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9509 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9510
9511 if (!TARGET_ALTIVEC && !TARGET_VSX)
9512 {
9513 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9514 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9515 call_really_used_regs[VRSAVE_REGNO] = 1;
9516 }
9517
9518 if (TARGET_ALTIVEC || TARGET_VSX)
9519 global_regs[VSCR_REGNO] = 1;
9520
9521 if (TARGET_ALTIVEC_ABI)
9522 {
9523 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9524 call_used_regs[i] = call_really_used_regs[i] = 1;
9525
9526 /* AIX reserves VR20:31 in non-extended ABI mode. */
9527 if (TARGET_XCOFF)
9528 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9529 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9530 }
9531 }
9532
9533 \f
9534 /* Output insns to set DEST equal to the constant SOURCE as a series of
9535 lis, ori and shl instructions, and return TRUE. */
9536
9537 bool
9538 rs6000_emit_set_const (rtx dest, rtx source)
9539 {
9540 machine_mode mode = GET_MODE (dest);
9541 rtx temp, set;
9542 rtx_insn *insn;
9543 HOST_WIDE_INT c;
9544
9545 gcc_checking_assert (CONST_INT_P (source));
9546 c = INTVAL (source);
9547 switch (mode)
9548 {
9549 case E_QImode:
9550 case E_HImode:
9551 emit_insn (gen_rtx_SET (dest, source));
9552 return true;
9553
9554 case E_SImode:
9555 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9556
9557 emit_insn (gen_rtx_SET (copy_rtx (temp),
9558 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9559 emit_insn (gen_rtx_SET (dest,
9560 gen_rtx_IOR (SImode, copy_rtx (temp),
9561 GEN_INT (c & 0xffff))));
9562 break;
9563
9564 case E_DImode:
9565 if (!TARGET_POWERPC64)
9566 {
9567 rtx hi, lo;
9568
9569 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9570 DImode);
9571 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9572 DImode);
9573 emit_move_insn (hi, GEN_INT (c >> 32));
9574 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9575 emit_move_insn (lo, GEN_INT (c));
9576 }
9577 else
9578 rs6000_emit_set_long_const (dest, c);
9579 break;
9580
9581 default:
9582 gcc_unreachable ();
9583 }
9584
9585 insn = get_last_insn ();
9586 set = single_set (insn);
9587 if (! CONSTANT_P (SET_SRC (set)))
9588 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9589
9590 return true;
9591 }
9592
9593 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9594 Output insns to set DEST equal to the constant C as a series of
9595 lis, ori and shl instructions. */
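/* Worst case, e.g. C = 0x123456789abcdef0, this emits the
   five-insn sequence
   lis r,0x1234 / ori r,r,0x5678 / sldi r,r,32 /
   oris r,r,0x9abc / ori r,r,0xdef0
   while the special shapes handled below need fewer. */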
9596
9597 static void
9598 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9599 {
9600 rtx temp;
9601 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9602
9603 ud1 = c & 0xffff;
9604 c = c >> 16;
9605 ud2 = c & 0xffff;
9606 c = c >> 16;
9607 ud3 = c & 0xffff;
9608 c = c >> 16;
9609 ud4 = c & 0xffff;
9610
9611 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9612 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9613 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
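 /* (ud1 ^ 0x8000) - 0x8000 sign-extends the low 16 bits, so any
    constant representable as a signed 16-bit immediate needs only
    the single move above. */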
9614
9615 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9616 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9617 {
9618 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9619
9620 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9621 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9622 if (ud1 != 0)
9623 emit_move_insn (dest,
9624 gen_rtx_IOR (DImode, copy_rtx (temp),
9625 GEN_INT (ud1)));
9626 }
9627 else if (ud3 == 0 && ud4 == 0)
9628 {
9629 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9630
9631 gcc_assert (ud2 & 0x8000);
9632 emit_move_insn (copy_rtx (temp),
9633 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9634 if (ud1 != 0)
9635 emit_move_insn (copy_rtx (temp),
9636 gen_rtx_IOR (DImode, copy_rtx (temp),
9637 GEN_INT (ud1)));
9638 emit_move_insn (dest,
9639 gen_rtx_ZERO_EXTEND (DImode,
9640 gen_lowpart (SImode,
9641 copy_rtx (temp))));
9642 }
9643 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9644 || (ud4 == 0 && ! (ud3 & 0x8000)))
9645 {
9646 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9647
9648 emit_move_insn (copy_rtx (temp),
9649 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9650 if (ud2 != 0)
9651 emit_move_insn (copy_rtx (temp),
9652 gen_rtx_IOR (DImode, copy_rtx (temp),
9653 GEN_INT (ud2)));
9654 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9655 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9656 GEN_INT (16)));
9657 if (ud1 != 0)
9658 emit_move_insn (dest,
9659 gen_rtx_IOR (DImode, copy_rtx (temp),
9660 GEN_INT (ud1)));
9661 }
9662 else
9663 {
9664 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9665
9666 emit_move_insn (copy_rtx (temp),
9667 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9668 if (ud3 != 0)
9669 emit_move_insn (copy_rtx (temp),
9670 gen_rtx_IOR (DImode, copy_rtx (temp),
9671 GEN_INT (ud3)));
9672
9673 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9674 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9675 GEN_INT (32)));
9676 if (ud2 != 0)
9677 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9678 gen_rtx_IOR (DImode, copy_rtx (temp),
9679 GEN_INT (ud2 << 16)));
9680 if (ud1 != 0)
9681 emit_move_insn (dest,
9682 gen_rtx_IOR (DImode, copy_rtx (temp),
9683 GEN_INT (ud1)));
9684 }
9685 }
9686
9687 /* Helper for the following. Get rid of [r+r] memory refs
9688 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9689
9690 static void
9691 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9692 {
9693 if (GET_CODE (operands[0]) == MEM
9694 && GET_CODE (XEXP (operands[0], 0)) != REG
9695 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9696 GET_MODE (operands[0]), false))
9697 operands[0]
9698 = replace_equiv_address (operands[0],
9699 copy_addr_to_reg (XEXP (operands[0], 0)));
9700
9701 if (GET_CODE (operands[1]) == MEM
9702 && GET_CODE (XEXP (operands[1], 0)) != REG
9703 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9704 GET_MODE (operands[1]), false))
9705 operands[1]
9706 = replace_equiv_address (operands[1],
9707 copy_addr_to_reg (XEXP (operands[1], 0)));
9708 }
9709
9710 /* Generate a vector of constants to permute MODE for a little-endian
9711 storage operation by swapping the two halves of a vector. */
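/* For V4SImode, for instance, this yields the element order
   { 2, 3, 0, 1 }, i.e. a doubleword swap. */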
9712 static rtvec
9713 rs6000_const_vec (machine_mode mode)
9714 {
9715 int i, subparts;
9716 rtvec v;
9717
9718 switch (mode)
9719 {
9720 case E_V1TImode:
9721 subparts = 1;
9722 break;
9723 case E_V2DFmode:
9724 case E_V2DImode:
9725 subparts = 2;
9726 break;
9727 case E_V4SFmode:
9728 case E_V4SImode:
9729 subparts = 4;
9730 break;
9731 case E_V8HImode:
9732 subparts = 8;
9733 break;
9734 case E_V16QImode:
9735 subparts = 16;
9736 break;
9737 default:
9738 gcc_unreachable ();
9739 }
9740
9741 v = rtvec_alloc (subparts);
9742
9743 for (i = 0; i < subparts / 2; ++i)
9744 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9745 for (i = subparts / 2; i < subparts; ++i)
9746 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9747
9748 return v;
9749 }
9750
9751 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9752 store operation. */
9753 void
9754 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9755 {
9756 /* Scalar permutations are easier to express in integer modes than in
9757 floating-point modes, so cast them here. We use V1TImode instead
9758 of TImode to ensure that the values don't go through GPRs. */
9759 if (FLOAT128_VECTOR_P (mode))
9760 {
9761 dest = gen_lowpart (V1TImode, dest);
9762 source = gen_lowpart (V1TImode, source);
9763 mode = V1TImode;
9764 }
9765
9766 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9767 scalar. */
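 /* Rotating a 128-bit value by 64 bits swaps its two doublewords. */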
9768 if (mode == TImode || mode == V1TImode)
9769 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9770 GEN_INT (64))));
9771 else
9772 {
9773 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9774 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9775 }
9776 }
9777
9778 /* Emit a little-endian load from vector memory location SOURCE to VSX
9779 register DEST in mode MODE. The load is done with two permuting
9780 insns that represent an lxvd2x and an xxpermdi. */
9781 void
9782 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9783 {
9784 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9785 V1TImode). */
9786 if (mode == TImode || mode == V1TImode)
9787 {
9788 mode = V2DImode;
9789 dest = gen_lowpart (V2DImode, dest);
9790 source = adjust_address (source, V2DImode, 0);
9791 }
9792
9793 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9794 rs6000_emit_le_vsx_permute (tmp, source, mode);
9795 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9796 }
9797
9798 /* Emit a little-endian store to vector memory location DEST from VSX
9799 register SOURCE in mode MODE. The store is done with two permuting
9800 insns that represent an xxpermdi and an stxvd2x. */
9801 void
9802 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9803 {
9804 /* This should never be called during or after LRA, because it does
9805 not re-permute the source register. It is intended only for use
9806 during expand. */
9807 gcc_assert (!lra_in_progress && !reload_completed);
9808
9809 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9810 V1TImode). */
9811 if (mode == TImode || mode == V1TImode)
9812 {
9813 mode = V2DImode;
9814 dest = adjust_address (dest, V2DImode, 0);
9815 source = gen_lowpart (V2DImode, source);
9816 }
9817
9818 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9819 rs6000_emit_le_vsx_permute (tmp, source, mode);
9820 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9821 }
9822
9823 /* Emit a sequence representing a little-endian VSX load or store,
9824 moving data from SOURCE to DEST in mode MODE. This is done
9825 separately from rs6000_emit_move to ensure it is called only
9826 during expand. LE VSX loads and stores introduced later are
9827 handled with a split. The expand-time RTL generation allows
9828 us to optimize away redundant pairs of register-permutes. */
9829 void
9830 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9831 {
9832 gcc_assert (!BYTES_BIG_ENDIAN
9833 && VECTOR_MEM_VSX_P (mode)
9834 && !TARGET_P9_VECTOR
9835 && !gpr_or_gpr_p (dest, source)
9836 && (MEM_P (source) ^ MEM_P (dest)));
9837
9838 if (MEM_P (source))
9839 {
9840 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
9841 rs6000_emit_le_vsx_load (dest, source, mode);
9842 }
9843 else
9844 {
9845 if (!REG_P (source))
9846 source = force_reg (mode, source);
9847 rs6000_emit_le_vsx_store (dest, source, mode);
9848 }
9849 }
9850
9851 /* Return whether an SFmode or SImode move can be done without converting one
9852 mode to another. This arises when we have:
9853
9854 (SUBREG:SF (REG:SI ...))
9855 (SUBREG:SI (REG:SF ...))
9856
9857 and one of the values is in a floating point/vector register, where SFmode
9858 scalars are stored in DFmode format. */
9859
9860 bool
9861 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9862 {
9863 if (TARGET_ALLOW_SF_SUBREG)
9864 return true;
9865
9866 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9867 return true;
9868
9869 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9870 return true;
9871
9872 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9873 if (SUBREG_P (dest))
9874 {
9875 rtx dest_subreg = SUBREG_REG (dest);
9876 rtx src_subreg = SUBREG_REG (src);
9877 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9878 }
9879
9880 return false;
9881 }
9882
9883
9884 /* Helper function to change moves with:
9885
9886 (SUBREG:SF (REG:SI)) and
9887 (SUBREG:SI (REG:SF))
9888
9889 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9890 values are stored as DFmode values in the VSX registers. We need to convert
9891 the bits before we can use a direct move or operate on the bits in the
9892 vector register as an integer type.
9893
9894 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9895
9896 static bool
9897 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9898 {
9899 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9900 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9901 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9902 {
9903 rtx inner_source = SUBREG_REG (source);
9904 machine_mode inner_mode = GET_MODE (inner_source);
9905
9906 if (mode == SImode && inner_mode == SFmode)
9907 {
9908 emit_insn (gen_movsi_from_sf (dest, inner_source));
9909 return true;
9910 }
9911
9912 if (mode == SFmode && inner_mode == SImode)
9913 {
9914 emit_insn (gen_movsf_from_si (dest, inner_source));
9915 return true;
9916 }
9917 }
9918
9919 return false;
9920 }
9921
9922 /* Emit a move from SOURCE to DEST in mode MODE. */
9923 void
9924 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9925 {
9926 rtx operands[2];
9927 operands[0] = dest;
9928 operands[1] = source;
9929
9930 if (TARGET_DEBUG_ADDR)
9931 {
9932 fprintf (stderr,
9933 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9934 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9935 GET_MODE_NAME (mode),
9936 lra_in_progress,
9937 reload_completed,
9938 can_create_pseudo_p ());
9939 debug_rtx (dest);
9940 fprintf (stderr, "source:\n");
9941 debug_rtx (source);
9942 }
9943
9944 /* Check that we get CONST_WIDE_INT only when we should. */
9945 if (CONST_WIDE_INT_P (operands[1])
9946 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9947 gcc_unreachable ();
9948
9949 #ifdef HAVE_AS_GNU_ATTRIBUTE
9950 /* If we use a long double type, set the flags in .gnu_attribute that say
9951 what the long double type is. This is to allow the linker's warning
9952 message for the wrong long double to be useful, even if the function does
9953 not do a call (for example, doing a 128-bit add on power9 if the long
9954 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128 are
9955 used and they aren't the default long double type. */
9956 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9957 {
9958 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9959 rs6000_passes_float = rs6000_passes_long_double = true;
9960
9961 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9962 rs6000_passes_float = rs6000_passes_long_double = true;
9963 }
9964 #endif
9965
9966 /* See if we need to special case SImode/SFmode SUBREG moves. */
9967 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9968 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9969 return;
9970
9971 /* Check if GCC is setting up a block move that will end up using FP
9972 registers as temporaries. We must make sure this is acceptable. */
9973 if (GET_CODE (operands[0]) == MEM
9974 && GET_CODE (operands[1]) == MEM
9975 && mode == DImode
9976 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9977 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9978 && ! (rs6000_slow_unaligned_access (SImode,
9979 (MEM_ALIGN (operands[0]) > 32
9980 ? 32 : MEM_ALIGN (operands[0])))
9981 || rs6000_slow_unaligned_access (SImode,
9982 (MEM_ALIGN (operands[1]) > 32
9983 ? 32 : MEM_ALIGN (operands[1]))))
9984 && ! MEM_VOLATILE_P (operands [0])
9985 && ! MEM_VOLATILE_P (operands [1]))
9986 {
9987 emit_move_insn (adjust_address (operands[0], SImode, 0),
9988 adjust_address (operands[1], SImode, 0));
9989 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9990 adjust_address (copy_rtx (operands[1]), SImode, 4));
9991 return;
9992 }
9993
9994 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
9995 && !gpc_reg_operand (operands[1], mode))
9996 operands[1] = force_reg (mode, operands[1]);
9997
9998 /* Recognize the case where operand[1] is a reference to thread-local
9999 data and load its address to a register. */
10000 if (tls_referenced_p (operands[1]))
10001 {
10002 enum tls_model model;
10003 rtx tmp = operands[1];
10004 rtx addend = NULL;
10005
10006 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10007 {
10008 addend = XEXP (XEXP (tmp, 0), 1);
10009 tmp = XEXP (XEXP (tmp, 0), 0);
10010 }
10011
10012 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10013 model = SYMBOL_REF_TLS_MODEL (tmp);
10014 gcc_assert (model != 0);
10015
10016 tmp = rs6000_legitimize_tls_address (tmp, model);
10017 if (addend)
10018 {
10019 tmp = gen_rtx_PLUS (mode, tmp, addend);
10020 tmp = force_operand (tmp, operands[0]);
10021 }
10022 operands[1] = tmp;
10023 }
10024
10025 /* 128-bit constant floating-point values on Darwin should really be loaded
10026 as two parts. However, this premature splitting is a problem when DFmode
10027 values can go into Altivec registers. */
10028 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10029 && !reg_addr[DFmode].scalar_in_vmx_p)
10030 {
10031 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10032 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10033 DFmode);
10034 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10035 GET_MODE_SIZE (DFmode)),
10036 simplify_gen_subreg (DFmode, operands[1], mode,
10037 GET_MODE_SIZE (DFmode)),
10038 DFmode);
10039 return;
10040 }
10041
10042 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10043 p1:SD) if p1 is not of floating point class and p0 is spilled, as
10044 we can have no analogous movsd_store for this. */
10045 if (lra_in_progress && mode == DDmode
10046 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10047 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10048 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10049 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10050 {
10051 enum reg_class cl;
10052 int regno = REGNO (SUBREG_REG (operands[1]));
10053
10054 if (regno >= FIRST_PSEUDO_REGISTER)
10055 {
10056 cl = reg_preferred_class (regno);
10057 regno = reg_renumber[regno];
10058 if (regno < 0)
10059 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10060 }
10061 if (regno >= 0 && ! FP_REGNO_P (regno))
10062 {
10063 mode = SDmode;
10064 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10065 operands[1] = SUBREG_REG (operands[1]);
10066 }
10067 }
10068 if (lra_in_progress
10069 && mode == SDmode
10070 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10071 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10072 && (REG_P (operands[1])
10073 || (GET_CODE (operands[1]) == SUBREG
10074 && REG_P (SUBREG_REG (operands[1])))))
10075 {
10076 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10077 ? SUBREG_REG (operands[1]) : operands[1]);
10078 enum reg_class cl;
10079
10080 if (regno >= FIRST_PSEUDO_REGISTER)
10081 {
10082 cl = reg_preferred_class (regno);
10083 gcc_assert (cl != NO_REGS);
10084 regno = reg_renumber[regno];
10085 if (regno < 0)
10086 regno = ira_class_hard_regs[cl][0];
10087 }
10088 if (FP_REGNO_P (regno))
10089 {
10090 if (GET_MODE (operands[0]) != DDmode)
10091 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10092 emit_insn (gen_movsd_store (operands[0], operands[1]));
10093 }
10094 else if (INT_REGNO_P (regno))
10095 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10096 else
10097 gcc_unreachable ();
10098 return;
10099 }
10100 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10101 p1:DD)) if p0 is not of floating point class and p1 is spilled, as
10102 we can have no analogous movsd_load for this. */
10103 if (lra_in_progress && mode == DDmode
10104 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10105 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10106 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10107 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10108 {
10109 enum reg_class cl;
10110 int regno = REGNO (SUBREG_REG (operands[0]));
10111
10112 if (regno >= FIRST_PSEUDO_REGISTER)
10113 {
10114 cl = reg_preferred_class (regno);
10115 regno = reg_renumber[regno];
10116 if (regno < 0)
10117 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10118 }
10119 if (regno >= 0 && ! FP_REGNO_P (regno))
10120 {
10121 mode = SDmode;
10122 operands[0] = SUBREG_REG (operands[0]);
10123 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10124 }
10125 }
10126 if (lra_in_progress
10127 && mode == SDmode
10128 && (REG_P (operands[0])
10129 || (GET_CODE (operands[0]) == SUBREG
10130 && REG_P (SUBREG_REG (operands[0]))))
10131 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10132 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10133 {
10134 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10135 ? SUBREG_REG (operands[0]) : operands[0]);
10136 enum reg_class cl;
10137
10138 if (regno >= FIRST_PSEUDO_REGISTER)
10139 {
10140 cl = reg_preferred_class (regno);
10141 gcc_assert (cl != NO_REGS);
10142 regno = reg_renumber[regno];
10143 if (regno < 0)
10144 regno = ira_class_hard_regs[cl][0];
10145 }
10146 if (FP_REGNO_P (regno))
10147 {
10148 if (GET_MODE (operands[1]) != DDmode)
10149 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10150 emit_insn (gen_movsd_load (operands[0], operands[1]));
10151 }
10152 else if (INT_REGNO_P (regno))
10153 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10154 else
10155 gcc_unreachable ();
10156 return;
10157 }
10158
10159 /* FIXME: In the long term, this switch statement should go away
10160 and be replaced by a sequence of tests based on things like
10161 mode == Pmode. */
10162 switch (mode)
10163 {
10164 case E_HImode:
10165 case E_QImode:
10166 if (CONSTANT_P (operands[1])
10167 && GET_CODE (operands[1]) != CONST_INT)
10168 operands[1] = force_const_mem (mode, operands[1]);
10169 break;
10170
10171 case E_TFmode:
10172 case E_TDmode:
10173 case E_IFmode:
10174 case E_KFmode:
10175 if (FLOAT128_2REG_P (mode))
10176 rs6000_eliminate_indexed_memrefs (operands);
10177 /* fall through */
10178
10179 case E_DFmode:
10180 case E_DDmode:
10181 case E_SFmode:
10182 case E_SDmode:
10183 if (CONSTANT_P (operands[1])
10184 && ! easy_fp_constant (operands[1], mode))
10185 operands[1] = force_const_mem (mode, operands[1]);
10186 break;
10187
10188 case E_V16QImode:
10189 case E_V8HImode:
10190 case E_V4SFmode:
10191 case E_V4SImode:
10192 case E_V2DFmode:
10193 case E_V2DImode:
10194 case E_V1TImode:
10195 if (CONSTANT_P (operands[1])
10196 && !easy_vector_constant (operands[1], mode))
10197 operands[1] = force_const_mem (mode, operands[1]);
10198 break;
10199
10200 case E_SImode:
10201 case E_DImode:
10202 /* Use default pattern for address of ELF small data. */
10203 if (TARGET_ELF
10204 && mode == Pmode
10205 && DEFAULT_ABI == ABI_V4
10206 && (GET_CODE (operands[1]) == SYMBOL_REF
10207 || GET_CODE (operands[1]) == CONST)
10208 && small_data_operand (operands[1], mode))
10209 {
10210 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10211 return;
10212 }
10213
10214 if (DEFAULT_ABI == ABI_V4
10215 && mode == Pmode && mode == SImode
10216 && flag_pic == 1 && got_operand (operands[1], mode))
10217 {
10218 emit_insn (gen_movsi_got (operands[0], operands[1]));
10219 return;
10220 }
10221
10222 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10223 && TARGET_NO_TOC
10224 && ! flag_pic
10225 && mode == Pmode
10226 && CONSTANT_P (operands[1])
10227 && GET_CODE (operands[1]) != HIGH
10228 && GET_CODE (operands[1]) != CONST_INT)
10229 {
10230 rtx target = (!can_create_pseudo_p ()
10231 ? operands[0]
10232 : gen_reg_rtx (mode));
10233
10234 /* If this is a function address on -mcall-aixdesc,
10235 convert it to the address of the descriptor. */
10236 if (DEFAULT_ABI == ABI_AIX
10237 && GET_CODE (operands[1]) == SYMBOL_REF
10238 && XSTR (operands[1], 0)[0] == '.')
10239 {
10240 const char *name = XSTR (operands[1], 0);
10241 rtx new_ref;
10242 while (*name == '.')
10243 name++;
10244 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10245 CONSTANT_POOL_ADDRESS_P (new_ref)
10246 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10247 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10248 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10249 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10250 operands[1] = new_ref;
10251 }
10252
10253 if (DEFAULT_ABI == ABI_DARWIN)
10254 {
10255 #if TARGET_MACHO
10256 if (MACHO_DYNAMIC_NO_PIC_P)
10257 {
10258 /* Take care of any required data indirection. */
10259 operands[1] = rs6000_machopic_legitimize_pic_address (
10260 operands[1], mode, operands[0]);
10261 if (operands[0] != operands[1])
10262 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10263 return;
10264 }
10265 #endif
10266 emit_insn (gen_macho_high (target, operands[1]));
10267 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10268 return;
10269 }
10270
10271 emit_insn (gen_elf_high (target, operands[1]));
10272 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10273 return;
10274 }
10275
10276 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10277 and we have put it in the TOC, we just need to make a TOC-relative
10278 reference to it. */
10279 if (TARGET_TOC
10280 && GET_CODE (operands[1]) == SYMBOL_REF
10281 && use_toc_relative_ref (operands[1], mode))
10282 operands[1] = create_TOC_reference (operands[1], operands[0]);
10283 else if (mode == Pmode
10284 && CONSTANT_P (operands[1])
10285 && GET_CODE (operands[1]) != HIGH
10286 && ((REG_P (operands[0])
10287 && FP_REGNO_P (REGNO (operands[0])))
10288 || !CONST_INT_P (operands[1])
10289 || (num_insns_constant (operands[1], mode)
10290 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10291 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10292 && (TARGET_CMODEL == CMODEL_SMALL
10293 || can_create_pseudo_p ()
10294 || (REG_P (operands[0])
10295 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10296 {
10297
10298 #if TARGET_MACHO
10299 /* Darwin uses a special PIC legitimizer. */
10300 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10301 {
10302 operands[1] =
10303 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10304 operands[0]);
10305 if (operands[0] != operands[1])
10306 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10307 return;
10308 }
10309 #endif
10310
10311 /* If we are to limit the number of things we put in the TOC and
10312 this is a symbol plus a constant we can add in one insn,
10313 just put the symbol in the TOC and add the constant. */
10314 if (GET_CODE (operands[1]) == CONST
10315 && TARGET_NO_SUM_IN_TOC
10316 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10317 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10318 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10319 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10320 && ! side_effects_p (operands[0]))
10321 {
10322 rtx sym =
10323 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10324 rtx other = XEXP (XEXP (operands[1], 0), 1);
10325
10326 sym = force_reg (mode, sym);
10327 emit_insn (gen_add3_insn (operands[0], sym, other));
10328 return;
10329 }
10330
10331 operands[1] = force_const_mem (mode, operands[1]);
10332
10333 if (TARGET_TOC
10334 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10335 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10336 {
10337 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10338 operands[0]);
10339 operands[1] = gen_const_mem (mode, tocref);
10340 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10341 }
10342 }
10343 break;
10344
10345 case E_TImode:
10346 if (!VECTOR_MEM_VSX_P (TImode))
10347 rs6000_eliminate_indexed_memrefs (operands);
10348 break;
10349
10350 case E_PTImode:
10351 rs6000_eliminate_indexed_memrefs (operands);
10352 break;
10353
10354 default:
10355 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10356 }
10357
10358 /* Above, we may have called force_const_mem which may have returned
10359 an invalid address. If we can, fix this up; otherwise, reload will
10360 have to deal with it. */
10361 if (GET_CODE (operands[1]) == MEM)
10362 operands[1] = validize_mem (operands[1]);
10363
10364 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10365 }
10366 \f
10367 /* Nonzero if we can use a floating-point register to pass this arg. */
10368 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10369 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10370 && (CUM)->fregno <= FP_ARG_MAX_REG \
10371 && TARGET_HARD_FLOAT)
10372
10373 /* Nonzero if we can use an AltiVec register to pass this arg. */
10374 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10375 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10376 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10377 && TARGET_ALTIVEC_ABI \
10378 && (NAMED))
10379
10380 /* Walk down the type tree of TYPE counting consecutive base elements.
10381 If *MODEP is VOIDmode, then set it to the first valid floating point
10382 or vector type. If a non-floating point or vector type is found, or
10383 if a floating point or vector type that doesn't match a non-VOIDmode
10384 *MODEP is found, then return -1, otherwise return the count in the
10385 sub-tree. */
10386
10387 static int
10388 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10389 {
10390 machine_mode mode;
10391 HOST_WIDE_INT size;
10392
10393 switch (TREE_CODE (type))
10394 {
10395 case REAL_TYPE:
10396 mode = TYPE_MODE (type);
10397 if (!SCALAR_FLOAT_MODE_P (mode))
10398 return -1;
10399
10400 if (*modep == VOIDmode)
10401 *modep = mode;
10402
10403 if (*modep == mode)
10404 return 1;
10405
10406 break;
10407
10408 case COMPLEX_TYPE:
10409 mode = TYPE_MODE (TREE_TYPE (type));
10410 if (!SCALAR_FLOAT_MODE_P (mode))
10411 return -1;
10412
10413 if (*modep == VOIDmode)
10414 *modep = mode;
10415
10416 if (*modep == mode)
10417 return 2;
10418
10419 break;
10420
10421 case VECTOR_TYPE:
10422 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10423 return -1;
10424
10425 /* Use V4SImode as representative of all 128-bit vector types. */
10426 size = int_size_in_bytes (type);
10427 switch (size)
10428 {
10429 case 16:
10430 mode = V4SImode;
10431 break;
10432 default:
10433 return -1;
10434 }
10435
10436 if (*modep == VOIDmode)
10437 *modep = mode;
10438
10439 /* Vector modes are considered to be opaque: two vectors are
10440 equivalent for the purposes of being homogeneous aggregates
10441 if they are the same size. */
10442 if (*modep == mode)
10443 return 1;
10444
10445 break;
10446
10447 case ARRAY_TYPE:
10448 {
10449 int count;
10450 tree index = TYPE_DOMAIN (type);
10451
10452 /* Can't handle incomplete types or sizes that are not
10453 fixed. */
10454 if (!COMPLETE_TYPE_P (type)
10455 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10456 return -1;
10457
10458 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10459 if (count == -1
10460 || !index
10461 || !TYPE_MAX_VALUE (index)
10462 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10463 || !TYPE_MIN_VALUE (index)
10464 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10465 || count < 0)
10466 return -1;
10467
10468 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10469 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10470
10471 /* There must be no padding. */
10472 if (wi::to_wide (TYPE_SIZE (type))
10473 != count * GET_MODE_BITSIZE (*modep))
10474 return -1;
10475
10476 return count;
10477 }
10478
10479 case RECORD_TYPE:
10480 {
10481 int count = 0;
10482 int sub_count;
10483 tree field;
10484
10485 /* Can't handle incomplete types or sizes that are not
10486 fixed. */
10487 if (!COMPLETE_TYPE_P (type)
10488 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10489 return -1;
10490
10491 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10492 {
10493 if (TREE_CODE (field) != FIELD_DECL)
10494 continue;
10495
10496 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10497 if (sub_count < 0)
10498 return -1;
10499 count += sub_count;
10500 }
10501
10502 /* There must be no padding. */
10503 if (wi::to_wide (TYPE_SIZE (type))
10504 != count * GET_MODE_BITSIZE (*modep))
10505 return -1;
10506
10507 return count;
10508 }
10509
10510 case UNION_TYPE:
10511 case QUAL_UNION_TYPE:
10512 {
10513 /* These aren't very interesting except in a degenerate case. */
10514 int count = 0;
10515 int sub_count;
10516 tree field;
10517
10518 /* Can't handle incomplete types or sizes that are not
10519 fixed. */
10520 if (!COMPLETE_TYPE_P (type)
10521 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10522 return -1;
10523
10524 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10525 {
10526 if (TREE_CODE (field) != FIELD_DECL)
10527 continue;
10528
10529 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10530 if (sub_count < 0)
10531 return -1;
10532 count = count > sub_count ? count : sub_count;
10533 }
10534
10535 /* There must be no padding. */
10536 if (wi::to_wide (TYPE_SIZE (type))
10537 != count * GET_MODE_BITSIZE (*modep))
10538 return -1;
10539
10540 return count;
10541 }
10542
10543 default:
10544 break;
10545 }
10546
10547 return -1;
10548 }
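
/* Some illustrative classifications under the rules above (editorial
   examples; "vector int" assumes -maltivec):

       struct { double a, b, c; }      3 x DFmode
       _Complex double                 2 x DFmode
       double d[4]                     4 x DFmode
       struct { vector int u, v; }     2 x V4SImode
       struct { double d; int i; }     -1 (int is not a candidate)

   A union counts as its largest candidate member, and any padding
   anywhere in the type makes the size check fail, returning -1.  */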
10549
10550 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10551 float or vector aggregate that shall be passed in FP/vector registers
10552 according to the ELFv2 ABI, return the homogeneous element mode in
10553 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10554
10555 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10556
10557 static bool
10558 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10559 machine_mode *elt_mode,
10560 int *n_elts)
10561 {
10562 /* Note that we do not accept complex types at the top level as
10563 homogeneous aggregates; these types are handled via the
10564 targetm.calls.split_complex_arg mechanism. Complex types
10565 can be elements of homogeneous aggregates, however. */
10566 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10567 && AGGREGATE_TYPE_P (type))
10568 {
10569 machine_mode field_mode = VOIDmode;
10570 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10571
10572 if (field_count > 0)
10573 {
10574 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10575 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10576
10577 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10578 up to AGGR_ARG_NUM_REG registers. */
10579 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10580 {
10581 if (elt_mode)
10582 *elt_mode = field_mode;
10583 if (n_elts)
10584 *n_elts = field_count;
10585 return true;
10586 }
10587 }
10588 }
10589
10590 if (elt_mode)
10591 *elt_mode = mode;
10592 if (n_elts)
10593 *n_elts = 1;
10594 return false;
10595 }
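
/* An illustrative sketch of the size limit (editorial; this assumes
   AGGR_ARG_NUM_REG is 8, as for ELFv2): a struct of eight doubles
   passes the check, since 8 elements * 8 bytes equals 8 registers *
   8 bytes, while a struct of nine doubles exceeds the limit and is
   treated as an ordinary aggregate instead.  */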
10596
10597 /* Return a nonzero value to indicate that the function value should be
10598 returned in memory, just as large structures always are. TYPE will be
10599 the data type of the value, and FNTYPE will be the type of the
10600 function doing the returning, or NULL for libcalls.
10601
10602 The AIX ABI for the RS/6000 specifies that all structures are
10603 returned in memory. The Darwin ABI does the same.
10604
10605 For the Darwin 64 Bit ABI, a function result can be returned in
10606 registers or in memory, depending on the size of the return data
10607 type. If it is returned in registers, the value occupies the same
10608 registers as it would if it were the first and only function
10609 argument. Otherwise, the function places its result in memory at
10610 the location pointed to by GPR3.
10611
10612 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10613 but a draft put them in memory, and GCC used to implement the draft
10614 instead of the final standard. Therefore, aix_struct_return
10615 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10616 compatibility can change DRAFT_V4_STRUCT_RET to override the
10617 default, and -m switches get the final word. See
10618 rs6000_option_override_internal for more details.
10619
10620 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10621 long double support is enabled. These values are returned in memory.
10622
10623 int_size_in_bytes returns -1 for variable size objects, which go in
10624 memory always. The cast to unsigned makes -1 > 8. */
10625
10626 static bool
10627 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10628 {
10629 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10630 if (TARGET_MACHO
10631 && rs6000_darwin64_abi
10632 && TREE_CODE (type) == RECORD_TYPE
10633 && int_size_in_bytes (type) > 0)
10634 {
10635 CUMULATIVE_ARGS valcum;
10636 rtx valret;
10637
10638 valcum.words = 0;
10639 valcum.fregno = FP_ARG_MIN_REG;
10640 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10641 /* Do a trial code generation as if this were going to be passed
10642 as an argument; if any part goes in memory, we return NULL. */
10643 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10644 if (valret)
10645 return false;
10646 /* Otherwise fall through to more conventional ABI rules. */
10647 }
10648
10649 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10650 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10651 NULL, NULL))
10652 return false;
10653
10654 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10655 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10656 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10657 return false;
10658
10659 if (AGGREGATE_TYPE_P (type)
10660 && (aix_struct_return
10661 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10662 return true;
10663
10664 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10665 modes only exist for GCC vector types if -maltivec. */
10666 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10667 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10668 return false;
10669
10670 /* Return synthetic vectors in memory. */
10671 if (TREE_CODE (type) == VECTOR_TYPE
10672 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10673 {
10674 static bool warned_for_return_big_vectors = false;
10675 if (!warned_for_return_big_vectors)
10676 {
10677 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10678 "non-standard ABI extension with no compatibility "
10679 "guarantee");
10680 warned_for_return_big_vectors = true;
10681 }
10682 return true;
10683 }
10684
10685 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10686 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10687 return true;
10688
10689 return false;
10690 }
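
/* Illustrative consequences of the rules above (editorial examples,
   assuming -mabi=elfv2 on a 64-bit target):

       struct { double x, y; }    homogeneous, returned in FPRs
       struct { char c[16]; }     within 16 bytes, returned in GPRs
       struct { char c[24]; }     returned in memory

   Under the AIX ABI all three go in memory, since aix_struct_return
   is set there.  */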
10691
10692 /* Specify whether values returned in registers should be at the most
10693 significant end of a register. We want aggregates returned by
10694 value to match the way aggregates are passed to functions. */
10695
10696 static bool
10697 rs6000_return_in_msb (const_tree valtype)
10698 {
10699 return (DEFAULT_ABI == ABI_ELFv2
10700 && BYTES_BIG_ENDIAN
10701 && AGGREGATE_TYPE_P (valtype)
10702 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10703 == PAD_UPWARD));
10704 }
10705
10706 #ifdef HAVE_AS_GNU_ATTRIBUTE
10707 /* Return TRUE if a call to function FNDECL may be one that
10708 potentially affects the function calling ABI of the object file. */
10709
10710 static bool
10711 call_ABI_of_interest (tree fndecl)
10712 {
10713 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10714 {
10715 struct cgraph_node *c_node;
10716
10717 /* Libcalls are always interesting. */
10718 if (fndecl == NULL_TREE)
10719 return true;
10720
10721 /* Any call to an external function is interesting. */
10722 if (DECL_EXTERNAL (fndecl))
10723 return true;
10724
10725 /* Interesting functions that we are emitting in this object file. */
10726 c_node = cgraph_node::get (fndecl);
10727 c_node = c_node->ultimate_alias_target ();
10728 return !c_node->only_called_directly_p ();
10729 }
10730 return false;
10731 }
10732 #endif
10733
10734 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10735 for a call to a function whose data type is FNTYPE.
10736 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10737
10738 For incoming args we set the number of arguments in the prototype large
10739 so we never return a PARALLEL. */
10740
10741 void
10742 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10743 rtx libname ATTRIBUTE_UNUSED, int incoming,
10744 int libcall, int n_named_args,
10745 tree fndecl,
10746 machine_mode return_mode ATTRIBUTE_UNUSED)
10747 {
10748 static CUMULATIVE_ARGS zero_cumulative;
10749
10750 *cum = zero_cumulative;
10751 cum->words = 0;
10752 cum->fregno = FP_ARG_MIN_REG;
10753 cum->vregno = ALTIVEC_ARG_MIN_REG;
10754 cum->prototype = (fntype && prototype_p (fntype));
10755 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10756 ? CALL_LIBCALL : CALL_NORMAL);
10757 cum->sysv_gregno = GP_ARG_MIN_REG;
10758 cum->stdarg = stdarg_p (fntype);
10759 cum->libcall = libcall;
10760
10761 cum->nargs_prototype = 0;
10762 if (incoming || cum->prototype)
10763 cum->nargs_prototype = n_named_args;
10764
10765 /* Check for a longcall attribute. */
10766 if ((!fntype && rs6000_default_long_calls)
10767 || (fntype
10768 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10769 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10770 cum->call_cookie |= CALL_LONG;
10771 else if (DEFAULT_ABI != ABI_DARWIN)
10772 {
10773 bool is_local = (fndecl
10774 && !DECL_EXTERNAL (fndecl)
10775 && !DECL_WEAK (fndecl)
10776 && (*targetm.binds_local_p) (fndecl));
10777 if (is_local)
10778 ;
10779 else if (flag_plt)
10780 {
10781 if (fntype
10782 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10783 cum->call_cookie |= CALL_LONG;
10784 }
10785 else
10786 {
10787 if (!(fntype
10788 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10789 cum->call_cookie |= CALL_LONG;
10790 }
10791 }
10792
10793 if (TARGET_DEBUG_ARG)
10794 {
10795 fprintf (stderr, "\ninit_cumulative_args:");
10796 if (fntype)
10797 {
10798 tree ret_type = TREE_TYPE (fntype);
10799 fprintf (stderr, " ret code = %s,",
10800 get_tree_code_name (TREE_CODE (ret_type)));
10801 }
10802
10803 if (cum->call_cookie & CALL_LONG)
10804 fprintf (stderr, " longcall,");
10805
10806 fprintf (stderr, " proto = %d, nargs = %d\n",
10807 cum->prototype, cum->nargs_prototype);
10808 }
10809
10810 #ifdef HAVE_AS_GNU_ATTRIBUTE
10811 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10812 {
10813 cum->escapes = call_ABI_of_interest (fndecl);
10814 if (cum->escapes)
10815 {
10816 tree return_type;
10817
10818 if (fntype)
10819 {
10820 return_type = TREE_TYPE (fntype);
10821 return_mode = TYPE_MODE (return_type);
10822 }
10823 else
10824 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10825
10826 if (return_type != NULL)
10827 {
10828 if (TREE_CODE (return_type) == RECORD_TYPE
10829 && TYPE_TRANSPARENT_AGGR (return_type))
10830 {
10831 return_type = TREE_TYPE (first_field (return_type));
10832 return_mode = TYPE_MODE (return_type);
10833 }
10834 if (AGGREGATE_TYPE_P (return_type)
10835 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10836 <= 8))
10837 rs6000_returns_struct = true;
10838 }
10839 if (SCALAR_FLOAT_MODE_P (return_mode))
10840 {
10841 rs6000_passes_float = true;
10842 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10843 && (FLOAT128_IBM_P (return_mode)
10844 || FLOAT128_IEEE_P (return_mode)
10845 || (return_type != NULL
10846 && (TYPE_MAIN_VARIANT (return_type)
10847 == long_double_type_node))))
10848 rs6000_passes_long_double = true;
10849
10850 /* Note if we pass or return an IEEE 128-bit type. We changed
10851 the mangling for these types, and we may need to make an alias
10852 with the old mangling. */
10853 if (FLOAT128_IEEE_P (return_mode))
10854 rs6000_passes_ieee128 = true;
10855 }
10856 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10857 rs6000_passes_vector = true;
10858 }
10859 }
10860 #endif
10861
10862 if (fntype
10863 && !TARGET_ALTIVEC
10864 && TARGET_ALTIVEC_ABI
10865 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10866 {
10867 error ("cannot return value in vector register because"
10868 " altivec instructions are disabled, use %qs"
10869 " to enable them", "-maltivec");
10870 }
10871 }
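
/* An illustrative example of the longcall handling above (editorial):

       void far_away (void) __attribute__ ((longcall));

   causes calls to far_away to get CALL_LONG in the call cookie;
   adding __attribute__ ((shortcall)) as well suppresses it again.  */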
10872 \f
10873 /* The mode the ABI uses for a word. This is not the same as word_mode
10874 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10875
10876 static scalar_int_mode
10877 rs6000_abi_word_mode (void)
10878 {
10879 return TARGET_32BIT ? SImode : DImode;
10880 }
10881
10882 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10883 static char *
10884 rs6000_offload_options (void)
10885 {
10886 if (TARGET_64BIT)
10887 return xstrdup ("-foffload-abi=lp64");
10888 else
10889 return xstrdup ("-foffload-abi=ilp32");
10890 }
10891
10892 /* On rs6000, function arguments are promoted, as are function return
10893 values. */
10894
10895 static machine_mode
10896 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10897 machine_mode mode,
10898 int *punsignedp ATTRIBUTE_UNUSED,
10899 const_tree, int)
10900 {
10901 PROMOTE_MODE (mode, *punsignedp, type);
10902
10903 return mode;
10904 }
10905
10906 /* Return true if TYPE must be passed on the stack and not in registers. */
10907
10908 static bool
10909 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10910 {
10911 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10912 return must_pass_in_stack_var_size (mode, type);
10913 else
10914 return must_pass_in_stack_var_size_or_pad (mode, type);
10915 }
10916
10917 static inline bool
10918 is_complex_IBM_long_double (machine_mode mode)
10919 {
10920 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10921 }
10922
10923 /* Whether ABI_V4 passes MODE args to a function in floating point
10924 registers. */
10925
10926 static bool
10927 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10928 {
10929 if (!TARGET_HARD_FLOAT)
10930 return false;
10931 if (mode == DFmode)
10932 return true;
10933 if (mode == SFmode && named)
10934 return true;
10935 /* ABI_V4 passes complex IBM long double in 8 gprs.
10936 Stupid, but we can't change the ABI now. */
10937 if (is_complex_IBM_long_double (mode))
10938 return false;
10939 if (FLOAT128_2REG_P (mode))
10940 return true;
10941 if (DECIMAL_FLOAT_MODE_P (mode))
10942 return true;
10943 return false;
10944 }
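
/* Illustrative examples (editorial, assuming ABI_V4 with hard float):
   a double always goes in an FPR; a float does so only when named, so
   a float passed through "..." to a varargs function takes the GPR
   path; and complex IBM long double always takes the GPR path, using
   eight GPRs.  */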
10945
10946 /* Implement TARGET_FUNCTION_ARG_PADDING.
10947
10948 For the AIX ABI structs are always stored left shifted in their
10949 argument slot. */
10950
10951 static pad_direction
10952 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10953 {
10954 #ifndef AGGREGATE_PADDING_FIXED
10955 #define AGGREGATE_PADDING_FIXED 0
10956 #endif
10957 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10958 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10959 #endif
10960
10961 if (!AGGREGATE_PADDING_FIXED)
10962 {
10963 /* GCC used to pass structures of the same size as integer types as
10964 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10965 i.e., structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10966 passed padded downward, except that -mstrict-align further
10967 muddied the water in that multi-component structures of 2 and 4
10968 bytes in size were passed padded upward.
10969
10970 The following arranges for best compatibility with previous
10971 versions of gcc, but removes the -mstrict-align dependency. */
10972 if (BYTES_BIG_ENDIAN)
10973 {
10974 HOST_WIDE_INT size = 0;
10975
10976 if (mode == BLKmode)
10977 {
10978 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10979 size = int_size_in_bytes (type);
10980 }
10981 else
10982 size = GET_MODE_SIZE (mode);
10983
10984 if (size == 1 || size == 2 || size == 4)
10985 return PAD_DOWNWARD;
10986 }
10987 return PAD_UPWARD;
10988 }
10989
10990 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10991 {
10992 if (type != 0 && AGGREGATE_TYPE_P (type))
10993 return PAD_UPWARD;
10994 }
10995
10996 /* Fall back to the default. */
10997 return default_function_arg_padding (mode, type);
10998 }
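
/* Illustrative examples (editorial, big-endian, with
   AGGREGATE_PADDING_FIXED left at 0): a struct of size 1, 2 or 4 is
   padded downward in its argument slot, matching the old
   pass-as-an-integer behavior, while e.g. a 3-byte or 5-byte struct
   is padded upward.  */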
10999
11000 /* If defined, a C expression that gives the alignment boundary, in bits,
11001 of an argument with the specified mode and type. If it is not defined,
11002 PARM_BOUNDARY is used for all arguments.
11003
11004 V.4 wants long longs and doubles to be double word aligned. Just
11005 testing the mode size is a boneheaded way to do this as it means
11006 that other types such as complex int are also double word aligned.
11007 However, we're stuck with this because changing the ABI might break
11008 existing library interfaces.
11009
11010 Quadword align Altivec/VSX vectors.
11011 Quadword align large synthetic vector types. */
11012
11013 static unsigned int
11014 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11015 {
11016 machine_mode elt_mode;
11017 int n_elts;
11018
11019 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11020
11021 if (DEFAULT_ABI == ABI_V4
11022 && (GET_MODE_SIZE (mode) == 8
11023 || (TARGET_HARD_FLOAT
11024 && !is_complex_IBM_long_double (mode)
11025 && FLOAT128_2REG_P (mode))))
11026 return 64;
11027 else if (FLOAT128_VECTOR_P (mode))
11028 return 128;
11029 else if (type && TREE_CODE (type) == VECTOR_TYPE
11030 && int_size_in_bytes (type) >= 8
11031 && int_size_in_bytes (type) < 16)
11032 return 64;
11033 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11034 || (type && TREE_CODE (type) == VECTOR_TYPE
11035 && int_size_in_bytes (type) >= 16))
11036 return 128;
11037
11038 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11039 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11040 -mcompat-align-parm is used. */
11041 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11042 || DEFAULT_ABI == ABI_ELFv2)
11043 && type && TYPE_ALIGN (type) > 64)
11044 {
11045 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11046 or homogeneous float/vector aggregates here. We already handled
11047 vector aggregates above, but still need to check for float here. */
11048 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11049 && !SCALAR_FLOAT_MODE_P (elt_mode));
11050
11051 /* We used to check for BLKmode instead of the above aggregate type
11052 check. Warn when this results in any difference in the ABI. */
11053 if (aggregate_p != (mode == BLKmode))
11054 {
11055 static bool warned;
11056 if (!warned && warn_psabi)
11057 {
11058 warned = true;
11059 inform (input_location,
11060 "the ABI of passing aggregates with %d-byte alignment"
11061 " has changed in GCC 5",
11062 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11063 }
11064 }
11065
11066 if (aggregate_p)
11067 return 128;
11068 }
11069
11070 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11071 implement the "aggregate type" check as a BLKmode check here; this
11072 means certain aggregate types are in fact not aligned. */
11073 if (TARGET_MACHO && rs6000_darwin64_abi
11074 && mode == BLKmode
11075 && type && TYPE_ALIGN (type) > 64)
11076 return 128;
11077
11078 return PARM_BOUNDARY;
11079 }
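
/* Illustrative examples of the boundary rules (editorial): under V.4,
   long long and double get 64-bit alignment; Altivec/VSX vectors and
   GCC vector types of 16 or more bytes get 128 bits; and under ELFv2
   a struct declared with __attribute__ ((aligned (32))) gets 128 bits
   in the parameter save area, possibly triggering the GCC 5 -Wpsabi
   note above.  */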
11080
11081 /* The offset in words to the start of the parameter save area. */
11082
11083 static unsigned int
11084 rs6000_parm_offset (void)
11085 {
11086 return (DEFAULT_ABI == ABI_V4 ? 2
11087 : DEFAULT_ABI == ABI_ELFv2 ? 4
11088 : 6);
11089 }
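
/* Editorial note, sketched from the ABI layouts: these offsets
   reflect the fixed frame header that precedes the parameter save
   area: 2 words on V.4 (back chain, LR), 4 on ELFv2 (back chain, CR,
   LR, TOC) and 6 on AIX/ELFv1 (the same four plus two reserved
   words).  */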
11090
11091 /* For a function parm of MODE and TYPE, return the starting word in
11092 the parameter area. NWORDS of the parameter area are already used. */
11093
11094 static unsigned int
11095 rs6000_parm_start (machine_mode mode, const_tree type,
11096 unsigned int nwords)
11097 {
11098 unsigned int align;
11099
11100 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11101 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11102 }
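
/* A worked example (editorial, assuming 64-bit ELFv2, where
   PARM_BOUNDARY is 64 and rs6000_parm_offset returns 4): for an
   argument whose boundary is 128 bits, ALIGN is 128/64 - 1 == 1;
   with NWORDS == 3 already used, the result is
   3 + (-(4 + 3) & 1) == 4, bumping the argument to an even
   doubleword in the save area.  */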
11103
11104 /* Compute the size (in words) of a function argument. */
11105
11106 static unsigned long
11107 rs6000_arg_size (machine_mode mode, const_tree type)
11108 {
11109 unsigned long size;
11110
11111 if (mode != BLKmode)
11112 size = GET_MODE_SIZE (mode);
11113 else
11114 size = int_size_in_bytes (type);
11115
11116 if (TARGET_32BIT)
11117 return (size + 3) >> 2;
11118 else
11119 return (size + 7) >> 3;
11120 }
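
/* For example (editorial): a BLKmode struct of 9 bytes occupies
   (9 + 7) >> 3 == 2 doublewords on a 64-bit target, but
   (9 + 3) >> 2 == 3 words on a 32-bit target.  */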
11121 \f
11122 /* Use this to flush pending int fields. */
11123
11124 static void
11125 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11126 HOST_WIDE_INT bitpos, int final)
11127 {
11128 unsigned int startbit, endbit;
11129 int intregs, intoffset;
11130
11131 /* Handle the situations where a float is taking up the first half
11132 of the GPR, and the other half is empty (typically due to
11133 alignment restrictions). We can detect this by an 8-byte-aligned
11134 int field, or by seeing that this is the final flush for this
11135 argument. Count the word and continue on. */
11136 if (cum->floats_in_gpr == 1
11137 && (cum->intoffset % 64 == 0
11138 || (cum->intoffset == -1 && final)))
11139 {
11140 cum->words++;
11141 cum->floats_in_gpr = 0;
11142 }
11143
11144 if (cum->intoffset == -1)
11145 return;
11146
11147 intoffset = cum->intoffset;
11148 cum->intoffset = -1;
11149 cum->floats_in_gpr = 0;
11150
11151 if (intoffset % BITS_PER_WORD != 0)
11152 {
11153 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11154 if (!int_mode_for_size (bits, 0).exists ())
11155 {
11156 /* We couldn't find an appropriate mode, which happens,
11157 e.g., in packed structs when there are 3 bytes to load.
11158 Move intoffset back to the beginning of the word in this
11159 case. */
11160 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11161 }
11162 }
11163
11164 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11165 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11166 intregs = (endbit - startbit) / BITS_PER_WORD;
11167 cum->words += intregs;
11168 /* words should be unsigned. */
11169 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11170 {
11171 int pad = (endbit/BITS_PER_WORD) - cum->words;
11172 cum->words += pad;
11173 }
11174 }
11175
11176 /* The darwin64 ABI calls for us to recurse down through structs,
11177 looking for elements passed in registers. Unfortunately, we have
11178 to track int register count here also because of misalignments
11179 in powerpc alignment mode. */
11180
11181 static void
11182 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11183 const_tree type,
11184 HOST_WIDE_INT startbitpos)
11185 {
11186 tree f;
11187
11188 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11189 if (TREE_CODE (f) == FIELD_DECL)
11190 {
11191 HOST_WIDE_INT bitpos = startbitpos;
11192 tree ftype = TREE_TYPE (f);
11193 machine_mode mode;
11194 if (ftype == error_mark_node)
11195 continue;
11196 mode = TYPE_MODE (ftype);
11197
11198 if (DECL_SIZE (f) != 0
11199 && tree_fits_uhwi_p (bit_position (f)))
11200 bitpos += int_bit_position (f);
11201
11202 /* ??? FIXME: else assume zero offset. */
11203
11204 if (TREE_CODE (ftype) == RECORD_TYPE)
11205 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11206 else if (USE_FP_FOR_ARG_P (cum, mode))
11207 {
11208 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11209 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11210 cum->fregno += n_fpregs;
11211 /* Single-precision floats present a special problem for
11212 us, because they are smaller than an 8-byte GPR, and so
11213 the structure-packing rules combined with the standard
11214 varargs behavior mean that we want to pack float/float
11215 and float/int combinations into a single register's
11216 space. This is complicated by the arg advance flushing,
11217 which works on arbitrarily large groups of int-type
11218 fields. */
11219 if (mode == SFmode)
11220 {
11221 if (cum->floats_in_gpr == 1)
11222 {
11223 /* Two floats in a word; count the word and reset
11224 the float count. */
11225 cum->words++;
11226 cum->floats_in_gpr = 0;
11227 }
11228 else if (bitpos % 64 == 0)
11229 {
11230 /* A float at the beginning of an 8-byte word;
11231 count it and put off adjusting cum->words until
11232 we see if an arg advance flush is going to do it
11233 for us. */
11234 cum->floats_in_gpr++;
11235 }
11236 else
11237 {
11238 /* The float is at the end of a word, preceded
11239 by integer fields, so the arg advance flush
11240 just above has already set cum->words and
11241 everything is taken care of. */
11242 }
11243 }
11244 else
11245 cum->words += n_fpregs;
11246 }
11247 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11248 {
11249 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11250 cum->vregno++;
11251 cum->words += 2;
11252 }
11253 else if (cum->intoffset == -1)
11254 cum->intoffset = bitpos;
11255 }
11256 }
11257
11258 /* Check for an item that needs to be considered specially under the Darwin
11259 64-bit ABI. These are record types where the mode is BLKmode or the
11260 structure is 8 bytes in size. */
11261 static int
11262 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11263 {
11264 return rs6000_darwin64_abi
11265 && ((mode == BLKmode
11266 && TREE_CODE (type) == RECORD_TYPE
11267 && int_size_in_bytes (type) > 0)
11268 || (type && TREE_CODE (type) == RECORD_TYPE
11269 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11270 }
11271
11272 /* Update the data in CUM to advance over an argument
11273 of mode MODE and data type TYPE.
11274 (TYPE is null for libcalls where that information may not be available.)
11275
11276 Note that for args passed by reference, function_arg will be called
11277 with MODE and TYPE set to that of the pointer to the arg, not the arg
11278 itself. */
11279
11280 static void
11281 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11282 const_tree type, bool named, int depth)
11283 {
11284 machine_mode elt_mode;
11285 int n_elts;
11286
11287 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11288
11289 /* Only tick off an argument if we're not recursing. */
11290 if (depth == 0)
11291 cum->nargs_prototype--;
11292
11293 #ifdef HAVE_AS_GNU_ATTRIBUTE
11294 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11295 && cum->escapes)
11296 {
11297 if (SCALAR_FLOAT_MODE_P (mode))
11298 {
11299 rs6000_passes_float = true;
11300 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11301 && (FLOAT128_IBM_P (mode)
11302 || FLOAT128_IEEE_P (mode)
11303 || (type != NULL
11304 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11305 rs6000_passes_long_double = true;
11306
11307 /* Note if we pass or return an IEEE 128-bit type. We changed the
11308 mangling for these types, and we may need to make an alias with
11309 the old mangling. */
11310 if (FLOAT128_IEEE_P (mode))
11311 rs6000_passes_ieee128 = true;
11312 }
11313 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11314 rs6000_passes_vector = true;
11315 }
11316 #endif
11317
11318 if (TARGET_ALTIVEC_ABI
11319 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11320 || (type && TREE_CODE (type) == VECTOR_TYPE
11321 && int_size_in_bytes (type) == 16)))
11322 {
11323 bool stack = false;
11324
11325 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11326 {
11327 cum->vregno += n_elts;
11328
11329 if (!TARGET_ALTIVEC)
11330 error ("cannot pass argument in vector register because"
11331 " altivec instructions are disabled, use %qs"
11332 " to enable them", "-maltivec");
11333
11334 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11335 even if it is going to be passed in a vector register.
11336 Darwin does the same for variable-argument functions. */
11337 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11338 && TARGET_64BIT)
11339 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11340 stack = true;
11341 }
11342 else
11343 stack = true;
11344
11345 if (stack)
11346 {
11347 int align;
11348
11349 /* Vector parameters must be 16-byte aligned. In 32-bit
11350 mode this means we need to take into account the offset
11351 to the parameter save area. In 64-bit mode, they just
11352 have to start on an even word, since the parameter save
11353 area is 16-byte aligned. */
11354 if (TARGET_32BIT)
11355 align = -(rs6000_parm_offset () + cum->words) & 3;
11356 else
11357 align = cum->words & 1;
11358 cum->words += align + rs6000_arg_size (mode, type);
11359
11360 if (TARGET_DEBUG_ARG)
11361 {
11362 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11363 cum->words, align);
11364 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11365 cum->nargs_prototype, cum->prototype,
11366 GET_MODE_NAME (mode));
11367 }
11368 }
11369 }
11370 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11371 {
11372 int size = int_size_in_bytes (type);
11373 /* Variable sized types have size == -1 and are
11374 treated as if consisting entirely of ints.
11375 Pad to 16 byte boundary if needed. */
11376 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11377 && (cum->words % 2) != 0)
11378 cum->words++;
11379 /* For varargs, we can just go up by the size of the struct. */
11380 if (!named)
11381 cum->words += (size + 7) / 8;
11382 else
11383 {
11384 /* It is tempting to say int register count just goes up by
11385 sizeof(type)/8, but this is wrong in a case such as
11386 { int; double; int; } [powerpc alignment]. We have to
11387 grovel through the fields for these too. */
11388 cum->intoffset = 0;
11389 cum->floats_in_gpr = 0;
11390 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11391 rs6000_darwin64_record_arg_advance_flush (cum,
11392 size * BITS_PER_UNIT, 1);
11393 }
11394 if (TARGET_DEBUG_ARG)
11395 {
11396 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11397 cum->words, TYPE_ALIGN (type), size);
11398 fprintf (stderr,
11399 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11400 cum->nargs_prototype, cum->prototype,
11401 GET_MODE_NAME (mode));
11402 }
11403 }
11404 else if (DEFAULT_ABI == ABI_V4)
11405 {
11406 if (abi_v4_pass_in_fpr (mode, named))
11407 {
11408 /* _Decimal128 must use an even/odd register pair. This assumes
11409 that the register number is odd when fregno is odd. */
11410 if (mode == TDmode && (cum->fregno % 2) == 1)
11411 cum->fregno++;
11412
11413 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11414 <= FP_ARG_V4_MAX_REG)
11415 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11416 else
11417 {
11418 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11419 if (mode == DFmode || FLOAT128_IBM_P (mode)
11420 || mode == DDmode || mode == TDmode)
11421 cum->words += cum->words & 1;
11422 cum->words += rs6000_arg_size (mode, type);
11423 }
11424 }
11425 else
11426 {
11427 int n_words = rs6000_arg_size (mode, type);
11428 int gregno = cum->sysv_gregno;
11429
11430 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11431 So is any other 2-word item such as complex int, due to a
11432 historical mistake. */
11433 if (n_words == 2)
11434 gregno += (1 - gregno) & 1;
11435
11436 /* Multi-reg args are not split between registers and stack. */
11437 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11438 {
11439 /* Long long is aligned on the stack. So are other 2-word
11440 items such as complex int, due to a historical mistake. */
11441 if (n_words == 2)
11442 cum->words += cum->words & 1;
11443 cum->words += n_words;
11444 }
11445
11446 /* Note: we keep accumulating gregno even after we have started
11447 spilling to the stack; this is how expand_builtin_saveregs
11448 learns that spilling has started. */
11449 cum->sysv_gregno = gregno + n_words;
11450 }
11451
11452 if (TARGET_DEBUG_ARG)
11453 {
11454 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11455 cum->words, cum->fregno);
11456 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11457 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11458 fprintf (stderr, "mode = %4s, named = %d\n",
11459 GET_MODE_NAME (mode), named);
11460 }
11461 }
11462 else
11463 {
11464 int n_words = rs6000_arg_size (mode, type);
11465 int start_words = cum->words;
11466 int align_words = rs6000_parm_start (mode, type, start_words);
11467
11468 cum->words = align_words + n_words;
11469
11470 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11471 {
11472 /* _Decimal128 must be passed in an even/odd float register pair.
11473 This assumes that the register number is odd when fregno is
11474 odd. */
11475 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11476 cum->fregno++;
11477 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11478 }
11479
11480 if (TARGET_DEBUG_ARG)
11481 {
11482 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11483 cum->words, cum->fregno);
11484 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11485 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11486 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11487 named, align_words - start_words, depth);
11488 }
11489 }
11490 }
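
/* A worked example of the V.4 register pairing above (editorial):
   for f (int a, long long b), "a" occupies r3 and advancing leaves
   sysv_gregno at 4; "b" has n_words == 2, so
   "gregno += (1 - gregno) & 1" bumps gregno from 4 to 5 and "b"
   takes the (r5,r6) pair, leaving r4 unused.  */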
11491
11492 static void
11493 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11494 const_tree type, bool named)
11495 {
11496 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11497 0);
11498 }
11499
11500 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11501 structure between cum->intoffset and bitpos to integer registers. */
11502
11503 static void
11504 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11505 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11506 {
11507 machine_mode mode;
11508 unsigned int regno;
11509 unsigned int startbit, endbit;
11510 int this_regno, intregs, intoffset;
11511 rtx reg;
11512
11513 if (cum->intoffset == -1)
11514 return;
11515
11516 intoffset = cum->intoffset;
11517 cum->intoffset = -1;
11518
11519 /* If this is the trailing part of a word, try to only load that
11520 much into the register. Otherwise load the whole register. Note
11521 that in the latter case we may pick up unwanted bits. It's not a
11522 problem at the moment, but we may wish to revisit this. */
11523
11524 if (intoffset % BITS_PER_WORD != 0)
11525 {
11526 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11527 if (!int_mode_for_size (bits, 0).exists (&mode))
11528 {
11529 /* We couldn't find an appropriate mode, which happens,
11530 e.g., in packed structs when there are 3 bytes to load.
11531 Move intoffset back to the beginning of the word in this
11532 case. */
11533 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11534 mode = word_mode;
11535 }
11536 }
11537 else
11538 mode = word_mode;
11539
11540 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11541 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11542 intregs = (endbit - startbit) / BITS_PER_WORD;
11543 this_regno = cum->words + intoffset / BITS_PER_WORD;
11544
11545 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11546 cum->use_stack = 1;
11547
11548 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11549 if (intregs <= 0)
11550 return;
11551
11552 intoffset /= BITS_PER_UNIT;
11553 do
11554 {
11555 regno = GP_ARG_MIN_REG + this_regno;
11556 reg = gen_rtx_REG (mode, regno);
11557 rvec[(*k)++] =
11558 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11559
11560 this_regno += 1;
11561 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11562 mode = word_mode;
11563 intregs -= 1;
11564 }
11565 while (intregs > 0);
11566 }
11567
11568 /* Recursive workhorse for the following. */
11569
11570 static void
11571 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11572 HOST_WIDE_INT startbitpos, rtx rvec[],
11573 int *k)
11574 {
11575 tree f;
11576
11577 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11578 if (TREE_CODE (f) == FIELD_DECL)
11579 {
11580 HOST_WIDE_INT bitpos = startbitpos;
11581 tree ftype = TREE_TYPE (f);
11582 machine_mode mode;
11583 if (ftype == error_mark_node)
11584 continue;
11585 mode = TYPE_MODE (ftype);
11586
11587 if (DECL_SIZE (f) != 0
11588 && tree_fits_uhwi_p (bit_position (f)))
11589 bitpos += int_bit_position (f);
11590
11591 /* ??? FIXME: else assume zero offset. */
11592
11593 if (TREE_CODE (ftype) == RECORD_TYPE)
11594 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11595 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11596 {
11597 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11598 #if 0
11599 switch (mode)
11600 {
11601 case E_SCmode: mode = SFmode; break;
11602 case E_DCmode: mode = DFmode; break;
11603 case E_TCmode: mode = TFmode; break;
11604 default: break;
11605 }
11606 #endif
11607 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11608 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11609 {
11610 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11611 && (mode == TFmode || mode == TDmode));
11612 /* Long double or _Decimal128 split over regs and memory. */
11613 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11614 cum->use_stack = 1;
11615 }
11616 rvec[(*k)++]
11617 = gen_rtx_EXPR_LIST (VOIDmode,
11618 gen_rtx_REG (mode, cum->fregno++),
11619 GEN_INT (bitpos / BITS_PER_UNIT));
11620 if (FLOAT128_2REG_P (mode))
11621 cum->fregno++;
11622 }
11623 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11624 {
11625 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11626 rvec[(*k)++]
11627 = gen_rtx_EXPR_LIST (VOIDmode,
11628 gen_rtx_REG (mode, cum->vregno++),
11629 GEN_INT (bitpos / BITS_PER_UNIT));
11630 }
11631 else if (cum->intoffset == -1)
11632 cum->intoffset = bitpos;
11633 }
11634 }
11635
11636 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11637 the register(s) to be used for each field and subfield of a struct
11638 being passed by value, along with the offset of where the
11639 register's value may be found in the block. FP fields go in FP
11640 register, vector fields go in vector registers, and everything
11641 else goes in int registers, packed as in memory.
11642
11643 This code is also used for function return values. RETVAL indicates
11644 whether this is the case.
11645
11646 Much of this is taken from the SPARC V9 port, which has a similar
11647 calling convention. */
11648
11649 static rtx
11650 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11651 bool named, bool retval)
11652 {
11653 rtx rvec[FIRST_PSEUDO_REGISTER];
11654 int k = 1, kbase = 1;
11655 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11656 /* This is a copy; modifications are not visible to our caller. */
11657 CUMULATIVE_ARGS copy_cum = *orig_cum;
11658 CUMULATIVE_ARGS *cum = &copy_cum;
11659
11660 /* Pad to 16 byte boundary if needed. */
11661 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11662 && (cum->words % 2) != 0)
11663 cum->words++;
11664
11665 cum->intoffset = 0;
11666 cum->use_stack = 0;
11667 cum->named = named;
11668
11669 /* Put entries into rvec[] for individual FP and vector fields, and
11670 for the chunks of memory that go in int regs. Note we start at
11671 element 1; 0 is reserved for an indication of using memory, and
11672 may or may not be filled in below. */
11673 rs6000_darwin64_record_arg_recurse (cum, type, /* startbitpos = */ 0, rvec, &k);
11674 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11675
11676 /* If any part of the struct went on the stack put all of it there.
11677 This hack is because the generic code for
11678 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11679 parts of the struct are not at the beginning. */
11680 if (cum->use_stack)
11681 {
11682 if (retval)
11683 return NULL_RTX; /* doesn't go in registers at all */
11684 kbase = 0;
11685 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11686 }
11687 if (k > 1 || cum->use_stack)
11688 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11689 else
11690 return NULL_RTX;
11691 }
11692
11693 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11694
11695 static rtx
11696 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11697 int align_words)
11698 {
11699 int n_units;
11700 int i, k;
11701 rtx rvec[GP_ARG_NUM_REG + 1];
11702
11703 if (align_words >= GP_ARG_NUM_REG)
11704 return NULL_RTX;
11705
11706 n_units = rs6000_arg_size (mode, type);
11707
11708 /* Optimize the simple case where the arg fits in one gpr, except in
11709 the case of BLKmode due to assign_parms assuming that registers are
11710 BITS_PER_WORD wide. */
11711 if (n_units == 0
11712 || (n_units == 1 && mode != BLKmode))
11713 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11714
11715 k = 0;
11716 if (align_words + n_units > GP_ARG_NUM_REG)
11717 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11718 using a magic NULL_RTX component.
11719 This is not strictly correct. Only some of the arg belongs in
11720 memory, not all of it. However, the normal scheme using
11721 function_arg_partial_nregs can result in unusual subregs, eg.
11722 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11723 store the whole arg to memory is often more efficient than code
11724 to store pieces, and we know that space is available in the right
11725 place for the whole arg. */
11726 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11727
11728 i = 0;
11729 do
11730 {
11731 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11732 rtx off = GEN_INT (i++ * 4);
11733 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11734 }
11735 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11736
11737 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11738 }
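
/* An illustrative sketch (editorial): with -m32 -mpowerpc64, an
   8-byte DFmode argument starting at ALIGN_WORDS is described as a
   PARALLEL of two SImode pieces at byte offsets 0 and 4, so
   assign_parms never has to invent a (subreg:SI (reg:DF ...) 4) of
   the 64-bit register.  */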
11739
11740 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11741 but must also be copied into the parameter save area starting at
11742 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11743 to the GPRs and/or memory. Return the number of elements used. */
11744
11745 static int
11746 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11747 int align_words, rtx *rvec)
11748 {
11749 int k = 0;
11750
11751 if (align_words < GP_ARG_NUM_REG)
11752 {
11753 int n_words = rs6000_arg_size (mode, type);
11754
11755 if (align_words + n_words > GP_ARG_NUM_REG
11756 || mode == BLKmode
11757 || (TARGET_32BIT && TARGET_POWERPC64))
11758 {
11759 /* If this is partially on the stack, then we only
11760 include the portion actually in registers here. */
11761 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11762 int i = 0;
11763
11764 if (align_words + n_words > GP_ARG_NUM_REG)
11765 {
11766 /* Not all of the arg fits in gprs. Say that it goes in memory
11767 too, using a magic NULL_RTX component. Also see comment in
11768 rs6000_mixed_function_arg for why the normal
11769 function_arg_partial_nregs scheme doesn't work in this case. */
11770 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11771 }
11772
11773 do
11774 {
11775 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11776 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11777 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11778 }
11779 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11780 }
11781 else
11782 {
11783 /* The whole arg fits in gprs. */
11784 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11785 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11786 }
11787 }
11788 else
11789 {
11790 /* It's entirely in memory. */
11791 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11792 }
11793
11794 return k;
11795 }
11796
11797 /* RVEC is a vector of K components of an argument of mode MODE.
11798 Construct the final function_arg return value from it. */
11799
11800 static rtx
11801 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11802 {
11803 gcc_assert (k >= 1);
11804
11805 /* Avoid returning a PARALLEL in the trivial cases. */
11806 if (k == 1)
11807 {
11808 if (XEXP (rvec[0], 0) == NULL_RTX)
11809 return NULL_RTX;
11810
11811 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11812 return XEXP (rvec[0], 0);
11813 }
11814
11815 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11816 }
11817
11818 /* Determine where to put an argument to a function.
11819 Value is zero to push the argument on the stack,
11820 or a hard register in which to store the argument.
11821
11822 MODE is the argument's machine mode.
11823 TYPE is the data type of the argument (as a tree).
11824 This is null for libcalls where that information may
11825 not be available.
11826 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11827 the preceding args and about the function being called. It is
11828 not modified in this routine.
11829 NAMED is nonzero if this argument is a named parameter
11830 (otherwise it is an extra parameter matching an ellipsis).
11831
11832 On RS/6000 the first eight words of non-FP are normally in registers
11833 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11834 Under V.4, the first 8 FP args are in registers.
11835
11836 If this is floating-point and no prototype is specified, we use
11837 both an FP and integer register (or possibly FP reg and stack). Library
11838 functions (when CALL_LIBCALL is set) always have the proper types for args,
11839 so we can pass the FP value just in one register. emit_library_call
11840 doesn't support PARALLEL anyway.
11841
11842 Note that for args passed by reference, function_arg will be called
11843 with MODE and TYPE set to that of the pointer to the arg, not the arg
11844 itself. */
11845
11846 static rtx
11847 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11848 const_tree type, bool named)
11849 {
11850 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11851 enum rs6000_abi abi = DEFAULT_ABI;
11852 machine_mode elt_mode;
11853 int n_elts;
11854
11855 /* Return a marker to indicate whether the bit that V.4 uses to say
11856 FP args were passed in registers needs to be set or cleared in CR1.
11857 Assume that we don't need the marker for software floating point
11858 or compiler-generated library calls. */
11859 if (mode == VOIDmode)
11860 {
11861 if (abi == ABI_V4
11862 && (cum->call_cookie & CALL_LIBCALL) == 0
11863 && (cum->stdarg
11864 || (cum->nargs_prototype < 0
11865 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11866 && TARGET_HARD_FLOAT)
11867 return GEN_INT (cum->call_cookie
11868 | ((cum->fregno == FP_ARG_MIN_REG)
11869 ? CALL_V4_SET_FP_ARGS
11870 : CALL_V4_CLEAR_FP_ARGS));
11871
11872 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11873 }
11874
11875 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11876
11877 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11878 {
11879 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /* retval = */ false);
11880 if (rslt != NULL_RTX)
11881 return rslt;
11882 /* Else fall through to usual handling. */
11883 }
11884
11885 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11886 {
11887 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11888 rtx r, off;
11889 int i, k = 0;
11890
11891 /* Do we also need to pass this argument in the parameter save area?
11892 Library support functions for IEEE 128-bit are assumed to not need the
11893 value passed both in GPRs and in vector registers. */
11894 if (TARGET_64BIT && !cum->prototype
11895 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11896 {
11897 int align_words = ROUND_UP (cum->words, 2);
11898 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11899 }
11900
11901 /* Describe where this argument goes in the vector registers. */
11902 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11903 {
11904 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11905 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11906 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11907 }
11908
11909 return rs6000_finish_function_arg (mode, rvec, k);
11910 }
11911 else if (TARGET_ALTIVEC_ABI
11912 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11913 || (type && TREE_CODE (type) == VECTOR_TYPE
11914 && int_size_in_bytes (type) == 16)))
11915 {
11916 if (named || abi == ABI_V4)
11917 return NULL_RTX;
11918 else
11919 {
11920 /* Vector parameters to varargs functions under AIX or Darwin
11921 get passed in memory and possibly also in GPRs. */
11922 int align, align_words, n_words;
11923 machine_mode part_mode;
11924
11925 /* Vector parameters must be 16-byte aligned. In 32-bit
11926 mode this means we need to take into account the offset
11927 to the parameter save area. In 64-bit mode, they just
11928 have to start on an even word, since the parameter save
11929 area is 16-byte aligned. */
11930 if (TARGET_32BIT)
11931 align = -(rs6000_parm_offset () + cum->words) & 3;
11932 else
11933 align = cum->words & 1;
11934 align_words = cum->words + align;
11935
11936 /* Out of registers? Memory, then. */
11937 if (align_words >= GP_ARG_NUM_REG)
11938 return NULL_RTX;
11939
11940 if (TARGET_32BIT && TARGET_POWERPC64)
11941 return rs6000_mixed_function_arg (mode, type, align_words);
11942
11943 /* The vector value goes in GPRs. Only the part of the
11944 value in GPRs is reported here. */
11945 part_mode = mode;
11946 n_words = rs6000_arg_size (mode, type);
11947 if (align_words + n_words > GP_ARG_NUM_REG)
11948 /* Fortunately, there are only two possibilities, the value
11949 is either wholly in GPRs or half in GPRs and half not. */
11950 part_mode = DImode;
11951
11952 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11953 }
11954 }
11955
11956 else if (abi == ABI_V4)
11957 {
11958 if (abi_v4_pass_in_fpr (mode, named))
11959 {
11960 /* _Decimal128 must use an even/odd register pair. This assumes
11961 that the register number is odd when fregno is odd. */
11962 if (mode == TDmode && (cum->fregno % 2) == 1)
11963 cum->fregno++;
11964
11965 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11966 <= FP_ARG_V4_MAX_REG)
11967 return gen_rtx_REG (mode, cum->fregno);
11968 else
11969 return NULL_RTX;
11970 }
11971 else
11972 {
11973 int n_words = rs6000_arg_size (mode, type);
11974 int gregno = cum->sysv_gregno;
11975
11976 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11977 So is any other 2-word item such as complex int, due to a
11978 historical mistake. */
11979 if (n_words == 2)
11980 gregno += (1 - gregno) & 1;
11981
11982 /* Multi-reg args are not split between registers and stack. */
11983 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11984 return NULL_RTX;
11985
11986 if (TARGET_32BIT && TARGET_POWERPC64)
11987 return rs6000_mixed_function_arg (mode, type,
11988 gregno - GP_ARG_MIN_REG);
11989 return gen_rtx_REG (mode, gregno);
11990 }
11991 }
11992 else
11993 {
11994 int align_words = rs6000_parm_start (mode, type, cum->words);
11995
11996 /* _Decimal128 must be passed in an even/odd float register pair.
11997 This assumes that the register number is odd when fregno is odd. */
11998 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11999 cum->fregno++;
12000
12001 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12002 && !(TARGET_AIX && !TARGET_ELF
12003 && type != NULL && AGGREGATE_TYPE_P (type)))
12004 {
12005 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12006 rtx r, off;
12007 int i, k = 0;
12008 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12009 int fpr_words;
12010
12011 /* Do we also need to pass this argument in the parameter
12012 save area? */
12013 if (type && (cum->nargs_prototype <= 0
12014 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12015 && TARGET_XL_COMPAT
12016 && align_words >= GP_ARG_NUM_REG)))
12017 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12018
12019 /* Describe where this argument goes in the fprs. */
12020 for (i = 0; i < n_elts
12021 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12022 {
12023 /* Check if the argument is split over registers and memory.
12024 This can only ever happen for long double or _Decimal128;
12025 complex types are handled via split_complex_arg. */
12026 machine_mode fmode = elt_mode;
12027 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12028 {
12029 gcc_assert (FLOAT128_2REG_P (fmode));
12030 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12031 }
12032
12033 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12034 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12035 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12036 }
12037
12038 /* If there were not enough FPRs to hold the argument, the rest
12039 usually goes into memory. However, if the current position
12040 is still within the register parameter area, a portion may
12041 actually have to go into GPRs.
12042
12043 Note that it may happen that the portion of the argument
12044 passed in the first "half" of the first GPR was already
12045 passed in the last FPR as well.
12046
12047 For unnamed arguments, we already set up GPRs to cover the
12048 whole argument in rs6000_psave_function_arg, so there is
12049 nothing further to do at this point. */
12050 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12051 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12052 && cum->nargs_prototype > 0)
12053 {
12054 static bool warned;
12055
12056 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12057 int n_words = rs6000_arg_size (mode, type);
12058
12059 align_words += fpr_words;
12060 n_words -= fpr_words;
12061
12062 do
12063 {
12064 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12065 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12066 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12067 }
12068 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12069
12070 if (!warned && warn_psabi)
12071 {
12072 warned = true;
12073 inform (input_location,
12074 "the ABI of passing homogeneous float aggregates"
12075 " has changed in GCC 5");
12076 }
12077 }
12078
12079 return rs6000_finish_function_arg (mode, rvec, k);
12080 }
12081 else if (align_words < GP_ARG_NUM_REG)
12082 {
12083 if (TARGET_32BIT && TARGET_POWERPC64)
12084 return rs6000_mixed_function_arg (mode, type, align_words);
12085
12086 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12087 }
12088 else
12089 return NULL_RTX;
12090 }
12091 }
12092 \f
12093 /* For an arg passed partly in registers and partly in memory, this is
12094 the number of bytes passed in registers. For args passed entirely in
12095 registers or entirely in memory, zero. When an arg is described by a
12096 PARALLEL, perhaps using more than one register type, this function
12097 returns the number of bytes used by the first element of the PARALLEL. */
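/* Example (64-bit AIX or ELFv2): a 16-byte struct whose first
   doubleword lands in r10, the last argument GPR, has 8 bytes in r10
   and 8 in memory, so 8 is returned; the same struct starting in r9
   fits entirely in (r9,r10), and 0 is returned.  */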
12098
12099 static int
12100 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12101 tree type, bool named)
12102 {
12103 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12104 bool passed_in_gprs = true;
12105 int ret = 0;
12106 int align_words;
12107 machine_mode elt_mode;
12108 int n_elts;
12109
12110 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12111
12112 if (DEFAULT_ABI == ABI_V4)
12113 return 0;
12114
12115 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12116 {
12117 /* If we are passing this arg in the fixed parameter save area (gprs or
12118 memory) as well as VRs, we do not use the partial bytes mechanism;
12119 instead, rs6000_function_arg will return a PARALLEL including a memory
12120 element as necessary. Library support functions for IEEE 128-bit are
12121 assumed to not need the value passed both in GPRs and in vector
12122 registers. */
12123 if (TARGET_64BIT && !cum->prototype
12124 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12125 return 0;
12126
12127 /* Otherwise, we pass in VRs only. Check for partial copies. */
12128 passed_in_gprs = false;
12129 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12130 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12131 }
12132
12133 /* In this complicated case we just disable the partial_nregs code. */
12134 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12135 return 0;
12136
12137 align_words = rs6000_parm_start (mode, type, cum->words);
12138
12139 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12140 && !(TARGET_AIX && !TARGET_ELF
12141 && type != NULL && AGGREGATE_TYPE_P (type)))
12142 {
12143 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12144
12145 /* If we are passing this arg in the fixed parameter save area
12146 (gprs or memory) as well as FPRs, we do not use the partial
12147 bytes mechanism; instead, rs6000_function_arg will return a
12148 PARALLEL including a memory element as necessary. */
12149 if (type
12150 && (cum->nargs_prototype <= 0
12151 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12152 && TARGET_XL_COMPAT
12153 && align_words >= GP_ARG_NUM_REG)))
12154 return 0;
12155
12156 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12157 passed_in_gprs = false;
12158 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12159 {
12160 /* Compute number of bytes / words passed in FPRs. If there
12161 is still space available in the register parameter area
12162 *after* that amount, a part of the argument will be passed
12163 in GPRs. In that case, the total amount passed in any
12164 registers is equal to the amount that would have been passed
12165 in GPRs if everything were passed there, so we fall back to
12166 the GPR code below to compute the appropriate value. */
12167 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12168 * MIN (8, GET_MODE_SIZE (elt_mode)));
12169 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12170
12171 if (align_words + fpr_words < GP_ARG_NUM_REG)
12172 passed_in_gprs = true;
12173 else
12174 ret = fpr;
12175 }
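/* E.g. (64-bit): a homogeneous aggregate of four doubles starting at
   f12 has only (f12,f13) left, giving fpr = 2 * 8 = 16 and
   fpr_words = 2; if align_words + 2 is still below GP_ARG_NUM_REG,
   the GPR code below computes the result instead, otherwise 16 is
   returned.  */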
12176 }
12177
12178 if (passed_in_gprs
12179 && align_words < GP_ARG_NUM_REG
12180 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12181 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12182
12183 if (ret != 0 && TARGET_DEBUG_ARG)
12184 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12185
12186 return ret;
12187 }
12188 \f
12189 /* A C expression that indicates when an argument must be passed by
12190 reference. If nonzero for an argument, a copy of that argument is
12191 made in memory and a pointer to the argument is passed instead of
12192 the argument itself. The pointer is passed in whatever way is
12193 appropriate for passing a pointer to that type.
12194
12195 Under V.4, aggregates and long double are passed by reference.
12196
12197 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12198 reference unless the AltiVec vector extension ABI is in force.
12199
12200 As an extension to all ABIs, variable sized types are passed by
12201 reference. */
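/* For illustration: under the V.4 ABI,

     struct point { int x, y; };
     void f (struct point p);

   f receives a pointer to a caller-made copy of P in r3 rather than
   the struct itself in (r3,r4).  */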
12202
12203 static bool
12204 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12205 machine_mode mode, const_tree type,
12206 bool named ATTRIBUTE_UNUSED)
12207 {
12208 if (!type)
12209 return 0;
12210
12211 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12212 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12213 {
12214 if (TARGET_DEBUG_ARG)
12215 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12216 return 1;
12217 }
12218
12219 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12220 {
12221 if (TARGET_DEBUG_ARG)
12222 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12223 return 1;
12224 }
12225
12226 if (int_size_in_bytes (type) < 0)
12227 {
12228 if (TARGET_DEBUG_ARG)
12229 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12230 return 1;
12231 }
12232
12233 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12234 modes only exist for GCC vector types if -maltivec. */
12235 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12236 {
12237 if (TARGET_DEBUG_ARG)
12238 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12239 return 1;
12240 }
12241
12242 /* Pass synthetic vectors in memory. */
12243 if (TREE_CODE (type) == VECTOR_TYPE
12244 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12245 {
12246 static bool warned_for_pass_big_vectors = false;
12247 if (TARGET_DEBUG_ARG)
12248 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12249 if (!warned_for_pass_big_vectors)
12250 {
12251 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12252 "non-standard ABI extension with no compatibility "
12253 "guarantee");
12254 warned_for_pass_big_vectors = true;
12255 }
12256 return 1;
12257 }
12258
12259 return 0;
12260 }
12261
12262 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12263 already processed. Return true if the parameter must be passed
12264 (fully or partially) on the stack. */
12265
12266 static bool
12267 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12268 {
12269 machine_mode mode;
12270 int unsignedp;
12271 rtx entry_parm;
12272
12273 /* Catch errors. */
12274 if (type == NULL || type == error_mark_node)
12275 return true;
12276
12277 /* Handle types with no storage requirement. */
12278 if (TYPE_MODE (type) == VOIDmode)
12279 return false;
12280
12281 /* Handle complex types. */
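/* Each recursive call advances ARGS_SO_FAR, so the element type is
   deliberately checked twice below: once for the real part and once
   for the imaginary part.  */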
12282 if (TREE_CODE (type) == COMPLEX_TYPE)
12283 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12284 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12285
12286 /* Handle transparent aggregates. */
12287 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12288 && TYPE_TRANSPARENT_AGGR (type))
12289 type = TREE_TYPE (first_field (type));
12290
12291 /* See if this arg was passed by invisible reference. */
12292 if (pass_by_reference (get_cumulative_args (args_so_far),
12293 TYPE_MODE (type), type, true))
12294 type = build_pointer_type (type);
12295
12296 /* Find mode as it is passed by the ABI. */
12297 unsignedp = TYPE_UNSIGNED (type);
12298 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12299
12300 /* If we must pass in stack, we need a stack. */
12301 if (rs6000_must_pass_in_stack (mode, type))
12302 return true;
12303
12304 /* If there is no incoming register, we need a stack. */
12305 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12306 if (entry_parm == NULL)
12307 return true;
12308
12309 /* Likewise if we need to pass both in registers and on the stack. */
12310 if (GET_CODE (entry_parm) == PARALLEL
12311 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12312 return true;
12313
12314 /* Also true if we're partially in registers and partially not. */
12315 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12316 return true;
12317
12318 /* Update info on where next arg arrives in registers. */
12319 rs6000_function_arg_advance (args_so_far, mode, type, true);
12320 return false;
12321 }
12322
12323 /* Return true if FUN has no prototype, has a variable argument
12324 list, or passes any parameter in memory. */
12325
12326 static bool
12327 rs6000_function_parms_need_stack (tree fun, bool incoming)
12328 {
12329 tree fntype, result;
12330 CUMULATIVE_ARGS args_so_far_v;
12331 cumulative_args_t args_so_far;
12332
12333 if (!fun)
12334 /* Must be a libcall, all of which only use reg parms. */
12335 return false;
12336
12337 fntype = fun;
12338 if (!TYPE_P (fun))
12339 fntype = TREE_TYPE (fun);
12340
12341 /* Varargs functions need the parameter save area. */
12342 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12343 return true;
12344
12345 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12346 args_so_far = pack_cumulative_args (&args_so_far_v);
12347
12348 /* When incoming, we will have been passed the function decl.
12349 It is necessary to use the decl to handle K&R style functions,
12350 where TYPE_ARG_TYPES may not be available. */
12351 if (incoming)
12352 {
12353 gcc_assert (DECL_P (fun));
12354 result = DECL_RESULT (fun);
12355 }
12356 else
12357 result = TREE_TYPE (fntype);
12358
12359 if (result && aggregate_value_p (result, fntype))
12360 {
12361 if (!TYPE_P (result))
12362 result = TREE_TYPE (result);
12363 result = build_pointer_type (result);
12364 rs6000_parm_needs_stack (args_so_far, result);
12365 }
12366
12367 if (incoming)
12368 {
12369 tree parm;
12370
12371 for (parm = DECL_ARGUMENTS (fun);
12372 parm && parm != void_list_node;
12373 parm = TREE_CHAIN (parm))
12374 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12375 return true;
12376 }
12377 else
12378 {
12379 function_args_iterator args_iter;
12380 tree arg_type;
12381
12382 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12383 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12384 return true;
12385 }
12386
12387 return false;
12388 }
12389
12390 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12391 usually a constant depending on the ABI. However, in the ELFv2 ABI
12392 the register parameter area is optional when calling a function that
12393 has a prototype in scope, has no variable argument list, and passes
12394 all parameters in registers. */
12395
12396 int
12397 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12398 {
12399 int reg_parm_stack_space;
12400
12401 switch (DEFAULT_ABI)
12402 {
12403 default:
12404 reg_parm_stack_space = 0;
12405 break;
12406
12407 case ABI_AIX:
12408 case ABI_DARWIN:
12409 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12410 break;
12411
12412 case ABI_ELFv2:
12413 /* ??? Recomputing this every time is a bit expensive. Is there
12414 a place to cache this information? */
12415 if (rs6000_function_parms_need_stack (fun, incoming))
12416 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12417 else
12418 reg_parm_stack_space = 0;
12419 break;
12420 }
12421
12422 return reg_parm_stack_space;
12423 }
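/* For illustration: under ELFv2, a call to a prototyped, non-varargs
   function such as (hypothetically) "double dot (double x, double y)"
   passes everything in f1/f2, so rs6000_function_parms_need_stack
   returns false and no 64-byte register parameter area need be
   allocated; under AIX the area is always present.  */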
12424
12425 static void
12426 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12427 {
12428 int i;
12429 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12430
12431 if (nregs == 0)
12432 return;
12433
12434 for (i = 0; i < nregs; i++)
12435 {
12436 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12437 if (reload_completed)
12438 {
12439 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12440 tem = NULL_RTX;
12441 else
12442 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12443 i * GET_MODE_SIZE (reg_mode));
12444 }
12445 else
12446 tem = replace_equiv_address (tem, XEXP (tem, 0));
12447
12448 gcc_assert (tem);
12449
12450 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12451 }
12452 }
12453 \f
12454 /* Perform any actions needed for a function that is receiving a
12455 variable number of arguments.
12456
12457 CUM is as above.
12458
12459 MODE and TYPE are the mode and type of the current parameter.
12460
12461 PRETEND_SIZE is a variable that should be set to the amount of stack
12462 that must be pushed by the prolog to pretend that our caller pushed
12463 it.
12464
12465 Normally, this macro will push all remaining incoming registers on the
12466 stack and set PRETEND_SIZE to the length of the registers pushed. */
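/* For illustration: on V.4, for "int sum (int n, ...)" the named N
   consumes r3, so this hook dumps r4..r10 into the GPR part of the
   save area and, when hard float is enabled, conditionally dumps
   f1..f8 as well, guarded at run time by a condition-register bit
   the caller sets when FP arguments are passed.  */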
12467
12468 static void
12469 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12470 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12471 int no_rtl)
12472 {
12473 CUMULATIVE_ARGS next_cum;
12474 int reg_size = TARGET_32BIT ? 4 : 8;
12475 rtx save_area = NULL_RTX, mem;
12476 int first_reg_offset;
12477 alias_set_type set;
12478
12479 /* Skip the last named argument. */
12480 next_cum = *get_cumulative_args (cum);
12481 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12482
12483 if (DEFAULT_ABI == ABI_V4)
12484 {
12485 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12486
12487 if (! no_rtl)
12488 {
12489 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12490 HOST_WIDE_INT offset = 0;
12491
12492 /* Try to optimize the size of the varargs save area.
12493 The ABI requires that ap.reg_save_area is doubleword
12494 aligned, but we don't need to allocate space for all
12495 the bytes, only for those to which we will actually
12496 save anything. */
12497 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12498 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12499 if (TARGET_HARD_FLOAT
12500 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12501 && cfun->va_list_fpr_size)
12502 {
12503 if (gpr_reg_num)
12504 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12505 * UNITS_PER_FP_WORD;
12506 if (cfun->va_list_fpr_size
12507 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12508 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12509 else
12510 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12511 * UNITS_PER_FP_WORD;
12512 }
12513 if (gpr_reg_num)
12514 {
12515 offset = -((first_reg_offset * reg_size) & ~7);
12516 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12517 {
12518 gpr_reg_num = cfun->va_list_gpr_size;
12519 if (reg_size == 4 && (first_reg_offset & 1))
12520 gpr_reg_num++;
12521 }
12522 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12523 }
12524 else if (fpr_size)
12525 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12526 * UNITS_PER_FP_WORD
12527 - (int) (GP_ARG_NUM_REG * reg_size);
12528
12529 if (gpr_size + fpr_size)
12530 {
12531 rtx reg_save_area
12532 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12533 gcc_assert (GET_CODE (reg_save_area) == MEM);
12534 reg_save_area = XEXP (reg_save_area, 0);
12535 if (GET_CODE (reg_save_area) == PLUS)
12536 {
12537 gcc_assert (XEXP (reg_save_area, 0)
12538 == virtual_stack_vars_rtx);
12539 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
12540 offset += INTVAL (XEXP (reg_save_area, 1));
12541 }
12542 else
12543 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12544 }
12545
12546 cfun->machine->varargs_save_offset = offset;
12547 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12548 }
12549 }
12550 else
12551 {
12552 first_reg_offset = next_cum.words;
12553 save_area = crtl->args.internal_arg_pointer;
12554
12555 if (targetm.calls.must_pass_in_stack (mode, type))
12556 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12557 }
12558
12559 set = get_varargs_alias_set ();
12560 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12561 && cfun->va_list_gpr_size)
12562 {
12563 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12564
12565 if (va_list_gpr_counter_field)
12566 /* V4 va_list_gpr_size counts number of registers needed. */
12567 n_gpr = cfun->va_list_gpr_size;
12568 else
12569 /* char * va_list instead counts number of bytes needed. */
12570 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12571
12572 if (nregs > n_gpr)
12573 nregs = n_gpr;
12574
12575 mem = gen_rtx_MEM (BLKmode,
12576 plus_constant (Pmode, save_area,
12577 first_reg_offset * reg_size));
12578 MEM_NOTRAP_P (mem) = 1;
12579 set_mem_alias_set (mem, set);
12580 set_mem_align (mem, BITS_PER_WORD);
12581
12582 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12583 nregs);
12584 }
12585
12586 /* Save FP registers if needed. */
12587 if (DEFAULT_ABI == ABI_V4
12588 && TARGET_HARD_FLOAT
12589 && ! no_rtl
12590 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12591 && cfun->va_list_fpr_size)
12592 {
12593 int fregno = next_cum.fregno, nregs;
12594 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12595 rtx lab = gen_label_rtx ();
12596 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12597 * UNITS_PER_FP_WORD);
12598
12599 emit_jump_insn
12600 (gen_rtx_SET (pc_rtx,
12601 gen_rtx_IF_THEN_ELSE (VOIDmode,
12602 gen_rtx_NE (VOIDmode, cr1,
12603 const0_rtx),
12604 gen_rtx_LABEL_REF (VOIDmode, lab),
12605 pc_rtx)));
12606
12607 for (nregs = 0;
12608 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12609 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12610 {
12611 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12612 plus_constant (Pmode, save_area, off));
12613 MEM_NOTRAP_P (mem) = 1;
12614 set_mem_alias_set (mem, set);
12615 set_mem_align (mem, GET_MODE_ALIGNMENT (
12616 TARGET_HARD_FLOAT ? DFmode : SFmode));
12617 emit_move_insn (mem, gen_rtx_REG (
12618 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12619 }
12620
12621 emit_label (lab);
12622 }
12623 }
12624
12625 /* Create the va_list data type. */
12626
12627 static tree
12628 rs6000_build_builtin_va_list (void)
12629 {
12630 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12631
12632 /* For AIX, prefer 'char *' because that's what the system
12633 header files like. */
12634 if (DEFAULT_ABI != ABI_V4)
12635 return build_pointer_type (char_type_node);
12636
12637 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12638 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12639 get_identifier ("__va_list_tag"), record);
12640
12641 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12642 unsigned_char_type_node);
12643 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12644 unsigned_char_type_node);
12645 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12646 every user file. */
12647 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12648 get_identifier ("reserved"), short_unsigned_type_node);
12649 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12650 get_identifier ("overflow_arg_area"),
12651 ptr_type_node);
12652 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12653 get_identifier ("reg_save_area"),
12654 ptr_type_node);
12655
12656 va_list_gpr_counter_field = f_gpr;
12657 va_list_fpr_counter_field = f_fpr;
12658
12659 DECL_FIELD_CONTEXT (f_gpr) = record;
12660 DECL_FIELD_CONTEXT (f_fpr) = record;
12661 DECL_FIELD_CONTEXT (f_res) = record;
12662 DECL_FIELD_CONTEXT (f_ovf) = record;
12663 DECL_FIELD_CONTEXT (f_sav) = record;
12664
12665 TYPE_STUB_DECL (record) = type_decl;
12666 TYPE_NAME (record) = type_decl;
12667 TYPE_FIELDS (record) = f_gpr;
12668 DECL_CHAIN (f_gpr) = f_fpr;
12669 DECL_CHAIN (f_fpr) = f_res;
12670 DECL_CHAIN (f_res) = f_ovf;
12671 DECL_CHAIN (f_ovf) = f_sav;
12672
12673 layout_type (record);
12674
12675 /* The correct type is an array type of one element. */
12676 return build_array_type (record, build_index_type (size_zero_node));
12677 }
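/* For illustration, the record built above is equivalent to the
   user-visible V.4 declaration

     typedef struct __va_list_tag {
       unsigned char gpr;          // index of next saved GPR, 0..8
       unsigned char fpr;          // index of next saved FPR, 0..8
       unsigned short reserved;    // named padding, see above
       void *overflow_arg_area;    // arguments passed on the stack
       void *reg_save_area;        // GPR/FPR dump from the prologue
     } va_list[1];  */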
12678
12679 /* Implement va_start. */
12680
12681 static void
12682 rs6000_va_start (tree valist, rtx nextarg)
12683 {
12684 HOST_WIDE_INT words, n_gpr, n_fpr;
12685 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12686 tree gpr, fpr, ovf, sav, t;
12687
12688 /* Only SVR4 needs something special. */
12689 if (DEFAULT_ABI != ABI_V4)
12690 {
12691 std_expand_builtin_va_start (valist, nextarg);
12692 return;
12693 }
12694
12695 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12696 f_fpr = DECL_CHAIN (f_gpr);
12697 f_res = DECL_CHAIN (f_fpr);
12698 f_ovf = DECL_CHAIN (f_res);
12699 f_sav = DECL_CHAIN (f_ovf);
12700
12701 valist = build_simple_mem_ref (valist);
12702 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12703 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12704 f_fpr, NULL_TREE);
12705 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12706 f_ovf, NULL_TREE);
12707 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12708 f_sav, NULL_TREE);
12709
12710 /* Count number of gp and fp argument registers used. */
12711 words = crtl->args.info.words;
12712 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12713 GP_ARG_NUM_REG);
12714 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12715 FP_ARG_NUM_REG);
12716
12717 if (TARGET_DEBUG_ARG)
12718 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12719 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12720 words, n_gpr, n_fpr);
12721
12722 if (cfun->va_list_gpr_size)
12723 {
12724 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12725 build_int_cst (NULL_TREE, n_gpr));
12726 TREE_SIDE_EFFECTS (t) = 1;
12727 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12728 }
12729
12730 if (cfun->va_list_fpr_size)
12731 {
12732 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12733 build_int_cst (NULL_TREE, n_fpr));
12734 TREE_SIDE_EFFECTS (t) = 1;
12735 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12736
12737 #ifdef HAVE_AS_GNU_ATTRIBUTE
12738 if (call_ABI_of_interest (cfun->decl))
12739 rs6000_passes_float = true;
12740 #endif
12741 }
12742
12743 /* Find the overflow area. */
12744 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12745 if (words != 0)
12746 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12747 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12748 TREE_SIDE_EFFECTS (t) = 1;
12749 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12750
12751 /* If there were no va_arg invocations, don't set up the register
12752 save area. */
12753 if (!cfun->va_list_gpr_size
12754 && !cfun->va_list_fpr_size
12755 && n_gpr < GP_ARG_NUM_REG
12756 && n_fpr < FP_ARG_V4_MAX_REG)
12757 return;
12758
12759 /* Find the register save area. */
12760 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12761 if (cfun->machine->varargs_save_offset)
12762 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12763 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12764 TREE_SIDE_EFFECTS (t) = 1;
12765 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12766 }
12767
12768 /* Implement va_arg. */
12769
12770 static tree
12771 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12772 gimple_seq *post_p)
12773 {
12774 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12775 tree gpr, fpr, ovf, sav, reg, t, u;
12776 int size, rsize, n_reg, sav_ofs, sav_scale;
12777 tree lab_false, lab_over, addr;
12778 int align;
12779 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12780 int regalign = 0;
12781 gimple *stmt;
12782
12783 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12784 {
12785 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12786 return build_va_arg_indirect_ref (t);
12787 }
12788
12789 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12790 earlier version of gcc, with the property that it always applied alignment
12791 adjustments to the va-args (even for zero-sized types). The cheapest way
12792 to deal with this is to replicate the effect of the part of
12793 std_gimplify_va_arg_expr that carries out the align adjust, for the
12794 relevant case.
12795 We don't need to check for pass-by-reference because of the test above.
12796 We can return a simplified answer, since we know there's no offset to add. */
12797
12798 if (((TARGET_MACHO
12799 && rs6000_darwin64_abi)
12800 || DEFAULT_ABI == ABI_ELFv2
12801 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12802 && integer_zerop (TYPE_SIZE (type)))
12803 {
12804 unsigned HOST_WIDE_INT align, boundary;
12805 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12806 align = PARM_BOUNDARY / BITS_PER_UNIT;
12807 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12808 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12809 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12810 boundary /= BITS_PER_UNIT;
12811 if (boundary > align)
12812 {
12813 tree t;
12814 /* This updates arg ptr by the amount that would be necessary
12815 to align the zero-sized (but not zero-alignment) item. */
12816 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12817 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12818 gimplify_and_add (t, pre_p);
12819
12820 t = fold_convert (sizetype, valist_tmp);
12821 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12822 fold_convert (TREE_TYPE (valist),
12823 fold_build2 (BIT_AND_EXPR, sizetype, t,
12824 size_int (-boundary))));
12825 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12826 gimplify_and_add (t, pre_p);
12827 }
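/* The two statements above implement the usual round-up idiom
   p = (p + boundary - 1) & -boundary; e.g. with boundary == 16 a
   pointer value of 0xc8 is bumped to 0xd7 and masked down to 0xd0.  */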
12828 /* Since it is zero-sized there's no increment for the item itself. */
12829 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12830 return build_va_arg_indirect_ref (valist_tmp);
12831 }
12832
12833 if (DEFAULT_ABI != ABI_V4)
12834 {
12835 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12836 {
12837 tree elem_type = TREE_TYPE (type);
12838 machine_mode elem_mode = TYPE_MODE (elem_type);
12839 int elem_size = GET_MODE_SIZE (elem_mode);
12840
12841 if (elem_size < UNITS_PER_WORD)
12842 {
12843 tree real_part, imag_part;
12844 gimple_seq post = NULL;
12845
12846 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12847 &post);
12848 /* Copy the value into a temporary, lest the formal temporary
12849 be reused out from under us. */
12850 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12851 gimple_seq_add_seq (pre_p, post);
12852
12853 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12854 post_p);
12855
12856 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12857 }
12858 }
12859
12860 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12861 }
12862
12863 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12864 f_fpr = DECL_CHAIN (f_gpr);
12865 f_res = DECL_CHAIN (f_fpr);
12866 f_ovf = DECL_CHAIN (f_res);
12867 f_sav = DECL_CHAIN (f_ovf);
12868
12869 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12870 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12871 f_fpr, NULL_TREE);
12872 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12873 f_ovf, NULL_TREE);
12874 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12875 f_sav, NULL_TREE);
12876
12877 size = int_size_in_bytes (type);
12878 rsize = (size + 3) / 4;
12879 int pad = 4 * rsize - size;
12880 align = 1;
12881
12882 machine_mode mode = TYPE_MODE (type);
12883 if (abi_v4_pass_in_fpr (mode, false))
12884 {
12885 /* FP args go in FP registers, if present. */
12886 reg = fpr;
12887 n_reg = (size + 7) / 8;
12888 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12889 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12890 if (mode != SFmode && mode != SDmode)
12891 align = 8;
12892 }
12893 else
12894 {
12895 /* Otherwise into GP registers. */
12896 reg = gpr;
12897 n_reg = rsize;
12898 sav_ofs = 0;
12899 sav_scale = 4;
12900 if (n_reg == 2)
12901 align = 8;
12902 }
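/* For illustration: a double on a 32-bit hard-float target gives
   n_reg = 1, sav_ofs = 32 and sav_scale = 8, so the value is fetched
   from reg_save_area + 32 + fpr * 8; the FPR dump starts after the
   8 * 4 bytes of saved GPRs.  */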
12903
12904 /* Pull the value out of the saved registers.... */
12905
12906 lab_over = NULL;
12907 addr = create_tmp_var (ptr_type_node, "addr");
12908
12909 /* AltiVec vectors never go in registers when -mabi=altivec. */
12910 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12911 align = 16;
12912 else
12913 {
12914 lab_false = create_artificial_label (input_location);
12915 lab_over = create_artificial_label (input_location);
12916
12917 /* Long long is aligned in the registers, as is any other 2-GPR
12918 item such as complex int, due to a historical mistake. */
12919 u = reg;
12920 if (n_reg == 2 && reg == gpr)
12921 {
12922 regalign = 1;
12923 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12924 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12925 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12926 unshare_expr (reg), u);
12927 }
12928 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12929 reg number is 0 for f1, so we want to make it odd. */
12930 else if (reg == fpr && mode == TDmode)
12931 {
12932 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12933 build_int_cst (TREE_TYPE (reg), 1));
12934 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12935 }
12936
12937 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12938 t = build2 (GE_EXPR, boolean_type_node, u, t);
12939 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12940 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12941 gimplify_and_add (t, pre_p);
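/* I.e. branch to the overflow path once the (possibly aligned)
   counter exceeds 8 - n_reg: with n_reg == 2, a counter of 7 means
   only one GPR remains, so the pair cannot be passed in registers.  */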
12942
12943 t = sav;
12944 if (sav_ofs)
12945 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12946
12947 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12948 build_int_cst (TREE_TYPE (reg), n_reg));
12949 u = fold_convert (sizetype, u);
12950 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12951 t = fold_build_pointer_plus (t, u);
12952
12953 /* _Decimal32 varargs are located in the second word of the 64-bit
12954 FP register for 32-bit binaries. */
12955 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12956 t = fold_build_pointer_plus_hwi (t, size);
12957
12958 /* Args are passed right-aligned. */
12959 if (BYTES_BIG_ENDIAN)
12960 t = fold_build_pointer_plus_hwi (t, pad);
12961
12962 gimplify_assign (addr, t, pre_p);
12963
12964 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12965
12966 stmt = gimple_build_label (lab_false);
12967 gimple_seq_add_stmt (pre_p, stmt);
12968
12969 if ((n_reg == 2 && !regalign) || n_reg > 2)
12970 {
12971 /* Ensure that we don't find any more args in regs.
12972 Alignment has been taken care of for the special cases. */
12973 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12974 }
12975 }
12976
12977 /* ... otherwise out of the overflow area. */
12978
12979 /* Care for on-stack alignment if needed. */
12980 t = ovf;
12981 if (align != 1)
12982 {
12983 t = fold_build_pointer_plus_hwi (t, align - 1);
12984 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12985 build_int_cst (TREE_TYPE (t), -align));
12986 }
12987
12988 /* Args are passed right-aligned. */
12989 if (BYTES_BIG_ENDIAN)
12990 t = fold_build_pointer_plus_hwi (t, pad);
12991
12992 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12993
12994 gimplify_assign (unshare_expr (addr), t, pre_p);
12995
12996 t = fold_build_pointer_plus_hwi (t, size);
12997 gimplify_assign (unshare_expr (ovf), t, pre_p);
12998
12999 if (lab_over)
13000 {
13001 stmt = gimple_build_label (lab_over);
13002 gimple_seq_add_stmt (pre_p, stmt);
13003 }
13004
13005 if (STRICT_ALIGNMENT
13006 && (TYPE_ALIGN (type)
13007 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13008 {
13009 /* The value (of type complex double, for example) may not be
13010 aligned in memory in the saved registers, so copy via a
13011 temporary. (This is the same code as used for SPARC.) */
13012 tree tmp = create_tmp_var (type, "va_arg_tmp");
13013 tree dest_addr = build_fold_addr_expr (tmp);
13014
13015 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13016 3, dest_addr, addr, size_int (rsize * 4));
13017 TREE_ADDRESSABLE (tmp) = 1;
13018
13019 gimplify_and_add (copy, pre_p);
13020 addr = dest_addr;
13021 }
13022
13023 addr = fold_convert (ptrtype, addr);
13024 return build_va_arg_indirect_ref (addr);
13025 }
13026
13027 /* Builtins. */
13028
13029 static void
13030 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13031 {
13032 tree t;
13033 unsigned classify = rs6000_builtin_info[(int)code].attr;
13034 const char *attr_string = "";
13035
13036 gcc_assert (name != NULL);
13037 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
13038
13039 if (rs6000_builtin_decls[(int)code])
13040 fatal_error (input_location,
13041 "internal error: builtin function %qs already processed",
13042 name);
13043
13044 rs6000_builtin_decls[(int)code] = t =
13045 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13046
13047 /* Set any special attributes. */
13048 if ((classify & RS6000_BTC_CONST) != 0)
13049 {
13050 /* const function, function only depends on the inputs. */
13051 TREE_READONLY (t) = 1;
13052 TREE_NOTHROW (t) = 1;
13053 attr_string = ", const";
13054 }
13055 else if ((classify & RS6000_BTC_PURE) != 0)
13056 {
13057 /* pure function, function can read global memory, but does not set any
13058 external state. */
13059 DECL_PURE_P (t) = 1;
13060 TREE_NOTHROW (t) = 1;
13061 attr_string = ", pure";
13062 }
13063 else if ((classify & RS6000_BTC_FP) != 0)
13064 {
13065 /* Function is a math function. If -frounding-math is in effect, treat
13066 the function as not reading global memory, but as possibly having
13067 arbitrary side effects. If it is off, assume the function is const.
13068 This mimics the ATTR_MATHFN_FPROUNDING attribute in builtin-attrs.def
13069 that is used for the math functions. */
13070 TREE_NOTHROW (t) = 1;
13071 if (flag_rounding_math)
13072 {
13073 DECL_PURE_P (t) = 1;
13074 DECL_IS_NOVOPS (t) = 1;
13075 attr_string = ", fp, pure";
13076 }
13077 else
13078 {
13079 TREE_READONLY (t) = 1;
13080 attr_string = ", fp, const";
13081 }
13082 }
13083 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13084 gcc_unreachable ();
13085
13086 if (TARGET_DEBUG_BUILTIN)
13087 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13088 (int)code, name, attr_string);
13089 }
13090
13091 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13092
13093 #undef RS6000_BUILTIN_0
13094 #undef RS6000_BUILTIN_1
13095 #undef RS6000_BUILTIN_2
13096 #undef RS6000_BUILTIN_3
13097 #undef RS6000_BUILTIN_A
13098 #undef RS6000_BUILTIN_D
13099 #undef RS6000_BUILTIN_H
13100 #undef RS6000_BUILTIN_P
13101 #undef RS6000_BUILTIN_X
13102
13103 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13104 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13105 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13106 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13107 { MASK, ICODE, NAME, ENUM },
13108
13109 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13110 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13111 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13112 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13113 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13114
13115 static const struct builtin_description bdesc_3arg[] =
13116 {
13117 #include "rs6000-builtin.def"
13118 };
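/* For illustration of the X-macro pattern used throughout this
   section: with only RS6000_BUILTIN_3 defined to expand to
   "{ MASK, ICODE, NAME, ENUM }," and every other RS6000_BUILTIN_*
   macro defined to expand to nothing, re-including
   rs6000-builtin.def above emits one initializer per three-operand
   builtin and skips the rest; each table that follows repeats the
   trick with a different macro selected.  */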
13119
13120 /* DST operations: void foo (void *, const int, const char). */
13121
13122 #undef RS6000_BUILTIN_0
13123 #undef RS6000_BUILTIN_1
13124 #undef RS6000_BUILTIN_2
13125 #undef RS6000_BUILTIN_3
13126 #undef RS6000_BUILTIN_A
13127 #undef RS6000_BUILTIN_D
13128 #undef RS6000_BUILTIN_H
13129 #undef RS6000_BUILTIN_P
13130 #undef RS6000_BUILTIN_X
13131
13132 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13133 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13134 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13135 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13136 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13137 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13138 { MASK, ICODE, NAME, ENUM },
13139
13140 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13141 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13142 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13143
13144 static const struct builtin_description bdesc_dst[] =
13145 {
13146 #include "rs6000-builtin.def"
13147 };
13148
13149 /* Simple binary operations: VECc = foo (VECa, VECb). */
13150
13151 #undef RS6000_BUILTIN_0
13152 #undef RS6000_BUILTIN_1
13153 #undef RS6000_BUILTIN_2
13154 #undef RS6000_BUILTIN_3
13155 #undef RS6000_BUILTIN_A
13156 #undef RS6000_BUILTIN_D
13157 #undef RS6000_BUILTIN_H
13158 #undef RS6000_BUILTIN_P
13159 #undef RS6000_BUILTIN_X
13160
13161 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13162 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13163 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13164 { MASK, ICODE, NAME, ENUM },
13165
13166 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13167 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13168 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13169 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13170 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13171 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13172
13173 static const struct builtin_description bdesc_2arg[] =
13174 {
13175 #include "rs6000-builtin.def"
13176 };
13177
13178 #undef RS6000_BUILTIN_0
13179 #undef RS6000_BUILTIN_1
13180 #undef RS6000_BUILTIN_2
13181 #undef RS6000_BUILTIN_3
13182 #undef RS6000_BUILTIN_A
13183 #undef RS6000_BUILTIN_D
13184 #undef RS6000_BUILTIN_H
13185 #undef RS6000_BUILTIN_P
13186 #undef RS6000_BUILTIN_X
13187
13188 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13189 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13190 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13191 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13192 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13193 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13194 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13195 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13196 { MASK, ICODE, NAME, ENUM },
13197
13198 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13199
13200 /* AltiVec predicates. */
13201
13202 static const struct builtin_description bdesc_altivec_preds[] =
13203 {
13204 #include "rs6000-builtin.def"
13205 };
13206
13207 /* ABS* operations. */
13208
13209 #undef RS6000_BUILTIN_0
13210 #undef RS6000_BUILTIN_1
13211 #undef RS6000_BUILTIN_2
13212 #undef RS6000_BUILTIN_3
13213 #undef RS6000_BUILTIN_A
13214 #undef RS6000_BUILTIN_D
13215 #undef RS6000_BUILTIN_H
13216 #undef RS6000_BUILTIN_P
13217 #undef RS6000_BUILTIN_X
13218
13219 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13220 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13221 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13222 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13223 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13224 { MASK, ICODE, NAME, ENUM },
13225
13226 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13227 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13228 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13229 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13230
13231 static const struct builtin_description bdesc_abs[] =
13232 {
13233 #include "rs6000-builtin.def"
13234 };
13235
13236 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13237 foo (VECa). */
13238
13239 #undef RS6000_BUILTIN_0
13240 #undef RS6000_BUILTIN_1
13241 #undef RS6000_BUILTIN_2
13242 #undef RS6000_BUILTIN_3
13243 #undef RS6000_BUILTIN_A
13244 #undef RS6000_BUILTIN_D
13245 #undef RS6000_BUILTIN_H
13246 #undef RS6000_BUILTIN_P
13247 #undef RS6000_BUILTIN_X
13248
13249 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13250 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13251 { MASK, ICODE, NAME, ENUM },
13252
13253 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13254 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13255 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13256 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13257 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13258 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13259 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13260
13261 static const struct builtin_description bdesc_1arg[] =
13262 {
13263 #include "rs6000-builtin.def"
13264 };
13265
13266 /* Simple no-argument operations: result = __builtin_darn_32 () */
13267
13268 #undef RS6000_BUILTIN_0
13269 #undef RS6000_BUILTIN_1
13270 #undef RS6000_BUILTIN_2
13271 #undef RS6000_BUILTIN_3
13272 #undef RS6000_BUILTIN_A
13273 #undef RS6000_BUILTIN_D
13274 #undef RS6000_BUILTIN_H
13275 #undef RS6000_BUILTIN_P
13276 #undef RS6000_BUILTIN_X
13277
13278 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13279 { MASK, ICODE, NAME, ENUM },
13280
13281 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13282 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13283 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13284 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13285 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13286 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13287 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13288 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13289
13290 static const struct builtin_description bdesc_0arg[] =
13291 {
13292 #include "rs6000-builtin.def"
13293 };
13294
13295 /* HTM builtins. */
13296 #undef RS6000_BUILTIN_0
13297 #undef RS6000_BUILTIN_1
13298 #undef RS6000_BUILTIN_2
13299 #undef RS6000_BUILTIN_3
13300 #undef RS6000_BUILTIN_A
13301 #undef RS6000_BUILTIN_D
13302 #undef RS6000_BUILTIN_H
13303 #undef RS6000_BUILTIN_P
13304 #undef RS6000_BUILTIN_X
13305
13306 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13307 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13308 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13309 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13310 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13311 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13312 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13313 { MASK, ICODE, NAME, ENUM },
13314
13315 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13316 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13317
13318 static const struct builtin_description bdesc_htm[] =
13319 {
13320 #include "rs6000-builtin.def"
13321 };
13322
13323 #undef RS6000_BUILTIN_0
13324 #undef RS6000_BUILTIN_1
13325 #undef RS6000_BUILTIN_2
13326 #undef RS6000_BUILTIN_3
13327 #undef RS6000_BUILTIN_A
13328 #undef RS6000_BUILTIN_D
13329 #undef RS6000_BUILTIN_H
13330 #undef RS6000_BUILTIN_P
13331
13332 /* Return true if a builtin function is overloaded. */
13333 bool
13334 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13335 {
13336 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13337 }
13338
13339 const char *
13340 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13341 {
13342 return rs6000_builtin_info[(int)fncode].name;
13343 }
13344
13345 /* Expand an expression EXP that calls a builtin without arguments. */
13346 static rtx
13347 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13348 {
13349 rtx pat;
13350 machine_mode tmode = insn_data[icode].operand[0].mode;
13351
13352 if (icode == CODE_FOR_nothing)
13353 /* Builtin not supported on this processor. */
13354 return 0;
13355
13356 if (icode == CODE_FOR_rs6000_mffsl
13357 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13358 {
13359 error ("__builtin_mffsl() not supported with -msoft-float");
13360 return const0_rtx;
13361 }
13362
13363 if (target == 0
13364 || GET_MODE (target) != tmode
13365 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13366 target = gen_reg_rtx (tmode);
13367
13368 pat = GEN_FCN (icode) (target);
13369 if (! pat)
13370 return 0;
13371 emit_insn (pat);
13372
13373 return target;
13374 }
13375
13376
13377 static rtx
13378 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13379 {
13380 rtx pat;
13381 tree arg0 = CALL_EXPR_ARG (exp, 0);
13382 tree arg1 = CALL_EXPR_ARG (exp, 1);
13383 rtx op0 = expand_normal (arg0);
13384 rtx op1 = expand_normal (arg1);
13385 machine_mode mode0 = insn_data[icode].operand[0].mode;
13386 machine_mode mode1 = insn_data[icode].operand[1].mode;
13387
13388 if (icode == CODE_FOR_nothing)
13389 /* Builtin not supported on this processor. */
13390 return 0;
13391
13392 /* If we got invalid arguments bail out before generating bad rtl. */
13393 if (arg0 == error_mark_node || arg1 == error_mark_node)
13394 return const0_rtx;
13395
13396 if (GET_CODE (op0) != CONST_INT
13397 || INTVAL (op0) > 255
13398 || INTVAL (op0) < 0)
13399 {
13400 error ("argument 1 must be an 8-bit field value");
13401 return const0_rtx;
13402 }
13403
13404 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13405 op0 = copy_to_mode_reg (mode0, op0);
13406
13407 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13408 op1 = copy_to_mode_reg (mode1, op1);
13409
13410 pat = GEN_FCN (icode) (op0, op1);
13411 if (!pat)
13412 return const0_rtx;
13413 emit_insn (pat);
13414
13415 return NULL_RTX;
13416 }
13417
13418 static rtx
13419 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13420 {
13421 rtx pat;
13422 tree arg0 = CALL_EXPR_ARG (exp, 0);
13423 rtx op0 = expand_normal (arg0);
13424
13425 if (icode == CODE_FOR_nothing)
13426 /* Builtin not supported on this processor. */
13427 return 0;
13428
13429 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13430 {
13431 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13432 return const0_rtx;
13433 }
13434
13435 /* If we got invalid arguments bail out before generating bad rtl. */
13436 if (arg0 == error_mark_node)
13437 return const0_rtx;
13438
13439 /* Only allow bit numbers 0 to 31. */
13440 if (!u5bit_cint_operand (op0, VOIDmode))
13441 {
13442 error ("argument must be a constant between 0 and 31");
13443 return const0_rtx;
13444 }
13445
13446 pat = GEN_FCN (icode) (op0);
13447 if (!pat)
13448 return const0_rtx;
13449 emit_insn (pat);
13450
13451 return NULL_RTX;
13452 }
13453
13454 static rtx
13455 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13456 {
13457 rtx pat;
13458 tree arg0 = CALL_EXPR_ARG (exp, 0);
13459 rtx op0 = expand_normal (arg0);
13460 machine_mode mode0 = insn_data[icode].operand[0].mode;
13461
13462 if (icode == CODE_FOR_nothing)
13463 /* Builtin not supported on this processor. */
13464 return 0;
13465
13466 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13467 {
13468 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13469 return const0_rtx;
13470 }
13471
13472 /* If we got invalid arguments bail out before generating bad rtl. */
13473 if (arg0 == error_mark_node)
13474 return const0_rtx;
13475
13476 /* If the argument is a constant, check the range; the argument can
13477 only be a 2-bit value. Unfortunately, we cannot check the range of
13478 the value at compile time if the argument is a variable. The least
13479 significant two bits of the argument, regardless of type, are used
13480 to set the rounding mode. All other bits are ignored. */
13481 if (GET_CODE (op0) == CONST_INT && !const_0_to_3_operand(op0, VOIDmode))
13482 {
13483 error ("argument must be a value between 0 and 3");
13484 return const0_rtx;
13485 }
13486
13487 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13488 op0 = copy_to_mode_reg (mode0, op0);
13489
13490 pat = GEN_FCN (icode) (op0);
13491 if (!pat)
13492 return const0_rtx;
13493 emit_insn (pat);
13494
13495 return NULL_RTX;
13496 }
13497 static rtx
13498 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13499 {
13500 rtx pat;
13501 tree arg0 = CALL_EXPR_ARG (exp, 0);
13502 rtx op0 = expand_normal (arg0);
13503 machine_mode mode0 = insn_data[icode].operand[0].mode;
13504
13505 if (TARGET_32BIT)
13506 /* Builtin not supported in 32-bit mode. */
13507 fatal_error (input_location,
13508 "__builtin_set_fpscr_drn is not supported in 32-bit mode.");
13509
13510 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13511 {
13512 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13513 return const0_rtx;
13514 }
13515
13516 if (icode == CODE_FOR_nothing)
13517 /* Builtin not supported on this processor. */
13518 return 0;
13519
13520 /* If we got invalid arguments bail out before generating bad rtl. */
13521 if (arg0 == error_mark_node)
13522 return const0_rtx;
13523
13524 /* If the argument is a constant, check the range; the argument can
13525 only be a 3-bit value. Unfortunately, we cannot check the range of
13526 the value at compile time if the argument is a variable. The least
13527 significant three bits of the argument, regardless of type, are used
13528 to set the decimal rounding mode. All other bits are ignored. */
13529 if (GET_CODE (op0) == CONST_INT && !const_0_to_7_operand(op0, VOIDmode))
13530 {
13531 error ("argument must be a value between 0 and 7");
13532 return const0_rtx;
13533 }
13534
13535 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13536 op0 = copy_to_mode_reg (mode0, op0);
13537
13538 pat = GEN_FCN (icode) (op0);
13539 if (! pat)
13540 return const0_rtx;
13541 emit_insn (pat);
13542
13543 return NULL_RTX;
13544 }
13545
13546 static rtx
13547 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13548 {
13549 rtx pat;
13550 tree arg0 = CALL_EXPR_ARG (exp, 0);
13551 rtx op0 = expand_normal (arg0);
13552 machine_mode tmode = insn_data[icode].operand[0].mode;
13553 machine_mode mode0 = insn_data[icode].operand[1].mode;
13554
13555 if (icode == CODE_FOR_nothing)
13556 /* Builtin not supported on this processor. */
13557 return 0;
13558
13559 /* If we got invalid arguments bail out before generating bad rtl. */
13560 if (arg0 == error_mark_node)
13561 return const0_rtx;
13562
13563 if (icode == CODE_FOR_altivec_vspltisb
13564 || icode == CODE_FOR_altivec_vspltish
13565 || icode == CODE_FOR_altivec_vspltisw)
13566 {
13567 /* Only allow 5-bit *signed* literals. */
13568 if (GET_CODE (op0) != CONST_INT
13569 || INTVAL (op0) > 15
13570 || INTVAL (op0) < -16)
13571 {
13572 error ("argument 1 must be a 5-bit signed literal");
13573 return CONST0_RTX (tmode);
13574 }
13575 }
13576
13577 if (target == 0
13578 || GET_MODE (target) != tmode
13579 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13580 target = gen_reg_rtx (tmode);
13581
13582 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13583 op0 = copy_to_mode_reg (mode0, op0);
13584
13585 pat = GEN_FCN (icode) (target, op0);
13586 if (! pat)
13587 return 0;
13588 emit_insn (pat);
13589
13590 return target;
13591 }
13592
13593 static rtx
13594 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13595 {
13596 rtx pat, scratch1, scratch2;
13597 tree arg0 = CALL_EXPR_ARG (exp, 0);
13598 rtx op0 = expand_normal (arg0);
13599 machine_mode tmode = insn_data[icode].operand[0].mode;
13600 machine_mode mode0 = insn_data[icode].operand[1].mode;
13601
13602 /* If we have invalid arguments, bail out before generating bad rtl. */
13603 if (arg0 == error_mark_node)
13604 return const0_rtx;
13605
13606 if (target == 0
13607 || GET_MODE (target) != tmode
13608 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13609 target = gen_reg_rtx (tmode);
13610
13611 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13612 op0 = copy_to_mode_reg (mode0, op0);
13613
13614 scratch1 = gen_reg_rtx (mode0);
13615 scratch2 = gen_reg_rtx (mode0);
13616
13617 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13618 if (! pat)
13619 return 0;
13620 emit_insn (pat);
13621
13622 return target;
13623 }
13624
13625 static rtx
13626 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13627 {
13628 rtx pat;
13629 tree arg0 = CALL_EXPR_ARG (exp, 0);
13630 tree arg1 = CALL_EXPR_ARG (exp, 1);
13631 rtx op0 = expand_normal (arg0);
13632 rtx op1 = expand_normal (arg1);
13633 machine_mode tmode = insn_data[icode].operand[0].mode;
13634 machine_mode mode0 = insn_data[icode].operand[1].mode;
13635 machine_mode mode1 = insn_data[icode].operand[2].mode;
13636
13637 if (icode == CODE_FOR_nothing)
13638 /* Builtin not supported on this processor. */
13639 return 0;
13640
13641 /* If we got invalid arguments bail out before generating bad rtl. */
13642 if (arg0 == error_mark_node || arg1 == error_mark_node)
13643 return const0_rtx;
13644
13645 if (icode == CODE_FOR_unpackv1ti
13646 || icode == CODE_FOR_unpackkf
13647 || icode == CODE_FOR_unpacktf
13648 || icode == CODE_FOR_unpackif
13649 || icode == CODE_FOR_unpacktd)
13650 {
13651 /* Only allow 1-bit unsigned literals. */
13652 STRIP_NOPS (arg1);
13653 if (TREE_CODE (arg1) != INTEGER_CST
13654 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13655 {
13656 error ("argument 2 must be a 1-bit unsigned literal");
13657 return CONST0_RTX (tmode);
13658 }
13659 }
13660 else if (icode == CODE_FOR_altivec_vspltw)
13661 {
13662 /* Only allow 2-bit unsigned literals. */
13663 STRIP_NOPS (arg1);
13664 if (TREE_CODE (arg1) != INTEGER_CST
13665 || TREE_INT_CST_LOW (arg1) & ~3)
13666 {
13667 error ("argument 2 must be a 2-bit unsigned literal");
13668 return CONST0_RTX (tmode);
13669 }
13670 }
13671 else if (icode == CODE_FOR_altivec_vsplth)
13672 {
13673 /* Only allow 3-bit unsigned literals. */
13674 STRIP_NOPS (arg1);
13675 if (TREE_CODE (arg1) != INTEGER_CST
13676 || TREE_INT_CST_LOW (arg1) & ~7)
13677 {
13678 error ("argument 2 must be a 3-bit unsigned literal");
13679 return CONST0_RTX (tmode);
13680 }
13681 }
13682 else if (icode == CODE_FOR_altivec_vspltb)
13683 {
13684 /* Only allow 4-bit unsigned literals. */
13685 STRIP_NOPS (arg1);
13686 if (TREE_CODE (arg1) != INTEGER_CST
13687 || TREE_INT_CST_LOW (arg1) & ~15)
13688 {
13689 error ("argument 2 must be a 4-bit unsigned literal");
13690 return CONST0_RTX (tmode);
13691 }
13692 }
13693 else if (icode == CODE_FOR_altivec_vcfux
13694 || icode == CODE_FOR_altivec_vcfsx
13695 || icode == CODE_FOR_altivec_vctsxs
13696 || icode == CODE_FOR_altivec_vctuxs)
13697 {
13698 /* Only allow 5-bit unsigned literals. */
13699 STRIP_NOPS (arg1);
13700 if (TREE_CODE (arg1) != INTEGER_CST
13701 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13702 {
13703 error ("argument 2 must be a 5-bit unsigned literal");
13704 return CONST0_RTX (tmode);
13705 }
13706 }
13707 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13708 || icode == CODE_FOR_dfptstsfi_lt_dd
13709 || icode == CODE_FOR_dfptstsfi_gt_dd
13710 || icode == CODE_FOR_dfptstsfi_unordered_dd
13711 || icode == CODE_FOR_dfptstsfi_eq_td
13712 || icode == CODE_FOR_dfptstsfi_lt_td
13713 || icode == CODE_FOR_dfptstsfi_gt_td
13714 || icode == CODE_FOR_dfptstsfi_unordered_td)
13715 {
13716 /* Only allow 6-bit unsigned literals. */
13717 STRIP_NOPS (arg0);
13718 if (TREE_CODE (arg0) != INTEGER_CST
13719 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13720 {
13721 error ("argument 1 must be a 6-bit unsigned literal");
13722 return CONST0_RTX (tmode);
13723 }
13724 }
13725 else if (icode == CODE_FOR_xststdcqp_kf
13726 || icode == CODE_FOR_xststdcqp_tf
13727 || icode == CODE_FOR_xststdcdp
13728 || icode == CODE_FOR_xststdcsp
13729 || icode == CODE_FOR_xvtstdcdp
13730 || icode == CODE_FOR_xvtstdcsp)
13731 {
13732 /* Only allow 7-bit unsigned literals. */
13733 STRIP_NOPS (arg1);
13734 if (TREE_CODE (arg1) != INTEGER_CST
13735 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13736 {
13737 error ("argument 2 must be a 7-bit unsigned literal");
13738 return CONST0_RTX (tmode);
13739 }
13740 }
13741
13742 if (target == 0
13743 || GET_MODE (target) != tmode
13744 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13745 target = gen_reg_rtx (tmode);
13746
13747 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13748 op0 = copy_to_mode_reg (mode0, op0);
13749 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13750 op1 = copy_to_mode_reg (mode1, op1);
13751
13752 pat = GEN_FCN (icode) (target, op0, op1);
13753 if (! pat)
13754 return 0;
13755 emit_insn (pat);
13756
13757 return target;
13758 }
13759
13760 static rtx
13761 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13762 {
13763 rtx pat, scratch;
13764 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13765 tree arg0 = CALL_EXPR_ARG (exp, 1);
13766 tree arg1 = CALL_EXPR_ARG (exp, 2);
13767 rtx op0 = expand_normal (arg0);
13768 rtx op1 = expand_normal (arg1);
13769 machine_mode tmode = SImode;
13770 machine_mode mode0 = insn_data[icode].operand[1].mode;
13771 machine_mode mode1 = insn_data[icode].operand[2].mode;
13772 int cr6_form_int;
13773
13774 if (TREE_CODE (cr6_form) != INTEGER_CST)
13775 {
13776 error ("argument 1 of %qs must be a constant",
13777 "__builtin_altivec_predicate");
13778 return const0_rtx;
13779 }
13780 else
13781 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13782
13783 gcc_assert (mode0 == mode1);
13784
13785 /* If we have invalid arguments, bail out before generating bad rtl. */
13786 if (arg0 == error_mark_node || arg1 == error_mark_node)
13787 return const0_rtx;
13788
13789 if (target == 0
13790 || GET_MODE (target) != tmode
13791 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13792 target = gen_reg_rtx (tmode);
13793
13794 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13795 op0 = copy_to_mode_reg (mode0, op0);
13796 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13797 op1 = copy_to_mode_reg (mode1, op1);
13798
13799 /* Note that for many of the relevant operations (e.g. cmpne or
13800 cmpeq) with float or double operands, it would make more sense
13801 for the mode of the allocated scratch register to be an integer
13802 vector mode. But the choice to copy the mode of operand 0 was
13803 made long ago and there are no plans to change it. */
13804 scratch = gen_reg_rtx (mode0);
13805
13806 pat = GEN_FCN (icode) (scratch, op0, op1);
13807 if (! pat)
13808 return 0;
13809 emit_insn (pat);
13810
13811 /* The vec_any* and vec_all* predicates use the same opcodes for two
13812 different operations, but the bits in CR6 will be different
13813 depending on what information we want. So we have to play tricks
13814 with CR6 to get the right bits out.
13815
13816 If you think this is disgusting, look at the specs for the
13817 AltiVec predicates. */
13818
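  /* A sketch of the encoding used by callers: altivec.h passes the
     selector as one of

	#define __CR6_EQ	0
	#define __CR6_EQ_REV	1
	#define __CR6_LT	2
	#define __CR6_LT_REV	3

     so that, e.g., vec_all_eq tests CR6.LT while vec_any_eq tests the
     reverse of CR6.EQ (values quoted for illustration; see altivec.h
     for the authoritative definitions).  */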
13819 switch (cr6_form_int)
13820 {
13821 case 0:
13822 emit_insn (gen_cr6_test_for_zero (target));
13823 break;
13824 case 1:
13825 emit_insn (gen_cr6_test_for_zero_reverse (target));
13826 break;
13827 case 2:
13828 emit_insn (gen_cr6_test_for_lt (target));
13829 break;
13830 case 3:
13831 emit_insn (gen_cr6_test_for_lt_reverse (target));
13832 break;
13833 default:
13834 error ("argument 1 of %qs is out of range",
13835 "__builtin_altivec_predicate");
13836 break;
13837 }
13838
13839 return target;
13840 }
13841
13842 rtx
13843 swap_endian_selector_for_mode (machine_mode mode)
13844 {
13845 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13846 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13847 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13848 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13849
13850 unsigned int *swaparray, i;
13851 rtx perm[16];
13852
13853 switch (mode)
13854 {
13855 case E_V1TImode:
13856 swaparray = swap1;
13857 break;
13858 case E_V2DFmode:
13859 case E_V2DImode:
13860 swaparray = swap2;
13861 break;
13862 case E_V4SFmode:
13863 case E_V4SImode:
13864 swaparray = swap4;
13865 break;
13866 case E_V8HImode:
13867 swaparray = swap8;
13868 break;
13869 default:
13870 gcc_unreachable ();
13871 }
13872
13873 for (i = 0; i < 16; ++i)
13874 perm[i] = GEN_INT (swaparray[i]);
13875
13876 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13877 gen_rtvec_v (16, perm)));
13878 }
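
/* A hedged usage sketch for the selector above: to byte-reverse the
   element order of a V8HImode value V on a little-endian target, a
   caller could feed the selector to a vperm, along the lines of

     rtx sel = swap_endian_selector_for_mode (E_V8HImode);
     emit_insn (gen_altivec_vperm_v8hi (dest, v, v, sel));

   (gen_altivec_vperm_v8hi names the expander generated from the
   altivec_vperm_<mode> pattern; the call here is illustrative.)  */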
13879
13880 static rtx
13881 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13882 {
13883 rtx pat, addr;
13884 tree arg0 = CALL_EXPR_ARG (exp, 0);
13885 tree arg1 = CALL_EXPR_ARG (exp, 1);
13886 machine_mode tmode = insn_data[icode].operand[0].mode;
13887 machine_mode mode0 = Pmode;
13888 machine_mode mode1 = Pmode;
13889 rtx op0 = expand_normal (arg0);
13890 rtx op1 = expand_normal (arg1);
13891
13892 if (icode == CODE_FOR_nothing)
13893 /* Builtin not supported on this processor. */
13894 return 0;
13895
13896 /* If we got invalid arguments, bail out before generating bad rtl. */
13897 if (arg0 == error_mark_node || arg1 == error_mark_node)
13898 return const0_rtx;
13899
13900 if (target == 0
13901 || GET_MODE (target) != tmode
13902 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13903 target = gen_reg_rtx (tmode);
13904
13905 op1 = copy_to_mode_reg (mode1, op1);
13906
13907 /* For LVX, express the RTL accurately by ANDing the address with -16.
13908 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13909 so the raw address is fine. */
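  /* E.g. for vec_ld (o, p), which maps to lvx, the code below produces
     RTL of the shape

	(set (reg:V4SI target)
	     (mem:V4SI (and:DI (plus:DI p o) (const_int -16))))

     making the 16-byte alignment truncation of lvx explicit (a sketch;
     Pmode is DImode only for 64-bit).  */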
13910 if (icode == CODE_FOR_altivec_lvx_v1ti
13911 || icode == CODE_FOR_altivec_lvx_v2df
13912 || icode == CODE_FOR_altivec_lvx_v2di
13913 || icode == CODE_FOR_altivec_lvx_v4sf
13914 || icode == CODE_FOR_altivec_lvx_v4si
13915 || icode == CODE_FOR_altivec_lvx_v8hi
13916 || icode == CODE_FOR_altivec_lvx_v16qi)
13917 {
13918 rtx rawaddr;
13919 if (op0 == const0_rtx)
13920 rawaddr = op1;
13921 else
13922 {
13923 op0 = copy_to_mode_reg (mode0, op0);
13924 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13925 }
13926 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13927 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13928
13929 emit_insn (gen_rtx_SET (target, addr));
13930 }
13931 else
13932 {
13933 if (op0 == const0_rtx)
13934 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13935 else
13936 {
13937 op0 = copy_to_mode_reg (mode0, op0);
13938 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13939 gen_rtx_PLUS (Pmode, op1, op0));
13940 }
13941
13942 pat = GEN_FCN (icode) (target, addr);
13943 if (! pat)
13944 return 0;
13945 emit_insn (pat);
13946 }
13947
13948 return target;
13949 }
13950
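/* Expand a store-with-length builtin (a hedged description: the caller
   selects the icode, e.g. stxvl for vec_xst_len on P9); the value,
   address and length operands are forced into registers below, since
   the underlying instructions take only register operands.  */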
13951 static rtx
13952 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13953 {
13954 rtx pat;
13955 tree arg0 = CALL_EXPR_ARG (exp, 0);
13956 tree arg1 = CALL_EXPR_ARG (exp, 1);
13957 tree arg2 = CALL_EXPR_ARG (exp, 2);
13958 rtx op0 = expand_normal (arg0);
13959 rtx op1 = expand_normal (arg1);
13960 rtx op2 = expand_normal (arg2);
13961 machine_mode mode0 = insn_data[icode].operand[0].mode;
13962 machine_mode mode1 = insn_data[icode].operand[1].mode;
13963 machine_mode mode2 = insn_data[icode].operand[2].mode;
13964
13965 if (icode == CODE_FOR_nothing)
13966 /* Builtin not supported on this processor. */
13967 return NULL_RTX;
13968
13969 /* If we got invalid arguments, bail out before generating bad rtl. */
13970 if (arg0 == error_mark_node
13971 || arg1 == error_mark_node
13972 || arg2 == error_mark_node)
13973 return NULL_RTX;
13974
13975 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13976 op0 = copy_to_mode_reg (mode0, op0);
13977 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13978 op1 = copy_to_mode_reg (mode1, op1);
13979 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13980 op2 = copy_to_mode_reg (mode2, op2);
13981
13982 pat = GEN_FCN (icode) (op0, op1, op2);
13983 if (pat)
13984 emit_insn (pat);
13985
13986 return NULL_RTX;
13987 }
13988
13989 static rtx
13990 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13991 {
13992 tree arg0 = CALL_EXPR_ARG (exp, 0);
13993 tree arg1 = CALL_EXPR_ARG (exp, 1);
13994 tree arg2 = CALL_EXPR_ARG (exp, 2);
13995 rtx op0 = expand_normal (arg0);
13996 rtx op1 = expand_normal (arg1);
13997 rtx op2 = expand_normal (arg2);
13998 rtx pat, addr, rawaddr;
13999 machine_mode tmode = insn_data[icode].operand[0].mode;
14000 machine_mode smode = insn_data[icode].operand[1].mode;
14001 machine_mode mode1 = Pmode;
14002 machine_mode mode2 = Pmode;
14003
14004 /* Invalid arguments; bail out before generating bad rtl. */
14005 if (arg0 == error_mark_node
14006 || arg1 == error_mark_node
14007 || arg2 == error_mark_node)
14008 return const0_rtx;
14009
14010 op2 = copy_to_mode_reg (mode2, op2);
14011
14012 /* For STVX, express the RTL accurately by ANDing the address with -16.
14013 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14014 so the raw address is fine. */
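  /* Mirror image of the LVX case in altivec_expand_lv_builtin above:
     e.g. vec_st (v, o, p), which maps to stvx, becomes roughly

	(set (mem:V4SI (and:DI (plus:DI p o) (const_int -16)))
	     (reg:V4SI v))

     (a sketch; Pmode is DImode only for 64-bit).  */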
14015 if (icode == CODE_FOR_altivec_stvx_v2df
14016 || icode == CODE_FOR_altivec_stvx_v2di
14017 || icode == CODE_FOR_altivec_stvx_v4sf
14018 || icode == CODE_FOR_altivec_stvx_v4si
14019 || icode == CODE_FOR_altivec_stvx_v8hi
14020 || icode == CODE_FOR_altivec_stvx_v16qi)
14021 {
14022 if (op1 == const0_rtx)
14023 rawaddr = op2;
14024 else
14025 {
14026 op1 = copy_to_mode_reg (mode1, op1);
14027 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14028 }
14029
14030 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14031 addr = gen_rtx_MEM (tmode, addr);
14032
14033 op0 = copy_to_mode_reg (tmode, op0);
14034
14035 emit_insn (gen_rtx_SET (addr, op0));
14036 }
14037 else
14038 {
14039 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14040 op0 = copy_to_mode_reg (smode, op0);
14041
14042 if (op1 == const0_rtx)
14043 addr = gen_rtx_MEM (tmode, op2);
14044 else
14045 {
14046 op1 = copy_to_mode_reg (mode1, op1);
14047 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14048 }
14049
14050 pat = GEN_FCN (icode) (addr, op0);
14051 if (pat)
14052 emit_insn (pat);
14053 }
14054
14055 return NULL_RTX;
14056 }
14057
14058 /* Return the appropriate SPR number associated with the given builtin. */
14059 static inline HOST_WIDE_INT
14060 htm_spr_num (enum rs6000_builtins code)
14061 {
14062 if (code == HTM_BUILTIN_GET_TFHAR
14063 || code == HTM_BUILTIN_SET_TFHAR)
14064 return TFHAR_SPR;
14065 else if (code == HTM_BUILTIN_GET_TFIAR
14066 || code == HTM_BUILTIN_SET_TFIAR)
14067 return TFIAR_SPR;
14068 else if (code == HTM_BUILTIN_GET_TEXASR
14069 || code == HTM_BUILTIN_SET_TEXASR)
14070 return TEXASR_SPR;
14071 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14072 || code == HTM_BUILTIN_SET_TEXASRU);
14073 return TEXASRU_SPR;
14074 }
14075
14076 /* Return the appropriate SPR regno associated with the given builtin. */
14077 static inline HOST_WIDE_INT
14078 htm_spr_regno (enum rs6000_builtins code)
14079 {
14080 if (code == HTM_BUILTIN_GET_TFHAR
14081 || code == HTM_BUILTIN_SET_TFHAR)
14082 return TFHAR_REGNO;
14083 else if (code == HTM_BUILTIN_GET_TFIAR
14084 || code == HTM_BUILTIN_SET_TFIAR)
14085 return TFIAR_REGNO;
14086 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14087 || code == HTM_BUILTIN_SET_TEXASR
14088 || code == HTM_BUILTIN_GET_TEXASRU
14089 || code == HTM_BUILTIN_SET_TEXASRU);
14090 return TEXASR_REGNO;
14091 }
14092
14093 /* Return the correct ICODE value depending on whether we are
14094 setting or reading the HTM SPRs. */
14095 static inline enum insn_code
14096 rs6000_htm_spr_icode (bool nonvoid)
14097 {
14098 if (nonvoid)
14099 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14100 else
14101 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14102 }
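
/* E.g. __builtin_get_texasr () comes through the mfspr path above and
   expands to something like "mfspr 3,130" (TEXASR is SPR 130 in the
   HTM register set), leaving the result in a DImode register, or
   SImode for -m32; a sketch of the expected output.  */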
14103
14104 /* Expand the HTM builtin in EXP and store the result in TARGET.
14105 Store true in *EXPANDEDP if we found a builtin to expand. */
14106 static rtx
14107 htm_expand_builtin (tree exp, rtx target, bool *expandedp)
14108 {
14109 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14110 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14111 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14112 const struct builtin_description *d;
14113 size_t i;
14114
14115 *expandedp = true;
14116
14117 if (!TARGET_POWERPC64
14118 && (fcode == HTM_BUILTIN_TABORTDC
14119 || fcode == HTM_BUILTIN_TABORTDCI))
14120 {
14121 size_t uns_fcode = (size_t)fcode;
14122 const char *name = rs6000_builtin_info[uns_fcode].name;
14123 error ("builtin %qs is only valid in 64-bit mode", name);
14124 return const0_rtx;
14125 }
14126
14127 /* Expand the HTM builtins. */
14128 d = bdesc_htm;
14129 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14130 if (d->code == fcode)
14131 {
14132 rtx op[MAX_HTM_OPERANDS], pat;
14133 int nopnds = 0;
14134 tree arg;
14135 call_expr_arg_iterator iter;
14136 unsigned attr = rs6000_builtin_info[fcode].attr;
14137 enum insn_code icode = d->icode;
14138 const struct insn_operand_data *insn_op;
14139 bool uses_spr = (attr & RS6000_BTC_SPR);
14140 rtx cr = NULL_RTX;
14141
14142 if (uses_spr)
14143 icode = rs6000_htm_spr_icode (nonvoid);
14144 insn_op = &insn_data[icode].operand[0];
14145
14146 if (nonvoid)
14147 {
14148 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14149 if (!target
14150 || GET_MODE (target) != tmode
14151 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14152 target = gen_reg_rtx (tmode);
14153 if (uses_spr)
14154 op[nopnds++] = target;
14155 }
14156
14157 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14158 {
14159 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14160 return const0_rtx;
14161
14162 insn_op = &insn_data[icode].operand[nopnds];
14163
14164 op[nopnds] = expand_normal (arg);
14165
14166 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14167 {
14168 if (!strcmp (insn_op->constraint, "n"))
14169 {
14170 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14171 if (!CONST_INT_P (op[nopnds]))
14172 error ("argument %d must be an unsigned literal", arg_num);
14173 else
14174 error ("argument %d is an unsigned literal that is "
14175 "out of range", arg_num);
14176 return const0_rtx;
14177 }
14178 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14179 }
14180
14181 nopnds++;
14182 }
14183
14184 /* Handle the builtins for extended mnemonics. These accept
14185 no arguments, but map to builtins that take arguments. */
14186 switch (fcode)
14187 {
14188 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14189 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14190 op[nopnds++] = GEN_INT (1);
14191 if (flag_checking)
14192 attr |= RS6000_BTC_UNARY;
14193 break;
14194 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14195 op[nopnds++] = GEN_INT (0);
14196 if (flag_checking)
14197 attr |= RS6000_BTC_UNARY;
14198 break;
14199 default:
14200 break;
14201 }
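
	/* E.g. __builtin_tendall () takes no source arguments, so the
	   constant 1 is appended above and the call is then expanded
	   exactly as __builtin_tend (1) would be.  */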
14202
14203 /* If this builtin accesses SPRs, then pass in the appropriate
14204 SPR number and SPR regno as the last two operands. */
14205 if (uses_spr)
14206 {
14207 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14208 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14209 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14210 }
14211 /* If this builtin accesses a CR, then pass in a scratch
14212 CR as the last operand. */
14213 else if (attr & RS6000_BTC_CR)
14214 {
cr = gen_reg_rtx (CCmode);
14215 op[nopnds++] = cr;
14216 }
14217
14218 if (flag_checking)
14219 {
14220 int expected_nopnds = 0;
14221 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14222 expected_nopnds = 1;
14223 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14224 expected_nopnds = 2;
14225 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14226 expected_nopnds = 3;
14227 if (!(attr & RS6000_BTC_VOID))
14228 expected_nopnds += 1;
14229 if (uses_spr)
14230 expected_nopnds += 2;
14231
14232 gcc_assert (nopnds == expected_nopnds
14233 && nopnds <= MAX_HTM_OPERANDS);
14234 }
14235
14236 switch (nopnds)
14237 {
14238 case 1:
14239 pat = GEN_FCN (icode) (op[0]);
14240 break;
14241 case 2:
14242 pat = GEN_FCN (icode) (op[0], op[1]);
14243 break;
14244 case 3:
14245 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14246 break;
14247 case 4:
14248 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14249 break;
14250 default:
14251 gcc_unreachable ();
14252 }
14253 if (!pat)
14254 return NULL_RTX;
14255 emit_insn (pat);
14256
14257 if (attr & RS6000_BTC_CR)
14258 {
14259 if (fcode == HTM_BUILTIN_TBEGIN)
14260 {
14261 /* Emit code to set TARGET to true or false depending on
14262 whether the tbegin. instruction succeeded or failed
14263 to start a transaction. We do this by placing the 1's
14264 complement of CR's EQ bit into TARGET. */
14265 rtx scratch = gen_reg_rtx (SImode);
14266 emit_insn (gen_rtx_SET (scratch,
14267 gen_rtx_EQ (SImode, cr,
14268 const0_rtx)));
14269 emit_insn (gen_rtx_SET (target,
14270 gen_rtx_XOR (SImode, scratch,
14271 GEN_INT (1))));
14272 }
14273 else
14274 {
14275 /* Emit code to copy the 4-bit condition register field
14276 CR into the least significant end of register TARGET. */
14277 rtx scratch1 = gen_reg_rtx (SImode);
14278 rtx scratch2 = gen_reg_rtx (SImode);
14279 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14280 emit_insn (gen_movcc (subreg, cr));
14281 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14282 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14283 }
14284 }
14285
14286 if (nonvoid)
14287 return target;
14288 return const0_rtx;
14289 }
14290
14291 *expandedp = false;
14292 return NULL_RTX;
14293 }
14294
14295 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
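
/* A usage sketch (valid only with a glibc that provides the TCB
   fields used below, i.e. 2.23 or newer):

     if (__builtin_cpu_is ("power9"))
       ...
     if (__builtin_cpu_supports ("vsx"))
       ...

   Each call compiles to a load from the thread control block plus a
   compare or mask; "power9" and "vsx" are entries in cpu_is_info and
   cpu_supports_info respectively.  */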
14296
14297 static rtx
14298 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14299 rtx target)
14300 {
14301 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14302 if (fcode == RS6000_BUILTIN_CPU_INIT)
14303 return const0_rtx;
14304
14305 if (target == 0 || GET_MODE (target) != SImode)
14306 target = gen_reg_rtx (SImode);
14307
14308 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14309 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14310 /* The target_clones pass can create an ARRAY_REF instead of a
14311 STRING_CST; convert it back to a STRING_CST. */
14312 if (TREE_CODE (arg) == ARRAY_REF
14313 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14314 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14315 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14316 arg = TREE_OPERAND (arg, 0);
14317
14318 if (TREE_CODE (arg) != STRING_CST)
14319 {
14320 error ("builtin %qs only accepts a string argument",
14321 rs6000_builtin_info[(size_t) fcode].name);
14322 return const0_rtx;
14323 }
14324
14325 if (fcode == RS6000_BUILTIN_CPU_IS)
14326 {
14327 const char *cpu = TREE_STRING_POINTER (arg);
14328 rtx cpuid = NULL_RTX;
14329 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14330 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14331 {
14332 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14333 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14334 break;
14335 }
14336 if (cpuid == NULL_RTX)
14337 {
14338 /* Invalid CPU argument. */
14339 error ("cpu %qs is an invalid argument to builtin %qs",
14340 cpu, rs6000_builtin_info[(size_t) fcode].name);
14341 return const0_rtx;
14342 }
14343
14344 rtx platform = gen_reg_rtx (SImode);
14345 rtx tcbmem = gen_const_mem (SImode,
14346 gen_rtx_PLUS (Pmode,
14347 gen_rtx_REG (Pmode, TLS_REGNUM),
14348 GEN_INT (TCB_PLATFORM_OFFSET)));
14349 emit_move_insn (platform, tcbmem);
14350 emit_insn (gen_eqsi3 (target, platform, cpuid));
14351 }
14352 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14353 {
14354 const char *hwcap = TREE_STRING_POINTER (arg);
14355 rtx mask = NULL_RTX;
14356 int hwcap_offset;
14357 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14358 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14359 {
14360 mask = GEN_INT (cpu_supports_info[i].mask);
14361 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14362 break;
14363 }
14364 if (mask == NULL_RTX)
14365 {
14366 /* Invalid HWCAP argument. */
14367 error ("%s %qs is an invalid argument to builtin %qs",
14368 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14369 return const0_rtx;
14370 }
14371
14372 rtx tcb_hwcap = gen_reg_rtx (SImode);
14373 rtx tcbmem = gen_const_mem (SImode,
14374 gen_rtx_PLUS (Pmode,
14375 gen_rtx_REG (Pmode, TLS_REGNUM),
14376 GEN_INT (hwcap_offset)));
14377 emit_move_insn (tcb_hwcap, tcbmem);
14378 rtx scratch1 = gen_reg_rtx (SImode);
14379 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14380 rtx scratch2 = gen_reg_rtx (SImode);
14381 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14382 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14383 }
14384 else
14385 gcc_unreachable ();
14386
14387 /* Record that we have expanded a CPU builtin, so that we can later
14388 emit a reference to the special symbol exported by LIBC to ensure we
14389 do not link against an old LIBC that doesn't support this feature. */
14390 cpu_builtin_p = true;
14391
14392 #else
14393 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14394 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14395
14396 /* For old LIBCs, always return FALSE. */
14397 emit_move_insn (target, GEN_INT (0));
14398 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14399
14400 return target;
14401 }
14402
14403 static rtx
14404 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14405 {
14406 rtx pat;
14407 tree arg0 = CALL_EXPR_ARG (exp, 0);
14408 tree arg1 = CALL_EXPR_ARG (exp, 1);
14409 tree arg2 = CALL_EXPR_ARG (exp, 2);
14410 rtx op0 = expand_normal (arg0);
14411 rtx op1 = expand_normal (arg1);
14412 rtx op2 = expand_normal (arg2);
14413 machine_mode tmode = insn_data[icode].operand[0].mode;
14414 machine_mode mode0 = insn_data[icode].operand[1].mode;
14415 machine_mode mode1 = insn_data[icode].operand[2].mode;
14416 machine_mode mode2 = insn_data[icode].operand[3].mode;
14417
14418 if (icode == CODE_FOR_nothing)
14419 /* Builtin not supported on this processor. */
14420 return 0;
14421
14422 /* If we got invalid arguments, bail out before generating bad rtl. */
14423 if (arg0 == error_mark_node
14424 || arg1 == error_mark_node
14425 || arg2 == error_mark_node)
14426 return const0_rtx;
14427
14428 /* Check and prepare the argument depending on the instruction code.
14429
14430 Note that a switch statement instead of the sequence of tests
14431 would be incorrect, as many of the CODE_FOR values could be
14432 CODE_FOR_nothing, and that would yield multiple case labels
14433 with identical values. (We would never reach here at runtime
14434 for such an icode anyway.) */
14435 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14436 || icode == CODE_FOR_altivec_vsldoi_v2df
14437 || icode == CODE_FOR_altivec_vsldoi_v4si
14438 || icode == CODE_FOR_altivec_vsldoi_v8hi
14439 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14440 {
14441 /* Only allow 4-bit unsigned literals. */
14442 STRIP_NOPS (arg2);
14443 if (TREE_CODE (arg2) != INTEGER_CST
14444 || TREE_INT_CST_LOW (arg2) & ~0xf)
14445 {
14446 error ("argument 3 must be a 4-bit unsigned literal");
14447 return CONST0_RTX (tmode);
14448 }
14449 }
14450 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14451 || icode == CODE_FOR_vsx_xxpermdi_v2di
14452 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14453 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14454 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14455 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14456 || icode == CODE_FOR_vsx_xxpermdi_v4si
14457 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14458 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14459 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14460 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14461 || icode == CODE_FOR_vsx_xxsldwi_v4si
14462 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14463 || icode == CODE_FOR_vsx_xxsldwi_v2di
14464 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14465 {
14466 /* Only allow 2-bit unsigned literals. */
14467 STRIP_NOPS (arg2);
14468 if (TREE_CODE (arg2) != INTEGER_CST
14469 || TREE_INT_CST_LOW (arg2) & ~0x3)
14470 {
14471 error ("argument 3 must be a 2-bit unsigned literal");
14472 return CONST0_RTX (tmode);
14473 }
14474 }
14475 else if (icode == CODE_FOR_vsx_set_v2df
14476 || icode == CODE_FOR_vsx_set_v2di
14477 || icode == CODE_FOR_bcdadd
14478 || icode == CODE_FOR_bcdadd_lt
14479 || icode == CODE_FOR_bcdadd_eq
14480 || icode == CODE_FOR_bcdadd_gt
14481 || icode == CODE_FOR_bcdsub
14482 || icode == CODE_FOR_bcdsub_lt
14483 || icode == CODE_FOR_bcdsub_eq
14484 || icode == CODE_FOR_bcdsub_gt)
14485 {
14486 /* Only allow 1-bit unsigned literals. */
14487 STRIP_NOPS (arg2);
14488 if (TREE_CODE (arg2) != INTEGER_CST
14489 || TREE_INT_CST_LOW (arg2) & ~0x1)
14490 {
14491 error ("argument 3 must be a 1-bit unsigned literal");
14492 return CONST0_RTX (tmode);
14493 }
14494 }
14495 else if (icode == CODE_FOR_dfp_ddedpd_dd
14496 || icode == CODE_FOR_dfp_ddedpd_td)
14497 {
14498 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14499 STRIP_NOPS (arg0);
14500 if (TREE_CODE (arg0) != INTEGER_CST
14501 || TREE_INT_CST_LOW (arg0) & ~0x3)
14502 {
14503 error ("argument 1 must be 0 or 2");
14504 return CONST0_RTX (tmode);
14505 }
14506 }
14507 else if (icode == CODE_FOR_dfp_denbcd_dd
14508 || icode == CODE_FOR_dfp_denbcd_td)
14509 {
14510 /* Only allow 1-bit unsigned literals. */
14511 STRIP_NOPS (arg0);
14512 if (TREE_CODE (arg0) != INTEGER_CST
14513 || TREE_INT_CST_LOW (arg0) & ~0x1)
14514 {
14515 error ("argument 1 must be a 1-bit unsigned literal");
14516 return CONST0_RTX (tmode);
14517 }
14518 }
14519 else if (icode == CODE_FOR_dfp_dscli_dd
14520 || icode == CODE_FOR_dfp_dscli_td
14521 || icode == CODE_FOR_dfp_dscri_dd
14522 || icode == CODE_FOR_dfp_dscri_td)
14523 {
14524 /* Only allow 6-bit unsigned literals. */
14525 STRIP_NOPS (arg1);
14526 if (TREE_CODE (arg1) != INTEGER_CST
14527 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14528 {
14529 error ("argument 2 must be a 6-bit unsigned literal");
14530 return CONST0_RTX (tmode);
14531 }
14532 }
14533 else if (icode == CODE_FOR_crypto_vshasigmaw
14534 || icode == CODE_FOR_crypto_vshasigmad)
14535 {
14536 /* Check whether the 2nd and 3rd arguments are integer constants and in
14537 range and prepare arguments. */
14538 STRIP_NOPS (arg1);
14539 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14540 {
14541 error ("argument 2 must be 0 or 1");
14542 return CONST0_RTX (tmode);
14543 }
14544
14545 STRIP_NOPS (arg2);
14546 if (TREE_CODE (arg2) != INTEGER_CST
14547 || wi::geu_p (wi::to_wide (arg2), 16))
14548 {
14549 error ("argument 3 must be in the range 0..15");
14550 return CONST0_RTX (tmode);
14551 }
14552 }
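
  /* E.g. __builtin_crypto_vshasigmaw (x, 1, 0xf) passes the two checks
     above (function selector 1, all four 4-bit sigma selections), while
     a non-constant or out-of-range argument is rejected with the errors
     just emitted; illustrative only.  */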
14553
14554 if (target == 0
14555 || GET_MODE (target) != tmode
14556 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14557 target = gen_reg_rtx (tmode);
14558
14559 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14560 op0 = copy_to_mode_reg (mode0, op0);
14561 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14562 op1 = copy_to_mode_reg (mode1, op1);
14563 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14564 op2 = copy_to_mode_reg (mode2, op2);
14565
14566 pat = GEN_FCN (icode) (target, op0, op1, op2);
14567 if (! pat)
14568 return 0;
14569 emit_insn (pat);
14570
14571 return target;
14572 }
14573
14574
14575 /* Expand the dst builtins. */
14576 static rtx
14577 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14578 bool *expandedp)
14579 {
14580 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14581 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14582 tree arg0, arg1, arg2;
14583 machine_mode mode0, mode1;
14584 rtx pat, op0, op1, op2;
14585 const struct builtin_description *d;
14586 size_t i;
14587
14588 *expandedp = false;
14589
14590 /* Handle DST variants. */
14591 d = bdesc_dst;
14592 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14593 if (d->code == fcode)
14594 {
14595 arg0 = CALL_EXPR_ARG (exp, 0);
14596 arg1 = CALL_EXPR_ARG (exp, 1);
14597 arg2 = CALL_EXPR_ARG (exp, 2);
14598 op0 = expand_normal (arg0);
14599 op1 = expand_normal (arg1);
14600 op2 = expand_normal (arg2);
14601 mode0 = insn_data[d->icode].operand[0].mode;
14602 mode1 = insn_data[d->icode].operand[1].mode;
14603
14604 /* Invalid arguments; bail out before generating bad rtl. */
14605 if (arg0 == error_mark_node
14606 || arg1 == error_mark_node
14607 || arg2 == error_mark_node)
14608 return const0_rtx;
14609
14610 *expandedp = true;
14611 STRIP_NOPS (arg2);
14612 if (TREE_CODE (arg2) != INTEGER_CST
14613 || TREE_INT_CST_LOW (arg2) & ~0x3)
14614 {
14615 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14616 return const0_rtx;
14617 }
14618
14619 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14620 op0 = copy_to_mode_reg (Pmode, op0);
14621 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14622 op1 = copy_to_mode_reg (mode1, op1);
14623
14624 pat = GEN_FCN (d->icode) (op0, op1, op2);
14625 if (pat != 0)
14626 emit_insn (pat);
14627
14628 return NULL_RTX;
14629 }
14630
14631 return NULL_RTX;
14632 }
14633
14634 /* Expand vec_init builtin. */
14635 static rtx
14636 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14637 {
14638 machine_mode tmode = TYPE_MODE (type);
14639 machine_mode inner_mode = GET_MODE_INNER (tmode);
14640 int i, n_elt = GET_MODE_NUNITS (tmode);
14641
14642 gcc_assert (VECTOR_MODE_P (tmode));
14643 gcc_assert (n_elt == call_expr_nargs (exp));
14644
14645 if (!target || !register_operand (target, tmode))
14646 target = gen_reg_rtx (tmode);
14647
14648 /* If we have a vector comprising a single element, such as V1TImode, do
14649 the initialization directly. */
14650 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14651 {
14652 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14653 emit_move_insn (target, gen_lowpart (tmode, x));
14654 }
14655 else
14656 {
14657 rtvec v = rtvec_alloc (n_elt);
14658
14659 for (i = 0; i < n_elt; ++i)
14660 {
14661 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14662 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14663 }
14664
14665 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14666 }
14667
14668 return target;
14669 }
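
/* E.g. __builtin_vec_init_v4si (a, b, c, d) (the name is illustrative;
   the builtin is normally reached via vector initializer syntax) takes
   the PARALLEL path above with n_elt == 4, while a V1TImode init takes
   the single-element shortcut.  */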
14670
14671 /* Return the integer constant in ARG. Constrain it to be in the range
14672 of the subparts of VEC_TYPE; issue an error if not. */
14673
14674 static int
14675 get_element_number (tree vec_type, tree arg)
14676 {
14677 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14678
14679 if (!tree_fits_uhwi_p (arg)
14680 || (elt = tree_to_uhwi (arg), elt > max))
14681 {
14682 error ("selector must be an integer constant in the range 0..%wi", max);
14683 return 0;
14684 }
14685
14686 return elt;
14687 }
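
/* E.g. for a V4SI vector type the valid selectors are 0..3; the
   vec_set and vec_ext expanders below rely on this check for their
   constant element arguments.  */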
14688
14689 /* Expand vec_set builtin. */
14690 static rtx
14691 altivec_expand_vec_set_builtin (tree exp)
14692 {
14693 machine_mode tmode, mode1;
14694 tree arg0, arg1, arg2;
14695 int elt;
14696 rtx op0, op1;
14697
14698 arg0 = CALL_EXPR_ARG (exp, 0);
14699 arg1 = CALL_EXPR_ARG (exp, 1);
14700 arg2 = CALL_EXPR_ARG (exp, 2);
14701
14702 tmode = TYPE_MODE (TREE_TYPE (arg0));
14703 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14704 gcc_assert (VECTOR_MODE_P (tmode));
14705
14706 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14707 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14708 elt = get_element_number (TREE_TYPE (arg0), arg2);
14709
14710 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14711 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14712
14713 op0 = force_reg (tmode, op0);
14714 op1 = force_reg (mode1, op1);
14715
14716 rs6000_expand_vector_set (op0, op1, elt);
14717
14718 return op0;
14719 }
14720
14721 /* Expand vec_ext builtin. */
14722 static rtx
14723 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14724 {
14725 machine_mode tmode, mode0;
14726 tree arg0, arg1;
14727 rtx op0;
14728 rtx op1;
14729
14730 arg0 = CALL_EXPR_ARG (exp, 0);
14731 arg1 = CALL_EXPR_ARG (exp, 1);
14732
14733 op0 = expand_normal (arg0);
14734 op1 = expand_normal (arg1);
14735
14736 /* Call get_element_number to validate arg1 if it is a constant. */
14737 if (TREE_CODE (arg1) == INTEGER_CST)
14738 (void) get_element_number (TREE_TYPE (arg0), arg1);
14739
14740 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14741 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14742 gcc_assert (VECTOR_MODE_P (mode0));
14743
14744 op0 = force_reg (mode0, op0);
14745
14746 if (optimize || !target || !register_operand (target, tmode))
14747 target = gen_reg_rtx (tmode);
14748
14749 rs6000_expand_vector_extract (target, op0, op1);
14750
14751 return target;
14752 }
14753
14754 /* Expand the builtin in EXP and store the result in TARGET. Store
14755 true in *EXPANDEDP if we found a builtin to expand. */
14756 static rtx
14757 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14758 {
14759 const struct builtin_description *d;
14760 size_t i;
14761 enum insn_code icode;
14762 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14763 tree arg0, arg1, arg2;
14764 rtx op0, pat;
14765 machine_mode tmode, mode0;
14766 enum rs6000_builtins fcode
14767 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14768
14769 if (rs6000_overloaded_builtin_p (fcode))
14770 {
14771 *expandedp = true;
14772 error ("unresolved overload for Altivec builtin %qF", fndecl);
14773
14774 /* Given it is invalid, just generate a normal call. */
14775 return expand_call (exp, target, false);
14776 }
14777
14778 target = altivec_expand_dst_builtin (exp, target, expandedp);
14779 if (*expandedp)
14780 return target;
14781
14782 *expandedp = true;
14783
14784 switch (fcode)
14785 {
14786 case ALTIVEC_BUILTIN_STVX_V2DF:
14787 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14788 case ALTIVEC_BUILTIN_STVX_V2DI:
14789 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14790 case ALTIVEC_BUILTIN_STVX_V4SF:
14791 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14792 case ALTIVEC_BUILTIN_STVX:
14793 case ALTIVEC_BUILTIN_STVX_V4SI:
14794 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14795 case ALTIVEC_BUILTIN_STVX_V8HI:
14796 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14797 case ALTIVEC_BUILTIN_STVX_V16QI:
14798 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14799 case ALTIVEC_BUILTIN_STVEBX:
14800 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14801 case ALTIVEC_BUILTIN_STVEHX:
14802 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14803 case ALTIVEC_BUILTIN_STVEWX:
14804 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14805 case ALTIVEC_BUILTIN_STVXL_V2DF:
14806 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14807 case ALTIVEC_BUILTIN_STVXL_V2DI:
14808 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14809 case ALTIVEC_BUILTIN_STVXL_V4SF:
14810 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14811 case ALTIVEC_BUILTIN_STVXL:
14812 case ALTIVEC_BUILTIN_STVXL_V4SI:
14813 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14814 case ALTIVEC_BUILTIN_STVXL_V8HI:
14815 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14816 case ALTIVEC_BUILTIN_STVXL_V16QI:
14817 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14818
14819 case ALTIVEC_BUILTIN_STVLX:
14820 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14821 case ALTIVEC_BUILTIN_STVLXL:
14822 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14823 case ALTIVEC_BUILTIN_STVRX:
14824 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14825 case ALTIVEC_BUILTIN_STVRXL:
14826 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14827
14828 case P9V_BUILTIN_STXVL:
14829 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14830
14831 case P9V_BUILTIN_XST_LEN_R:
14832 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14833
14834 case VSX_BUILTIN_STXVD2X_V1TI:
14835 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14836 case VSX_BUILTIN_STXVD2X_V2DF:
14837 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14838 case VSX_BUILTIN_STXVD2X_V2DI:
14839 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14840 case VSX_BUILTIN_STXVW4X_V4SF:
14841 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14842 case VSX_BUILTIN_STXVW4X_V4SI:
14843 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14844 case VSX_BUILTIN_STXVW4X_V8HI:
14845 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14846 case VSX_BUILTIN_STXVW4X_V16QI:
14847 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14848
14849 /* For the following on big endian, it's ok to use any appropriate
14850 unaligned-supporting store, so use a generic expander. For
14851 little-endian, the exact element-reversing instruction must
14852 be used. */
14853 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14854 {
14855 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14856 : CODE_FOR_vsx_st_elemrev_v1ti);
14857 return altivec_expand_stv_builtin (code, exp);
14858 }
14859 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14860 {
14861 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14862 : CODE_FOR_vsx_st_elemrev_v2df);
14863 return altivec_expand_stv_builtin (code, exp);
14864 }
14865 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14866 {
14867 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14868 : CODE_FOR_vsx_st_elemrev_v2di);
14869 return altivec_expand_stv_builtin (code, exp);
14870 }
14871 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14872 {
14873 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14874 : CODE_FOR_vsx_st_elemrev_v4sf);
14875 return altivec_expand_stv_builtin (code, exp);
14876 }
14877 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14878 {
14879 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14880 : CODE_FOR_vsx_st_elemrev_v4si);
14881 return altivec_expand_stv_builtin (code, exp);
14882 }
14883 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14884 {
14885 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14886 : CODE_FOR_vsx_st_elemrev_v8hi);
14887 return altivec_expand_stv_builtin (code, exp);
14888 }
14889 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14890 {
14891 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14892 : CODE_FOR_vsx_st_elemrev_v16qi);
14893 return altivec_expand_stv_builtin (code, exp);
14894 }
14895
14896 case ALTIVEC_BUILTIN_MFVSCR:
14897 icode = CODE_FOR_altivec_mfvscr;
14898 tmode = insn_data[icode].operand[0].mode;
14899
14900 if (target == 0
14901 || GET_MODE (target) != tmode
14902 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14903 target = gen_reg_rtx (tmode);
14904
14905 pat = GEN_FCN (icode) (target);
14906 if (! pat)
14907 return 0;
14908 emit_insn (pat);
14909 return target;
14910
14911 case ALTIVEC_BUILTIN_MTVSCR:
14912 icode = CODE_FOR_altivec_mtvscr;
14913 arg0 = CALL_EXPR_ARG (exp, 0);
14914 op0 = expand_normal (arg0);
14915 mode0 = insn_data[icode].operand[0].mode;
14916
14917 /* If we got invalid arguments, bail out before generating bad rtl. */
14918 if (arg0 == error_mark_node)
14919 return const0_rtx;
14920
14921 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14922 op0 = copy_to_mode_reg (mode0, op0);
14923
14924 pat = GEN_FCN (icode) (op0);
14925 if (pat)
14926 emit_insn (pat);
14927 return NULL_RTX;
14928
14929 case ALTIVEC_BUILTIN_DSSALL:
14930 emit_insn (gen_altivec_dssall ());
14931 return NULL_RTX;
14932
14933 case ALTIVEC_BUILTIN_DSS:
14934 icode = CODE_FOR_altivec_dss;
14935 arg0 = CALL_EXPR_ARG (exp, 0);
14936 STRIP_NOPS (arg0);
14937 op0 = expand_normal (arg0);
14938 mode0 = insn_data[icode].operand[0].mode;
14939
14940 /* If we got invalid arguments, bail out before generating bad rtl. */
14941 if (arg0 == error_mark_node)
14942 return const0_rtx;
14943
14944 if (TREE_CODE (arg0) != INTEGER_CST
14945 || TREE_INT_CST_LOW (arg0) & ~0x3)
14946 {
14947 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14948 return const0_rtx;
14949 }
14950
14951 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14952 op0 = copy_to_mode_reg (mode0, op0);
14953
14954 emit_insn (gen_altivec_dss (op0));
14955 return NULL_RTX;
14956
14957 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14958 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14959 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14960 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14961 case VSX_BUILTIN_VEC_INIT_V2DF:
14962 case VSX_BUILTIN_VEC_INIT_V2DI:
14963 case VSX_BUILTIN_VEC_INIT_V1TI:
14964 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14965
14966 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14967 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14968 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14969 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14970 case VSX_BUILTIN_VEC_SET_V2DF:
14971 case VSX_BUILTIN_VEC_SET_V2DI:
14972 case VSX_BUILTIN_VEC_SET_V1TI:
14973 return altivec_expand_vec_set_builtin (exp);
14974
14975 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14976 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14977 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14978 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14979 case VSX_BUILTIN_VEC_EXT_V2DF:
14980 case VSX_BUILTIN_VEC_EXT_V2DI:
14981 case VSX_BUILTIN_VEC_EXT_V1TI:
14982 return altivec_expand_vec_ext_builtin (exp, target);
14983
14984 case P9V_BUILTIN_VEC_EXTRACT4B:
14985 arg1 = CALL_EXPR_ARG (exp, 1);
14986 STRIP_NOPS (arg1);
14987
14988 /* Generate a normal call if it is invalid. */
14989 if (arg1 == error_mark_node)
14990 return expand_call (exp, target, false);
14991
14992 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14993 {
14994 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14995 return expand_call (exp, target, false);
14996 }
14997 break;
14998
14999 case P9V_BUILTIN_VEC_INSERT4B:
15000 arg2 = CALL_EXPR_ARG (exp, 2);
15001 STRIP_NOPS (arg2);
15002
15003 /* Generate a normal call if it is invalid. */
15004 if (arg2 == error_mark_node)
15005 return expand_call (exp, target, false);
15006
15007 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15008 {
15009 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15010 return expand_call (exp, target, false);
15011 }
15012 break;
15013
15014 default:
15015 /* Fall through to the code below the switch. */
15016 break;
15017 }
15018
15019 /* Expand abs* operations. */
15020 d = bdesc_abs;
15021 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15022 if (d->code == fcode)
15023 return altivec_expand_abs_builtin (d->icode, exp, target);
15024
15025 /* Expand the AltiVec predicates. */
15026 d = bdesc_altivec_preds;
15027 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15028 if (d->code == fcode)
15029 return altivec_expand_predicate_builtin (d->icode, exp, target);
15030
15031 /* The LV* builtins are funky; they were initialized differently. */
15032 switch (fcode)
15033 {
15034 case ALTIVEC_BUILTIN_LVSL:
15035 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15036 exp, target, false);
15037 case ALTIVEC_BUILTIN_LVSR:
15038 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15039 exp, target, false);
15040 case ALTIVEC_BUILTIN_LVEBX:
15041 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15042 exp, target, false);
15043 case ALTIVEC_BUILTIN_LVEHX:
15044 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15045 exp, target, false);
15046 case ALTIVEC_BUILTIN_LVEWX:
15047 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15048 exp, target, false);
15049 case ALTIVEC_BUILTIN_LVXL_V2DF:
15050 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15051 exp, target, false);
15052 case ALTIVEC_BUILTIN_LVXL_V2DI:
15053 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15054 exp, target, false);
15055 case ALTIVEC_BUILTIN_LVXL_V4SF:
15056 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15057 exp, target, false);
15058 case ALTIVEC_BUILTIN_LVXL:
15059 case ALTIVEC_BUILTIN_LVXL_V4SI:
15060 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15061 exp, target, false);
15062 case ALTIVEC_BUILTIN_LVXL_V8HI:
15063 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15064 exp, target, false);
15065 case ALTIVEC_BUILTIN_LVXL_V16QI:
15066 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15067 exp, target, false);
15068 case ALTIVEC_BUILTIN_LVX_V1TI:
15069 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15070 exp, target, false);
15071 case ALTIVEC_BUILTIN_LVX_V2DF:
15072 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15073 exp, target, false);
15074 case ALTIVEC_BUILTIN_LVX_V2DI:
15075 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15076 exp, target, false);
15077 case ALTIVEC_BUILTIN_LVX_V4SF:
15078 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15079 exp, target, false);
15080 case ALTIVEC_BUILTIN_LVX:
15081 case ALTIVEC_BUILTIN_LVX_V4SI:
15082 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15083 exp, target, false);
15084 case ALTIVEC_BUILTIN_LVX_V8HI:
15085 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15086 exp, target, false);
15087 case ALTIVEC_BUILTIN_LVX_V16QI:
15088 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15089 exp, target, false);
15090 case ALTIVEC_BUILTIN_LVLX:
15091 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15092 exp, target, true);
15093 case ALTIVEC_BUILTIN_LVLXL:
15094 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15095 exp, target, true);
15096 case ALTIVEC_BUILTIN_LVRX:
15097 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15098 exp, target, true);
15099 case ALTIVEC_BUILTIN_LVRXL:
15100 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15101 exp, target, true);
15102 case VSX_BUILTIN_LXVD2X_V1TI:
15103 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15104 exp, target, false);
15105 case VSX_BUILTIN_LXVD2X_V2DF:
15106 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15107 exp, target, false);
15108 case VSX_BUILTIN_LXVD2X_V2DI:
15109 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15110 exp, target, false);
15111 case VSX_BUILTIN_LXVW4X_V4SF:
15112 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15113 exp, target, false);
15114 case VSX_BUILTIN_LXVW4X_V4SI:
15115 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15116 exp, target, false);
15117 case VSX_BUILTIN_LXVW4X_V8HI:
15118 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15119 exp, target, false);
15120 case VSX_BUILTIN_LXVW4X_V16QI:
15121 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15122 exp, target, false);
15123 /* For the following on big endian, it's ok to use any appropriate
15124 unaligned-supporting load, so use a generic expander. For
15125 little-endian, the exact element-reversing instruction must
15126 be used. */
15127 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15128 {
15129 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15130 : CODE_FOR_vsx_ld_elemrev_v2df);
15131 return altivec_expand_lv_builtin (code, exp, target, false);
15132 }
15133 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15134 {
15135 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15136 : CODE_FOR_vsx_ld_elemrev_v1ti);
15137 return altivec_expand_lv_builtin (code, exp, target, false);
15138 }
15139 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15140 {
15141 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15142 : CODE_FOR_vsx_ld_elemrev_v2di);
15143 return altivec_expand_lv_builtin (code, exp, target, false);
15144 }
15145 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15146 {
15147 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15148 : CODE_FOR_vsx_ld_elemrev_v4sf);
15149 return altivec_expand_lv_builtin (code, exp, target, false);
15150 }
15151 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15152 {
15153 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15154 : CODE_FOR_vsx_ld_elemrev_v4si);
15155 return altivec_expand_lv_builtin (code, exp, target, false);
15156 }
15157 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15158 {
15159 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15160 : CODE_FOR_vsx_ld_elemrev_v8hi);
15161 return altivec_expand_lv_builtin (code, exp, target, false);
15162 }
15163 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15164 {
15165 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15166 : CODE_FOR_vsx_ld_elemrev_v16qi);
15167 return altivec_expand_lv_builtin (code, exp, target, false);
15168 }
15169 break;
15170 default:
15171 /* Fall through to the code below the switch. */
15172 break;
15173 }
15174
15175 *expandedp = false;
15176 return NULL_RTX;
15177 }
15178
15179 /* Check whether a builtin function is supported in this target
15180 configuration. */
15181 bool
15182 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15183 {
15184 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15185 return (fnmask & rs6000_builtin_mask) == fnmask;
15189 }
15190
15191 /* Raise an error message for a builtin function that is called without the
15192 appropriate target options being set. */
15193
15194 static void
15195 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15196 {
15197 size_t uns_fncode = (size_t) fncode;
15198 const char *name = rs6000_builtin_info[uns_fncode].name;
15199 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15200
15201 gcc_assert (name != NULL);
15202 if ((fnmask & RS6000_BTM_CELL) != 0)
15203 error ("builtin function %qs is only valid for the cell processor", name);
15204 else if ((fnmask & RS6000_BTM_VSX) != 0)
15205 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15206 else if ((fnmask & RS6000_BTM_HTM) != 0)
15207 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15208 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15209 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15210 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15211 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15212 error ("builtin function %qs requires the %qs and %qs options",
15213 name, "-mhard-dfp", "-mpower8-vector");
15214 else if ((fnmask & RS6000_BTM_DFP) != 0)
15215 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15216 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15217 error ("builtin function %qs requires the %qs option", name,
15218 "-mpower8-vector");
15219 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15220 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15221 error ("builtin function %qs requires the %qs and %qs options",
15222 name, "-mcpu=power9", "-m64");
15223 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15224 error ("builtin function %qs requires the %qs option", name,
15225 "-mcpu=power9");
15226 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15227 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15228 error ("builtin function %qs requires the %qs and %qs options",
15229 name, "-mcpu=power9", "-m64");
15230 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15231 error ("builtin function %qs requires the %qs option", name,
15232 "-mcpu=power9");
15233 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15234 {
15235 if (!TARGET_HARD_FLOAT)
15236 error ("builtin function %qs requires the %qs option", name,
15237 "-mhard-float");
15238 else
15239 error ("builtin function %qs requires the %qs option", name,
15240 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15241 }
15242 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15243 error ("builtin function %qs requires the %qs option", name,
15244 "-mhard-float");
15245 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15246 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15247 name);
15248 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15249 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15250 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15251 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15252 error ("builtin function %qs requires the %qs (or newer), and "
15253 "%qs or %qs options",
15254 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15255 else
15256 error ("builtin function %qs is not supported with the current options",
15257 name);
15258 }
15259
15260 /* Target hook for early folding of built-ins, shamelessly stolen
15261 from ia64.c. */
15262
15263 static tree
15264 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15265 int n_args ATTRIBUTE_UNUSED,
15266 tree *args ATTRIBUTE_UNUSED,
15267 bool ignore ATTRIBUTE_UNUSED)
15268 {
15269 #ifdef SUBTARGET_FOLD_BUILTIN
15270 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15271 #else
15272 return NULL_TREE;
15273 #endif
15274 }
15275
15276 /* Helper function to sort out which built-ins may be valid without having
15277 a LHS. */
15278 static bool
15279 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15280 {
15281 switch (fn_code)
15282 {
15283 case ALTIVEC_BUILTIN_STVX_V16QI:
15284 case ALTIVEC_BUILTIN_STVX_V8HI:
15285 case ALTIVEC_BUILTIN_STVX_V4SI:
15286 case ALTIVEC_BUILTIN_STVX_V4SF:
15287 case ALTIVEC_BUILTIN_STVX_V2DI:
15288 case ALTIVEC_BUILTIN_STVX_V2DF:
15289 case VSX_BUILTIN_STXVW4X_V16QI:
15290 case VSX_BUILTIN_STXVW4X_V8HI:
15291 case VSX_BUILTIN_STXVW4X_V4SF:
15292 case VSX_BUILTIN_STXVW4X_V4SI:
15293 case VSX_BUILTIN_STXVD2X_V2DF:
15294 case VSX_BUILTIN_STXVD2X_V2DI:
15295 return true;
15296 default:
15297 return false;
15298 }
15299 }
15300
15301 /* Helper function to handle the gimple folding of a vector compare
15302 operation. This sets up true/false vectors, and uses the
15303 VEC_COND_EXPR operation.
15304 CODE indicates which comparison is to be made. (EQ, GT, ...).
15305 TYPE indicates the type of the result. */
15306 static tree
15307 fold_build_vec_cmp (tree_code code, tree type,
15308 tree arg0, tree arg1)
15309 {
15310 tree cmp_type = build_same_sized_truth_vector_type (type);
15311 tree zero_vec = build_zero_cst (type);
15312 tree minus_one_vec = build_minus_one_cst (type);
15313 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15314 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15315 }
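
/* E.g. folding a V4SI vec_cmpeq (a, b) through the helpers here
   produces, in GIMPLE terms, roughly

     _1 = a == b;
     lhs = VEC_COND_EXPR <_1, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;

   a sketch of the trees built above.  */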
15316
15317 /* Helper function to handle the in-between steps for the
15318 vector compare built-ins. */
15319 static void
15320 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15321 {
15322 tree arg0 = gimple_call_arg (stmt, 0);
15323 tree arg1 = gimple_call_arg (stmt, 1);
15324 tree lhs = gimple_call_lhs (stmt);
15325 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15326 gimple *g = gimple_build_assign (lhs, cmp);
15327 gimple_set_location (g, gimple_location (stmt));
15328 gsi_replace (gsi, g, true);
15329 }
15330
15331 /* Helper function to map V2DF and V4SF types to their
15332 integral equivalents (V2DI and V4SI). */
15333 tree map_to_integral_tree_type (tree input_tree_type)
15334 {
15335 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15336 return input_tree_type;
15337 else
15338 {
15339 if (types_compatible_p (TREE_TYPE (input_tree_type),
15340 TREE_TYPE (V2DF_type_node)))
15341 return V2DI_type_node;
15342 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15343 TREE_TYPE (V4SF_type_node)))
15344 return V4SI_type_node;
15345 else
15346 gcc_unreachable ();
15347 }
15348 }
15349
15350 /* Helper function to handle the vector merge[hl] built-ins. The
15351 implementation difference between the h and l versions is in the
15352 values used when building the permute vector for high-word versus
15353 low-word merge; the variance is keyed off the use_high parameter. */
15354 static void
15355 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15356 {
15357 tree arg0 = gimple_call_arg (stmt, 0);
15358 tree arg1 = gimple_call_arg (stmt, 1);
15359 tree lhs = gimple_call_lhs (stmt);
15360 tree lhs_type = TREE_TYPE (lhs);
15361 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15362 int midpoint = n_elts / 2;
15363 int offset = 0;
15364
15365 if (use_high == 1)
15366 offset = midpoint;
15367
15368 /* The permute_type will match the lhs for integral types. For double and
15369 float types, the permute type needs to map to the V2 or V4 type that
15370 matches size. */
15371 tree permute_type;
15372 permute_type = map_to_integral_tree_type (lhs_type);
15373 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15374
15375 for (int i = 0; i < midpoint; i++)
15376 {
15377 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15378 offset + i));
15379 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15380 offset + n_elts + i));
15381 }
15382
15383 tree permute = elts.build ();
15384
15385 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15386 gimple_set_location (g, gimple_location (stmt));
15387 gsi_replace (gsi, g, true);
15388 }
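/* Worked example (V4SI, n_elts == 4, midpoint == 2): for the high-word
   merge (use_high == 0, offset == 0) the loop pushes the permute indices
   {0, 4, 1, 5}, so vec_mergeh (a, b) becomes { a[0], b[0], a[1], b[1] };
   for the low-word merge (use_high == 1, offset == 2) it pushes
   {2, 6, 3, 7}, giving { a[2], b[2], a[3], b[3] }.  */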
15389
15390 /* Helper function to handle the vector merge[eo] built-ins. */
15391 static void
15392 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15393 {
15394 tree arg0 = gimple_call_arg (stmt, 0);
15395 tree arg1 = gimple_call_arg (stmt, 1);
15396 tree lhs = gimple_call_lhs (stmt);
15397 tree lhs_type = TREE_TYPE (lhs);
15398 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15399
15400 /* The permute_type will match the lhs for integral types. For double and
15401 float types, the permute type needs to map to the V2DI or V4SI type of
15402 matching size. */
15403 tree permute_type;
15404 permute_type = map_to_integral_tree_type (lhs_type);
15405
15406 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15407
15408 /* Build the permute vector. */
15409 for (int i = 0; i < n_elts / 2; i++)
15410 {
15411 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15412 2 * i + use_odd));
15413 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15414 2 * i + use_odd + n_elts));
15415 }
15416
15417 tree permute = elts.build ();
15418
15419 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15420 gimple_set_location (g, gimple_location (stmt));
15421 gsi_replace (gsi, g, true);
15422 }
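/* Worked example (V4SI, n_elts == 4): with use_odd == 0 the loop pushes
   the permute indices {0, 4, 2, 6}, so vec_mergee (a, b) becomes
   { a[0], b[0], a[2], b[2] }; with use_odd == 1 it pushes {1, 5, 3, 7},
   selecting the odd elements instead.  */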
15423
15424 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15425 a constant, use rs6000_fold_builtin.) */
15426
15427 bool
15428 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15429 {
15430 gimple *stmt = gsi_stmt (*gsi);
15431 tree fndecl = gimple_call_fndecl (stmt);
15432 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15433 enum rs6000_builtins fn_code
15434 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15435 tree arg0, arg1, lhs, temp;
15436 enum tree_code bcode;
15437 gimple *g;
15438
15439 size_t uns_fncode = (size_t) fn_code;
15440 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15441 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15442 const char *fn_name2 = (icode != CODE_FOR_nothing)
15443 ? get_insn_name ((int) icode)
15444 : "nothing";
15445
15446 if (TARGET_DEBUG_BUILTIN)
15447 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15448 fn_code, fn_name1, fn_name2);
15449
15450 if (!rs6000_fold_gimple)
15451 return false;
15452
15453 /* Prevent gimple folding for code that does not have an LHS, unless it is
15454 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15455 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15456 return false;
15457
15458 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15459 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15460 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15461 if (!func_valid_p)
15462 return false;
15463
15464 switch (fn_code)
15465 {
15466 /* Flavors of vec_add. We deliberately don't expand
15467 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15468 TImode, resulting in much poorer code generation. */
15469 case ALTIVEC_BUILTIN_VADDUBM:
15470 case ALTIVEC_BUILTIN_VADDUHM:
15471 case ALTIVEC_BUILTIN_VADDUWM:
15472 case P8V_BUILTIN_VADDUDM:
15473 case ALTIVEC_BUILTIN_VADDFP:
15474 case VSX_BUILTIN_XVADDDP:
15475 bcode = PLUS_EXPR;
15476 do_binary:
15477 arg0 = gimple_call_arg (stmt, 0);
15478 arg1 = gimple_call_arg (stmt, 1);
15479 lhs = gimple_call_lhs (stmt);
15480 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15481 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15482 {
15483 /* Ensure the binary operation is performed in a type
15484 that wraps if it is an integral type. */
15485 gimple_seq stmts = NULL;
15486 tree type = unsigned_type_for (TREE_TYPE (lhs));
15487 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15488 type, arg0);
15489 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15490 type, arg1);
15491 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15492 type, uarg0, uarg1);
15493 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15494 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15495 build1 (VIEW_CONVERT_EXPR,
15496 TREE_TYPE (lhs), res));
15497 gsi_replace (gsi, g, true);
15498 return true;
15499 }
15500 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15501 gimple_set_location (g, gimple_location (stmt));
15502 gsi_replace (gsi, g, true);
15503 return true;
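      /* Illustrative sketch of the signed-integral path above: for a V4SI
	 vec_add the replacement GIMPLE is roughly

	   _1 = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
	   _2 = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
	   _3 = _1 + _2;
	   lhs = VIEW_CONVERT_EXPR <vector signed int> (_3);

	 so the arithmetic happens in an unsigned (wrapping) type and cannot
	 introduce signed-overflow UB that the original builtin lacks.  */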
15504 /* Flavors of vec_sub. We deliberately don't expand
15505 P8V_BUILTIN_VSUBUQM. */
15506 case ALTIVEC_BUILTIN_VSUBUBM:
15507 case ALTIVEC_BUILTIN_VSUBUHM:
15508 case ALTIVEC_BUILTIN_VSUBUWM:
15509 case P8V_BUILTIN_VSUBUDM:
15510 case ALTIVEC_BUILTIN_VSUBFP:
15511 case VSX_BUILTIN_XVSUBDP:
15512 bcode = MINUS_EXPR;
15513 goto do_binary;
15514 case VSX_BUILTIN_XVMULSP:
15515 case VSX_BUILTIN_XVMULDP:
15516 arg0 = gimple_call_arg (stmt, 0);
15517 arg1 = gimple_call_arg (stmt, 1);
15518 lhs = gimple_call_lhs (stmt);
15519 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15520 gimple_set_location (g, gimple_location (stmt));
15521 gsi_replace (gsi, g, true);
15522 return true;
15523 /* Even element flavors of vec_mul (signed). */
15524 case ALTIVEC_BUILTIN_VMULESB:
15525 case ALTIVEC_BUILTIN_VMULESH:
15526 case P8V_BUILTIN_VMULESW:
15527 /* Even element flavors of vec_mul (unsigned). */
15528 case ALTIVEC_BUILTIN_VMULEUB:
15529 case ALTIVEC_BUILTIN_VMULEUH:
15530 case P8V_BUILTIN_VMULEUW:
15531 arg0 = gimple_call_arg (stmt, 0);
15532 arg1 = gimple_call_arg (stmt, 1);
15533 lhs = gimple_call_lhs (stmt);
15534 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15535 gimple_set_location (g, gimple_location (stmt));
15536 gsi_replace (gsi, g, true);
15537 return true;
15538 /* Odd element flavors of vec_mul (signed). */
15539 case ALTIVEC_BUILTIN_VMULOSB:
15540 case ALTIVEC_BUILTIN_VMULOSH:
15541 case P8V_BUILTIN_VMULOSW:
15542 /* Odd element flavors of vec_mul (unsigned). */
15543 case ALTIVEC_BUILTIN_VMULOUB:
15544 case ALTIVEC_BUILTIN_VMULOUH:
15545 case P8V_BUILTIN_VMULOUW:
15546 arg0 = gimple_call_arg (stmt, 0);
15547 arg1 = gimple_call_arg (stmt, 1);
15548 lhs = gimple_call_lhs (stmt);
15549 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15550 gimple_set_location (g, gimple_location (stmt));
15551 gsi_replace (gsi, g, true);
15552 return true;
15553 /* Flavors of vec_div (Integer). */
15554 case VSX_BUILTIN_DIV_V2DI:
15555 case VSX_BUILTIN_UDIV_V2DI:
15556 arg0 = gimple_call_arg (stmt, 0);
15557 arg1 = gimple_call_arg (stmt, 1);
15558 lhs = gimple_call_lhs (stmt);
15559 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15560 gimple_set_location (g, gimple_location (stmt));
15561 gsi_replace (gsi, g, true);
15562 return true;
15563 /* Flavors of vec_div (Float). */
15564 case VSX_BUILTIN_XVDIVSP:
15565 case VSX_BUILTIN_XVDIVDP:
15566 arg0 = gimple_call_arg (stmt, 0);
15567 arg1 = gimple_call_arg (stmt, 1);
15568 lhs = gimple_call_lhs (stmt);
15569 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15570 gimple_set_location (g, gimple_location (stmt));
15571 gsi_replace (gsi, g, true);
15572 return true;
15573 /* Flavors of vec_and. */
15574 case ALTIVEC_BUILTIN_VAND:
15575 arg0 = gimple_call_arg (stmt, 0);
15576 arg1 = gimple_call_arg (stmt, 1);
15577 lhs = gimple_call_lhs (stmt);
15578 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15579 gimple_set_location (g, gimple_location (stmt));
15580 gsi_replace (gsi, g, true);
15581 return true;
15582 /* Flavors of vec_andc. */
15583 case ALTIVEC_BUILTIN_VANDC:
15584 arg0 = gimple_call_arg (stmt, 0);
15585 arg1 = gimple_call_arg (stmt, 1);
15586 lhs = gimple_call_lhs (stmt);
15587 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15588 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15589 gimple_set_location (g, gimple_location (stmt));
15590 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15591 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15592 gimple_set_location (g, gimple_location (stmt));
15593 gsi_replace (gsi, g, true);
15594 return true;
15595 /* Flavors of vec_nand. */
15596 case P8V_BUILTIN_VEC_NAND:
15597 case P8V_BUILTIN_NAND_V16QI:
15598 case P8V_BUILTIN_NAND_V8HI:
15599 case P8V_BUILTIN_NAND_V4SI:
15600 case P8V_BUILTIN_NAND_V4SF:
15601 case P8V_BUILTIN_NAND_V2DF:
15602 case P8V_BUILTIN_NAND_V2DI:
15603 arg0 = gimple_call_arg (stmt, 0);
15604 arg1 = gimple_call_arg (stmt, 1);
15605 lhs = gimple_call_lhs (stmt);
15606 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15607 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15608 gimple_set_location (g, gimple_location (stmt));
15609 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15610 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15611 gimple_set_location (g, gimple_location (stmt));
15612 gsi_replace (gsi, g, true);
15613 return true;
15614 /* Flavors of vec_or. */
15615 case ALTIVEC_BUILTIN_VOR:
15616 arg0 = gimple_call_arg (stmt, 0);
15617 arg1 = gimple_call_arg (stmt, 1);
15618 lhs = gimple_call_lhs (stmt);
15619 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15620 gimple_set_location (g, gimple_location (stmt));
15621 gsi_replace (gsi, g, true);
15622 return true;
15623 /* Flavors of vec_orc. */
15624 case P8V_BUILTIN_ORC_V16QI:
15625 case P8V_BUILTIN_ORC_V8HI:
15626 case P8V_BUILTIN_ORC_V4SI:
15627 case P8V_BUILTIN_ORC_V4SF:
15628 case P8V_BUILTIN_ORC_V2DF:
15629 case P8V_BUILTIN_ORC_V2DI:
15630 arg0 = gimple_call_arg (stmt, 0);
15631 arg1 = gimple_call_arg (stmt, 1);
15632 lhs = gimple_call_lhs (stmt);
15633 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15634 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15635 gimple_set_location (g, gimple_location (stmt));
15636 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15637 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15638 gimple_set_location (g, gimple_location (stmt));
15639 gsi_replace (gsi, g, true);
15640 return true;
15641 /* Flavors of vec_xor. */
15642 case ALTIVEC_BUILTIN_VXOR:
15643 arg0 = gimple_call_arg (stmt, 0);
15644 arg1 = gimple_call_arg (stmt, 1);
15645 lhs = gimple_call_lhs (stmt);
15646 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15647 gimple_set_location (g, gimple_location (stmt));
15648 gsi_replace (gsi, g, true);
15649 return true;
15650 /* Flavors of vec_nor. */
15651 case ALTIVEC_BUILTIN_VNOR:
15652 arg0 = gimple_call_arg (stmt, 0);
15653 arg1 = gimple_call_arg (stmt, 1);
15654 lhs = gimple_call_lhs (stmt);
15655 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15656 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15657 gimple_set_location (g, gimple_location (stmt));
15658 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15659 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15660 gimple_set_location (g, gimple_location (stmt));
15661 gsi_replace (gsi, g, true);
15662 return true;
15663 /* Flavors of vec_abs. */
15664 case ALTIVEC_BUILTIN_ABS_V16QI:
15665 case ALTIVEC_BUILTIN_ABS_V8HI:
15666 case ALTIVEC_BUILTIN_ABS_V4SI:
15667 case ALTIVEC_BUILTIN_ABS_V4SF:
15668 case P8V_BUILTIN_ABS_V2DI:
15669 case VSX_BUILTIN_XVABSDP:
15670 arg0 = gimple_call_arg (stmt, 0);
15671 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15672 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15673 return false;
15674 lhs = gimple_call_lhs (stmt);
15675 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15676 gimple_set_location (g, gimple_location (stmt));
15677 gsi_replace (gsi, g, true);
15678 return true;
15679 /* Flavors of vec_min. */
15680 case VSX_BUILTIN_XVMINDP:
15681 case P8V_BUILTIN_VMINSD:
15682 case P8V_BUILTIN_VMINUD:
15683 case ALTIVEC_BUILTIN_VMINSB:
15684 case ALTIVEC_BUILTIN_VMINSH:
15685 case ALTIVEC_BUILTIN_VMINSW:
15686 case ALTIVEC_BUILTIN_VMINUB:
15687 case ALTIVEC_BUILTIN_VMINUH:
15688 case ALTIVEC_BUILTIN_VMINUW:
15689 case ALTIVEC_BUILTIN_VMINFP:
15690 arg0 = gimple_call_arg (stmt, 0);
15691 arg1 = gimple_call_arg (stmt, 1);
15692 lhs = gimple_call_lhs (stmt);
15693 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15694 gimple_set_location (g, gimple_location (stmt));
15695 gsi_replace (gsi, g, true);
15696 return true;
15697 /* Flavors of vec_max. */
15698 case VSX_BUILTIN_XVMAXDP:
15699 case P8V_BUILTIN_VMAXSD:
15700 case P8V_BUILTIN_VMAXUD:
15701 case ALTIVEC_BUILTIN_VMAXSB:
15702 case ALTIVEC_BUILTIN_VMAXSH:
15703 case ALTIVEC_BUILTIN_VMAXSW:
15704 case ALTIVEC_BUILTIN_VMAXUB:
15705 case ALTIVEC_BUILTIN_VMAXUH:
15706 case ALTIVEC_BUILTIN_VMAXUW:
15707 case ALTIVEC_BUILTIN_VMAXFP:
15708 arg0 = gimple_call_arg (stmt, 0);
15709 arg1 = gimple_call_arg (stmt, 1);
15710 lhs = gimple_call_lhs (stmt);
15711 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15712 gimple_set_location (g, gimple_location (stmt));
15713 gsi_replace (gsi, g, true);
15714 return true;
15715 /* Flavors of vec_eqv. */
15716 case P8V_BUILTIN_EQV_V16QI:
15717 case P8V_BUILTIN_EQV_V8HI:
15718 case P8V_BUILTIN_EQV_V4SI:
15719 case P8V_BUILTIN_EQV_V4SF:
15720 case P8V_BUILTIN_EQV_V2DF:
15721 case P8V_BUILTIN_EQV_V2DI:
15722 arg0 = gimple_call_arg (stmt, 0);
15723 arg1 = gimple_call_arg (stmt, 1);
15724 lhs = gimple_call_lhs (stmt);
15725 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15726 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15727 gimple_set_location (g, gimple_location (stmt));
15728 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15729 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15730 gimple_set_location (g, gimple_location (stmt));
15731 gsi_replace (gsi, g, true);
15732 return true;
15733 /* Flavors of vec_rotate_left. */
15734 case ALTIVEC_BUILTIN_VRLB:
15735 case ALTIVEC_BUILTIN_VRLH:
15736 case ALTIVEC_BUILTIN_VRLW:
15737 case P8V_BUILTIN_VRLD:
15738 arg0 = gimple_call_arg (stmt, 0);
15739 arg1 = gimple_call_arg (stmt, 1);
15740 lhs = gimple_call_lhs (stmt);
15741 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15742 gimple_set_location (g, gimple_location (stmt));
15743 gsi_replace (gsi, g, true);
15744 return true;
15745 /* Flavors of vector shift right algebraic.
15746 vec_sra{b,h,w} -> vsra{b,h,w}. */
15747 case ALTIVEC_BUILTIN_VSRAB:
15748 case ALTIVEC_BUILTIN_VSRAH:
15749 case ALTIVEC_BUILTIN_VSRAW:
15750 case P8V_BUILTIN_VSRAD:
15751 arg0 = gimple_call_arg (stmt, 0);
15752 arg1 = gimple_call_arg (stmt, 1);
15753 lhs = gimple_call_lhs (stmt);
15754 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15755 gimple_set_location (g, gimple_location (stmt));
15756 gsi_replace (gsi, g, true);
15757 return true;
15758 /* Flavors of vector shift left.
15759 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15760 case ALTIVEC_BUILTIN_VSLB:
15761 case ALTIVEC_BUILTIN_VSLH:
15762 case ALTIVEC_BUILTIN_VSLW:
15763 case P8V_BUILTIN_VSLD:
15764 {
15765 location_t loc;
15766 gimple_seq stmts = NULL;
15767 arg0 = gimple_call_arg (stmt, 0);
15768 tree arg0_type = TREE_TYPE (arg0);
15769 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15770 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15771 return false;
15772 arg1 = gimple_call_arg (stmt, 1);
15773 tree arg1_type = TREE_TYPE (arg1);
15774 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15775 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15776 loc = gimple_location (stmt);
15777 lhs = gimple_call_lhs (stmt);
15778 /* Force arg1 into the valid range of shift amounts for the arg0 type. */
15779 /* Build a vector consisting of the max valid bit-size values. */
15780 int n_elts = VECTOR_CST_NELTS (arg1);
15781 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15782 * BITS_PER_UNIT;
15783 tree element_size = build_int_cst (unsigned_element_type,
15784 tree_size_in_bits / n_elts);
15785 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15786 for (int i = 0; i < n_elts; i++)
15787 elts.safe_push (element_size);
15788 tree modulo_tree = elts.build ();
15789 /* Modulo the provided shift value against that vector. */
15790 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15791 unsigned_arg1_type, arg1);
15792 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15793 unsigned_arg1_type, unsigned_arg1,
15794 modulo_tree);
15795 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15796 /* And finally, do the shift. */
15797 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15798 gimple_set_location (g, gimple_location (stmt));
15799 gsi_replace (gsi, g, true);
15800 return true;
15801 }
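      /* Illustrative sketch: for a V4SI vec_sl, element_size is
	 128 / 4 == 32, so the folding above is roughly

	   _1 = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
	   _2 = _1 % { 32, 32, 32, 32 };
	   lhs = arg0 << _2;

	 keeping every per-element shift amount in the range that
	 LSHIFT_EXPR (and the hardware) treats as well defined.  */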
15802 /* Flavors of vector shift right. */
15803 case ALTIVEC_BUILTIN_VSRB:
15804 case ALTIVEC_BUILTIN_VSRH:
15805 case ALTIVEC_BUILTIN_VSRW:
15806 case P8V_BUILTIN_VSRD:
15807 {
15808 arg0 = gimple_call_arg (stmt, 0);
15809 arg1 = gimple_call_arg (stmt, 1);
15810 lhs = gimple_call_lhs (stmt);
15811 gimple_seq stmts = NULL;
15812 /* Convert arg0 to unsigned. */
15813 tree arg0_unsigned
15814 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15815 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15816 tree res
15817 = gimple_build (&stmts, RSHIFT_EXPR,
15818 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15819 /* Convert result back to the lhs type. */
15820 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15821 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15822 update_call_from_tree (gsi, res);
15823 return true;
15824 }
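      /* Illustrative sketch: arg0 is viewed as unsigned so RSHIFT_EXPR is a
	 logical (zero-filling) shift, e.g. for V4SI roughly

	   _1 = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
	   _2 = _1 >> arg1;
	   lhs = VIEW_CONVERT_EXPR <vector signed int> (_2);

	 in contrast to the vsra cases above, which keep the signed type and
	 therefore shift arithmetically.  */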
15825 /* Vector loads. */
15826 case ALTIVEC_BUILTIN_LVX_V16QI:
15827 case ALTIVEC_BUILTIN_LVX_V8HI:
15828 case ALTIVEC_BUILTIN_LVX_V4SI:
15829 case ALTIVEC_BUILTIN_LVX_V4SF:
15830 case ALTIVEC_BUILTIN_LVX_V2DI:
15831 case ALTIVEC_BUILTIN_LVX_V2DF:
15832 case ALTIVEC_BUILTIN_LVX_V1TI:
15833 {
15834 arg0 = gimple_call_arg (stmt, 0); // offset
15835 arg1 = gimple_call_arg (stmt, 1); // address
15836 lhs = gimple_call_lhs (stmt);
15837 location_t loc = gimple_location (stmt);
15838 /* Since arg1 may be cast to a different type, just use ptr_type_node
15839 here instead of trying to enforce TBAA on pointer types. */
15840 tree arg1_type = ptr_type_node;
15841 tree lhs_type = TREE_TYPE (lhs);
15842 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15843 the tree using the value from arg0. The resulting type will match
15844 the type of arg1. */
15845 gimple_seq stmts = NULL;
15846 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15847 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15848 arg1_type, arg1, temp_offset);
15849 /* Mask off any lower bits from the address. */
15850 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15851 arg1_type, temp_addr,
15852 build_int_cst (arg1_type, -16));
15853 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15854 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15855 take an offset, but since we've already incorporated the offset
15856 above, here we just pass in a zero. */
15857 gimple *g
15858 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15859 build_int_cst (arg1_type, 0)));
15860 gimple_set_location (g, loc);
15861 gsi_replace (gsi, g, true);
15862 return true;
15863 }
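      /* Illustrative sketch of the replacement for, e.g.,
	 lhs = __builtin_altivec_lvx_v4si (offset, addr):

	   _1 = addr p+ (sizetype) offset;
	   _2 = _1 & -16;
	   lhs = MEM[(vector signed int *) _2];

	 The explicit mask mirrors the lvx hardware behavior of ignoring the
	 low four address bits, i.e. always loading 16-byte aligned.  */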
15864 /* Vector stores. */
15865 case ALTIVEC_BUILTIN_STVX_V16QI:
15866 case ALTIVEC_BUILTIN_STVX_V8HI:
15867 case ALTIVEC_BUILTIN_STVX_V4SI:
15868 case ALTIVEC_BUILTIN_STVX_V4SF:
15869 case ALTIVEC_BUILTIN_STVX_V2DI:
15870 case ALTIVEC_BUILTIN_STVX_V2DF:
15871 {
15872 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15873 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15874 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15875 location_t loc = gimple_location (stmt);
15876 tree arg0_type = TREE_TYPE (arg0);
15877 /* Use ptr_type_node (no TBAA) for the arg2_type.
15878 FIXME: (Richard) "A proper fix would be to transition this type as
15879 seen from the frontend to GIMPLE, for example in a similar way we
15880 do for MEM_REFs by piggy-backing that on an extra argument, a
15881 constant zero pointer of the alias pointer type to use (which would
15882 also serve as a type indicator of the store itself). I'd use a
15883 target specific internal function for this (not sure if we can have
15884 those target specific, but I guess if it's folded away then that's
15885 fine) and get away with the overload set." */
15886 tree arg2_type = ptr_type_node;
15887 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15888 the tree using the value from arg1. The resulting type will match
15889 the type of arg2. */
15890 gimple_seq stmts = NULL;
15891 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15892 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15893 arg2_type, arg2, temp_offset);
15894 /* Mask off any lower bits from the address. */
15895 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15896 arg2_type, temp_addr,
15897 build_int_cst (arg2_type, -16));
15898 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15899 /* The desired gimple result should be similar to:
15900 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15901 gimple *g
15902 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15903 build_int_cst (arg2_type, 0)), arg0);
15904 gimple_set_location (g, loc);
15905 gsi_replace (gsi, g, true);
15906 return true;
15907 }
15908
15909 /* Unaligned vector loads. */
15910 case VSX_BUILTIN_LXVW4X_V16QI:
15911 case VSX_BUILTIN_LXVW4X_V8HI:
15912 case VSX_BUILTIN_LXVW4X_V4SF:
15913 case VSX_BUILTIN_LXVW4X_V4SI:
15914 case VSX_BUILTIN_LXVD2X_V2DF:
15915 case VSX_BUILTIN_LXVD2X_V2DI:
15916 {
15917 arg0 = gimple_call_arg (stmt, 0); // offset
15918 arg1 = gimple_call_arg (stmt, 1); // address
15919 lhs = gimple_call_lhs (stmt);
15920 location_t loc = gimple_location (stmt);
15921 /* Since arg1 may be cast to a different type, just use ptr_type_node
15922 here instead of trying to enforce TBAA on pointer types. */
15923 tree arg1_type = ptr_type_node;
15924 tree lhs_type = TREE_TYPE (lhs);
15925 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15926 required alignment (power) is 4 bytes regardless of data type. */
15927 tree align_ltype = build_aligned_type (lhs_type, 4);
15928 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15929 the tree using the value from arg0. The resulting type will match
15930 the type of arg1. */
15931 gimple_seq stmts = NULL;
15932 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15933 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15934 arg1_type, arg1, temp_offset);
15935 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15936 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15937 take an offset, but since we've already incorporated the offset
15938 above, here we just pass in a zero. */
15939 gimple *g;
15940 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15941 build_int_cst (arg1_type, 0)));
15942 gimple_set_location (g, loc);
15943 gsi_replace (gsi, g, true);
15944 return true;
15945 }
15946
15947 /* Unaligned vector stores. */
15948 case VSX_BUILTIN_STXVW4X_V16QI:
15949 case VSX_BUILTIN_STXVW4X_V8HI:
15950 case VSX_BUILTIN_STXVW4X_V4SF:
15951 case VSX_BUILTIN_STXVW4X_V4SI:
15952 case VSX_BUILTIN_STXVD2X_V2DF:
15953 case VSX_BUILTIN_STXVD2X_V2DI:
15954 {
15955 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15956 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15957 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15958 location_t loc = gimple_location (stmt);
15959 tree arg0_type = TREE_TYPE (arg0);
15960 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15961 tree arg2_type = ptr_type_node;
15962 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15963 required alignment (power) is 4 bytes regardless of data type. */
15964 tree align_stype = build_aligned_type (arg0_type, 4);
15965 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15966 the tree using the value from arg1. */
15967 gimple_seq stmts = NULL;
15968 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15969 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15970 arg2_type, arg2, temp_offset);
15971 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15972 gimple *g;
15973 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15974 build_int_cst (arg2_type, 0)), arg0);
15975 gimple_set_location (g, loc);
15976 gsi_replace (gsi, g, true);
15977 return true;
15978 }
15979
15980 /* Vector Fused multiply-add (fma). */
15981 case ALTIVEC_BUILTIN_VMADDFP:
15982 case VSX_BUILTIN_XVMADDDP:
15983 case ALTIVEC_BUILTIN_VMLADDUHM:
15984 {
15985 arg0 = gimple_call_arg (stmt, 0);
15986 arg1 = gimple_call_arg (stmt, 1);
15987 tree arg2 = gimple_call_arg (stmt, 2);
15988 lhs = gimple_call_lhs (stmt);
15989 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15990 gimple_call_set_lhs (g, lhs);
15991 gimple_call_set_nothrow (g, true);
15992 gimple_set_location (g, gimple_location (stmt));
15993 gsi_replace (gsi, g, true);
15994 return true;
15995 }
15996
15997 /* Vector compares; EQ, NE, GE, GT, LE. */
15998 case ALTIVEC_BUILTIN_VCMPEQUB:
15999 case ALTIVEC_BUILTIN_VCMPEQUH:
16000 case ALTIVEC_BUILTIN_VCMPEQUW:
16001 case P8V_BUILTIN_VCMPEQUD:
16002 fold_compare_helper (gsi, EQ_EXPR, stmt);
16003 return true;
16004
16005 case P9V_BUILTIN_CMPNEB:
16006 case P9V_BUILTIN_CMPNEH:
16007 case P9V_BUILTIN_CMPNEW:
16008 fold_compare_helper (gsi, NE_EXPR, stmt);
16009 return true;
16010
16011 case VSX_BUILTIN_CMPGE_16QI:
16012 case VSX_BUILTIN_CMPGE_U16QI:
16013 case VSX_BUILTIN_CMPGE_8HI:
16014 case VSX_BUILTIN_CMPGE_U8HI:
16015 case VSX_BUILTIN_CMPGE_4SI:
16016 case VSX_BUILTIN_CMPGE_U4SI:
16017 case VSX_BUILTIN_CMPGE_2DI:
16018 case VSX_BUILTIN_CMPGE_U2DI:
16019 fold_compare_helper (gsi, GE_EXPR, stmt);
16020 return true;
16021
16022 case ALTIVEC_BUILTIN_VCMPGTSB:
16023 case ALTIVEC_BUILTIN_VCMPGTUB:
16024 case ALTIVEC_BUILTIN_VCMPGTSH:
16025 case ALTIVEC_BUILTIN_VCMPGTUH:
16026 case ALTIVEC_BUILTIN_VCMPGTSW:
16027 case ALTIVEC_BUILTIN_VCMPGTUW:
16028 case P8V_BUILTIN_VCMPGTUD:
16029 case P8V_BUILTIN_VCMPGTSD:
16030 fold_compare_helper (gsi, GT_EXPR, stmt);
16031 return true;
16032
16033 case VSX_BUILTIN_CMPLE_16QI:
16034 case VSX_BUILTIN_CMPLE_U16QI:
16035 case VSX_BUILTIN_CMPLE_8HI:
16036 case VSX_BUILTIN_CMPLE_U8HI:
16037 case VSX_BUILTIN_CMPLE_4SI:
16038 case VSX_BUILTIN_CMPLE_U4SI:
16039 case VSX_BUILTIN_CMPLE_2DI:
16040 case VSX_BUILTIN_CMPLE_U2DI:
16041 fold_compare_helper (gsi, LE_EXPR, stmt);
16042 return true;
16043
16044 /* Flavors of vec_splat_[us]{8,16,32}. */
16045 case ALTIVEC_BUILTIN_VSPLTISB:
16046 case ALTIVEC_BUILTIN_VSPLTISH:
16047 case ALTIVEC_BUILTIN_VSPLTISW:
16048 {
16049 int size;
16050 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
16051 size = 8;
16052 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
16053 size = 16;
16054 else
16055 size = 32;
16056
16057 arg0 = gimple_call_arg (stmt, 0);
16058 lhs = gimple_call_lhs (stmt);
16059
16060 /* Only fold the vec_splat_*() if the lower bits of arg 0 form a
16061 5-bit signed constant in the range -16 to +15. */
16062 if (TREE_CODE (arg0) != INTEGER_CST
16063 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
16064 -16, 15))
16065 return false;
16066 gimple_seq stmts = NULL;
16067 location_t loc = gimple_location (stmt);
16068 tree splat_value = gimple_convert (&stmts, loc,
16069 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16070 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16071 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16072 g = gimple_build_assign (lhs, splat_tree);
16073 gimple_set_location (g, gimple_location (stmt));
16074 gsi_replace (gsi, g, true);
16075 return true;
16076 }
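      /* For example, vec_splat_s32 (5) simply becomes
	   lhs = { 5, 5, 5, 5 };
	 while a call whose argument does not sign-extend from its low SIZE
	 bits to a value in [-16, 15] is left for the expander to handle.  */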
16077
16078 /* Flavors of vec_splat. */
16079 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
16080 case ALTIVEC_BUILTIN_VSPLTB:
16081 case ALTIVEC_BUILTIN_VSPLTH:
16082 case ALTIVEC_BUILTIN_VSPLTW:
16083 case VSX_BUILTIN_XXSPLTD_V2DI:
16084 case VSX_BUILTIN_XXSPLTD_V2DF:
16085 {
16086 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16087 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16088 /* Only fold the vec_splat () if arg1 is both a constant value and
16089 a valid index into the arg0 vector. */
16090 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16091 if (TREE_CODE (arg1) != INTEGER_CST
16092 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16093 return false;
16094 lhs = gimple_call_lhs (stmt);
16095 tree lhs_type = TREE_TYPE (lhs);
16096 tree arg0_type = TREE_TYPE (arg0);
16097 tree splat;
16098 if (TREE_CODE (arg0) == VECTOR_CST)
16099 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16100 else
16101 {
16102 /* Determine (in bits) the length and start location of the
16103 splat value for a call to the tree_vec_extract helper. */
16104 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16105 * BITS_PER_UNIT / n_elts;
16106 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16107 tree len = build_int_cst (bitsizetype, splat_elem_size);
16108 tree start = build_int_cst (bitsizetype, splat_start_bit);
16109 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16110 len, start);
16111 }
16112 /* And finally, build the new vector. */
16113 tree splat_tree = build_vector_from_val (lhs_type, splat);
16114 g = gimple_build_assign (lhs, splat_tree);
16115 gimple_set_location (g, gimple_location (stmt));
16116 gsi_replace (gsi, g, true);
16117 return true;
16118 }
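      /* Worked example of the non-constant path above: for a V8HI arg0,
	 splat_elem_size is 16 * 8 / 8 == 16 bits, so vec_splat (b, 3)
	 extracts the 16 bits starting at bit 48 with tree_vec_extract and
	 then broadcasts that scalar via build_vector_from_val.  */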
16119
16120 /* vec_mergel (integrals). */
16121 case ALTIVEC_BUILTIN_VMRGLH:
16122 case ALTIVEC_BUILTIN_VMRGLW:
16123 case VSX_BUILTIN_XXMRGLW_4SI:
16124 case ALTIVEC_BUILTIN_VMRGLB:
16125 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16126 case VSX_BUILTIN_XXMRGLW_4SF:
16127 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16128 fold_mergehl_helper (gsi, stmt, 1);
16129 return true;
16130 /* vec_mergeh (integrals). */
16131 case ALTIVEC_BUILTIN_VMRGHH:
16132 case ALTIVEC_BUILTIN_VMRGHW:
16133 case VSX_BUILTIN_XXMRGHW_4SI:
16134 case ALTIVEC_BUILTIN_VMRGHB:
16135 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16136 case VSX_BUILTIN_XXMRGHW_4SF:
16137 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16138 fold_mergehl_helper (gsi, stmt, 0);
16139 return true;
16140
16141 /* Flavors of vec_mergee. */
16142 case P8V_BUILTIN_VMRGEW_V4SI:
16143 case P8V_BUILTIN_VMRGEW_V2DI:
16144 case P8V_BUILTIN_VMRGEW_V4SF:
16145 case P8V_BUILTIN_VMRGEW_V2DF:
16146 fold_mergeeo_helper (gsi, stmt, 0);
16147 return true;
16148 /* Flavors of vec_mergeo. */
16149 case P8V_BUILTIN_VMRGOW_V4SI:
16150 case P8V_BUILTIN_VMRGOW_V2DI:
16151 case P8V_BUILTIN_VMRGOW_V4SF:
16152 case P8V_BUILTIN_VMRGOW_V2DF:
16153 fold_mergeeo_helper (gsi, stmt, 1);
16154 return true;
16155
16156 /* d = vec_pack (a, b) */
16157 case P8V_BUILTIN_VPKUDUM:
16158 case ALTIVEC_BUILTIN_VPKUHUM:
16159 case ALTIVEC_BUILTIN_VPKUWUM:
16160 {
16161 arg0 = gimple_call_arg (stmt, 0);
16162 arg1 = gimple_call_arg (stmt, 1);
16163 lhs = gimple_call_lhs (stmt);
16164 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16165 gimple_set_location (g, gimple_location (stmt));
16166 gsi_replace (gsi, g, true);
16167 return true;
16168 }
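      /* For example, packing two V4SI operands yields a V8HI result whose
	 elements are the truncated low halves of the inputs' elements;
	 VEC_PACK_TRUNC_EXPR expresses exactly that narrowing in GIMPLE.  */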
16169
16170 /* d = vec_unpackh (a) */
16171 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16172 in this code is sensitive to endianness, and needs to be inverted to
16173 handle both LE and BE targets. */
16174 case ALTIVEC_BUILTIN_VUPKHSB:
16175 case ALTIVEC_BUILTIN_VUPKHSH:
16176 case P8V_BUILTIN_VUPKHSW:
16177 {
16178 arg0 = gimple_call_arg (stmt, 0);
16179 lhs = gimple_call_lhs (stmt);
16180 if (BYTES_BIG_ENDIAN)
16181 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16182 else
16183 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16184 gimple_set_location (g, gimple_location (stmt));
16185 gsi_replace (gsi, g, true);
16186 return true;
16187 }
16188 /* d = vec_unpackl (a) */
16189 case ALTIVEC_BUILTIN_VUPKLSB:
16190 case ALTIVEC_BUILTIN_VUPKLSH:
16191 case P8V_BUILTIN_VUPKLSW:
16192 {
16193 arg0 = gimple_call_arg (stmt, 0);
16194 lhs = gimple_call_lhs (stmt);
16195 if (BYTES_BIG_ENDIAN)
16196 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16197 else
16198 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16199 gimple_set_location (g, gimple_location (stmt));
16200 gsi_replace (gsi, g, true);
16201 return true;
16202 }
16203 /* There is no gimple type corresponding to pixel, so just return. */
16204 case ALTIVEC_BUILTIN_VUPKHPX:
16205 case ALTIVEC_BUILTIN_VUPKLPX:
16206 return false;
16207
16208 /* vec_perm. */
16209 case ALTIVEC_BUILTIN_VPERM_16QI:
16210 case ALTIVEC_BUILTIN_VPERM_8HI:
16211 case ALTIVEC_BUILTIN_VPERM_4SI:
16212 case ALTIVEC_BUILTIN_VPERM_2DI:
16213 case ALTIVEC_BUILTIN_VPERM_4SF:
16214 case ALTIVEC_BUILTIN_VPERM_2DF:
16215 {
16216 arg0 = gimple_call_arg (stmt, 0);
16217 arg1 = gimple_call_arg (stmt, 1);
16218 tree permute = gimple_call_arg (stmt, 2);
16219 lhs = gimple_call_lhs (stmt);
16220 location_t loc = gimple_location (stmt);
16221 gimple_seq stmts = NULL;
16222 // Convert arg0 and arg1 to match the type of the permute
16223 // for the VEC_PERM_EXPR operation.
16224 tree permute_type = (TREE_TYPE (permute));
16225 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16226 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16227 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16228 permute_type, arg0_ptype, arg1_ptype,
16229 permute);
16230 // Convert the result back to the desired lhs type upon completion.
16231 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16232 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16233 g = gimple_build_assign (lhs, temp);
16234 gimple_set_location (g, loc);
16235 gsi_replace (gsi, g, true);
16236 return true;
16237 }
16238
16239 default:
16240 if (TARGET_DEBUG_BUILTIN)
16241 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16242 fn_code, fn_name1, fn_name2);
16243 break;
16244 }
16245
16246 return false;
16247 }
16248
16249 /* Expand an expression EXP that calls a built-in function,
16250 with result going to TARGET if that's convenient
16251 (and in mode MODE if that's convenient).
16252 SUBTARGET may be used as the target for computing one of EXP's operands.
16253 IGNORE is nonzero if the value is to be ignored. */
16254
16255 static rtx
16256 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16257 machine_mode mode ATTRIBUTE_UNUSED,
16258 int ignore ATTRIBUTE_UNUSED)
16259 {
16260 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16261 enum rs6000_builtins fcode
16262 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16263 size_t uns_fcode = (size_t)fcode;
16264 const struct builtin_description *d;
16265 size_t i;
16266 rtx ret;
16267 bool success;
16268 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16269 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16270 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16271
16272 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16273 floating point type, depending on whether long double is the IBM extended
16274 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16275 we only define one variant of the built-in function, and switch the code
16276 when defining it, rather than defining two built-ins and using the
16277 overload table in rs6000-c.c to switch between the two. If we don't have
16278 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16279 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16280 if (FLOAT128_IEEE_P (TFmode))
16281 switch (icode)
16282 {
16283 default:
16284 break;
16285
16286 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16287 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16288 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16289 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16290 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16291 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16292 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16293 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16294 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16295 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16296 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16297 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16298 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16299 }
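  /* For example, when long double is IEEE 128-bit (-mabi=ieeelongdouble),
     a round-to-odd builtin such as __builtin_sqrtf128_round_to_odd that
     was registered with CODE_FOR_sqrtkf2_odd is expanded through
     CODE_FOR_sqrttf2_odd instead, so a single built-in decl serves both
     long double configurations.  */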
16300
16301 if (TARGET_DEBUG_BUILTIN)
16302 {
16303 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16304 const char *name2 = (icode != CODE_FOR_nothing)
16305 ? get_insn_name ((int) icode)
16306 : "nothing";
16307 const char *name3;
16308
16309 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16310 {
16311 default: name3 = "unknown"; break;
16312 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16313 case RS6000_BTC_UNARY: name3 = "unary"; break;
16314 case RS6000_BTC_BINARY: name3 = "binary"; break;
16315 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16316 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16317 case RS6000_BTC_ABS: name3 = "abs"; break;
16318 case RS6000_BTC_DST: name3 = "dst"; break;
16319 }
16320
16321
16322 fprintf (stderr,
16323 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16324 (name1) ? name1 : "---", fcode,
16325 (name2) ? name2 : "---", (int) icode,
16326 name3,
16327 func_valid_p ? "" : ", not valid");
16328 }
16329
16330 if (!func_valid_p)
16331 {
16332 rs6000_invalid_builtin (fcode);
16333
16334 /* Given it is invalid, just generate a normal call. */
16335 return expand_call (exp, target, ignore);
16336 }
16337
16338 switch (fcode)
16339 {
16340 case RS6000_BUILTIN_RECIP:
16341 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16342
16343 case RS6000_BUILTIN_RECIPF:
16344 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16345
16346 case RS6000_BUILTIN_RSQRTF:
16347 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16348
16349 case RS6000_BUILTIN_RSQRT:
16350 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16351
16352 case POWER7_BUILTIN_BPERMD:
16353 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16354 ? CODE_FOR_bpermd_di
16355 : CODE_FOR_bpermd_si), exp, target);
16356
16357 case RS6000_BUILTIN_GET_TB:
16358 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16359 target);
16360
16361 case RS6000_BUILTIN_MFTB:
16362 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16363 ? CODE_FOR_rs6000_mftb_di
16364 : CODE_FOR_rs6000_mftb_si),
16365 target);
16366
16367 case RS6000_BUILTIN_MFFS:
16368 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16369
16370 case RS6000_BUILTIN_MTFSB0:
16371 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16372
16373 case RS6000_BUILTIN_MTFSB1:
16374 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16375
16376 case RS6000_BUILTIN_SET_FPSCR_RN:
16377 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16378 exp);
16379
16380 case RS6000_BUILTIN_SET_FPSCR_DRN:
16381 return
16382 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16383 exp);
16384
16385 case RS6000_BUILTIN_MFFSL:
16386 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16387
16388 case RS6000_BUILTIN_MTFSF:
16389 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16390
16391 case RS6000_BUILTIN_CPU_INIT:
16392 case RS6000_BUILTIN_CPU_IS:
16393 case RS6000_BUILTIN_CPU_SUPPORTS:
16394 return cpu_expand_builtin (fcode, exp, target);
16395
16396 case MISC_BUILTIN_SPEC_BARRIER:
16397 {
16398 emit_insn (gen_speculation_barrier ());
16399 return NULL_RTX;
16400 }
16401
16402 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16403 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16404 {
16405 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16406 : (int) CODE_FOR_altivec_lvsl_direct);
16407 machine_mode tmode = insn_data[icode2].operand[0].mode;
16408 machine_mode mode = insn_data[icode2].operand[1].mode;
16409 tree arg;
16410 rtx op, addr, pat;
16411
16412 gcc_assert (TARGET_ALTIVEC);
16413
16414 arg = CALL_EXPR_ARG (exp, 0);
16415 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16416 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16417 addr = memory_address (mode, op);
16418 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16419 op = addr;
16420 else
16421 {
16422 /* For the load case we need to negate the address. */
16423 op = gen_reg_rtx (GET_MODE (addr));
16424 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16425 }
16426 op = gen_rtx_MEM (mode, op);
16427
16428 if (target == 0
16429 || GET_MODE (target) != tmode
16430 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16431 target = gen_reg_rtx (tmode);
16432
16433 pat = GEN_FCN (icode2) (target, op);
16434 if (!pat)
16435 return 0;
16436 emit_insn (pat);
16437
16438 return target;
16439 }
16440
16441 case ALTIVEC_BUILTIN_VCFUX:
16442 case ALTIVEC_BUILTIN_VCFSX:
16443 case ALTIVEC_BUILTIN_VCTUXS:
16444 case ALTIVEC_BUILTIN_VCTSXS:
16445 /* FIXME: There's got to be a nicer way to handle this case than
16446 constructing a new CALL_EXPR. */
16447 if (call_expr_nargs (exp) == 1)
16448 {
16449 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16450 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16451 }
16452 break;
16453
16454 /* For the pack and unpack int128 routines, fix up the builtin so it
16455 uses the correct IBM128 type. */
16456 case MISC_BUILTIN_PACK_IF:
16457 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16458 {
16459 icode = CODE_FOR_packtf;
16460 fcode = MISC_BUILTIN_PACK_TF;
16461 uns_fcode = (size_t)fcode;
16462 }
16463 break;
16464
16465 case MISC_BUILTIN_UNPACK_IF:
16466 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16467 {
16468 icode = CODE_FOR_unpacktf;
16469 fcode = MISC_BUILTIN_UNPACK_TF;
16470 uns_fcode = (size_t)fcode;
16471 }
16472 break;
16473
16474 default:
16475 break;
16476 }
16477
16478 if (TARGET_ALTIVEC)
16479 {
16480 ret = altivec_expand_builtin (exp, target, &success);
16481
16482 if (success)
16483 return ret;
16484 }
16485 if (TARGET_HTM)
16486 {
16487 ret = htm_expand_builtin (exp, target, &success);
16488
16489 if (success)
16490 return ret;
16491 }
16492
16493 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16494 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16495 gcc_assert (attr == RS6000_BTC_UNARY
16496 || attr == RS6000_BTC_BINARY
16497 || attr == RS6000_BTC_TERNARY
16498 || attr == RS6000_BTC_SPECIAL);
16499
16500 /* Handle simple unary operations. */
16501 d = bdesc_1arg;
16502 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16503 if (d->code == fcode)
16504 return rs6000_expand_unop_builtin (icode, exp, target);
16505
16506 /* Handle simple binary operations. */
16507 d = bdesc_2arg;
16508 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16509 if (d->code == fcode)
16510 return rs6000_expand_binop_builtin (icode, exp, target);
16511
16512 /* Handle simple ternary operations. */
16513 d = bdesc_3arg;
16514 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16515 if (d->code == fcode)
16516 return rs6000_expand_ternop_builtin (icode, exp, target);
16517
16518 /* Handle simple no-argument operations. */
16519 d = bdesc_0arg;
16520 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16521 if (d->code == fcode)
16522 return rs6000_expand_zeroop_builtin (icode, target);
16523
16524 gcc_unreachable ();
16525 }
16526
16527 /* Create a builtin vector type with a name, taking care not to give
16528 the canonical type a name. */
16529
16530 static tree
16531 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16532 {
16533 tree result = build_vector_type (elt_type, num_elts);
16534
16535 /* Copy so we don't give the canonical type a name. */
16536 result = build_variant_type_copy (result);
16537
16538 add_builtin_type (name, result);
16539
16540 return result;
16541 }
16542
16543 static void
16544 rs6000_init_builtins (void)
16545 {
16546 tree tdecl;
16547 tree ftype;
16548 machine_mode mode;
16549
16550 if (TARGET_DEBUG_BUILTIN)
16551 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16552 (TARGET_ALTIVEC) ? ", altivec" : "",
16553 (TARGET_VSX) ? ", vsx" : "");
16554
16555 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16556 : "__vector long long",
16557 intDI_type_node, 2);
16558 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16559 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16560 intSI_type_node, 4);
16561 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16562 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16563 intHI_type_node, 8);
16564 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16565 intQI_type_node, 16);
16566
16567 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16568 unsigned_intQI_type_node, 16);
16569 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16570 unsigned_intHI_type_node, 8);
16571 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16572 unsigned_intSI_type_node, 4);
16573 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16574 ? "__vector unsigned long"
16575 : "__vector unsigned long long",
16576 unsigned_intDI_type_node, 2);
16577
16578 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16579
16580 const_str_type_node
16581 = build_pointer_type (build_qualified_type (char_type_node,
16582 TYPE_QUAL_CONST));
16583
16584 /* We use V1TI mode as a special container to hold __int128_t items that
16585 must live in VSX registers. */
16586 if (intTI_type_node)
16587 {
16588 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16589 intTI_type_node, 1);
16590 unsigned_V1TI_type_node
16591 = rs6000_vector_type ("__vector unsigned __int128",
16592 unsigned_intTI_type_node, 1);
16593 }
16594
16595 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16596 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16597 'vector unsigned short'. */
16598
16599 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16600 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16601 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16602 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16603 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16604
16605 long_integer_type_internal_node = long_integer_type_node;
16606 long_unsigned_type_internal_node = long_unsigned_type_node;
16607 long_long_integer_type_internal_node = long_long_integer_type_node;
16608 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16609 intQI_type_internal_node = intQI_type_node;
16610 uintQI_type_internal_node = unsigned_intQI_type_node;
16611 intHI_type_internal_node = intHI_type_node;
16612 uintHI_type_internal_node = unsigned_intHI_type_node;
16613 intSI_type_internal_node = intSI_type_node;
16614 uintSI_type_internal_node = unsigned_intSI_type_node;
16615 intDI_type_internal_node = intDI_type_node;
16616 uintDI_type_internal_node = unsigned_intDI_type_node;
16617 intTI_type_internal_node = intTI_type_node;
16618 uintTI_type_internal_node = unsigned_intTI_type_node;
16619 float_type_internal_node = float_type_node;
16620 double_type_internal_node = double_type_node;
16621 long_double_type_internal_node = long_double_type_node;
16622 dfloat64_type_internal_node = dfloat64_type_node;
16623 dfloat128_type_internal_node = dfloat128_type_node;
16624 void_type_internal_node = void_type_node;
16625
16626 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16627 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16628 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16629 format that uses a pair of doubles, depending on the switches and
16630 defaults.
16631
16632 If we don't support either 128-bit IBM double double or IEEE 128-bit
16633 floating point, we need to make sure the type is non-zero or else the
16634 self-tests fail during bootstrap.
16635
16636 Always create __ibm128 as a separate type, even if the current long double
16637 format is IBM extended double.
16638
16639 For IEEE 128-bit floating point, always create the type __ieee128. If the
16640 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16641 __ieee128. */
16642 if (TARGET_FLOAT128_TYPE)
16643 {
16644 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16645 ibm128_float_type_node = long_double_type_node;
16646 else
16647 {
16648 ibm128_float_type_node = make_node (REAL_TYPE);
16649 TYPE_PRECISION (ibm128_float_type_node) = 128;
16650 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16651 layout_type (ibm128_float_type_node);
16652 }
16653
16654 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16655 "__ibm128");
16656
16657 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16658 ieee128_float_type_node = long_double_type_node;
16659 else
16660 ieee128_float_type_node = float128_type_node;
16661
16662 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16663 "__ieee128");
16664 }
16665
16666 else
16667 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16668
16669 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16670 tree type node. */
16671 builtin_mode_to_type[QImode][0] = integer_type_node;
16672 builtin_mode_to_type[HImode][0] = integer_type_node;
16673 builtin_mode_to_type[SImode][0] = intSI_type_node;
16674 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16675 builtin_mode_to_type[DImode][0] = intDI_type_node;
16676 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16677 builtin_mode_to_type[TImode][0] = intTI_type_node;
16678 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16679 builtin_mode_to_type[SFmode][0] = float_type_node;
16680 builtin_mode_to_type[DFmode][0] = double_type_node;
16681 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16682 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16683 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16684 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16685 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16686 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16687 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16688 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16689 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16690 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16691 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16692 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16693 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16694 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16695 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16696 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16697 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16698
16699 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16700 TYPE_NAME (bool_char_type_node) = tdecl;
16701
16702 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16703 TYPE_NAME (bool_short_type_node) = tdecl;
16704
16705 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16706 TYPE_NAME (bool_int_type_node) = tdecl;
16707
16708 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16709 TYPE_NAME (pixel_type_node) = tdecl;
16710
16711 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16712 bool_char_type_node, 16);
16713 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16714 bool_short_type_node, 8);
16715 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16716 bool_int_type_node, 4);
16717 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16718 ? "__vector __bool long"
16719 : "__vector __bool long long",
16720 bool_long_long_type_node, 2);
16721 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16722 pixel_type_node, 8);
16723
16724 /* Create Altivec and VSX builtins on machines with at least the
16725 general purpose extensions (970 and newer) to allow the use of
16726 the target attribute. */
16727 if (TARGET_EXTRA_BUILTINS)
16728 altivec_init_builtins ();
16729 if (TARGET_HTM)
16730 htm_init_builtins ();
16731
16732 if (TARGET_EXTRA_BUILTINS)
16733 rs6000_common_init_builtins ();
16734
16735 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16736 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16737 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16738
16739 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16740 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16741 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16742
16743 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16744 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16745 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16746
16747 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16748 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16749 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16750
16751 mode = (TARGET_64BIT) ? DImode : SImode;
16752 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16753 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16754 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16755
16756 ftype = build_function_type_list (unsigned_intDI_type_node,
16757 NULL_TREE);
16758 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16759
16760 if (TARGET_64BIT)
16761 ftype = build_function_type_list (unsigned_intDI_type_node,
16762 NULL_TREE);
16763 else
16764 ftype = build_function_type_list (unsigned_intSI_type_node,
16765 NULL_TREE);
16766 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16767
16768 ftype = build_function_type_list (double_type_node, NULL_TREE);
16769 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16770
16771 ftype = build_function_type_list (double_type_node, NULL_TREE);
16772 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16773
16774 ftype = build_function_type_list (void_type_node,
16775 intSI_type_node,
16776 NULL_TREE);
16777 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16778
16779 ftype = build_function_type_list (void_type_node,
16780 intSI_type_node,
16781 NULL_TREE);
16782 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16783
16784 ftype = build_function_type_list (void_type_node,
16785 intDI_type_node,
16786 NULL_TREE);
16787 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16788
16789 ftype = build_function_type_list (void_type_node,
16790 intDI_type_node,
16791 NULL_TREE);
16792 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16793
16794 ftype = build_function_type_list (void_type_node,
16795 intSI_type_node, double_type_node,
16796 NULL_TREE);
16797 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16798
16799 ftype = build_function_type_list (void_type_node, NULL_TREE);
16800 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16801 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16802 MISC_BUILTIN_SPEC_BARRIER);
16803
16804 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16805 NULL_TREE);
16806 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16807 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16808
16809 /* AIX libm provides clog as __clog. */
16810 if (TARGET_XCOFF
16811 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16812 set_user_assembler_name (tdecl, "__clog");
16813
16814 #ifdef SUBTARGET_INIT_BUILTINS
16815 SUBTARGET_INIT_BUILTINS;
16816 #endif
16817 }
16818
16819 /* Returns the rs6000 builtin decl for CODE. */
16820
16821 static tree
16822 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16823 {
16824 HOST_WIDE_INT fnmask;
16825
16826 if (code >= RS6000_BUILTIN_COUNT)
16827 return error_mark_node;
16828
16829 fnmask = rs6000_builtin_info[code].mask;
16830 if ((fnmask & rs6000_builtin_mask) != fnmask)
16831 {
16832 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16833 return error_mark_node;
16834 }
16835
16836 return rs6000_builtin_decls[code];
16837 }
16838
16839 static void
16840 altivec_init_builtins (void)
16841 {
16842 const struct builtin_description *d;
16843 size_t i;
16844 tree ftype;
16845 tree decl;
16846 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16847
16848 tree pvoid_type_node = build_pointer_type (void_type_node);
16849
16850 tree pcvoid_type_node
16851 = build_pointer_type (build_qualified_type (void_type_node,
16852 TYPE_QUAL_CONST));
16853
16854 tree int_ftype_opaque
16855 = build_function_type_list (integer_type_node,
16856 opaque_V4SI_type_node, NULL_TREE);
16857 tree opaque_ftype_opaque
16858 = build_function_type_list (integer_type_node, NULL_TREE);
16859 tree opaque_ftype_opaque_int
16860 = build_function_type_list (opaque_V4SI_type_node,
16861 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16862 tree opaque_ftype_opaque_opaque_int
16863 = build_function_type_list (opaque_V4SI_type_node,
16864 opaque_V4SI_type_node, opaque_V4SI_type_node,
16865 integer_type_node, NULL_TREE);
16866 tree opaque_ftype_opaque_opaque_opaque
16867 = build_function_type_list (opaque_V4SI_type_node,
16868 opaque_V4SI_type_node, opaque_V4SI_type_node,
16869 opaque_V4SI_type_node, NULL_TREE);
16870 tree opaque_ftype_opaque_opaque
16871 = build_function_type_list (opaque_V4SI_type_node,
16872 opaque_V4SI_type_node, opaque_V4SI_type_node,
16873 NULL_TREE);
16874 tree int_ftype_int_opaque_opaque
16875 = build_function_type_list (integer_type_node,
16876 integer_type_node, opaque_V4SI_type_node,
16877 opaque_V4SI_type_node, NULL_TREE);
16878 tree int_ftype_int_v4si_v4si
16879 = build_function_type_list (integer_type_node,
16880 integer_type_node, V4SI_type_node,
16881 V4SI_type_node, NULL_TREE);
16882 tree int_ftype_int_v2di_v2di
16883 = build_function_type_list (integer_type_node,
16884 integer_type_node, V2DI_type_node,
16885 V2DI_type_node, NULL_TREE);
16886 tree void_ftype_v4si
16887 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16888 tree v8hi_ftype_void
16889 = build_function_type_list (V8HI_type_node, NULL_TREE);
16890 tree void_ftype_void
16891 = build_function_type_list (void_type_node, NULL_TREE);
16892 tree void_ftype_int
16893 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16894
16895 tree opaque_ftype_long_pcvoid
16896 = build_function_type_list (opaque_V4SI_type_node,
16897 long_integer_type_node, pcvoid_type_node,
16898 NULL_TREE);
16899 tree v16qi_ftype_long_pcvoid
16900 = build_function_type_list (V16QI_type_node,
16901 long_integer_type_node, pcvoid_type_node,
16902 NULL_TREE);
16903 tree v8hi_ftype_long_pcvoid
16904 = build_function_type_list (V8HI_type_node,
16905 long_integer_type_node, pcvoid_type_node,
16906 NULL_TREE);
16907 tree v4si_ftype_long_pcvoid
16908 = build_function_type_list (V4SI_type_node,
16909 long_integer_type_node, pcvoid_type_node,
16910 NULL_TREE);
16911 tree v4sf_ftype_long_pcvoid
16912 = build_function_type_list (V4SF_type_node,
16913 long_integer_type_node, pcvoid_type_node,
16914 NULL_TREE);
16915 tree v2df_ftype_long_pcvoid
16916 = build_function_type_list (V2DF_type_node,
16917 long_integer_type_node, pcvoid_type_node,
16918 NULL_TREE);
16919 tree v2di_ftype_long_pcvoid
16920 = build_function_type_list (V2DI_type_node,
16921 long_integer_type_node, pcvoid_type_node,
16922 NULL_TREE);
16923 tree v1ti_ftype_long_pcvoid
16924 = build_function_type_list (V1TI_type_node,
16925 long_integer_type_node, pcvoid_type_node,
16926 NULL_TREE);
16927
16928 tree void_ftype_opaque_long_pvoid
16929 = build_function_type_list (void_type_node,
16930 opaque_V4SI_type_node, long_integer_type_node,
16931 pvoid_type_node, NULL_TREE);
16932 tree void_ftype_v4si_long_pvoid
16933 = build_function_type_list (void_type_node,
16934 V4SI_type_node, long_integer_type_node,
16935 pvoid_type_node, NULL_TREE);
16936 tree void_ftype_v16qi_long_pvoid
16937 = build_function_type_list (void_type_node,
16938 V16QI_type_node, long_integer_type_node,
16939 pvoid_type_node, NULL_TREE);
16940
16941 tree void_ftype_v16qi_pvoid_long
16942 = build_function_type_list (void_type_node,
16943 V16QI_type_node, pvoid_type_node,
16944 long_integer_type_node, NULL_TREE);
16945
16946 tree void_ftype_v8hi_long_pvoid
16947 = build_function_type_list (void_type_node,
16948 V8HI_type_node, long_integer_type_node,
16949 pvoid_type_node, NULL_TREE);
16950 tree void_ftype_v4sf_long_pvoid
16951 = build_function_type_list (void_type_node,
16952 V4SF_type_node, long_integer_type_node,
16953 pvoid_type_node, NULL_TREE);
16954 tree void_ftype_v2df_long_pvoid
16955 = build_function_type_list (void_type_node,
16956 V2DF_type_node, long_integer_type_node,
16957 pvoid_type_node, NULL_TREE);
16958 tree void_ftype_v1ti_long_pvoid
16959 = build_function_type_list (void_type_node,
16960 V1TI_type_node, long_integer_type_node,
16961 pvoid_type_node, NULL_TREE);
16962 tree void_ftype_v2di_long_pvoid
16963 = build_function_type_list (void_type_node,
16964 V2DI_type_node, long_integer_type_node,
16965 pvoid_type_node, NULL_TREE);
16966 tree int_ftype_int_v8hi_v8hi
16967 = build_function_type_list (integer_type_node,
16968 integer_type_node, V8HI_type_node,
16969 V8HI_type_node, NULL_TREE);
16970 tree int_ftype_int_v16qi_v16qi
16971 = build_function_type_list (integer_type_node,
16972 integer_type_node, V16QI_type_node,
16973 V16QI_type_node, NULL_TREE);
16974 tree int_ftype_int_v4sf_v4sf
16975 = build_function_type_list (integer_type_node,
16976 integer_type_node, V4SF_type_node,
16977 V4SF_type_node, NULL_TREE);
16978 tree int_ftype_int_v2df_v2df
16979 = build_function_type_list (integer_type_node,
16980 integer_type_node, V2DF_type_node,
16981 V2DF_type_node, NULL_TREE);
16982 tree v2di_ftype_v2di
16983 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16984 tree v4si_ftype_v4si
16985 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16986 tree v8hi_ftype_v8hi
16987 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16988 tree v16qi_ftype_v16qi
16989 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16990 tree v4sf_ftype_v4sf
16991 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16992 tree v2df_ftype_v2df
16993 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16994 tree void_ftype_pcvoid_int_int
16995 = build_function_type_list (void_type_node,
16996 pcvoid_type_node, integer_type_node,
16997 integer_type_node, NULL_TREE);
16998
16999 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17000 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17001 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17002 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17003 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17004 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17005 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17006 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17007 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17008 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17009 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17010 ALTIVEC_BUILTIN_LVXL_V2DF);
17011 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17012 ALTIVEC_BUILTIN_LVXL_V2DI);
17013 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17014 ALTIVEC_BUILTIN_LVXL_V4SF);
17015 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17016 ALTIVEC_BUILTIN_LVXL_V4SI);
17017 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17018 ALTIVEC_BUILTIN_LVXL_V8HI);
17019 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17020 ALTIVEC_BUILTIN_LVXL_V16QI);
17021 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17022 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17023 ALTIVEC_BUILTIN_LVX_V1TI);
17024 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17025 ALTIVEC_BUILTIN_LVX_V2DF);
17026 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17027 ALTIVEC_BUILTIN_LVX_V2DI);
17028 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17029 ALTIVEC_BUILTIN_LVX_V4SF);
17030 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17031 ALTIVEC_BUILTIN_LVX_V4SI);
17032 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17033 ALTIVEC_BUILTIN_LVX_V8HI);
17034 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17035 ALTIVEC_BUILTIN_LVX_V16QI);
17036 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17037 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17038 ALTIVEC_BUILTIN_STVX_V2DF);
17039 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17040 ALTIVEC_BUILTIN_STVX_V2DI);
17041 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17042 ALTIVEC_BUILTIN_STVX_V4SF);
17043 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17044 ALTIVEC_BUILTIN_STVX_V4SI);
17045 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17046 ALTIVEC_BUILTIN_STVX_V8HI);
17047 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17048 ALTIVEC_BUILTIN_STVX_V16QI);
17049 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17050 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17051 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17052 ALTIVEC_BUILTIN_STVXL_V2DF);
17053 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17054 ALTIVEC_BUILTIN_STVXL_V2DI);
17055 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17056 ALTIVEC_BUILTIN_STVXL_V4SF);
17057 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17058 ALTIVEC_BUILTIN_STVXL_V4SI);
17059 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17060 ALTIVEC_BUILTIN_STVXL_V8HI);
17061 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17062 ALTIVEC_BUILTIN_STVXL_V16QI);
17063 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17064 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17065 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17066 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17067 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17068 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17069 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17070 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17071 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17072 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17073 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17074 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17075 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17076 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17077 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17078 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17079
17080 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17081 VSX_BUILTIN_LXVD2X_V2DF);
17082 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17083 VSX_BUILTIN_LXVD2X_V2DI);
17084 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17085 VSX_BUILTIN_LXVW4X_V4SF);
17086 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17087 VSX_BUILTIN_LXVW4X_V4SI);
17088 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17089 VSX_BUILTIN_LXVW4X_V8HI);
17090 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17091 VSX_BUILTIN_LXVW4X_V16QI);
17092 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17093 VSX_BUILTIN_STXVD2X_V2DF);
17094 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17095 VSX_BUILTIN_STXVD2X_V2DI);
17096 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17097 VSX_BUILTIN_STXVW4X_V4SF);
17098 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17099 VSX_BUILTIN_STXVW4X_V4SI);
17100 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17101 VSX_BUILTIN_STXVW4X_V8HI);
17102 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17103 VSX_BUILTIN_STXVW4X_V16QI);
17104
17105 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17106 VSX_BUILTIN_LD_ELEMREV_V2DF);
17107 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17108 VSX_BUILTIN_LD_ELEMREV_V2DI);
17109 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17110 VSX_BUILTIN_LD_ELEMREV_V4SF);
17111 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17112 VSX_BUILTIN_LD_ELEMREV_V4SI);
17113 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17114 VSX_BUILTIN_LD_ELEMREV_V8HI);
17115 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17116 VSX_BUILTIN_LD_ELEMREV_V16QI);
17117 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17118 VSX_BUILTIN_ST_ELEMREV_V2DF);
17119 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17120 VSX_BUILTIN_ST_ELEMREV_V1TI);
17121 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17122 VSX_BUILTIN_ST_ELEMREV_V2DI);
17123 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17124 VSX_BUILTIN_ST_ELEMREV_V4SF);
17125 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17126 VSX_BUILTIN_ST_ELEMREV_V4SI);
17127 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17128 VSX_BUILTIN_ST_ELEMREV_V8HI);
17129 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17130 VSX_BUILTIN_ST_ELEMREV_V16QI);
17131
17132 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17133 VSX_BUILTIN_VEC_LD);
17134 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17135 VSX_BUILTIN_VEC_ST);
17136 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17137 VSX_BUILTIN_VEC_XL);
17138 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17139 VSX_BUILTIN_VEC_XL_BE);
17140 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17141 VSX_BUILTIN_VEC_XST);
17142 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17143 VSX_BUILTIN_VEC_XST_BE);
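
/* Usage sketch (illustrative only): the overloaded __builtin_vec_xl and
   __builtin_vec_xst entries above back the vec_xl/vec_xst interfaces from
   <altivec.h>, e.g.:  */
#if 0
#include <altivec.h>
static void
example_copy (const int *src, int *dst)
{
  vector signed int v = vec_xl (0, src);	/* unaligned vector load */
  vec_xst (v, 0, dst);				/* unaligned vector store */
}
#endif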
17144
17145 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17146 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17147 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17148
17149 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17150 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17151 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17152 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17153 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17154 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17155 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17156 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17157 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17158 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17159 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17160 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17161
17162 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17163 ALTIVEC_BUILTIN_VEC_ADDE);
17164 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17165 ALTIVEC_BUILTIN_VEC_ADDEC);
17166 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17167 ALTIVEC_BUILTIN_VEC_CMPNE);
17168 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17169 ALTIVEC_BUILTIN_VEC_MUL);
17170 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17171 ALTIVEC_BUILTIN_VEC_SUBE);
17172 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17173 ALTIVEC_BUILTIN_VEC_SUBEC);
17174
17175 /* Cell builtins. */
17176 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17177 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17178 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17179 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17180
17181 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17182 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17183 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17184 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17185
17186 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17187 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17188 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17189 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17190
17191 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17192 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17193 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17194 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17195
17196 if (TARGET_P9_VECTOR)
17197 {
17198 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17199 P9V_BUILTIN_STXVL);
17200 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17201 P9V_BUILTIN_XST_LEN_R);
17202 }
17203
17204 /* Add the DST variants. */
17205 d = bdesc_dst;
17206 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17207 {
17208 HOST_WIDE_INT mask = d->mask;
17209
17210 /* It is expected that these dst built-in functions may have
17211 d->icode equal to CODE_FOR_nothing. */
17212 if ((mask & builtin_mask) != mask)
17213 {
17214 if (TARGET_DEBUG_BUILTIN)
17215 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17216 d->name);
17217 continue;
17218 }
17219 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17220 }
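
/* Usage sketch (illustrative only): the dst builtins registered above back
   the <altivec.h> data-stream touch interface; the control word below is a
   made-up example encoding block size, count, and stride.  */
#if 0
#include <altivec.h>
static void
example_stream_touch (const int *p)
{
  vec_dst (p, 0x10010100, 0);	/* start prefetching on stream tag 0 */
  /* ... consume the data ... */
  vec_dss (0);			/* stop stream 0 */
}
#endif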
17221
17222 /* Initialize the predicates. */
17223 d = bdesc_altivec_preds;
17224 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17225 {
17226 machine_mode mode1;
17227 tree type;
17228 HOST_WIDE_INT mask = d->mask;
17229
17230 if ((mask & builtin_mask) != mask)
17231 {
17232 if (TARGET_DEBUG_BUILTIN)
17233 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17234 d->name);
17235 continue;
17236 }
17237
17238 if (rs6000_overloaded_builtin_p (d->code))
17239 mode1 = VOIDmode;
17240 else
17241 {
17242 /* Cannot define builtin if the instruction is disabled. */
17243 gcc_assert (d->icode != CODE_FOR_nothing);
17244 mode1 = insn_data[d->icode].operand[1].mode;
17245 }
17246
17247 switch (mode1)
17248 {
17249 case E_VOIDmode:
17250 type = int_ftype_int_opaque_opaque;
17251 break;
17252 case E_V2DImode:
17253 type = int_ftype_int_v2di_v2di;
17254 break;
17255 case E_V4SImode:
17256 type = int_ftype_int_v4si_v4si;
17257 break;
17258 case E_V8HImode:
17259 type = int_ftype_int_v8hi_v8hi;
17260 break;
17261 case E_V16QImode:
17262 type = int_ftype_int_v16qi_v16qi;
17263 break;
17264 case E_V4SFmode:
17265 type = int_ftype_int_v4sf_v4sf;
17266 break;
17267 case E_V2DFmode:
17268 type = int_ftype_int_v2df_v2df;
17269 break;
17270 default:
17271 gcc_unreachable ();
17272 }
17273
17274 def_builtin (d->name, type, d->code);
17275 }
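
/* For illustration (a hedged sketch): the predicate builtins registered by
   this loop take the CR6 test selector as their leading int argument, so
   vec_all_eq from <altivec.h> resolves to roughly
   __builtin_altivec_vcmpequw_p (__CR6_LT, a, b), which returns nonzero
   when every element compares equal.  */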
17276
17277 /* Initialize the abs* operators. */
17278 d = bdesc_abs;
17279 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17280 {
17281 machine_mode mode0;
17282 tree type;
17283 HOST_WIDE_INT mask = d->mask;
17284
17285 if ((mask & builtin_mask) != mask)
17286 {
17287 if (TARGET_DEBUG_BUILTIN)
17288 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17289 d->name);
17290 continue;
17291 }
17292
17293 /* Cannot define builtin if the instruction is disabled. */
17294 gcc_assert (d->icode != CODE_FOR_nothing);
17295 mode0 = insn_data[d->icode].operand[0].mode;
17296
17297 switch (mode0)
17298 {
17299 case E_V2DImode:
17300 type = v2di_ftype_v2di;
17301 break;
17302 case E_V4SImode:
17303 type = v4si_ftype_v4si;
17304 break;
17305 case E_V8HImode:
17306 type = v8hi_ftype_v8hi;
17307 break;
17308 case E_V16QImode:
17309 type = v16qi_ftype_v16qi;
17310 break;
17311 case E_V4SFmode:
17312 type = v4sf_ftype_v4sf;
17313 break;
17314 case E_V2DFmode:
17315 type = v2df_ftype_v2df;
17316 break;
17317 default:
17318 gcc_unreachable ();
17319 }
17320
17321 def_builtin (d->name, type, d->code);
17322 }
17323
17324 /* Initialize target builtin that implements
17325 targetm.vectorize.builtin_mask_for_load. */
17326
17327 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17328 v16qi_ftype_long_pcvoid,
17329 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17330 BUILT_IN_MD, NULL, NULL_TREE);
17331 TREE_READONLY (decl) = 1;
17332 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17333 altivec_builtin_mask_for_load = decl;
17334
17335 /* Access to the vec_init patterns. */
17336 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17337 integer_type_node, integer_type_node,
17338 integer_type_node, NULL_TREE);
17339 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17340
17341 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17342 short_integer_type_node,
17343 short_integer_type_node,
17344 short_integer_type_node,
17345 short_integer_type_node,
17346 short_integer_type_node,
17347 short_integer_type_node,
17348 short_integer_type_node, NULL_TREE);
17349 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17350
17351 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17352 char_type_node, char_type_node,
17353 char_type_node, char_type_node,
17354 char_type_node, char_type_node,
17355 char_type_node, char_type_node,
17356 char_type_node, char_type_node,
17357 char_type_node, char_type_node,
17358 char_type_node, char_type_node,
17359 char_type_node, NULL_TREE);
17360 def_builtin ("__builtin_vec_init_v16qi", ftype,
17361 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17362
17363 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17364 float_type_node, float_type_node,
17365 float_type_node, NULL_TREE);
17366 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17367
17368 /* VSX builtins. */
17369 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17370 double_type_node, NULL_TREE);
17371 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17372
17373 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17374 intDI_type_node, NULL_TREE);
17375 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17376
17377 /* Access to the vec_set patterns. */
17378 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17379 intSI_type_node,
17380 integer_type_node, NULL_TREE);
17381 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17382
17383 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17384 intHI_type_node,
17385 integer_type_node, NULL_TREE);
17386 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17387
17388 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17389 intQI_type_node,
17390 integer_type_node, NULL_TREE);
17391 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17392
17393 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17394 float_type_node,
17395 integer_type_node, NULL_TREE);
17396 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17397
17398 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17399 double_type_node,
17400 integer_type_node, NULL_TREE);
17401 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17402
17403 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17404 intDI_type_node,
17405 integer_type_node, NULL_TREE);
17406 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17407
17408 /* Access to the vec_extract patterns. */
17409 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17410 integer_type_node, NULL_TREE);
17411 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17412
17413 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17414 integer_type_node, NULL_TREE);
17415 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17416
17417 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17418 integer_type_node, NULL_TREE);
17419 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17420
17421 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17422 integer_type_node, NULL_TREE);
17423 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17424
17425 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17426 integer_type_node, NULL_TREE);
17427 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17428
17429 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17430 integer_type_node, NULL_TREE);
17431 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17432 
17434 if (V1TI_type_node)
17435 {
17436 tree v1ti_ftype_long_pcvoid
17437 = build_function_type_list (V1TI_type_node,
17438 long_integer_type_node, pcvoid_type_node,
17439 NULL_TREE);
17440 tree void_ftype_v1ti_long_pvoid
17441 = build_function_type_list (void_type_node,
17442 V1TI_type_node, long_integer_type_node,
17443 pvoid_type_node, NULL_TREE);
17444 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17445 VSX_BUILTIN_LD_ELEMREV_V1TI);
17446 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17447 VSX_BUILTIN_LXVD2X_V1TI);
17448 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17449 VSX_BUILTIN_STXVD2X_V1TI);
17450 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17451 NULL_TREE);
17452 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17453 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17454 intTI_type_node,
17455 integer_type_node, NULL_TREE);
17456 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17457 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17458 integer_type_node, NULL_TREE);
17459 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17460 }
17462 }
17463
17464 static void
17465 htm_init_builtins (void)
17466 {
17467 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17468 const struct builtin_description *d;
17469 size_t i;
17470
17471 d = bdesc_htm;
17472 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17473 {
17474 tree op[MAX_HTM_OPERANDS], type;
17475 HOST_WIDE_INT mask = d->mask;
17476 unsigned attr = rs6000_builtin_info[d->code].attr;
17477 bool void_func = (attr & RS6000_BTC_VOID);
17478 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17479 int nopnds = 0;
17480 tree gpr_type_node;
17481 tree rettype;
17482 tree argtype;
17483
17484 /* It is expected that these htm built-in functions may have
17485 d->icode equal to CODE_FOR_nothing. */
17486
17487 if (TARGET_32BIT && TARGET_POWERPC64)
17488 gpr_type_node = long_long_unsigned_type_node;
17489 else
17490 gpr_type_node = long_unsigned_type_node;
17491
17492 if (attr & RS6000_BTC_SPR)
17493 {
17494 rettype = gpr_type_node;
17495 argtype = gpr_type_node;
17496 }
17497 else if (d->code == HTM_BUILTIN_TABORTDC
17498 || d->code == HTM_BUILTIN_TABORTDCI)
17499 {
17500 rettype = unsigned_type_node;
17501 argtype = gpr_type_node;
17502 }
17503 else
17504 {
17505 rettype = unsigned_type_node;
17506 argtype = unsigned_type_node;
17507 }
17508
17509 if ((mask & builtin_mask) != mask)
17510 {
17511 if (TARGET_DEBUG_BUILTIN)
17512 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17513 continue;
17514 }
17515
17516 if (d->name == 0)
17517 {
17518 if (TARGET_DEBUG_BUILTIN)
17519 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17520 (long unsigned) i);
17521 continue;
17522 }
17523
17524 op[nopnds++] = (void_func) ? void_type_node : rettype;
17525
17526 if (attr_args == RS6000_BTC_UNARY)
17527 op[nopnds++] = argtype;
17528 else if (attr_args == RS6000_BTC_BINARY)
17529 {
17530 op[nopnds++] = argtype;
17531 op[nopnds++] = argtype;
17532 }
17533 else if (attr_args == RS6000_BTC_TERNARY)
17534 {
17535 op[nopnds++] = argtype;
17536 op[nopnds++] = argtype;
17537 op[nopnds++] = argtype;
17538 }
17539
17540 switch (nopnds)
17541 {
17542 case 1:
17543 type = build_function_type_list (op[0], NULL_TREE);
17544 break;
17545 case 2:
17546 type = build_function_type_list (op[0], op[1], NULL_TREE);
17547 break;
17548 case 3:
17549 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17550 break;
17551 case 4:
17552 type = build_function_type_list (op[0], op[1], op[2], op[3],
17553 NULL_TREE);
17554 break;
17555 default:
17556 gcc_unreachable ();
17557 }
17558
17559 def_builtin (d->name, type, d->code);
17560 }
17561 }
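
/* Usage sketch (illustrative only): a minimal transactional region using
   the HTM builtins registered above (see also <htmintrin.h>); the return
   conventions are simplified here.  */
#if 0
static int
example_transaction (int *counter)
{
  if (__builtin_tbegin (0))
    {
      ++*counter;			/* transactional body */
      __builtin_tend (0);
      return 1;
    }
  return 0;				/* transaction failed or aborted */
}
#endif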
17562
17563 /* Hash function for builtin functions with up to 3 arguments and a return
17564 type. */
17565 hashval_t
17566 builtin_hasher::hash (builtin_hash_struct *bh)
17567 {
17568 unsigned ret = 0;
17569 int i;
17570
17571 for (i = 0; i < 4; i++)
17572 {
17573 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17574 ret = (ret * 2) + bh->uns_p[i];
17575 }
17576
17577 return ret;
17578 }
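
/* For illustration: each loop iteration above folds one (mode, uns_p) pair
   into the hash as ret = (ret * MAX_MACHINE_MODE + mode) * 2 + uns_p, so
   the four pairs are mixed positionally and swapping two argument modes
   yields a different hash.  */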
17579
17580 /* Compare builtin hash entries H1 and H2 for equivalence. */
17581 bool
17582 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17583 {
17584 return ((p1->mode[0] == p2->mode[0])
17585 && (p1->mode[1] == p2->mode[1])
17586 && (p1->mode[2] == p2->mode[2])
17587 && (p1->mode[3] == p2->mode[3])
17588 && (p1->uns_p[0] == p2->uns_p[0])
17589 && (p1->uns_p[1] == p2->uns_p[1])
17590 && (p1->uns_p[2] == p2->uns_p[2])
17591 && (p1->uns_p[3] == p2->uns_p[3]));
17592 }
17593
17594 /* Map types for builtin functions with an explicit return type and up to 3
17595 arguments.  Functions with fewer than 3 arguments use VOIDmode for the
17596 unused argument slots.  */
17597 static tree
17598 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17599 machine_mode mode_arg1, machine_mode mode_arg2,
17600 enum rs6000_builtins builtin, const char *name)
17601 {
17602 struct builtin_hash_struct h;
17603 struct builtin_hash_struct *h2;
17604 int num_args = 3;
17605 int i;
17606 tree ret_type = NULL_TREE;
17607 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17608
17609 /* Create builtin_hash_table. */
17610 if (builtin_hash_table == NULL)
17611 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17612
17613 h.type = NULL_TREE;
17614 h.mode[0] = mode_ret;
17615 h.mode[1] = mode_arg0;
17616 h.mode[2] = mode_arg1;
17617 h.mode[3] = mode_arg2;
17618 h.uns_p[0] = 0;
17619 h.uns_p[1] = 0;
17620 h.uns_p[2] = 0;
17621 h.uns_p[3] = 0;
17622
17623 /* If the builtin produces unsigned results or takes unsigned arguments,
17624 and it is returned as a decl for the vectorizer (such as widening
17625 multiplies or permutes), make sure the arguments and return value
17626 are type correct.  */
17627 switch (builtin)
17628 {
17629 /* unsigned 1 argument functions. */
17630 case CRYPTO_BUILTIN_VSBOX:
17631 case P8V_BUILTIN_VGBBD:
17632 case MISC_BUILTIN_CDTBCD:
17633 case MISC_BUILTIN_CBCDTD:
17634 h.uns_p[0] = 1;
17635 h.uns_p[1] = 1;
17636 break;
17637
17638 /* unsigned 2 argument functions. */
17639 case ALTIVEC_BUILTIN_VMULEUB:
17640 case ALTIVEC_BUILTIN_VMULEUH:
17641 case P8V_BUILTIN_VMULEUW:
17642 case ALTIVEC_BUILTIN_VMULOUB:
17643 case ALTIVEC_BUILTIN_VMULOUH:
17644 case P8V_BUILTIN_VMULOUW:
17645 case CRYPTO_BUILTIN_VCIPHER:
17646 case CRYPTO_BUILTIN_VCIPHERLAST:
17647 case CRYPTO_BUILTIN_VNCIPHER:
17648 case CRYPTO_BUILTIN_VNCIPHERLAST:
17649 case CRYPTO_BUILTIN_VPMSUMB:
17650 case CRYPTO_BUILTIN_VPMSUMH:
17651 case CRYPTO_BUILTIN_VPMSUMW:
17652 case CRYPTO_BUILTIN_VPMSUMD:
17653 case CRYPTO_BUILTIN_VPMSUM:
17654 case MISC_BUILTIN_ADDG6S:
17655 case MISC_BUILTIN_DIVWEU:
17656 case MISC_BUILTIN_DIVDEU:
17657 case VSX_BUILTIN_UDIV_V2DI:
17658 case ALTIVEC_BUILTIN_VMAXUB:
17659 case ALTIVEC_BUILTIN_VMINUB:
17660 case ALTIVEC_BUILTIN_VMAXUH:
17661 case ALTIVEC_BUILTIN_VMINUH:
17662 case ALTIVEC_BUILTIN_VMAXUW:
17663 case ALTIVEC_BUILTIN_VMINUW:
17664 case P8V_BUILTIN_VMAXUD:
17665 case P8V_BUILTIN_VMINUD:
17666 h.uns_p[0] = 1;
17667 h.uns_p[1] = 1;
17668 h.uns_p[2] = 1;
17669 break;
17670
17671 /* unsigned 3 argument functions. */
17672 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17673 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17674 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17675 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17676 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17677 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17678 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17679 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17680 case VSX_BUILTIN_VPERM_16QI_UNS:
17681 case VSX_BUILTIN_VPERM_8HI_UNS:
17682 case VSX_BUILTIN_VPERM_4SI_UNS:
17683 case VSX_BUILTIN_VPERM_2DI_UNS:
17684 case VSX_BUILTIN_XXSEL_16QI_UNS:
17685 case VSX_BUILTIN_XXSEL_8HI_UNS:
17686 case VSX_BUILTIN_XXSEL_4SI_UNS:
17687 case VSX_BUILTIN_XXSEL_2DI_UNS:
17688 case CRYPTO_BUILTIN_VPERMXOR:
17689 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17690 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17691 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17692 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17693 case CRYPTO_BUILTIN_VSHASIGMAW:
17694 case CRYPTO_BUILTIN_VSHASIGMAD:
17695 case CRYPTO_BUILTIN_VSHASIGMA:
17696 h.uns_p[0] = 1;
17697 h.uns_p[1] = 1;
17698 h.uns_p[2] = 1;
17699 h.uns_p[3] = 1;
17700 break;
17701
17702 /* signed permute functions with unsigned char mask. */
17703 case ALTIVEC_BUILTIN_VPERM_16QI:
17704 case ALTIVEC_BUILTIN_VPERM_8HI:
17705 case ALTIVEC_BUILTIN_VPERM_4SI:
17706 case ALTIVEC_BUILTIN_VPERM_4SF:
17707 case ALTIVEC_BUILTIN_VPERM_2DI:
17708 case ALTIVEC_BUILTIN_VPERM_2DF:
17709 case VSX_BUILTIN_VPERM_16QI:
17710 case VSX_BUILTIN_VPERM_8HI:
17711 case VSX_BUILTIN_VPERM_4SI:
17712 case VSX_BUILTIN_VPERM_4SF:
17713 case VSX_BUILTIN_VPERM_2DI:
17714 case VSX_BUILTIN_VPERM_2DF:
17715 h.uns_p[3] = 1;
17716 break;
17717
17718 /* unsigned args, signed return. */
17719 case VSX_BUILTIN_XVCVUXDSP:
17720 case VSX_BUILTIN_XVCVUXDDP_UNS:
17721 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17722 h.uns_p[1] = 1;
17723 break;
17724
17725 /* signed args, unsigned return. */
17726 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17727 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17728 case MISC_BUILTIN_UNPACK_TD:
17729 case MISC_BUILTIN_UNPACK_V1TI:
17730 h.uns_p[0] = 1;
17731 break;
17732
17733 /* unsigned arguments, bool return (compares). */
17734 case ALTIVEC_BUILTIN_VCMPEQUB:
17735 case ALTIVEC_BUILTIN_VCMPEQUH:
17736 case ALTIVEC_BUILTIN_VCMPEQUW:
17737 case P8V_BUILTIN_VCMPEQUD:
17738 case VSX_BUILTIN_CMPGE_U16QI:
17739 case VSX_BUILTIN_CMPGE_U8HI:
17740 case VSX_BUILTIN_CMPGE_U4SI:
17741 case VSX_BUILTIN_CMPGE_U2DI:
17742 case ALTIVEC_BUILTIN_VCMPGTUB:
17743 case ALTIVEC_BUILTIN_VCMPGTUH:
17744 case ALTIVEC_BUILTIN_VCMPGTUW:
17745 case P8V_BUILTIN_VCMPGTUD:
17746 h.uns_p[1] = 1;
17747 h.uns_p[2] = 1;
17748 break;
17749
17750 /* unsigned arguments for 128-bit pack instructions. */
17751 case MISC_BUILTIN_PACK_TD:
17752 case MISC_BUILTIN_PACK_V1TI:
17753 h.uns_p[1] = 1;
17754 h.uns_p[2] = 1;
17755 break;
17756
17757 /* unsigned second arguments (vector shift right). */
17758 case ALTIVEC_BUILTIN_VSRB:
17759 case ALTIVEC_BUILTIN_VSRH:
17760 case ALTIVEC_BUILTIN_VSRW:
17761 case P8V_BUILTIN_VSRD:
17762 h.uns_p[2] = 1;
17763 break;
17764
17765 default:
17766 break;
17767 }
17768
17769 /* Figure out how many args are present. */
17770 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17771 num_args--;
17772
17773 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17774 if (!ret_type && h.uns_p[0])
17775 ret_type = builtin_mode_to_type[h.mode[0]][0];
17776
17777 if (!ret_type)
17778 fatal_error (input_location,
17779 "internal error: builtin function %qs had an unexpected "
17780 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17781
17782 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17783 arg_type[i] = NULL_TREE;
17784
17785 for (i = 0; i < num_args; i++)
17786 {
17787 int m = (int) h.mode[i+1];
17788 int uns_p = h.uns_p[i+1];
17789
17790 arg_type[i] = builtin_mode_to_type[m][uns_p];
17791 if (!arg_type[i] && uns_p)
17792 arg_type[i] = builtin_mode_to_type[m][0];
17793
17794 if (!arg_type[i])
17795 fatal_error (input_location,
17796 "internal error: builtin function %qs, argument %d "
17797 "had unexpected argument type %qs", name, i,
17798 GET_MODE_NAME (m));
17799 }
17800
17801 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17802 if (*found == NULL)
17803 {
17804 h2 = ggc_alloc<builtin_hash_struct> ();
17805 *h2 = h;
17806 *found = h2;
17807
17808 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17809 arg_type[2], NULL_TREE);
17810 }
17811
17812 return (*found)->type;
17813 }
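
/* For example (a hedged illustration): for ALTIVEC_BUILTIN_VMULEUH the
   switch above sets uns_p[0..2], so the type built here is roughly
   vector unsigned int (*) (vector unsigned short, vector unsigned short)
   rather than the signed variant the bare modes alone would give.  */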
17814
17815 static void
17816 rs6000_common_init_builtins (void)
17817 {
17818 const struct builtin_description *d;
17819 size_t i;
17820
17821 tree opaque_ftype_opaque = NULL_TREE;
17822 tree opaque_ftype_opaque_opaque = NULL_TREE;
17823 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17824 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17825
17826 /* Create Altivec and VSX builtins on machines with at least the
17827 general purpose extensions (970 and newer) to allow the use of
17828 the target attribute. */
17829
17830 if (TARGET_EXTRA_BUILTINS)
17831 builtin_mask |= RS6000_BTM_COMMON;
17832
17833 /* Add the ternary operators. */
17834 d = bdesc_3arg;
17835 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17836 {
17837 tree type;
17838 HOST_WIDE_INT mask = d->mask;
17839
17840 if ((mask & builtin_mask) != mask)
17841 {
17842 if (TARGET_DEBUG_BUILTIN)
17843 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17844 continue;
17845 }
17846
17847 if (rs6000_overloaded_builtin_p (d->code))
17848 {
17849 if (! (type = opaque_ftype_opaque_opaque_opaque))
17850 type = opaque_ftype_opaque_opaque_opaque
17851 = build_function_type_list (opaque_V4SI_type_node,
17852 opaque_V4SI_type_node,
17853 opaque_V4SI_type_node,
17854 opaque_V4SI_type_node,
17855 NULL_TREE);
17856 }
17857 else
17858 {
17859 enum insn_code icode = d->icode;
17860 if (d->name == 0)
17861 {
17862 if (TARGET_DEBUG_BUILTIN)
17863 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17864 (long unsigned)i);
17865
17866 continue;
17867 }
17868
17869 if (icode == CODE_FOR_nothing)
17870 {
17871 if (TARGET_DEBUG_BUILTIN)
17872 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17873 d->name);
17874
17875 continue;
17876 }
17877
17878 type = builtin_function_type (insn_data[icode].operand[0].mode,
17879 insn_data[icode].operand[1].mode,
17880 insn_data[icode].operand[2].mode,
17881 insn_data[icode].operand[3].mode,
17882 d->code, d->name);
17883 }
17884
17885 def_builtin (d->name, type, d->code);
17886 }
17887
17888 /* Add the binary operators. */
17889 d = bdesc_2arg;
17890 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17891 {
17892 machine_mode mode0, mode1, mode2;
17893 tree type;
17894 HOST_WIDE_INT mask = d->mask;
17895
17896 if ((mask & builtin_mask) != mask)
17897 {
17898 if (TARGET_DEBUG_BUILTIN)
17899 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17900 continue;
17901 }
17902
17903 if (rs6000_overloaded_builtin_p (d->code))
17904 {
17905 if (! (type = opaque_ftype_opaque_opaque))
17906 type = opaque_ftype_opaque_opaque
17907 = build_function_type_list (opaque_V4SI_type_node,
17908 opaque_V4SI_type_node,
17909 opaque_V4SI_type_node,
17910 NULL_TREE);
17911 }
17912 else
17913 {
17914 enum insn_code icode = d->icode;
17915 if (d->name == 0)
17916 {
17917 if (TARGET_DEBUG_BUILTIN)
17918 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17919 (long unsigned)i);
17920
17921 continue;
17922 }
17923
17924 if (icode == CODE_FOR_nothing)
17925 {
17926 if (TARGET_DEBUG_BUILTIN)
17927 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17928 d->name);
17929
17930 continue;
17931 }
17932
17933 mode0 = insn_data[icode].operand[0].mode;
17934 mode1 = insn_data[icode].operand[1].mode;
17935 mode2 = insn_data[icode].operand[2].mode;
17936
17937 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17938 d->code, d->name);
17939 }
17940
17941 def_builtin (d->name, type, d->code);
17942 }
17943
17944 /* Add the simple unary operators. */
17945 d = bdesc_1arg;
17946 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17947 {
17948 machine_mode mode0, mode1;
17949 tree type;
17950 HOST_WIDE_INT mask = d->mask;
17951
17952 if ((mask & builtin_mask) != mask)
17953 {
17954 if (TARGET_DEBUG_BUILTIN)
17955 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17956 continue;
17957 }
17958
17959 if (rs6000_overloaded_builtin_p (d->code))
17960 {
17961 if (! (type = opaque_ftype_opaque))
17962 type = opaque_ftype_opaque
17963 = build_function_type_list (opaque_V4SI_type_node,
17964 opaque_V4SI_type_node,
17965 NULL_TREE);
17966 }
17967 else
17968 {
17969 enum insn_code icode = d->icode;
17970 if (d->name == 0)
17971 {
17972 if (TARGET_DEBUG_BUILTIN)
17973 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17974 (long unsigned)i);
17975
17976 continue;
17977 }
17978
17979 if (icode == CODE_FOR_nothing)
17980 {
17981 if (TARGET_DEBUG_BUILTIN)
17982 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17983 d->name);
17984
17985 continue;
17986 }
17987
17988 mode0 = insn_data[icode].operand[0].mode;
17989 mode1 = insn_data[icode].operand[1].mode;
17990
17991 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17992 d->code, d->name);
17993 }
17994
17995 def_builtin (d->name, type, d->code);
17996 }
17997
17998 /* Add the simple no-argument operators. */
17999 d = bdesc_0arg;
18000 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18001 {
18002 machine_mode mode0;
18003 tree type;
18004 HOST_WIDE_INT mask = d->mask;
18005
18006 if ((mask & builtin_mask) != mask)
18007 {
18008 if (TARGET_DEBUG_BUILTIN)
18009 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18010 continue;
18011 }
18012 if (rs6000_overloaded_builtin_p (d->code))
18013 {
18014 if (!opaque_ftype_opaque)
18015 opaque_ftype_opaque
18016 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18017 type = opaque_ftype_opaque;
18018 }
18019 else
18020 {
18021 enum insn_code icode = d->icode;
18022 if (d->name == 0)
18023 {
18024 if (TARGET_DEBUG_BUILTIN)
18025 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18026 (long unsigned) i);
18027 continue;
18028 }
18029 if (icode == CODE_FOR_nothing)
18030 {
18031 if (TARGET_DEBUG_BUILTIN)
18032 fprintf (stderr,
18033 "rs6000_builtin, skip no-argument %s (no code)\n",
18034 d->name);
18035 continue;
18036 }
18037 mode0 = insn_data[icode].operand[0].mode;
18038 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18039 d->code, d->name);
18040 }
18041 def_builtin (d->name, type, d->code);
18042 }
18043 }
18044
18045 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18046 static void
18047 init_float128_ibm (machine_mode mode)
18048 {
18049 if (!TARGET_XL_COMPAT)
18050 {
18051 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18052 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18053 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18054 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18055
18056 if (!TARGET_HARD_FLOAT)
18057 {
18058 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18059 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18060 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18061 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18062 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18063 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18064 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18065 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18066
18067 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18068 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18069 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18070 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18071 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18072 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18073 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18074 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18075 }
18076 }
18077 else
18078 {
18079 set_optab_libfunc (add_optab, mode, "_xlqadd");
18080 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18081 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18082 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18083 }
18084
18085 /* Add various conversions for IFmode to use the traditional TFmode
18086 names. */
18087 if (mode == IFmode)
18088 {
18089 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18090 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18091 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18092 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18093 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18094 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18095
18096 if (TARGET_POWERPC64)
18097 {
18098 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18099 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18100 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18101 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18102 }
18103 }
18104 }
18105
18106 /* Create a decl for either complex long double multiply or complex long double
18107 divide when long double is IEEE 128-bit floating point. We can't use
18108 __multc3 and __divtc3 because the original IBM extended double long
18109 double already used those names.  The complex multiply/divide functions
18110 are encoded as builtins with a complex result and 4 scalar inputs.  */
18111
18112 static void
18113 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18114 {
18115 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18116 name, NULL_TREE);
18117
18118 set_builtin_decl (fncode, fndecl, true);
18119
18120 if (TARGET_DEBUG_BUILTIN)
18121 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18122
18123 return;
18124 }
18125
18126 /* Set up IEEE 128-bit floating point routines. Use different names if the
18127 arguments can be passed in a vector register. The historical PowerPC
18128 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18129 continue to use that if we aren't using vector registers to pass IEEE
18130 128-bit floating point. */
18131
18132 static void
18133 init_float128_ieee (machine_mode mode)
18134 {
18135 if (FLOAT128_VECTOR_P (mode))
18136 {
18137 static bool complex_muldiv_init_p = false;
18138
18139 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18140 we have clone or target attributes, this will be called a second
18141 time.  We want to create the built-in functions only once.  */
18142 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18143 {
18144 complex_muldiv_init_p = true;
18145 built_in_function fncode_mul =
18146 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18147 - MIN_MODE_COMPLEX_FLOAT);
18148 built_in_function fncode_div =
18149 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18150 - MIN_MODE_COMPLEX_FLOAT);
18151
18152 tree fntype = build_function_type_list (complex_long_double_type_node,
18153 long_double_type_node,
18154 long_double_type_node,
18155 long_double_type_node,
18156 long_double_type_node,
18157 NULL_TREE);
18158
18159 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18160 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18161 }
18162
18163 set_optab_libfunc (add_optab, mode, "__addkf3");
18164 set_optab_libfunc (sub_optab, mode, "__subkf3");
18165 set_optab_libfunc (neg_optab, mode, "__negkf2");
18166 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18167 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18168 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18169 set_optab_libfunc (abs_optab, mode, "__abskf2");
18170 set_optab_libfunc (powi_optab, mode, "__powikf2");
18171
18172 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18173 set_optab_libfunc (ne_optab, mode, "__nekf2");
18174 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18175 set_optab_libfunc (ge_optab, mode, "__gekf2");
18176 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18177 set_optab_libfunc (le_optab, mode, "__lekf2");
18178 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18179
18180 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18181 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18182 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18183 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18184
18185 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18186 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18187 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18188
18189 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18190 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18191 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18192
18193 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18194 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18195 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18196 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18197 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18198 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18199
18200 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18201 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18202 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18203 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18204
18205 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18206 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18207 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18208 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18209
18210 if (TARGET_POWERPC64)
18211 {
18212 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18213 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18214 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18215 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18216 }
18217 }
18218
18219 else
18220 {
18221 set_optab_libfunc (add_optab, mode, "_q_add");
18222 set_optab_libfunc (sub_optab, mode, "_q_sub");
18223 set_optab_libfunc (neg_optab, mode, "_q_neg");
18224 set_optab_libfunc (smul_optab, mode, "_q_mul");
18225 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18226 if (TARGET_PPC_GPOPT)
18227 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18228
18229 set_optab_libfunc (eq_optab, mode, "_q_feq");
18230 set_optab_libfunc (ne_optab, mode, "_q_fne");
18231 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18232 set_optab_libfunc (ge_optab, mode, "_q_fge");
18233 set_optab_libfunc (lt_optab, mode, "_q_flt");
18234 set_optab_libfunc (le_optab, mode, "_q_fle");
18235
18236 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18237 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18238 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18239 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18240 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18241 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18242 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18243 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18244 }
18245 }
18246
18247 static void
18248 rs6000_init_libfuncs (void)
18249 {
18250 /* __float128 support. */
18251 if (TARGET_FLOAT128_TYPE)
18252 {
18253 init_float128_ibm (IFmode);
18254 init_float128_ieee (KFmode);
18255 }
18256
18257 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18258 if (TARGET_LONG_DOUBLE_128)
18259 {
18260 if (!TARGET_IEEEQUAD)
18261 init_float128_ibm (TFmode);
18262 
18263 else
18264 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
18265 init_float128_ieee (TFmode);
18266 }
18267 }
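
/* For example: after this setup, with -mfloat128 an addition of two
   __float128 (KFmode) values lowers to a call to __addkf3, and a
   __float128-to-double conversion calls __trunckfdf2, per the tables in
   init_float128_ieee above.  */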
18268
18269 /* Emit a potentially record-form instruction, setting DST from SRC.
18270 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18271 signed comparison of DST with zero. If DOT is 1, the generated RTL
18272 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18273 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18274 a separate COMPARE. */
18275
18276 void
18277 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18278 {
18279 if (dot == 0)
18280 {
18281 emit_move_insn (dst, src);
18282 return;
18283 }
18284
18285 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18286 {
18287 emit_move_insn (dst, src);
18288 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18289 return;
18290 }
18291
18292 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18293 if (dot == 1)
18294 {
18295 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18296 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18297 }
18298 else
18299 {
18300 rtx set = gen_rtx_SET (dst, src);
18301 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18302 }
18303 }
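
/* For example (a hedged illustration): with DOT == 2 and CCREG == CR0 this
   emits a single record-form insn of the shape

     (parallel [(set (reg:CC CR0) (compare:CC (src) (const_int 0)))
		(set (dst) (src))])

   which patterns can output as, e.g., "rldicl. %0,%1,0,%3"; with DOT == 1
   the second element is a clobber of DST instead.  */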
18304
18305 \f
18306 /* A validation routine: say whether CODE, a condition code, and MODE
18307 match. The other alternatives either don't make sense or should
18308 never be generated. */
18309
18310 void
18311 validate_condition_mode (enum rtx_code code, machine_mode mode)
18312 {
18313 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18314 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18315 && GET_MODE_CLASS (mode) == MODE_CC);
18316
18317 /* These don't make sense. */
18318 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18319 || mode != CCUNSmode);
18320
18321 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18322 || mode == CCUNSmode);
18323
18324 gcc_assert (mode == CCFPmode
18325 || (code != ORDERED && code != UNORDERED
18326 && code != UNEQ && code != LTGT
18327 && code != UNGT && code != UNLT
18328 && code != UNGE && code != UNLE));
18329
18330 /* These should never be generated except for
18331 flag_finite_math_only. */
18332 gcc_assert (mode != CCFPmode
18333 || flag_finite_math_only
18334 || (code != LE && code != GE
18335 && code != UNEQ && code != LTGT
18336 && code != UNGT && code != UNLT));
18337
18338 /* These are invalid; the information is not there. */
18339 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18340 }
18341
18342 \f
18343 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18344 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18345 not zero, store there the bit offset (counted from the right) where
18346 the single stretch of 1 bits begins; and similarly for B, the bit
18347 offset where it ends. */
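/* For example, in SImode: 0x000000f0 is a single stretch of ones,
giving *B = 7 and *E = 4; the wrap-around mask 0xff0000ff is also
accepted, giving *B = 7 and *E = 24; but 0x00000f0f has two separate
stretches of ones and is rejected. */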
18348
18349 bool
18350 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18351 {
18352 unsigned HOST_WIDE_INT val = INTVAL (mask);
18353 unsigned HOST_WIDE_INT bit;
18354 int nb, ne;
18355 int n = GET_MODE_PRECISION (mode);
18356
18357 if (mode != DImode && mode != SImode)
18358 return false;
18359
18360 if (INTVAL (mask) >= 0)
18361 {
18362 bit = val & -val;
18363 ne = exact_log2 (bit);
18364 nb = exact_log2 (val + bit);
18365 }
18366 else if (val + 1 == 0)
18367 {
18368 nb = n;
18369 ne = 0;
18370 }
18371 else if (val & 1)
18372 {
18373 val = ~val;
18374 bit = val & -val;
18375 nb = exact_log2 (bit);
18376 ne = exact_log2 (val + bit);
18377 }
18378 else
18379 {
18380 bit = val & -val;
18381 ne = exact_log2 (bit);
18382 if (val + bit == 0)
18383 nb = n;
18384 else
18385 nb = 0;
18386 }
18387
18388 nb--;
18389
18390 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18391 return false;
18392
18393 if (b)
18394 *b = nb;
18395 if (e)
18396 *e = ne;
18397
18398 return true;
18399 }
18400
18401 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18402 or rldicr instruction, to implement an AND with it in mode MODE. */
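/* For example, in DImode: 0x00000000ffffffff is valid (rldicl, since
the ones reach bit 0), 0xffffffff00000000 is valid (rldicr, since the
ones reach bit 63), and 0x0000ffff0000 is valid (rlwinm, since the
mask fits in the low 32 bits without wrapping), but
0x0000ffff00000000 is none of these and needs two insns. */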
18403
18404 bool
18405 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18406 {
18407 int nb, ne;
18408
18409 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18410 return false;
18411
18412 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18413 does not wrap. */
18414 if (mode == DImode)
18415 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18416
18417 /* For SImode, rlwinm can do everything. */
18418 if (mode == SImode)
18419 return (nb < 32 && ne < 32);
18420
18421 return false;
18422 }
18423
18424 /* Return the instruction template for an AND with mask in mode MODE, with
18425 operands OPERANDS. If DOT is true, make it a record-form instruction. */
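/* For example, an AND with 0x00000000ffffffff in DImode hits the first
case below (NE == 0, NB == 31), so operands[3] becomes 32 and the
template returned is "rldicl %0,%1,0,32", i.e. clrldi, which clears
the upper 32 bits. */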
18426
18427 const char *
18428 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18429 {
18430 int nb, ne;
18431
18432 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18433 gcc_unreachable ();
18434
18435 if (mode == DImode && ne == 0)
18436 {
18437 operands[3] = GEN_INT (63 - nb);
18438 if (dot)
18439 return "rldicl. %0,%1,0,%3";
18440 return "rldicl %0,%1,0,%3";
18441 }
18442
18443 if (mode == DImode && nb == 63)
18444 {
18445 operands[3] = GEN_INT (63 - ne);
18446 if (dot)
18447 return "rldicr. %0,%1,0,%3";
18448 return "rldicr %0,%1,0,%3";
18449 }
18450
18451 if (nb < 32 && ne < 32)
18452 {
18453 operands[3] = GEN_INT (31 - nb);
18454 operands[4] = GEN_INT (31 - ne);
18455 if (dot)
18456 return "rlwinm. %0,%1,0,%3,%4";
18457 return "rlwinm %0,%1,0,%3,%4";
18458 }
18459
18460 gcc_unreachable ();
18461 }
18462
18463 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18464 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18465 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
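/* For example, (x << 8) & 0xff00 in SImode is accepted (nb = 15,
ne = 8, sh = 8) and can be done as "rlwinm %0,%1,8,16,23", while the
same mask with a shift of 12 is rejected: bits below the shift count
would need the zeroes the shift brings in, which a rotate-and-mask
cannot supply (ne < sh). */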
18466
18467 bool
18468 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18469 {
18470 int nb, ne;
18471
18472 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18473 return false;
18474
18475 int n = GET_MODE_PRECISION (mode);
18476 int sh = -1;
18477
18478 if (CONST_INT_P (XEXP (shift, 1)))
18479 {
18480 sh = INTVAL (XEXP (shift, 1));
18481 if (sh < 0 || sh >= n)
18482 return false;
18483 }
18484
18485 rtx_code code = GET_CODE (shift);
18486
18487 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18488 if (sh == 0)
18489 code = ROTATE;
18490
18491 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18492 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18493 code = ASHIFT;
18494 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18495 {
18496 code = LSHIFTRT;
18497 sh = n - sh;
18498 }
18499
18500 /* DImode rotates need rld*. */
18501 if (mode == DImode && code == ROTATE)
18502 return (nb == 63 || ne == 0 || ne == sh);
18503
18504 /* SImode rotates need rlw*. */
18505 if (mode == SImode && code == ROTATE)
18506 return (nb < 32 && ne < 32 && sh < 32);
18507
18508 /* Wrap-around masks are only okay for rotates. */
18509 if (ne > nb)
18510 return false;
18511
18512 /* Variable shifts are only okay for rotates. */
18513 if (sh < 0)
18514 return false;
18515
18516 /* Don't allow ASHIFT if the mask is wrong for that. */
18517 if (code == ASHIFT && ne < sh)
18518 return false;
18519
18520 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18521 if the mask is wrong for that. */
18522 if (nb < 32 && ne < 32 && sh < 32
18523 && !(code == LSHIFTRT && nb >= 32 - sh))
18524 return true;
18525
18526 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18527 if the mask is wrong for that. */
18528 if (code == LSHIFTRT)
18529 sh = 64 - sh;
18530 if (nb == 63 || ne == 0 || ne == sh)
18531 return !(code == LSHIFTRT && nb >= sh);
18532
18533 return false;
18534 }
18535
18536 /* Return the instruction template for a shift with mask in mode MODE, with
18537 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18538
18539 const char *
18540 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18541 {
18542 int nb, ne;
18543
18544 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18545 gcc_unreachable ();
18546
18547 if (mode == DImode && ne == 0)
18548 {
18549 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18550 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18551 operands[3] = GEN_INT (63 - nb);
18552 if (dot)
18553 return "rld%I2cl. %0,%1,%2,%3";
18554 return "rld%I2cl %0,%1,%2,%3";
18555 }
18556
18557 if (mode == DImode && nb == 63)
18558 {
18559 operands[3] = GEN_INT (63 - ne);
18560 if (dot)
18561 return "rld%I2cr. %0,%1,%2,%3";
18562 return "rld%I2cr %0,%1,%2,%3";
18563 }
18564
18565 if (mode == DImode
18566 && GET_CODE (operands[4]) != LSHIFTRT
18567 && CONST_INT_P (operands[2])
18568 && ne == INTVAL (operands[2]))
18569 {
18570 operands[3] = GEN_INT (63 - nb);
18571 if (dot)
18572 return "rld%I2c. %0,%1,%2,%3";
18573 return "rld%I2c %0,%1,%2,%3";
18574 }
18575
18576 if (nb < 32 && ne < 32)
18577 {
18578 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18579 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18580 operands[3] = GEN_INT (31 - nb);
18581 operands[4] = GEN_INT (31 - ne);
18582 /* This insn can also be a 64-bit rotate with mask that really makes
18583 it just a shift right (with mask); the %h below are to adjust for
18584 that situation (shift count is >= 32 in that case). */
18585 if (dot)
18586 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18587 return "rlw%I2nm %0,%1,%h2,%3,%4";
18588 }
18589
18590 gcc_unreachable ();
18591 }
18592
18593 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18594 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18595 ASHIFT, or LSHIFTRT) in mode MODE. */
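/* For example, (dst & ~0xff00) | ((src << 8) & 0xff00) in SImode is a
valid insert (nb = 15, ne = 8, sh = 8) and maps to
"rlwimi %0,%1,8,16,23". */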
18596
18597 bool
18598 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18599 {
18600 int nb, ne;
18601
18602 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18603 return false;
18604
18605 int n = GET_MODE_PRECISION (mode);
18606
18607 int sh = INTVAL (XEXP (shift, 1));
18608 if (sh < 0 || sh >= n)
18609 return false;
18610
18611 rtx_code code = GET_CODE (shift);
18612
18613 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18614 if (sh == 0)
18615 code = ROTATE;
18616
18617 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18618 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18619 code = ASHIFT;
18620 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18621 {
18622 code = LSHIFTRT;
18623 sh = n - sh;
18624 }
18625
18626 /* DImode rotates need rldimi. */
18627 if (mode == DImode && code == ROTATE)
18628 return (ne == sh);
18629
18630 /* SImode rotates need rlwimi. */
18631 if (mode == SImode && code == ROTATE)
18632 return (nb < 32 && ne < 32 && sh < 32);
18633
18634 /* Wrap-around masks are only okay for rotates. */
18635 if (ne > nb)
18636 return false;
18637
18638 /* Don't allow ASHIFT if the mask is wrong for that. */
18639 if (code == ASHIFT && ne < sh)
18640 return false;
18641
18642 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18643 if the mask is wrong for that. */
18644 if (nb < 32 && ne < 32 && sh < 32
18645 && !(code == LSHIFTRT && nb >= 32 - sh))
18646 return true;
18647
18648 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18649 if the mask is wrong for that. */
18650 if (code == LSHIFTRT)
18651 sh = 64 - sh;
18652 if (ne == sh)
18653 return !(code == LSHIFTRT && nb >= sh);
18654
18655 return false;
18656 }
18657
18658 /* Return the instruction template for an insert with mask in mode MODE, with
18659 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18660
18661 const char *
18662 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18663 {
18664 int nb, ne;
18665
18666 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18667 gcc_unreachable ();
18668
18669 /* Prefer rldimi because rlwimi is cracked. */
18670 if (TARGET_POWERPC64
18671 && (!dot || mode == DImode)
18672 && GET_CODE (operands[4]) != LSHIFTRT
18673 && ne == INTVAL (operands[2]))
18674 {
18675 operands[3] = GEN_INT (63 - nb);
18676 if (dot)
18677 return "rldimi. %0,%1,%2,%3";
18678 return "rldimi %0,%1,%2,%3";
18679 }
18680
18681 if (nb < 32 && ne < 32)
18682 {
18683 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18684 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18685 operands[3] = GEN_INT (31 - nb);
18686 operands[4] = GEN_INT (31 - ne);
18687 if (dot)
18688 return "rlwimi. %0,%1,%2,%3,%4";
18689 return "rlwimi %0,%1,%2,%3,%4";
18690 }
18691
18692 gcc_unreachable ();
18693 }
18694
18695 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18696 using two machine instructions. */
18697
18698 bool
18699 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18700 {
18701 /* There are two kinds of AND we can handle with two insns:
18702 1) those we can do with two rl* insns;
18703 2) ori[s]; xori[s].
18704
18705 We do not handle that last case yet. */
18706
18707 /* If there is just one stretch of ones, we can do it. */
18708 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18709 return true;
18710
18711 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18712 one insn, we can do the whole thing with two. */
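/* For example, for 0x00ff0ff0: bit1 = 0x10, bit2 = 0x1000, and
bit3 = 0x10000, so below we test 0x00fffff0, the original value with
the hole at bits 12..15 filled in; that is a single stretch of ones,
so this AND can be done in two insns. */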
18713 unsigned HOST_WIDE_INT val = INTVAL (c);
18714 unsigned HOST_WIDE_INT bit1 = val & -val;
18715 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18716 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18717 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18718 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18719 }
18720
18721 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18722 If EXPAND is true, split rotate-and-mask instructions we generate to
18723 their constituent parts as well (this is used during expand); if DOT
18724 is 1, make the last insn a record-form instruction clobbering the
18725 destination GPR and setting the CC reg (from operands[3]); if 2, set
18726 that GPR as well as the CC reg. */
18727
18728 void
18729 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18730 {
18731 gcc_assert (!(expand && dot));
18732
18733 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18734
18735 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18736 shift right. This generates better code than doing the masks without
18737 shifts, or shifting first right and then left. */
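/* For example, an AND with 0x0000ffff00000000 (nb = 47, ne = 32) is
emitted as a shift left by 16, an AND with 0xffff000000000000 (a
valid rldicr mask), and a shift right by 16; combine can then fold
these into rotate-and-mask insns. */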
18738 int nb, ne;
18739 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18740 {
18741 gcc_assert (mode == DImode);
18742
18743 int shift = 63 - nb;
18744 if (expand)
18745 {
18746 rtx tmp1 = gen_reg_rtx (DImode);
18747 rtx tmp2 = gen_reg_rtx (DImode);
18748 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18749 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18750 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18751 }
18752 else
18753 {
18754 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18755 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18756 emit_move_insn (operands[0], tmp);
18757 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18758 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18759 }
18760 return;
18761 }
18762
18763 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18764 that does the rest. */
18765 unsigned HOST_WIDE_INT bit1 = val & -val;
18766 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18767 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18768 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18769
18770 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18771 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18772
18773 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18774
18775 /* Two "no-rotate"-and-mask instructions, for SImode. */
18776 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18777 {
18778 gcc_assert (mode == SImode);
18779
18780 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18781 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18782 emit_move_insn (reg, tmp);
18783 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18784 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18785 return;
18786 }
18787
18788 gcc_assert (mode == DImode);
18789
18790 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18791 insns; we have to do the first in SImode, because it wraps. */
18792 if (mask2 <= 0xffffffff
18793 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18794 {
18795 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18796 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18797 GEN_INT (mask1));
18798 rtx reg_low = gen_lowpart (SImode, reg);
18799 emit_move_insn (reg_low, tmp);
18800 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18801 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18802 return;
18803 }
18804
18805 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18806 at the top end), rotate back and clear the other hole. */
18807 int right = exact_log2 (bit3);
18808 int left = 64 - right;
18809
18810 /* Rotate the mask too. */
18811 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18812
18813 if (expand)
18814 {
18815 rtx tmp1 = gen_reg_rtx (DImode);
18816 rtx tmp2 = gen_reg_rtx (DImode);
18817 rtx tmp3 = gen_reg_rtx (DImode);
18818 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18819 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18820 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18821 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18822 }
18823 else
18824 {
18825 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18826 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18827 emit_move_insn (operands[0], tmp);
18828 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18829 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18830 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18831 }
18832 }
18833 \f
18834 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18835 for lfq and stfq insns iff the registers are hard registers. */
18836
18837 int
18838 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18839 {
18840 /* We might have been passed a SUBREG. */
18841 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
18842 return 0;
18843
18844 /* We might have been passed non-floating-point registers. */
18845 if (!FP_REGNO_P (REGNO (reg1))
18846 || !FP_REGNO_P (REGNO (reg2)))
18847 return 0;
18848
18849 return (REGNO (reg1) == REGNO (reg2) - 1);
18850 }
18851
18852 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18853 addr1 and addr2 must be in consecutive memory locations
18854 (addr2 == addr1 + 8). */
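/* For example, "lfd f10,16(r3)" followed by "lfd f11,24(r3)" reads
consecutive doublewords from the same base register, so the pair
qualifies (registers permitting) for a single lfq. */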
18855
18856 int
18857 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18858 {
18859 rtx addr1, addr2;
18860 unsigned int reg1, reg2;
18861 int offset1, offset2;
18862
18863 /* The mems cannot be volatile. */
18864 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18865 return 0;
18866
18867 addr1 = XEXP (mem1, 0);
18868 addr2 = XEXP (mem2, 0);
18869
18870 /* Extract an offset (if used) from the first addr. */
18871 if (GET_CODE (addr1) == PLUS)
18872 {
18873 /* If not a REG, return zero. */
18874 if (GET_CODE (XEXP (addr1, 0)) != REG)
18875 return 0;
18876 else
18877 {
18878 reg1 = REGNO (XEXP (addr1, 0));
18879 /* The offset must be constant! */
18880 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
18881 return 0;
18882 offset1 = INTVAL (XEXP (addr1, 1));
18883 }
18884 }
18885 else if (GET_CODE (addr1) != REG)
18886 return 0;
18887 else
18888 {
18889 reg1 = REGNO (addr1);
18890 /* This was a simple (mem (reg)) expression. Offset is 0. */
18891 offset1 = 0;
18892 }
18893
18894 /* And now for the second addr. */
18895 if (GET_CODE (addr2) == PLUS)
18896 {
18897 /* If not a REG, return zero. */
18898 if (GET_CODE (XEXP (addr2, 0)) != REG)
18899 return 0;
18900 else
18901 {
18902 reg2 = REGNO (XEXP (addr2, 0));
18903 /* The offset must be constant. */
18904 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
18905 return 0;
18906 offset2 = INTVAL (XEXP (addr2, 1));
18907 }
18908 }
18909 else if (GET_CODE (addr2) != REG)
18910 return 0;
18911 else
18912 {
18913 reg2 = REGNO (addr2);
18914 /* This was a simple (mem (reg)) expression. Offset is 0. */
18915 offset2 = 0;
18916 }
18917
18918 /* Both of these must have the same base register. */
18919 if (reg1 != reg2)
18920 return 0;
18921
18922 /* The offset for the second addr must be 8 more than the first addr. */
18923 if (offset2 != offset1 + 8)
18924 return 0;
18925
18926 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18927 instructions. */
18928 return 1;
18929 }
18930 \f
18931 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18932 need to use DDmode; in all other cases we can use the same mode. */
18933 static machine_mode
18934 rs6000_secondary_memory_needed_mode (machine_mode mode)
18935 {
18936 if (lra_in_progress && mode == SDmode)
18937 return DDmode;
18938 return mode;
18939 }
18940
18941 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18942 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18943 only work on the traditional altivec registers, note if an altivec register
18944 was chosen. */
18945
18946 static enum rs6000_reg_type
18947 register_to_reg_type (rtx reg, bool *is_altivec)
18948 {
18949 HOST_WIDE_INT regno;
18950 enum reg_class rclass;
18951
18952 if (GET_CODE (reg) == SUBREG)
18953 reg = SUBREG_REG (reg);
18954
18955 if (!REG_P (reg))
18956 return NO_REG_TYPE;
18957
18958 regno = REGNO (reg);
18959 if (regno >= FIRST_PSEUDO_REGISTER)
18960 {
18961 if (!lra_in_progress && !reload_completed)
18962 return PSEUDO_REG_TYPE;
18963
18964 regno = true_regnum (reg);
18965 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
18966 return PSEUDO_REG_TYPE;
18967 }
18968
18969 gcc_assert (regno >= 0);
18970
18971 if (is_altivec && ALTIVEC_REGNO_P (regno))
18972 *is_altivec = true;
18973
18974 rclass = rs6000_regno_regclass[regno];
18975 return reg_class_to_reg_type[(int)rclass];
18976 }
18977
18978 /* Helper function to return the cost of adding a TOC entry address. */
18979
18980 static inline int
18981 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18982 {
18983 int ret;
18984
18985 if (TARGET_CMODEL != CMODEL_SMALL)
18986 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18987
18988 else
18989 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18990
18991 return ret;
18992 }
18993
18994 /* Helper function for rs6000_secondary_reload to determine whether the memory
18995 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18996 needs reloading. Return negative if the memory is not handled by the memory
18997 helper functions and a different reload method should be tried, 0 if no
18998 additional instructions are needed, and positive to give the extra cost of
18999 the memory. */
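/* For example, an Altivec-style address such as (and (reg) (const_int -16))
costs nothing extra for ALTIVEC_REGS, where the AND is implicit in the
lvx/stvx instructions, but costs at least one extra insn for GPRs or
FPRs, where the AND has to be done explicitly. */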
19000
19001 static int
19002 rs6000_secondary_reload_memory (rtx addr,
19003 enum reg_class rclass,
19004 machine_mode mode)
19005 {
19006 int extra_cost = 0;
19007 rtx reg, and_arg, plus_arg0, plus_arg1;
19008 addr_mask_type addr_mask;
19009 const char *type = NULL;
19010 const char *fail_msg = NULL;
19011
19012 if (GPR_REG_CLASS_P (rclass))
19013 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19014
19015 else if (rclass == FLOAT_REGS)
19016 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19017
19018 else if (rclass == ALTIVEC_REGS)
19019 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19020
19021 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19022 else if (rclass == VSX_REGS)
19023 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19024 & ~RELOAD_REG_AND_M16);
19025
19026 /* If the register allocator hasn't made up its mind yet on the register
19027 class to use, settle on defaults. */
19028 else if (rclass == NO_REGS)
19029 {
19030 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19031 & ~RELOAD_REG_AND_M16);
19032
19033 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19034 addr_mask &= ~(RELOAD_REG_INDEXED
19035 | RELOAD_REG_PRE_INCDEC
19036 | RELOAD_REG_PRE_MODIFY);
19037 }
19038
19039 else
19040 addr_mask = 0;
19041
19042 /* If the register isn't valid in this register class, just return now. */
19043 if ((addr_mask & RELOAD_REG_VALID) == 0)
19044 {
19045 if (TARGET_DEBUG_ADDR)
19046 {
19047 fprintf (stderr,
19048 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19049 "not valid in class\n",
19050 GET_MODE_NAME (mode), reg_class_names[rclass]);
19051 debug_rtx (addr);
19052 }
19053
19054 return -1;
19055 }
19056
19057 switch (GET_CODE (addr))
19058 {
19059 /* Does the register class support auto update forms for this mode? We
19060 don't need a scratch register, since the powerpc only supports
19061 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19062 case PRE_INC:
19063 case PRE_DEC:
19064 reg = XEXP (addr, 0);
19065 if (!base_reg_operand (reg, GET_MODE (reg)))
19066 {
19067 fail_msg = "no base register #1";
19068 extra_cost = -1;
19069 }
19070
19071 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19072 {
19073 extra_cost = 1;
19074 type = "update";
19075 }
19076 break;
19077
19078 case PRE_MODIFY:
19079 reg = XEXP (addr, 0);
19080 plus_arg1 = XEXP (addr, 1);
19081 if (!base_reg_operand (reg, GET_MODE (reg))
19082 || GET_CODE (plus_arg1) != PLUS
19083 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19084 {
19085 fail_msg = "bad PRE_MODIFY";
19086 extra_cost = -1;
19087 }
19088
19089 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19090 {
19091 extra_cost = 1;
19092 type = "update";
19093 }
19094 break;
19095
19096 /* Do we need to simulate AND -16 to clear the bottom address bits used
19097 in VMX load/stores? Only allow the AND for vector sizes. */
19098 case AND:
19099 and_arg = XEXP (addr, 0);
19100 if (GET_MODE_SIZE (mode) != 16
19101 || GET_CODE (XEXP (addr, 1)) != CONST_INT
19102 || INTVAL (XEXP (addr, 1)) != -16)
19103 {
19104 fail_msg = "bad Altivec AND #1";
19105 extra_cost = -1;
19106 }
19107
19108 if (rclass != ALTIVEC_REGS)
19109 {
19110 if (legitimate_indirect_address_p (and_arg, false))
19111 extra_cost = 1;
19112
19113 else if (legitimate_indexed_address_p (and_arg, false))
19114 extra_cost = 2;
19115
19116 else
19117 {
19118 fail_msg = "bad Altivec AND #2";
19119 extra_cost = -1;
19120 }
19121
19122 type = "and";
19123 }
19124 break;
19125
19126 /* If this is an indirect address, make sure it is a base register. */
19127 case REG:
19128 case SUBREG:
19129 if (!legitimate_indirect_address_p (addr, false))
19130 {
19131 extra_cost = 1;
19132 type = "move";
19133 }
19134 break;
19135
19136 /* If this is an indexed address, make sure the register class can handle
19137 indexed addresses for this mode. */
19138 case PLUS:
19139 plus_arg0 = XEXP (addr, 0);
19140 plus_arg1 = XEXP (addr, 1);
19141
19142 /* (plus (plus (reg) (constant)) (constant)) is generated during
19143 push_reload processing, so handle it now. */
19144 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19145 {
19146 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19147 {
19148 extra_cost = 1;
19149 type = "offset";
19150 }
19151 }
19152
19153 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19154 push_reload processing, so handle it now. */
19155 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19156 {
19157 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19158 {
19159 extra_cost = 1;
19160 type = "indexed #2";
19161 }
19162 }
19163
19164 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19165 {
19166 fail_msg = "no base register #2";
19167 extra_cost = -1;
19168 }
19169
19170 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19171 {
19172 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19173 || !legitimate_indexed_address_p (addr, false))
19174 {
19175 extra_cost = 1;
19176 type = "indexed";
19177 }
19178 }
19179
19180 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19181 && CONST_INT_P (plus_arg1))
19182 {
19183 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19184 {
19185 extra_cost = 1;
19186 type = "vector d-form offset";
19187 }
19188 }
19189
19190 /* Make sure the register class can handle offset addresses. */
19191 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19192 {
19193 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19194 {
19195 extra_cost = 1;
19196 type = "offset #2";
19197 }
19198 }
19199
19200 else
19201 {
19202 fail_msg = "bad PLUS";
19203 extra_cost = -1;
19204 }
19205
19206 break;
19207
19208 case LO_SUM:
19209 /* Quad offsets are restricted and can't handle normal addresses. */
19210 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19211 {
19212 extra_cost = -1;
19213 type = "vector d-form lo_sum";
19214 }
19215
19216 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19217 {
19218 fail_msg = "bad LO_SUM";
19219 extra_cost = -1;
19220 }
19221
19222 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19223 {
19224 extra_cost = 1;
19225 type = "lo_sum";
19226 }
19227 break;
19228
19229 /* Static addresses need to create a TOC entry. */
19230 case CONST:
19231 case SYMBOL_REF:
19232 case LABEL_REF:
19233 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19234 {
19235 extra_cost = -1;
19236 type = "vector d-form lo_sum #2";
19237 }
19238
19239 else
19240 {
19241 type = "address";
19242 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19243 }
19244 break;
19245
19246 /* TOC references look like offsettable memory. */
19247 case UNSPEC:
19248 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19249 {
19250 fail_msg = "bad UNSPEC";
19251 extra_cost = -1;
19252 }
19253
19254 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19255 {
19256 extra_cost = -1;
19257 type = "vector d-form lo_sum #3";
19258 }
19259
19260 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19261 {
19262 extra_cost = 1;
19263 type = "toc reference";
19264 }
19265 break;
19266
19267 default:
19268 {
19269 fail_msg = "bad address";
19270 extra_cost = -1;
19271 }
19272 }
19273
19274 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19275 {
19276 if (extra_cost < 0)
19277 fprintf (stderr,
19278 "rs6000_secondary_reload_memory error: mode = %s, "
19279 "class = %s, addr_mask = '%s', %s\n",
19280 GET_MODE_NAME (mode),
19281 reg_class_names[rclass],
19282 rs6000_debug_addr_mask (addr_mask, false),
19283 (fail_msg != NULL) ? fail_msg : "<bad address>");
19284
19285 else
19286 fprintf (stderr,
19287 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19288 "addr_mask = '%s', extra cost = %d, %s\n",
19289 GET_MODE_NAME (mode),
19290 reg_class_names[rclass],
19291 rs6000_debug_addr_mask (addr_mask, false),
19292 extra_cost,
19293 (type) ? type : "<none>");
19294
19295 debug_rtx (addr);
19296 }
19297
19298 return extra_cost;
19299 }
19300
19301 /* Helper function for rs6000_secondary_reload to return true if a move to a
19302 different register class is really a simple move. */
19303
19304 static bool
19305 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19306 enum rs6000_reg_type from_type,
19307 machine_mode mode)
19308 {
19309 int size = GET_MODE_SIZE (mode);
19310
19311 /* Add support for various direct moves available. In this function, we only
19312 look at cases where we don't need any extra registers, and one or more
19313 simple move insns are issued. Originally, small integers are not allowed
19314 in FPR/VSX registers. Single precision binary floating point is not a simple
19315 move because we need to convert to the single precision memory layout.
19316 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19317 need special direct move handling, which we do not support yet. */
19318 if (TARGET_DIRECT_MOVE
19319 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19320 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19321 {
19322 if (TARGET_POWERPC64)
19323 {
19324 /* ISA 2.07: MTVSRD or MFVSRD. */
19325 if (size == 8)
19326 return true;
19327
19328 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19329 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19330 return true;
19331 }
19332
19333 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19334 if (TARGET_P8_VECTOR)
19335 {
19336 if (mode == SImode)
19337 return true;
19338
19339 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19340 return true;
19341 }
19342
19343 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19344 if (mode == SDmode)
19345 return true;
19346 }
19347
19348 /* Power6+: MFTGPR or MFFGPR. */
19349 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19350 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19351 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19352 return true;
19353
19354 /* Move to/from SPR. */
19355 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19356 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19357 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19358 return true;
19359
19360 return false;
19361 }
19362
19363 /* Direct move helper function for rs6000_secondary_reload, handle all of the
19364 special direct moves that involve allocating an extra register, return the
19365 insn code of the helper function if there is such a function or
19366 CODE_FOR_nothing if not. */
19367
19368 static bool
19369 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19370 enum rs6000_reg_type from_type,
19371 machine_mode mode,
19372 secondary_reload_info *sri,
19373 bool altivec_p)
19374 {
19375 bool ret = false;
19376 enum insn_code icode = CODE_FOR_nothing;
19377 int cost = 0;
19378 int size = GET_MODE_SIZE (mode);
19379
19380 if (TARGET_POWERPC64 && size == 16)
19381 {
19382 /* Handle moving 128-bit values from GPRs to VSX registers on
19383 ISA 2.07 (power8, power9) when running in 64-bit mode using
19384 XXPERMDI to glue the two 64-bit values back together. */
19385 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19386 {
19387 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19388 icode = reg_addr[mode].reload_vsx_gpr;
19389 }
19390
19391 /* Handle moving 128-bit values from VSX registers to GPRs on
19392 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19393 bottom 64-bit value. */
19394 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19395 {
19396 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19397 icode = reg_addr[mode].reload_gpr_vsx;
19398 }
19399 }
19400
19401 else if (TARGET_POWERPC64 && mode == SFmode)
19402 {
19403 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19404 {
19405 cost = 3; /* xscvdpspn, mfvsrd, and. */
19406 icode = reg_addr[mode].reload_gpr_vsx;
19407 }
19408
19409 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19410 {
19411 cost = 2; /* mtvsrwz, xscvspdpn. */
19412 icode = reg_addr[mode].reload_vsx_gpr;
19413 }
19414 }
19415
19416 else if (!TARGET_POWERPC64 && size == 8)
19417 {
19418 /* Handle moving 64-bit values from GPRs to floating point registers on
19419 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19420 32-bit values back together. Altivec register classes must be handled
19421 specially since a different instruction is used, and the secondary
19422 reload support requires a single instruction class in the scratch
19423 register constraint. However, right now TFmode is not allowed in
19424 Altivec registers, so the pattern will never match. */
19425 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19426 {
19427 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19428 icode = reg_addr[mode].reload_fpr_gpr;
19429 }
19430 }
19431
19432 if (icode != CODE_FOR_nothing)
19433 {
19434 ret = true;
19435 if (sri)
19436 {
19437 sri->icode = icode;
19438 sri->extra_cost = cost;
19439 }
19440 }
19441
19442 return ret;
19443 }
19444
19445 /* Return whether a move between two register classes can be done either
19446 directly (simple move) or via a pattern that uses a single extra temporary
19447 (using ISA 2.07's direct move in this case). */
19448
19449 static bool
19450 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19451 enum rs6000_reg_type from_type,
19452 machine_mode mode,
19453 secondary_reload_info *sri,
19454 bool altivec_p)
19455 {
19456 /* Fall back to load/store reloads if either type is not a register. */
19457 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19458 return false;
19459
19460 /* If we haven't allocated registers yet, assume the move can be done for the
19461 standard register types. */
19462 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19463 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19464 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19465 return true;
19466
19467 /* A move within the same set of registers is a simple move for
19468 non-specialized registers. */
19469 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19470 return true;
19471
19472 /* Check whether a simple move can be done directly. */
19473 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19474 {
19475 if (sri)
19476 {
19477 sri->icode = CODE_FOR_nothing;
19478 sri->extra_cost = 0;
19479 }
19480 return true;
19481 }
19482
19483 /* Now check if we can do it in a few steps. */
19484 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19485 altivec_p);
19486 }
19487
19488 /* Inform reload about cases where moving X with a mode MODE to a register in
19489 RCLASS requires an extra scratch or immediate register. Return the class
19490 needed for the immediate register.
19491
19492 For VSX and Altivec, we may need a register to convert sp+offset into
19493 reg+sp.
19494
19495 For misaligned 64-bit gpr loads and stores we need a register to
19496 convert an offset address to indirect. */
19497
19498 static reg_class_t
19499 rs6000_secondary_reload (bool in_p,
19500 rtx x,
19501 reg_class_t rclass_i,
19502 machine_mode mode,
19503 secondary_reload_info *sri)
19504 {
19505 enum reg_class rclass = (enum reg_class) rclass_i;
19506 reg_class_t ret = ALL_REGS;
19507 enum insn_code icode;
19508 bool default_p = false;
19509 bool done_p = false;
19510
19511 /* Allow subreg of memory before/during reload. */
19512 bool memory_p = (MEM_P (x)
19513 || (!reload_completed && GET_CODE (x) == SUBREG
19514 && MEM_P (SUBREG_REG (x))));
19515
19516 sri->icode = CODE_FOR_nothing;
19517 sri->t_icode = CODE_FOR_nothing;
19518 sri->extra_cost = 0;
19519 icode = ((in_p)
19520 ? reg_addr[mode].reload_load
19521 : reg_addr[mode].reload_store);
19522
19523 if (REG_P (x) || register_operand (x, mode))
19524 {
19525 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19526 bool altivec_p = (rclass == ALTIVEC_REGS);
19527 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19528
19529 if (!in_p)
19530 std::swap (to_type, from_type);
19531
19532 /* Can we do a direct move of some sort? */
19533 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19534 altivec_p))
19535 {
19536 icode = (enum insn_code)sri->icode;
19537 default_p = false;
19538 done_p = true;
19539 ret = NO_REGS;
19540 }
19541 }
19542
19543 /* Make sure 0.0 is not reloaded or forced into memory. */
19544 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19545 {
19546 ret = NO_REGS;
19547 default_p = false;
19548 done_p = true;
19549 }
19550
19551 /* If this is a scalar floating point value and we want to load it into the
19552 traditional Altivec registers, do it via a move via a traditional floating
19553 point register, unless we have D-form addressing. Also make sure that
19554 non-zero constants use a FPR. */
19555 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19556 && !mode_supports_vmx_dform (mode)
19557 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19558 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
19559 {
19560 ret = FLOAT_REGS;
19561 default_p = false;
19562 done_p = true;
19563 }
19564
19565 /* Handle reload of load/stores if we have reload helper functions. */
19566 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19567 {
19568 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19569 mode);
19570
19571 if (extra_cost >= 0)
19572 {
19573 done_p = true;
19574 ret = NO_REGS;
19575 if (extra_cost > 0)
19576 {
19577 sri->extra_cost = extra_cost;
19578 sri->icode = icode;
19579 }
19580 }
19581 }
19582
19583 /* Handle unaligned loads and stores of integer registers. */
19584 if (!done_p && TARGET_POWERPC64
19585 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19586 && memory_p
19587 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19588 {
19589 rtx addr = XEXP (x, 0);
19590 rtx off = address_offset (addr);
19591
19592 if (off != NULL_RTX)
19593 {
19594 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19595 unsigned HOST_WIDE_INT offset = INTVAL (off);
19596
19597 /* We need a secondary reload when our legitimate_address_p
19598 says the address is good (as otherwise the entire address
19599 will be reloaded), and the offset is not a multiple of
19600 four or we have an address wrap. Address wrap will only
19601 occur for LO_SUMs since legitimate_offset_address_p
19602 rejects addresses for 16-byte mems that will wrap. */
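/* For example, "ld r3,6(r4)" is not a valid DS-form address, since
the 8-byte load requires the displacement to be a multiple of 4;
such an offset forces the address through the scratch register via
the reload helper insns chosen below. */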
19603 if (GET_CODE (addr) == LO_SUM
19604 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19605 && ((offset & 3) != 0
19606 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19607 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19608 && (offset & 3) != 0))
19609 {
19610 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19611 if (in_p)
19612 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19613 : CODE_FOR_reload_di_load);
19614 else
19615 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19616 : CODE_FOR_reload_di_store);
19617 sri->extra_cost = 2;
19618 ret = NO_REGS;
19619 done_p = true;
19620 }
19621 else
19622 default_p = true;
19623 }
19624 else
19625 default_p = true;
19626 }
19627
19628 if (!done_p && !TARGET_POWERPC64
19629 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19630 && memory_p
19631 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19632 {
19633 rtx addr = XEXP (x, 0);
19634 rtx off = address_offset (addr);
19635
19636 if (off != NULL_RTX)
19637 {
19638 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19639 unsigned HOST_WIDE_INT offset = INTVAL (off);
19640
19641 /* We need a secondary reload when our legitimate_address_p
19642 says the address is good (as otherwise the entire address
19643 will be reloaded), and we have a wrap.
19644
19645 legitimate_lo_sum_address_p allows LO_SUM addresses to
19646 have any offset so test for wrap in the low 16 bits.
19647
19648 legitimate_offset_address_p checks for the range
19649 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19650 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19651 [0x7ff4,0x7fff] respectively, so test for the
19652 intersection of these ranges, [0x7ffc,0x7fff] and
19653 [0x7ff4,0x7ff7] respectively.
19654
19655 Note that the address we see here may have been
19656 manipulated by legitimize_reload_address. */
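/* For example, an 8-byte value at offset 0x7ffc in 32-bit mode loads
its first word from 0x7ffc, but the second word would need offset
0x8000, which no longer fits in the 16-bit signed displacement, so
indirect addressing through the scratch register is used instead. */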
19657 if (GET_CODE (addr) == LO_SUM
19658 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19659 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19660 {
19661 if (in_p)
19662 sri->icode = CODE_FOR_reload_si_load;
19663 else
19664 sri->icode = CODE_FOR_reload_si_store;
19665 sri->extra_cost = 2;
19666 ret = NO_REGS;
19667 done_p = true;
19668 }
19669 else
19670 default_p = true;
19671 }
19672 else
19673 default_p = true;
19674 }
19675
19676 if (!done_p)
19677 default_p = true;
19678
19679 if (default_p)
19680 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19681
19682 gcc_assert (ret != ALL_REGS);
19683
19684 if (TARGET_DEBUG_ADDR)
19685 {
19686 fprintf (stderr,
19687 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19688 "mode = %s",
19689 reg_class_names[ret],
19690 in_p ? "true" : "false",
19691 reg_class_names[rclass],
19692 GET_MODE_NAME (mode));
19693
19694 if (reload_completed)
19695 fputs (", after reload", stderr);
19696
19697 if (!done_p)
19698 fputs (", done_p not set", stderr);
19699
19700 if (default_p)
19701 fputs (", default secondary reload", stderr);
19702
19703 if (sri->icode != CODE_FOR_nothing)
19704 fprintf (stderr, ", reload func = %s, extra cost = %d",
19705 insn_data[sri->icode].name, sri->extra_cost);
19706
19707 else if (sri->extra_cost > 0)
19708 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19709
19710 fputs ("\n", stderr);
19711 debug_rtx (x);
19712 }
19713
19714 return ret;
19715 }
19716
19717 /* Better tracing for rs6000_secondary_reload_inner. */
19718
19719 static void
19720 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19721 bool store_p)
19722 {
19723 rtx set, clobber;
19724
19725 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19726
19727 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19728 store_p ? "store" : "load");
19729
19730 if (store_p)
19731 set = gen_rtx_SET (mem, reg);
19732 else
19733 set = gen_rtx_SET (reg, mem);
19734
19735 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19736 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19737 }
19738
19739 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19740 ATTRIBUTE_NORETURN;
19741
19742 static void
19743 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19744 bool store_p)
19745 {
19746 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19747 gcc_unreachable ();
19748 }
19749
19750 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19751 reload helper functions. These were identified in
19752 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19753 reload, it calls the insns:
19754 reload_<RELOAD:mode>_<P:mptrsize>_store
19755 reload_<RELOAD:mode>_<P:mptrsize>_load
19756
19757 which in turn calls this function, to do whatever is necessary to create
19758 valid addresses. */
19759
19760 void
19761 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19762 {
19763 int regno = true_regnum (reg);
19764 machine_mode mode = GET_MODE (reg);
19765 addr_mask_type addr_mask;
19766 rtx addr;
19767 rtx new_addr;
19768 rtx op_reg, op0, op1;
19769 rtx and_op;
19770 rtx cc_clobber;
19771 rtvec rv;
19772
19773 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
19774 || !base_reg_operand (scratch, GET_MODE (scratch)))
19775 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19776
19777 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19778 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19779
19780 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19781 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19782
19783 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19784 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19785
19786 else
19787 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19788
19789 /* Make sure the mode is valid in this register class. */
19790 if ((addr_mask & RELOAD_REG_VALID) == 0)
19791 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19792
19793 if (TARGET_DEBUG_ADDR)
19794 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19795
19796 new_addr = addr = XEXP (mem, 0);
19797 switch (GET_CODE (addr))
19798 {
19799 /* Does the register class support auto update forms for this mode? If
19800 not, do the update now. We don't need a scratch register, since the
19801 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19802 case PRE_INC:
19803 case PRE_DEC:
19804 op_reg = XEXP (addr, 0);
19805 if (!base_reg_operand (op_reg, Pmode))
19806 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19807
19808 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19809 {
19810 int delta = GET_MODE_SIZE (mode);
19811 if (GET_CODE (addr) == PRE_DEC)
19812 delta = -delta;
19813 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19814 new_addr = op_reg;
19815 }
19816 break;
19817
19818 case PRE_MODIFY:
19819 op0 = XEXP (addr, 0);
19820 op1 = XEXP (addr, 1);
19821 if (!base_reg_operand (op0, Pmode)
19822 || GET_CODE (op1) != PLUS
19823 || !rtx_equal_p (op0, XEXP (op1, 0)))
19824 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19825
19826 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19827 {
19828 emit_insn (gen_rtx_SET (op0, op1));
19829 new_addr = reg;
19830 }
19831 break;
19832
19833 /* Do we need to simulate AND -16 to clear the bottom address bits used
19834 in VMX load/stores? */
19835 case AND:
19836 op0 = XEXP (addr, 0);
19837 op1 = XEXP (addr, 1);
19838 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19839 {
19840 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
19841 op_reg = op0;
19842
19843 else if (GET_CODE (op1) == PLUS)
19844 {
19845 emit_insn (gen_rtx_SET (scratch, op1));
19846 op_reg = scratch;
19847 }
19848
19849 else
19850 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19851
19852 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19853 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19854 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19855 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19856 new_addr = scratch;
19857 }
19858 break;
19859
19860 /* If this is an indirect address, make sure it is a base register. */
19861 case REG:
19862 case SUBREG:
19863 if (!base_reg_operand (addr, GET_MODE (addr)))
19864 {
19865 emit_insn (gen_rtx_SET (scratch, addr));
19866 new_addr = scratch;
19867 }
19868 break;
19869
19870 /* If this is an indexed address, make sure the register class can handle
19871 indexed addresses for this mode. */
19872 case PLUS:
19873 op0 = XEXP (addr, 0);
19874 op1 = XEXP (addr, 1);
19875 if (!base_reg_operand (op0, Pmode))
19876 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19877
19878 else if (int_reg_operand (op1, Pmode))
19879 {
19880 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19881 {
19882 emit_insn (gen_rtx_SET (scratch, addr));
19883 new_addr = scratch;
19884 }
19885 }
19886
19887 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19888 {
19889 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19890 || !quad_address_p (addr, mode, false))
19891 {
19892 emit_insn (gen_rtx_SET (scratch, addr));
19893 new_addr = scratch;
19894 }
19895 }
19896
19897 /* Make sure the register class can handle offset addresses. */
19898 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19899 {
19900 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19901 {
19902 emit_insn (gen_rtx_SET (scratch, addr));
19903 new_addr = scratch;
19904 }
19905 }
19906
19907 else
19908 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19909
19910 break;
19911
19912 case LO_SUM:
19913 op0 = XEXP (addr, 0);
19914 op1 = XEXP (addr, 1);
19915 if (!base_reg_operand (op0, Pmode))
19916 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19917
19918 else if (int_reg_operand (op1, Pmode))
19919 {
19920 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19921 {
19922 emit_insn (gen_rtx_SET (scratch, addr));
19923 new_addr = scratch;
19924 }
19925 }
19926
19927 /* Quad offsets are restricted and can't handle normal addresses. */
19928 else if (mode_supports_dq_form (mode))
19929 {
19930 emit_insn (gen_rtx_SET (scratch, addr));
19931 new_addr = scratch;
19932 }
19933
19934 /* Make sure the register class can handle offset addresses. */
19935 else if (legitimate_lo_sum_address_p (mode, addr, false))
19936 {
19937 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19938 {
19939 emit_insn (gen_rtx_SET (scratch, addr));
19940 new_addr = scratch;
19941 }
19942 }
19943
19944 else
19945 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19946
19947 break;
19948
19949 case SYMBOL_REF:
19950 case CONST:
19951 case LABEL_REF:
19952 rs6000_emit_move (scratch, addr, Pmode);
19953 new_addr = scratch;
19954 break;
19955
19956 default:
19957 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19958 }
19959
19960 /* Adjust the address if it changed. */
19961 if (addr != new_addr)
19962 {
19963 mem = replace_equiv_address_nv (mem, new_addr);
19964 if (TARGET_DEBUG_ADDR)
19965 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19966 }
19967
19968 /* Now create the move. */
19969 if (store_p)
19970 emit_insn (gen_rtx_SET (mem, reg));
19971 else
19972 emit_insn (gen_rtx_SET (reg, mem));
19973
19974 return;
19975 }
19976
19977 /* Convert reloads involving 64-bit gprs and misaligned offset
19978 addressing, or multiple 32-bit gprs and offsets that are too large,
19979 to use indirect addressing. */
19980
19981 void
19982 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19983 {
19984 int regno = true_regnum (reg);
19985 enum reg_class rclass;
19986 rtx addr;
19987 rtx scratch_or_premodify = scratch;
19988
19989 if (TARGET_DEBUG_ADDR)
19990 {
19991 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19992 store_p ? "store" : "load");
19993 fprintf (stderr, "reg:\n");
19994 debug_rtx (reg);
19995 fprintf (stderr, "mem:\n");
19996 debug_rtx (mem);
19997 fprintf (stderr, "scratch:\n");
19998 debug_rtx (scratch);
19999 }
20000
20001 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
20002 gcc_assert (GET_CODE (mem) == MEM);
20003 rclass = REGNO_REG_CLASS (regno);
20004 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20005 addr = XEXP (mem, 0);
20006
20007 if (GET_CODE (addr) == PRE_MODIFY)
20008 {
20009 gcc_assert (REG_P (XEXP (addr, 0))
20010 && GET_CODE (XEXP (addr, 1)) == PLUS
20011 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20012 scratch_or_premodify = XEXP (addr, 0);
20013 addr = XEXP (addr, 1);
20014 }
20015 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20016
20017 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20018
20019 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20020
20021 /* Now create the move. */
20022 if (store_p)
20023 emit_insn (gen_rtx_SET (mem, reg));
20024 else
20025 emit_insn (gen_rtx_SET (reg, mem));
20026
20027 return;
20028 }
20029
20030 /* Given an rtx X being reloaded into a reg required to be
20031 in class CLASS, return the class of reg to actually use.
20032 In general this is just CLASS; but on some machines
20033 in some cases it is preferable to use a more restrictive class.
20034
20035 On the RS/6000, we have to return NO_REGS when we want to reload a
20036 floating-point CONST_DOUBLE to force it to be copied to memory.
20037
20038 We also don't want to reload integer values into floating-point
20039 registers if we can at all help it. In fact, this can
20040 cause reload to die, if it tries to generate a reload of CTR
20041 into a FP register and discovers it doesn't have the memory location
20042 required.
20043
20044 ??? Would it be a good idea to have reload do the converse, that is
20045 try to reload floating modes into FP registers if possible?
20046 */
20047
20048 static enum reg_class
20049 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20050 {
20051 machine_mode mode = GET_MODE (x);
20052 bool is_constant = CONSTANT_P (x);
20053
20054 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20055 reload class for it. */
20056 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20057 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20058 return NO_REGS;
20059
20060 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20061 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20062 return NO_REGS;
20063
20064 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20065 the reloading of address expressions using PLUS into floating point
20066 registers. */
20067 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20068 {
20069 if (is_constant)
20070 {
20071 /* Zero is always allowed in all VSX registers. */
20072 if (x == CONST0_RTX (mode))
20073 return rclass;
20074
20075 /* If this is a vector constant that can be formed with a few Altivec
20076 instructions, we want altivec registers. */
20077 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20078 return ALTIVEC_REGS;
20079
20080 /* If this is an integer constant that can easily be loaded into
20081 vector registers, allow it. */
20082 if (CONST_INT_P (x))
20083 {
20084 HOST_WIDE_INT value = INTVAL (x);
20085
20086 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20087 2.06 can generate it in the Altivec registers with
20088 VSPLTI<x>. */
20089 if (value == -1)
20090 {
20091 if (TARGET_P8_VECTOR)
20092 return rclass;
20093 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20094 return ALTIVEC_REGS;
20095 else
20096 return NO_REGS;
20097 }
20098
20099 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20100 a sign extend in the Altivec registers. */
20101 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20102 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20103 return ALTIVEC_REGS;
20104 }
20105
20106 /* Force constant to memory. */
20107 return NO_REGS;
20108 }
20109
20110 /* D-form addressing can easily reload the value. */
20111 if (mode_supports_vmx_dform (mode)
20112 || mode_supports_dq_form (mode))
20113 return rclass;
20114
20115 /* If this is a scalar floating point value and we don't have D-form
20116 addressing, prefer the traditional floating point registers so that we
20117 can use D-form (register+offset) addressing. */
20118 if (rclass == VSX_REGS
20119 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20120 return FLOAT_REGS;
20121
20122 /* Prefer the Altivec registers if Altivec is handling the vector
20123 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20124 loads. */
20125 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20126 || mode == V1TImode)
20127 return ALTIVEC_REGS;
20128
20129 return rclass;
20130 }
20131
20132 if (is_constant || GET_CODE (x) == PLUS)
20133 {
20134 if (reg_class_subset_p (GENERAL_REGS, rclass))
20135 return GENERAL_REGS;
20136 if (reg_class_subset_p (BASE_REGS, rclass))
20137 return BASE_REGS;
20138 return NO_REGS;
20139 }
20140
20141 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20142 return GENERAL_REGS;
20143
20144 return rclass;
20145 }
20146
20147 /* Debug version of rs6000_preferred_reload_class. */
20148 static enum reg_class
20149 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20150 {
20151 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20152
20153 fprintf (stderr,
20154 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20155 "mode = %s, x:\n",
20156 reg_class_names[ret], reg_class_names[rclass],
20157 GET_MODE_NAME (GET_MODE (x)));
20158 debug_rtx (x);
20159
20160 return ret;
20161 }
20162
20163 /* If we are copying between FP or AltiVec registers and anything else, we need
20164 a memory location. The exception is when we are targeting ppc64 and the
20165 direct move instructions between FPRs and GPRs are available. Also, under VSX, you
20166 can copy vector registers from the FP register set to the Altivec register
20167 set and vice versa. */
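/* For example, without direct moves a DImode copy between GENERAL_REGS
   and FLOAT_REGS must bounce through a stack slot;
   rs6000_secondary_reload_move (called below) reports whether a
   register-to-register sequence can be used instead.  */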
20168
20169 static bool
20170 rs6000_secondary_memory_needed (machine_mode mode,
20171 reg_class_t from_class,
20172 reg_class_t to_class)
20173 {
20174 enum rs6000_reg_type from_type, to_type;
20175 bool altivec_p = ((from_class == ALTIVEC_REGS)
20176 || (to_class == ALTIVEC_REGS));
20177
20178 /* If a simple/direct move is available, we don't need secondary memory.  */
20179 from_type = reg_class_to_reg_type[(int)from_class];
20180 to_type = reg_class_to_reg_type[(int)to_class];
20181
20182 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20183 (secondary_reload_info *)0, altivec_p))
20184 return false;
20185
20186 /* If we have a floating point or vector register class, we need to use
20187 memory to transfer the data. */
20188 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20189 return true;
20190
20191 return false;
20192 }
20193
20194 /* Debug version of rs6000_secondary_memory_needed. */
20195 static bool
20196 rs6000_debug_secondary_memory_needed (machine_mode mode,
20197 reg_class_t from_class,
20198 reg_class_t to_class)
20199 {
20200 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20201
20202 fprintf (stderr,
20203 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20204 "to_class = %s, mode = %s\n",
20205 ret ? "true" : "false",
20206 reg_class_names[from_class],
20207 reg_class_names[to_class],
20208 GET_MODE_NAME (mode));
20209
20210 return ret;
20211 }
20212
20213 /* Return the register class of a scratch register needed to copy IN into
20214 or out of a register in RCLASS in MODE. If it can be done directly,
20215 NO_REGS is returned. */
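/* For example, on ELF a SYMBOL_REF cannot be loaded directly into a
   floating point register, so BASE_REGS is returned to request a GPR
   intermediary, while a GPR-to-GPR copy needs no scratch register and
   yields NO_REGS.  */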
20216
20217 static enum reg_class
20218 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20219 rtx in)
20220 {
20221 int regno;
20222
20223 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20224 #if TARGET_MACHO
20225 && MACHOPIC_INDIRECT
20226 #endif
20227 ))
20228 {
20229 /* We cannot copy a symbolic operand directly into anything
20230 other than BASE_REGS for TARGET_ELF. So indicate that a
20231 register from BASE_REGS is needed as an intermediate
20232 register.
20233
20234 On Darwin, pic addresses require a load from memory, which
20235 needs a base register. */
20236 if (rclass != BASE_REGS
20237 && (GET_CODE (in) == SYMBOL_REF
20238 || GET_CODE (in) == HIGH
20239 || GET_CODE (in) == LABEL_REF
20240 || GET_CODE (in) == CONST))
20241 return BASE_REGS;
20242 }
20243
20244 if (GET_CODE (in) == REG)
20245 {
20246 regno = REGNO (in);
20247 if (regno >= FIRST_PSEUDO_REGISTER)
20248 {
20249 regno = true_regnum (in);
20250 if (regno >= FIRST_PSEUDO_REGISTER)
20251 regno = -1;
20252 }
20253 }
20254 else if (GET_CODE (in) == SUBREG)
20255 {
20256 regno = true_regnum (in);
20257 if (regno >= FIRST_PSEUDO_REGISTER)
20258 regno = -1;
20259 }
20260 else
20261 regno = -1;
20262
20263 /* If we have VSX register moves, prefer moving scalar values between
20264 Altivec registers and GPR by going via an FPR (and then via memory)
20265 instead of reloading the secondary memory address for Altivec moves. */
20266 if (TARGET_VSX
20267 && GET_MODE_SIZE (mode) < 16
20268 && !mode_supports_vmx_dform (mode)
20269 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20270 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20271 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20272 && (regno >= 0 && INT_REGNO_P (regno)))))
20273 return FLOAT_REGS;
20274
20275 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20276 into anything. */
20277 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20278 || (regno >= 0 && INT_REGNO_P (regno)))
20279 return NO_REGS;
20280
20281 /* Constants, memory, and VSX registers can go into VSX registers (both the
20282 traditional floating point and the altivec registers). */
20283 if (rclass == VSX_REGS
20284 && (regno == -1 || VSX_REGNO_P (regno)))
20285 return NO_REGS;
20286
20287 /* Constants, memory, and FP registers can go into FP registers. */
20288 if ((regno == -1 || FP_REGNO_P (regno))
20289 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20290 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20291
20292 /* Memory, and AltiVec registers can go into AltiVec registers. */
20293 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20294 && rclass == ALTIVEC_REGS)
20295 return NO_REGS;
20296
20297 /* We can copy among the CR registers. */
20298 if ((rclass == CR_REGS || rclass == CR0_REGS)
20299 && regno >= 0 && CR_REGNO_P (regno))
20300 return NO_REGS;
20301
20302 /* Otherwise, we need GENERAL_REGS. */
20303 return GENERAL_REGS;
20304 }
20305
20306 /* Debug version of rs6000_secondary_reload_class. */
20307 static enum reg_class
20308 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20309 machine_mode mode, rtx in)
20310 {
20311 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20312 fprintf (stderr,
20313 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20314 "mode = %s, input rtx:\n",
20315 reg_class_names[ret], reg_class_names[rclass],
20316 GET_MODE_NAME (mode));
20317 debug_rtx (in);
20318
20319 return ret;
20320 }
20321
20322 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
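/* Broadly, mode-size-changing subregs are only allowed in the FP/VSX
   registers when both modes fill whole 64-bit or 128-bit registers:
   e.g. DImode <-> DFmode is fine, but DImode must not overlap a
   128-bit scalar mode whose value lives in the upper 64 bits of the
   register.  */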
20323
20324 static bool
20325 rs6000_can_change_mode_class (machine_mode from,
20326 machine_mode to,
20327 reg_class_t rclass)
20328 {
20329 unsigned from_size = GET_MODE_SIZE (from);
20330 unsigned to_size = GET_MODE_SIZE (to);
20331
20332 if (from_size != to_size)
20333 {
20334 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20335
20336 if (reg_classes_intersect_p (xclass, rclass))
20337 {
20338 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20339 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20340 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20341 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20342
20343 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20344 single register under VSX because the scalar part of the register
20345 is in the upper 64-bits, and not the lower 64-bits. Types like
20346 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20347 IEEE floating point can't overlap, and neither can small
20348 values. */
20349
20350 if (to_float128_vector_p && from_float128_vector_p)
20351 return true;
20352
20353 else if (to_float128_vector_p || from_float128_vector_p)
20354 return false;
20355
20356 /* TDmode in floating-mode registers must always go into a register
20357 pair with the most significant word in the even-numbered register
20358 to match ISA requirements. In little-endian mode, this does not
20359 match subreg numbering, so we cannot allow subregs. */
20360 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20361 return false;
20362
20363 if (from_size < 8 || to_size < 8)
20364 return false;
20365
20366 if (from_size == 8 && (8 * to_nregs) != to_size)
20367 return false;
20368
20369 if (to_size == 8 && (8 * from_nregs) != from_size)
20370 return false;
20371
20372 return true;
20373 }
20374 else
20375 return true;
20376 }
20377
20378 /* Since the VSX register set includes traditional floating point registers
20379 and altivec registers, just check for the size being different instead of
20380 trying to check whether the modes are vector modes. Otherwise it won't
20381 allow say DF and DI to change classes. For types like TFmode and TDmode
20382 that take 2 64-bit registers, rather than a single 128-bit register, don't
20383 allow subregs of those types to other 128 bit types. */
20384 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20385 {
20386 unsigned num_regs = (from_size + 15) / 16;
20387 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20388 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20389 return false;
20390
20391 return (from_size == 8 || from_size == 16);
20392 }
20393
20394 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20395 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20396 return false;
20397
20398 return true;
20399 }
20400
20401 /* Debug version of rs6000_can_change_mode_class. */
20402 static bool
20403 rs6000_debug_can_change_mode_class (machine_mode from,
20404 machine_mode to,
20405 reg_class_t rclass)
20406 {
20407 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20408
20409 fprintf (stderr,
20410 "rs6000_can_change_mode_class, return %s, from = %s, "
20411 "to = %s, rclass = %s\n",
20412 ret ? "true" : "false",
20413 GET_MODE_NAME (from), GET_MODE_NAME (to),
20414 reg_class_names[rclass]);
20415
20416 return ret;
20417 }
20418 \f
20419 /* Return a string to do a move operation of 128 bits of data. */
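/* For example, a VSX register-to-register copy emits
   "xxlor %x0,%x1,%x1", a GPR load emits "lq %0,%1" when
   TARGET_QUAD_MEMORY allows it, and a GPR-to-GPR move returns "#" so
   the insn is split into word-sized moves after reload.  */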
20420
20421 const char *
20422 rs6000_output_move_128bit (rtx operands[])
20423 {
20424 rtx dest = operands[0];
20425 rtx src = operands[1];
20426 machine_mode mode = GET_MODE (dest);
20427 int dest_regno;
20428 int src_regno;
20429 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20430 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20431
20432 if (REG_P (dest))
20433 {
20434 dest_regno = REGNO (dest);
20435 dest_gpr_p = INT_REGNO_P (dest_regno);
20436 dest_fp_p = FP_REGNO_P (dest_regno);
20437 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20438 dest_vsx_p = dest_fp_p | dest_vmx_p;
20439 }
20440 else
20441 {
20442 dest_regno = -1;
20443 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20444 }
20445
20446 if (REG_P (src))
20447 {
20448 src_regno = REGNO (src);
20449 src_gpr_p = INT_REGNO_P (src_regno);
20450 src_fp_p = FP_REGNO_P (src_regno);
20451 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20452 src_vsx_p = src_fp_p | src_vmx_p;
20453 }
20454 else
20455 {
20456 src_regno = -1;
20457 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20458 }
20459
20460 /* Register moves. */
20461 if (dest_regno >= 0 && src_regno >= 0)
20462 {
20463 if (dest_gpr_p)
20464 {
20465 if (src_gpr_p)
20466 return "#";
20467
20468 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20469 return (WORDS_BIG_ENDIAN
20470 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20471 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20472
20473 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20474 return "#";
20475 }
20476
20477 else if (TARGET_VSX && dest_vsx_p)
20478 {
20479 if (src_vsx_p)
20480 return "xxlor %x0,%x1,%x1";
20481
20482 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20483 return (WORDS_BIG_ENDIAN
20484 ? "mtvsrdd %x0,%1,%L1"
20485 : "mtvsrdd %x0,%L1,%1");
20486
20487 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20488 return "#";
20489 }
20490
20491 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20492 return "vor %0,%1,%1";
20493
20494 else if (dest_fp_p && src_fp_p)
20495 return "#";
20496 }
20497
20498 /* Loads. */
20499 else if (dest_regno >= 0 && MEM_P (src))
20500 {
20501 if (dest_gpr_p)
20502 {
20503 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20504 return "lq %0,%1";
20505 else
20506 return "#";
20507 }
20508
20509 else if (TARGET_ALTIVEC && dest_vmx_p
20510 && altivec_indexed_or_indirect_operand (src, mode))
20511 return "lvx %0,%y1";
20512
20513 else if (TARGET_VSX && dest_vsx_p)
20514 {
20515 if (mode_supports_dq_form (mode)
20516 && quad_address_p (XEXP (src, 0), mode, true))
20517 return "lxv %x0,%1";
20518
20519 else if (TARGET_P9_VECTOR)
20520 return "lxvx %x0,%y1";
20521
20522 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20523 return "lxvw4x %x0,%y1";
20524
20525 else
20526 return "lxvd2x %x0,%y1";
20527 }
20528
20529 else if (TARGET_ALTIVEC && dest_vmx_p)
20530 return "lvx %0,%y1";
20531
20532 else if (dest_fp_p)
20533 return "#";
20534 }
20535
20536 /* Stores. */
20537 else if (src_regno >= 0 && MEM_P (dest))
20538 {
20539 if (src_gpr_p)
20540 {
20541 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20542 return "stq %1,%0";
20543 else
20544 return "#";
20545 }
20546
20547 else if (TARGET_ALTIVEC && src_vmx_p
20548 && altivec_indexed_or_indirect_operand (dest, mode))
20549 return "stvx %1,%y0";
20550
20551 else if (TARGET_VSX && src_vsx_p)
20552 {
20553 if (mode_supports_dq_form (mode)
20554 && quad_address_p (XEXP (dest, 0), mode, true))
20555 return "stxv %x1,%0";
20556
20557 else if (TARGET_P9_VECTOR)
20558 return "stxvx %x1,%y0";
20559
20560 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20561 return "stxvw4x %x1,%y0";
20562
20563 else
20564 return "stxvd2x %x1,%y0";
20565 }
20566
20567 else if (TARGET_ALTIVEC && src_vmx_p)
20568 return "stvx %1,%y0";
20569
20570 else if (src_fp_p)
20571 return "#";
20572 }
20573
20574 /* Constants. */
20575 else if (dest_regno >= 0
20576 && (GET_CODE (src) == CONST_INT
20577 || GET_CODE (src) == CONST_WIDE_INT
20578 || GET_CODE (src) == CONST_DOUBLE
20579 || GET_CODE (src) == CONST_VECTOR))
20580 {
20581 if (dest_gpr_p)
20582 return "#";
20583
20584 else if ((dest_vmx_p && TARGET_ALTIVEC)
20585 || (dest_vsx_p && TARGET_VSX))
20586 return output_vec_const_move (operands);
20587 }
20588
20589 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20590 }
20591
20592 /* Validate a 128-bit move. */
20593 bool
20594 rs6000_move_128bit_ok_p (rtx operands[])
20595 {
20596 machine_mode mode = GET_MODE (operands[0]);
20597 return (gpc_reg_operand (operands[0], mode)
20598 || gpc_reg_operand (operands[1], mode));
20599 }
20600
20601 /* Return true if a 128-bit move needs to be split. */
20602 bool
20603 rs6000_split_128bit_ok_p (rtx operands[])
20604 {
20605 if (!reload_completed)
20606 return false;
20607
20608 if (!gpr_or_gpr_p (operands[0], operands[1]))
20609 return false;
20610
20611 if (quad_load_store_p (operands[0], operands[1]))
20612 return false;
20613
20614 return true;
20615 }
20616
20617 \f
20618 /* Given a comparison operation, return the bit number in CCR to test. We
20619 know this is a valid comparison.
20620
20621 SCC_P is 1 if this is for an scc. That means that %D will have been
20622 used instead of %C, so the bits will be in different places.
20623
20624 Return -1 if OP isn't a valid comparison for some reason. */
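/* For example, a GT test in CR field N tests bit 4*N + 1, while with
   SCC_P a GE test has been moved by cror into the unordered slot and
   therefore tests bit 4*N + 3.  */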
20625
20626 int
20627 ccr_bit (rtx op, int scc_p)
20628 {
20629 enum rtx_code code = GET_CODE (op);
20630 machine_mode cc_mode;
20631 int cc_regnum;
20632 int base_bit;
20633 rtx reg;
20634
20635 if (!COMPARISON_P (op))
20636 return -1;
20637
20638 reg = XEXP (op, 0);
20639
20640 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20641 return -1;
20642
20643 cc_mode = GET_MODE (reg);
20644 cc_regnum = REGNO (reg);
20645 base_bit = 4 * (cc_regnum - CR0_REGNO);
20646
20647 validate_condition_mode (code, cc_mode);
20648
20649 /* When generating a sCOND operation, only positive conditions are
20650 allowed. */
20651 if (scc_p)
20652 switch (code)
20653 {
20654 case EQ:
20655 case GT:
20656 case LT:
20657 case UNORDERED:
20658 case GTU:
20659 case LTU:
20660 break;
20661 default:
20662 return -1;
20663 }
20664
20665 switch (code)
20666 {
20667 case NE:
20668 return scc_p ? base_bit + 3 : base_bit + 2;
20669 case EQ:
20670 return base_bit + 2;
20671 case GT: case GTU: case UNLE:
20672 return base_bit + 1;
20673 case LT: case LTU: case UNGE:
20674 return base_bit;
20675 case ORDERED: case UNORDERED:
20676 return base_bit + 3;
20677
20678 case GE: case GEU:
20679 /* If scc, we will have done a cror to put the bit in the
20680 unordered position. So test that bit. For integer, this is ! LT
20681 unless this is an scc insn. */
20682 return scc_p ? base_bit + 3 : base_bit;
20683
20684 case LE: case LEU:
20685 return scc_p ? base_bit + 3 : base_bit + 1;
20686
20687 default:
20688 return -1;
20689 }
20690 }
20691 \f
20692 /* Return the GOT register. */
20693
20694 rtx
20695 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20696 {
20697 /* The second flow pass currently (June 1999) can't update
20698 regs_ever_live without disturbing other parts of the compiler, so
20699 update it here to make the prolog/epilogue code happy. */
20700 if (!can_create_pseudo_p ()
20701 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20702 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20703
20704 crtl->uses_pic_offset_table = 1;
20705
20706 return pic_offset_table_rtx;
20707 }
20708 \f
20709 static rs6000_stack_t stack_info;
20710
20711 /* Function to init struct machine_function.
20712 This will be called, via a pointer variable,
20713 from push_function_context. */
20714
20715 static struct machine_function *
20716 rs6000_init_machine_status (void)
20717 {
20718 stack_info.reload_completed = 0;
20719 return ggc_cleared_alloc<machine_function> ();
20720 }
20721 \f
20722 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
20723
20724 /* Write out a function code label. */
20725
20726 void
20727 rs6000_output_function_entry (FILE *file, const char *fname)
20728 {
20729 if (fname[0] != '.')
20730 {
20731 switch (DEFAULT_ABI)
20732 {
20733 default:
20734 gcc_unreachable ();
20735
20736 case ABI_AIX:
20737 if (DOT_SYMBOLS)
20738 putc ('.', file);
20739 else
20740 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20741 break;
20742
20743 case ABI_ELFv2:
20744 case ABI_V4:
20745 case ABI_DARWIN:
20746 break;
20747 }
20748 }
20749
20750 RS6000_OUTPUT_BASENAME (file, fname);
20751 }
20752
20753 /* Print an operand. Recognize special options, documented below. */
20754
20755 #if TARGET_ELF
20756 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20757 only introduced by the linker, when applying the sda21
20758 relocation. */
20759 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20760 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20761 #else
20762 #define SMALL_DATA_RELOC "sda21"
20763 #define SMALL_DATA_REG 0
20764 #endif
20765
20766 void
20767 print_operand (FILE *file, rtx x, int code)
20768 {
20769 int i;
20770 unsigned HOST_WIDE_INT uval;
20771
20772 switch (code)
20773 {
20774 /* %a is output_address. */
20775
20776 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20777 output_operand. */
20778
20779 case 'D':
20780 /* Like 'J' but get to the GT bit only. */
20781 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20782 {
20783 output_operand_lossage ("invalid %%D value");
20784 return;
20785 }
20786
20787 /* Bit 1 is GT bit. */
20788 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20789
20790 /* Add one for shift count in rlinm for scc. */
20791 fprintf (file, "%d", i + 1);
20792 return;
20793
20794 case 'e':
20795 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20796 if (! INT_P (x))
20797 {
20798 output_operand_lossage ("invalid %%e value");
20799 return;
20800 }
20801
20802 uval = INTVAL (x);
20803 if ((uval & 0xffff) == 0 && uval != 0)
20804 putc ('s', file);
20805 return;
20806
20807 case 'E':
20808 /* X is a CR register. Print the number of the EQ bit of the CR.  */
20809 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20810 output_operand_lossage ("invalid %%E value");
20811 else
20812 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20813 return;
20814
20815 case 'f':
20816 /* X is a CR register. Print the shift count needed to move it
20817 to the high-order four bits. */
20818 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20819 output_operand_lossage ("invalid %%f value");
20820 else
20821 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20822 return;
20823
20824 case 'F':
20825 /* Similar, but print the count for the rotate in the opposite
20826 direction. */
20827 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20828 output_operand_lossage ("invalid %%F value");
20829 else
20830 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20831 return;
20832
20833 case 'G':
20834 /* X is a constant integer. If it is negative, print "m",
20835 otherwise print "z". This is to make an aze or ame insn. */
20836 if (GET_CODE (x) != CONST_INT)
20837 output_operand_lossage ("invalid %%G value");
20838 else if (INTVAL (x) >= 0)
20839 putc ('z', file);
20840 else
20841 putc ('m', file);
20842 return;
20843
20844 case 'h':
20845 /* If constant, output low-order five bits. Otherwise, write
20846 normally. */
20847 if (INT_P (x))
20848 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20849 else
20850 print_operand (file, x, 0);
20851 return;
20852
20853 case 'H':
20854 /* If constant, output low-order six bits. Otherwise, write
20855 normally. */
20856 if (INT_P (x))
20857 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20858 else
20859 print_operand (file, x, 0);
20860 return;
20861
20862 case 'I':
20863 /* Print `i' if this is a constant, else nothing. */
20864 if (INT_P (x))
20865 putc ('i', file);
20866 return;
20867
20868 case 'j':
20869 /* Write the bit number in CCR for jump. */
20870 i = ccr_bit (x, 0);
20871 if (i == -1)
20872 output_operand_lossage ("invalid %%j code");
20873 else
20874 fprintf (file, "%d", i);
20875 return;
20876
20877 case 'J':
20878 /* Similar, but add one for shift count in rlinm for scc and pass
20879 scc flag to `ccr_bit'. */
20880 i = ccr_bit (x, 1);
20881 if (i == -1)
20882 output_operand_lossage ("invalid %%J code");
20883 else
20884 /* If we want bit 31, write a shift count of zero, not 32. */
20885 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20886 return;
20887
20888 case 'k':
20889 /* X must be a constant. Write the 1's complement of the
20890 constant. */
20891 if (! INT_P (x))
20892 output_operand_lossage ("invalid %%k value");
20893 else
20894 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20895 return;
20896
20897 case 'K':
20898 /* X must be a symbolic constant on ELF. Write an
20899 expression suitable for an 'addi' that adds in the low 16
20900 bits of the MEM. */
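/* E.g. for (const (plus (symbol_ref ("x")) (const_int 4))) this
   prints roughly "x+4@l".  */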
20901 if (GET_CODE (x) == CONST)
20902 {
20903 if (GET_CODE (XEXP (x, 0)) != PLUS
20904 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
20905 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20906 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
20907 output_operand_lossage ("invalid %%K value");
20908 }
20909 print_operand_address (file, x);
20910 fputs ("@l", file);
20911 return;
20912
20913 /* %l is output_asm_label. */
20914
20915 case 'L':
20916 /* Write second word of DImode or DFmode reference. Works on register
20917 or non-indexed memory only. */
20918 if (REG_P (x))
20919 fputs (reg_names[REGNO (x) + 1], file);
20920 else if (MEM_P (x))
20921 {
20922 machine_mode mode = GET_MODE (x);
20923 /* Handle possible auto-increment. Since it is pre-increment and
20924 we have already done it, we can just use an offset of one word. */
20925 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20926 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20927 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20928 UNITS_PER_WORD));
20929 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20930 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20931 UNITS_PER_WORD));
20932 else
20933 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20934 UNITS_PER_WORD),
20935 0));
20936
20937 if (small_data_operand (x, GET_MODE (x)))
20938 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20939 reg_names[SMALL_DATA_REG]);
20940 }
20941 return;
20942
20943 case 'N': /* Unused */
20944 /* Write the number of elements in the vector times 4. */
20945 if (GET_CODE (x) != PARALLEL)
20946 output_operand_lossage ("invalid %%N value");
20947 else
20948 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20949 return;
20950
20951 case 'O': /* Unused */
20952 /* Similar, but subtract 1 first. */
20953 if (GET_CODE (x) != PARALLEL)
20954 output_operand_lossage ("invalid %%O value");
20955 else
20956 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20957 return;
20958
20959 case 'p':
20960 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20961 if (! INT_P (x)
20962 || INTVAL (x) < 0
20963 || (i = exact_log2 (INTVAL (x))) < 0)
20964 output_operand_lossage ("invalid %%p value");
20965 else
20966 fprintf (file, "%d", i);
20967 return;
20968
20969 case 'P':
20970 /* The operand must be an indirect memory reference. The result
20971 is the register name. */
20972 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
20973 || REGNO (XEXP (x, 0)) >= 32)
20974 output_operand_lossage ("invalid %%P value");
20975 else
20976 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20977 return;
20978
20979 case 'q':
20980 /* This outputs the logical code corresponding to a boolean
20981 expression. The expression may have one or both operands
20982 negated (if one, only the first one). For condition register
20983 logical operations, it will also treat the negated
20984 CR codes as NOTs, but not handle NOTs of them. */
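/* E.g. (and (not (reg)) (reg)) prints "andc", and
   (ior (not (reg)) (not (reg))) prints "nand".  */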
20985 {
20986 const char *const *t = 0;
20987 const char *s;
20988 enum rtx_code code = GET_CODE (x);
20989 static const char * const tbl[3][3] = {
20990 { "and", "andc", "nor" },
20991 { "or", "orc", "nand" },
20992 { "xor", "eqv", "xor" } };
20993
20994 if (code == AND)
20995 t = tbl[0];
20996 else if (code == IOR)
20997 t = tbl[1];
20998 else if (code == XOR)
20999 t = tbl[2];
21000 else
21001 output_operand_lossage ("invalid %%q value");
21002
21003 if (GET_CODE (XEXP (x, 0)) != NOT)
21004 s = t[0];
21005 else
21006 {
21007 if (GET_CODE (XEXP (x, 1)) == NOT)
21008 s = t[2];
21009 else
21010 s = t[1];
21011 }
21012
21013 fputs (s, file);
21014 }
21015 return;
21016
21017 case 'Q':
21018 if (! TARGET_MFCRF)
21019 return;
21020 fputc (',', file);
21021 /* FALLTHRU */
21022
21023 case 'R':
21024 /* X is a CR register. Print the mask for `mtcrf'. */
21025 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21026 output_operand_lossage ("invalid %%R value");
21027 else
21028 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21029 return;
21030
21031 case 's':
21032 /* Low 5 bits of 32 - value.  */
21033 if (! INT_P (x))
21034 output_operand_lossage ("invalid %%s value");
21035 else
21036 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21037 return;
21038
21039 case 't':
21040 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21041 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21042 {
21043 output_operand_lossage ("invalid %%t value");
21044 return;
21045 }
21046
21047 /* Bit 3 is OV bit. */
21048 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21049
21050 /* If we want bit 31, write a shift count of zero, not 32. */
21051 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21052 return;
21053
21054 case 'T':
21055 /* Print the symbolic name of a branch target register. */
21056 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21057 x = XVECEXP (x, 0, 0);
21058 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
21059 && REGNO (x) != CTR_REGNO))
21060 output_operand_lossage ("invalid %%T value");
21061 else if (REGNO (x) == LR_REGNO)
21062 fputs ("lr", file);
21063 else
21064 fputs ("ctr", file);
21065 return;
21066
21067 case 'u':
21068 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21069 for use in unsigned operand. */
21070 if (! INT_P (x))
21071 {
21072 output_operand_lossage ("invalid %%u value");
21073 return;
21074 }
21075
21076 uval = INTVAL (x);
21077 if ((uval & 0xffff) == 0)
21078 uval >>= 16;
21079
21080 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21081 return;
21082
21083 case 'v':
21084 /* High-order 16 bits of constant for use in signed operand. */
21085 if (! INT_P (x))
21086 output_operand_lossage ("invalid %%v value");
21087 else
21088 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21089 (INTVAL (x) >> 16) & 0xffff);
21090 return;
21091
21092 case 'U':
21093 /* Print `u' if this has an auto-increment or auto-decrement. */
21094 if (MEM_P (x)
21095 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21096 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21097 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21098 putc ('u', file);
21099 return;
21100
21101 case 'V':
21102 /* Print the trap code for this operand. */
21103 switch (GET_CODE (x))
21104 {
21105 case EQ:
21106 fputs ("eq", file); /* 4 */
21107 break;
21108 case NE:
21109 fputs ("ne", file); /* 24 */
21110 break;
21111 case LT:
21112 fputs ("lt", file); /* 16 */
21113 break;
21114 case LE:
21115 fputs ("le", file); /* 20 */
21116 break;
21117 case GT:
21118 fputs ("gt", file); /* 8 */
21119 break;
21120 case GE:
21121 fputs ("ge", file); /* 12 */
21122 break;
21123 case LTU:
21124 fputs ("llt", file); /* 2 */
21125 break;
21126 case LEU:
21127 fputs ("lle", file); /* 6 */
21128 break;
21129 case GTU:
21130 fputs ("lgt", file); /* 1 */
21131 break;
21132 case GEU:
21133 fputs ("lge", file); /* 5 */
21134 break;
21135 default:
21136 output_operand_lossage ("invalid %%V value");
21137 }
21138 break;
21139
21140 case 'w':
21141 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21142 normally. */
21143 if (INT_P (x))
21144 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21145 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21146 else
21147 print_operand (file, x, 0);
21148 return;
21149
21150 case 'x':
21151 /* X is a FPR or Altivec register used in a VSX context. */
21152 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
21153 output_operand_lossage ("invalid %%x value");
21154 else
21155 {
21156 int reg = REGNO (x);
21157 int vsx_reg = (FP_REGNO_P (reg)
21158 ? reg - 32
21159 : reg - FIRST_ALTIVEC_REGNO + 32);
21160
21161 #ifdef TARGET_REGNAMES
21162 if (TARGET_REGNAMES)
21163 fprintf (file, "%%vs%d", vsx_reg);
21164 else
21165 #endif
21166 fprintf (file, "%d", vsx_reg);
21167 }
21168 return;
21169
21170 case 'X':
21171 if (MEM_P (x)
21172 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21173 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21174 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21175 putc ('x', file);
21176 return;
21177
21178 case 'Y':
21179 /* Like 'L', for third word of TImode/PTImode.  */
21180 if (REG_P (x))
21181 fputs (reg_names[REGNO (x) + 2], file);
21182 else if (MEM_P (x))
21183 {
21184 machine_mode mode = GET_MODE (x);
21185 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21186 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21187 output_address (mode, plus_constant (Pmode,
21188 XEXP (XEXP (x, 0), 0), 8));
21189 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21190 output_address (mode, plus_constant (Pmode,
21191 XEXP (XEXP (x, 0), 0), 8));
21192 else
21193 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21194 if (small_data_operand (x, GET_MODE (x)))
21195 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21196 reg_names[SMALL_DATA_REG]);
21197 }
21198 return;
21199
21200 case 'z':
21201 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21202 x = XVECEXP (x, 0, 1);
21203 /* X is a SYMBOL_REF. Write out the name preceded by a
21204 period and without any trailing data in brackets. Used for function
21205 names. If we are configured for System V (or the embedded ABI) on
21206 the PowerPC, do not emit the period, since those systems do not use
21207 TOCs and the like. */
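/* E.g. on AIX with dot-symbols, "%z0" for function foo prints ".foo",
   whereas System V prints plain "foo".  */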
21208 if (!SYMBOL_REF_P (x))
21209 {
21210 output_operand_lossage ("invalid %%z value");
21211 return;
21212 }
21213
21214 /* For macho, check to see if we need a stub. */
21215 if (TARGET_MACHO)
21216 {
21217 const char *name = XSTR (x, 0);
21218 #if TARGET_MACHO
21219 if (darwin_emit_branch_islands
21220 && MACHOPIC_INDIRECT
21221 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21222 name = machopic_indirection_name (x, /*stub_p=*/true);
21223 #endif
21224 assemble_name (file, name);
21225 }
21226 else if (!DOT_SYMBOLS)
21227 assemble_name (file, XSTR (x, 0));
21228 else
21229 rs6000_output_function_entry (file, XSTR (x, 0));
21230 return;
21231
21232 case 'Z':
21233 /* Like 'L', for last word of TImode/PTImode. */
21234 if (REG_P (x))
21235 fputs (reg_names[REGNO (x) + 3], file);
21236 else if (MEM_P (x))
21237 {
21238 machine_mode mode = GET_MODE (x);
21239 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21240 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21241 output_address (mode, plus_constant (Pmode,
21242 XEXP (XEXP (x, 0), 0), 12));
21243 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21244 output_address (mode, plus_constant (Pmode,
21245 XEXP (XEXP (x, 0), 0), 12));
21246 else
21247 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21248 if (small_data_operand (x, GET_MODE (x)))
21249 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21250 reg_names[SMALL_DATA_REG]);
21251 }
21252 return;
21253
21254 /* Print AltiVec memory operand. */
21255 case 'y':
21256 {
21257 rtx tmp;
21258
21259 gcc_assert (MEM_P (x));
21260
21261 tmp = XEXP (x, 0);
21262
21263 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21264 && GET_CODE (tmp) == AND
21265 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
21266 && INTVAL (XEXP (tmp, 1)) == -16)
21267 tmp = XEXP (tmp, 0);
21268 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21269 && GET_CODE (tmp) == PRE_MODIFY)
21270 tmp = XEXP (tmp, 1);
21271 if (REG_P (tmp))
21272 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21273 else
21274 {
21275 if (GET_CODE (tmp) != PLUS
21276 || !REG_P (XEXP (tmp, 0))
21277 || !REG_P (XEXP (tmp, 1)))
21278 {
21279 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21280 break;
21281 }
21282
21283 if (REGNO (XEXP (tmp, 0)) == 0)
21284 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21285 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21286 else
21287 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21288 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21289 }
21290 break;
21291 }
21292
21293 case 0:
21294 if (REG_P (x))
21295 fprintf (file, "%s", reg_names[REGNO (x)]);
21296 else if (MEM_P (x))
21297 {
21298 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21299 know the width from the mode. */
21300 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21301 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21302 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21303 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21304 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21305 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21306 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21307 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21308 else
21309 output_address (GET_MODE (x), XEXP (x, 0));
21310 }
21311 else if (toc_relative_expr_p (x, false,
21312 &tocrel_base_oac, &tocrel_offset_oac))
21313 /* This hack along with a corresponding hack in
21314 rs6000_output_addr_const_extra arranges to output addends
21315 where the assembler expects to find them. eg.
21316 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21317 without this hack would be output as "x@toc+4". We
21318 want "x+4@toc". */
21319 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21320 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21321 output_addr_const (file, XVECEXP (x, 0, 0));
21322 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21323 output_addr_const (file, XVECEXP (x, 0, 1));
21324 else
21325 output_addr_const (file, x);
21326 return;
21327
21328 case '&':
21329 if (const char *name = get_some_local_dynamic_name ())
21330 assemble_name (file, name);
21331 else
21332 output_operand_lossage ("'%%&' used without any "
21333 "local dynamic TLS references");
21334 return;
21335
21336 default:
21337 output_operand_lossage ("invalid %%xn code");
21338 }
21339 }
21340 \f
21341 /* Print the address of an operand. */
21342
21343 void
21344 print_operand_address (FILE *file, rtx x)
21345 {
21346 if (REG_P (x))
21347 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21348 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
21349 || GET_CODE (x) == LABEL_REF)
21350 {
21351 output_addr_const (file, x);
21352 if (small_data_operand (x, GET_MODE (x)))
21353 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21354 reg_names[SMALL_DATA_REG]);
21355 else
21356 gcc_assert (!TARGET_TOC);
21357 }
21358 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21359 && REG_P (XEXP (x, 1)))
21360 {
21361 if (REGNO (XEXP (x, 0)) == 0)
21362 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21363 reg_names[ REGNO (XEXP (x, 0)) ]);
21364 else
21365 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21366 reg_names[ REGNO (XEXP (x, 1)) ]);
21367 }
21368 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21369 && GET_CODE (XEXP (x, 1)) == CONST_INT)
21370 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21371 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21372 #if TARGET_MACHO
21373 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21374 && CONSTANT_P (XEXP (x, 1)))
21375 {
21376 fprintf (file, "lo16(");
21377 output_addr_const (file, XEXP (x, 1));
21378 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21379 }
21380 #endif
21381 #if TARGET_ELF
21382 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21383 && CONSTANT_P (XEXP (x, 1)))
21384 {
21385 output_addr_const (file, XEXP (x, 1));
21386 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21387 }
21388 #endif
21389 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21390 {
21391 /* This hack along with a corresponding hack in
21392 rs6000_output_addr_const_extra arranges to output addends
21393 where the assembler expects to find them. eg.
21394 (lo_sum (reg 9)
21395 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21396 without this hack would be output as "x@toc+8@l(9)". We
21397 want "x+8@toc@l(9)". */
21398 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21399 if (GET_CODE (x) == LO_SUM)
21400 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21401 else
21402 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21403 }
21404 else
21405 output_addr_const (file, x);
21406 }
21407 \f
21408 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21409
21410 static bool
21411 rs6000_output_addr_const_extra (FILE *file, rtx x)
21412 {
21413 if (GET_CODE (x) == UNSPEC)
21414 switch (XINT (x, 1))
21415 {
21416 case UNSPEC_TOCREL:
21417 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
21418 && REG_P (XVECEXP (x, 0, 1))
21419 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21420 output_addr_const (file, XVECEXP (x, 0, 0));
21421 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21422 {
21423 if (INTVAL (tocrel_offset_oac) >= 0)
21424 fprintf (file, "+");
21425 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21426 }
21427 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21428 {
21429 putc ('-', file);
21430 assemble_name (file, toc_label_name);
21431 need_toc_init = 1;
21432 }
21433 else if (TARGET_ELF)
21434 fputs ("@toc", file);
21435 return true;
21436
21437 #if TARGET_MACHO
21438 case UNSPEC_MACHOPIC_OFFSET:
21439 output_addr_const (file, XVECEXP (x, 0, 0));
21440 putc ('-', file);
21441 machopic_output_function_base_name (file);
21442 return true;
21443 #endif
21444 }
21445 return false;
21446 }
21447 \f
21448 /* Target hook for assembling integer objects. The PowerPC version has
21449 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21450 is defined. It also needs to handle DI-mode objects on 64-bit
21451 targets. */
21452
21453 static bool
21454 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21455 {
21456 #ifdef RELOCATABLE_NEEDS_FIXUP
21457 /* Special handling for SI values. */
21458 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21459 {
21460 static int recurse = 0;
21461
21462 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21463 the .fixup section. Since the TOC section is already relocated, we
21464 don't need to mark it here. We used to skip the text section, but it
21465 should never be valid for relocated addresses to be placed in the text
21466 section. */
21467 if (DEFAULT_ABI == ABI_V4
21468 && (TARGET_RELOCATABLE || flag_pic > 1)
21469 && in_section != toc_section
21470 && !recurse
21471 && !CONST_SCALAR_INT_P (x)
21472 && CONSTANT_P (x))
21473 {
21474 char buf[256];
21475
21476 recurse = 1;
21477 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21478 fixuplabelno++;
21479 ASM_OUTPUT_LABEL (asm_out_file, buf);
21480 fprintf (asm_out_file, "\t.long\t(");
21481 output_addr_const (asm_out_file, x);
21482 fprintf (asm_out_file, ")@fixup\n");
21483 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21484 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21485 fprintf (asm_out_file, "\t.long\t");
21486 assemble_name (asm_out_file, buf);
21487 fprintf (asm_out_file, "\n\t.previous\n");
21488 recurse = 0;
21489 return true;
21490 }
21491 /* Remove initial .'s to turn a -mcall-aixdesc function
21492 address into the address of the descriptor, not the function
21493 itself. */
21494 else if (GET_CODE (x) == SYMBOL_REF
21495 && XSTR (x, 0)[0] == '.'
21496 && DEFAULT_ABI == ABI_AIX)
21497 {
21498 const char *name = XSTR (x, 0);
21499 while (*name == '.')
21500 name++;
21501
21502 fprintf (asm_out_file, "\t.long\t%s\n", name);
21503 return true;
21504 }
21505 }
21506 #endif /* RELOCATABLE_NEEDS_FIXUP */
21507 return default_assemble_integer (x, size, aligned_p);
21508 }
21509
21510 /* Return a template string for assembly to emit when making an
21511 external call. FUNOP is the call mem argument operand number. */
21512
21513 static const char *
21514 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21515 {
21516 /* -Wformat-overflow workaround, without which gcc thinks that %u
21517 might produce 10 digits. */
21518 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21519
21520 char arg[12];
21521 arg[0] = 0;
21522 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21523 {
21524 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21525 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21526 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21527 sprintf (arg, "(%%&@tlsld)");
21528 else
21529 gcc_unreachable ();
21530 }
21531
21532 /* The magic 32768 offset here corresponds to the offset of
21533 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
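/* E.g. with -msecure-plt and flag_pic == 2 on the V4 ABI, the call
   comes out roughly as "bl foo+32768@plt".  */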
21534 char z[11];
21535 sprintf (z, "%%z%u%s", funop,
21536 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21537 ? "+32768" : ""));
21538
21539 static char str[32]; /* 2 spare */
21540 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21541 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21542 sibcall ? "" : "\n\tnop");
21543 else if (DEFAULT_ABI == ABI_V4)
21544 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21545 flag_pic ? "@plt" : "");
21546 #if TARGET_MACHO
21547 /* If/when we remove the mlongcall option, we can share the AIX/ELFv2 case. */
21548 else if (DEFAULT_ABI == ABI_DARWIN)
21549 {
21550 /* The cookie is in operand func+2. */
21551 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21552 int cookie = INTVAL (operands[funop + 2]);
21553 if (cookie & CALL_LONG)
21554 {
21555 tree funname = get_identifier (XSTR (operands[funop], 0));
21556 tree labelname = get_prev_label (funname);
21557 gcc_checking_assert (labelname && !sibcall);
21558
21559 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21560 instruction will reach 'foo', otherwise link as 'bl L42'".
21561 "L42" should be a 'branch island', that will do a far jump to
21562 'foo'. Branch islands are generated in
21563 macho_branch_islands(). */
21564 sprintf (str, "jbsr %%z%u,%.10s", funop,
21565 IDENTIFIER_POINTER (labelname));
21566 }
21567 else
21568 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21569 after the call. */
21570 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21571 }
21572 #endif
21573 else
21574 gcc_unreachable ();
21575 return str;
21576 }
21577
21578 const char *
21579 rs6000_call_template (rtx *operands, unsigned int funop)
21580 {
21581 return rs6000_call_template_1 (operands, funop, false);
21582 }
21583
21584 const char *
21585 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21586 {
21587 return rs6000_call_template_1 (operands, funop, true);
21588 }
21589
21590 /* As above, for indirect calls. */
21591
21592 static const char *
21593 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21594 bool sibcall)
21595 {
21596 /* -Wformat-overflow workaround, without which gcc thinks that %u
21597 might produce 10 digits. */
21598 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21599
21600 static char str[144]; /* 1 spare */
21601 char *s = str;
21602 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21603
21604 if (DEFAULT_ABI == ABI_AIX)
21605 s += sprintf (s,
21606 "l%s 2,%%%u\n\t",
21607 ptrload, funop + 2);
21608
21609 /* We don't need the extra code to stop indirect call speculation if
21610 calling via LR. */
21611 bool speculate = (TARGET_MACHO
21612 || rs6000_speculate_indirect_jumps
21613 || (REG_P (operands[funop])
21614 && REGNO (operands[funop]) == LR_REGNO));
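/* When speculation must be blocked, the templates below emit "crset 2"
   ahead of the branch and use the hinted conditional forms
   "beqctrl-"/"beqlrl-" instead of a plain "bctrl", so the indirect
   branch cannot be taken speculatively.  */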
21615
21616 if (!TARGET_MACHO && HAVE_AS_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21617 {
21618 const char *rel64 = TARGET_64BIT ? "64" : "";
21619 char tls[29];
21620 tls[0] = 0;
21621 if (GET_CODE (operands[funop + 1]) == UNSPEC)
21622 {
21623 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21624 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21625 rel64, funop + 1);
21626 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21627 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21628 rel64);
21629 else
21630 gcc_unreachable ();
21631 }
21632
21633 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21634 && flag_pic == 2 ? "+32768" : "");
21635 if (!speculate)
21636 {
21637 s += sprintf (s,
21638 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21639 tls, rel64, funop, addend);
21640 s += sprintf (s, "crset 2\n\t");
21641 }
21642 s += sprintf (s,
21643 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21644 tls, rel64, funop, addend);
21645 }
21646 else if (!speculate)
21647 s += sprintf (s, "crset 2\n\t");
21648
21649 if (DEFAULT_ABI == ABI_AIX)
21650 {
21651 if (speculate)
21652 sprintf (s,
21653 "b%%T%ul\n\t"
21654 "l%s 2,%%%u(1)",
21655 funop, ptrload, funop + 3);
21656 else
21657 sprintf (s,
21658 "beq%%T%ul-\n\t"
21659 "l%s 2,%%%u(1)",
21660 funop, ptrload, funop + 3);
21661 }
21662 else if (DEFAULT_ABI == ABI_ELFv2)
21663 {
21664 if (speculate)
21665 sprintf (s,
21666 "b%%T%ul\n\t"
21667 "l%s 2,%%%u(1)",
21668 funop, ptrload, funop + 2);
21669 else
21670 sprintf (s,
21671 "beq%%T%ul-\n\t"
21672 "l%s 2,%%%u(1)",
21673 funop, ptrload, funop + 2);
21674 }
21675 else
21676 {
21677 if (speculate)
21678 sprintf (s,
21679 "b%%T%u%s",
21680 funop, sibcall ? "" : "l");
21681 else
21682 sprintf (s,
21683 "beq%%T%u%s-%s",
21684 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21685 }
21686 return str;
21687 }
21688
21689 const char *
21690 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21691 {
21692 return rs6000_indirect_call_template_1 (operands, funop, false);
21693 }
21694
21695 const char *
21696 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21697 {
21698 return rs6000_indirect_call_template_1 (operands, funop, true);
21699 }
21700
21701 #if HAVE_AS_PLTSEQ
21702 /* Output indirect call insns.
21703 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21704 const char *
21705 rs6000_pltseq_template (rtx *operands, int which)
21706 {
21707 const char *rel64 = TARGET_64BIT ? "64" : "";
21708 char tls[28];
21709 tls[0] = 0;
21710 if (GET_CODE (operands[3]) == UNSPEC)
21711 {
21712 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21713 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21714 rel64);
21715 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21716 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21717 rel64);
21718 else
21719 gcc_unreachable ();
21720 }
21721
21722 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21723 static char str[96]; /* 15 spare */
21724 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21725 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21726 && flag_pic == 2 ? "+32768" : "");
21727 switch (which)
21728 {
21729 case 0:
21730 sprintf (str,
21731 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21732 "st%s",
21733 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21734 break;
21735 case 1:
21736 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21737 sprintf (str,
21738 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21739 "lis %%0,0",
21740 tls, off, rel64);
21741 else
21742 sprintf (str,
21743 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21744 "addis %%0,%%1,0",
21745 tls, off, rel64, addend);
21746 break;
21747 case 2:
21748 sprintf (str,
21749 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21750 "l%s %%0,0(%%1)",
21751 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21752 TARGET_64BIT ? "d" : "wz");
21753 break;
21754 case 3:
21755 sprintf (str,
21756 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21757 "mtctr %%1",
21758 tls, rel64, addend);
21759 break;
21760 default:
21761 gcc_unreachable ();
21762 }
21763 return str;
21764 }
21765 #endif
21766
21767 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21768 /* Emit an assembler directive to set symbol visibility for DECL to
21769 VISIBILITY_TYPE. */
21770
21771 static void
21772 rs6000_assemble_visibility (tree decl, int vis)
21773 {
21774 if (TARGET_XCOFF)
21775 return;
21776
21777 /* Functions need to have their entry point symbol visibility set as
21778 well as their descriptor symbol visibility. */
21779 if (DEFAULT_ABI == ABI_AIX
21780 && DOT_SYMBOLS
21781 && TREE_CODE (decl) == FUNCTION_DECL)
21782 {
21783 static const char * const visibility_types[] = {
21784 NULL, "protected", "hidden", "internal"
21785 };
21786
21787 const char *name, *type;
21788
21789 name = ((* targetm.strip_name_encoding)
21790 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21791 type = visibility_types[vis];
21792
21793 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21794 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21795 }
21796 else
21797 default_assemble_visibility (decl, vis);
21798 }
21799 #endif
21800 \f
21801 enum rtx_code
21802 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21803 {
21804 /* Reversal of FP compares takes care -- an ordered compare
21805 becomes an unordered compare and vice versa. */
21806 if (mode == CCFPmode
21807 && (!flag_finite_math_only
21808 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21809 || code == UNEQ || code == LTGT))
21810 return reverse_condition_maybe_unordered (code);
21811 else
21812 return reverse_condition (code);
21813 }
21814
21815 /* Generate a compare for CODE. Return a brand-new rtx that
21816 represents the result of the compare. */
21817
21818 static rtx
21819 rs6000_generate_compare (rtx cmp, machine_mode mode)
21820 {
21821 machine_mode comp_mode;
21822 rtx compare_result;
21823 enum rtx_code code = GET_CODE (cmp);
21824 rtx op0 = XEXP (cmp, 0);
21825 rtx op1 = XEXP (cmp, 1);
21826
21827 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21828 comp_mode = CCmode;
21829 else if (FLOAT_MODE_P (mode))
21830 comp_mode = CCFPmode;
21831 else if (code == GTU || code == LTU
21832 || code == GEU || code == LEU)
21833 comp_mode = CCUNSmode;
21834 else if ((code == EQ || code == NE)
21835 && unsigned_reg_p (op0)
21836 && (unsigned_reg_p (op1)
21837 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21838 /* These are unsigned values, perhaps there will be a later
21839 ordering compare that can be shared with this one. */
21840 comp_mode = CCUNSmode;
21841 else
21842 comp_mode = CCmode;
21843
21844 /* If we have an unsigned compare, make sure we don't have a signed value as
21845 an immediate. */
21846 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
21847 && INTVAL (op1) < 0)
21848 {
21849 op0 = copy_rtx_if_shared (op0);
21850 op1 = force_reg (GET_MODE (op0), op1);
21851 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21852 }
21853
21854 /* First, the compare. */
21855 compare_result = gen_reg_rtx (comp_mode);
21856
21857 /* IEEE 128-bit support in VSX registers when we do not have hardware
21858 support. */
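/* A sketch of the strategy: lower the comparison to a libcall (for
   eq_optab on KFmode this is presumably __eqkf2), then compare the
   SImode result against zero. The isgreater-style comparisons call
   the unord_optab function first and skip the main libcall when
   either operand is a NaN, since the library raises an exception for
   signalling NaNs.  */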
21859 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21860 {
21861 rtx libfunc = NULL_RTX;
21862 bool check_nan = false;
21863 rtx dest;
21864
21865 switch (code)
21866 {
21867 case EQ:
21868 case NE:
21869 libfunc = optab_libfunc (eq_optab, mode);
21870 break;
21871
21872 case GT:
21873 case GE:
21874 libfunc = optab_libfunc (ge_optab, mode);
21875 break;
21876
21877 case LT:
21878 case LE:
21879 libfunc = optab_libfunc (le_optab, mode);
21880 break;
21881
21882 case UNORDERED:
21883 case ORDERED:
21884 libfunc = optab_libfunc (unord_optab, mode);
21885 code = (code == UNORDERED) ? NE : EQ;
21886 break;
21887
21888 case UNGE:
21889 case UNGT:
21890 check_nan = true;
21891 libfunc = optab_libfunc (ge_optab, mode);
21892 code = (code == UNGE) ? GE : GT;
21893 break;
21894
21895 case UNLE:
21896 case UNLT:
21897 check_nan = true;
21898 libfunc = optab_libfunc (le_optab, mode);
21899 code = (code == UNLE) ? LE : LT;
21900 break;
21901
21902 case UNEQ:
21903 case LTGT:
21904 check_nan = true;
21905 libfunc = optab_libfunc (eq_optab, mode);
21906 code = (code == UNEQ) ? EQ : NE;
21907 break;
21908
21909 default:
21910 gcc_unreachable ();
21911 }
21912
21913 gcc_assert (libfunc);
21914
21915 if (!check_nan)
21916 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21917 SImode, op0, mode, op1, mode);
21918
21919 /* The library signals an exception for signalling NaNs, so we need to
21920 handle isgreater, etc. by first checking isordered. */
21921 else
21922 {
21923 rtx ne_rtx, normal_dest, unord_dest;
21924 rtx unord_func = optab_libfunc (unord_optab, mode);
21925 rtx join_label = gen_label_rtx ();
21926 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21927 rtx unord_cmp = gen_reg_rtx (comp_mode);
21928
21929
21930 /* Test for either value being a NaN. */
21931 gcc_assert (unord_func);
21932 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21933 SImode, op0, mode, op1, mode);
21934
21935 /* Set the result to 1 (the answer for an unordered comparison) if
21936 either value is a NaN, and jump to the join label. */
21937 dest = gen_reg_rtx (SImode);
21938 emit_move_insn (dest, const1_rtx);
21939 emit_insn (gen_rtx_SET (unord_cmp,
21940 gen_rtx_COMPARE (comp_mode, unord_dest,
21941 const0_rtx)));
21942
21943 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21944 emit_jump_insn (gen_rtx_SET (pc_rtx,
21945 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21946 join_ref,
21947 pc_rtx)));
21948
21949 /* Do the normal comparison, knowing that the values are not
21950 NaNs. */
21951 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21952 SImode, op0, mode, op1, mode);
21953
21954 emit_insn (gen_cstoresi4 (dest,
21955 gen_rtx_fmt_ee (code, SImode, normal_dest,
21956 const0_rtx),
21957 normal_dest, const0_rtx));
21958
21959 /* Join the NaN and non-NaN paths. Compare dest against 0. */
21960 emit_label (join_label);
21961 code = NE;
21962 }
21963
21964 emit_insn (gen_rtx_SET (compare_result,
21965 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21966 }
21967
21968 else
21969 {
21970 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21971 CLOBBERs to match cmptf_internal2 pattern. */
21972 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21973 && FLOAT128_IBM_P (GET_MODE (op0))
21974 && TARGET_HARD_FLOAT)
21975 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21976 gen_rtvec (10,
21977 gen_rtx_SET (compare_result,
21978 gen_rtx_COMPARE (comp_mode, op0, op1)),
21979 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21980 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21981 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21982 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21983 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21984 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21985 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21986 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21987 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21988 else if (GET_CODE (op1) == UNSPEC
21989 && XINT (op1, 1) == UNSPEC_SP_TEST)
21990 {
21991 rtx op1b = XVECEXP (op1, 0, 0);
21992 comp_mode = CCEQmode;
21993 compare_result = gen_reg_rtx (CCEQmode);
21994 if (TARGET_64BIT)
21995 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21996 else
21997 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21998 }
21999 else
22000 emit_insn (gen_rtx_SET (compare_result,
22001 gen_rtx_COMPARE (comp_mode, op0, op1)));
22002 }
22003
22004 /* Some kinds of FP comparisons need an OR operation;
22005 under flag_finite_math_only we don't bother. */
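/* E.g. a floating point LE is decomposed as (LT || EQ): the two CR
   bits are IORed into a CCEQ result (a cror), and the final branch
   then tests EQ on that result.  */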
22006 if (FLOAT_MODE_P (mode)
22007 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22008 && !flag_finite_math_only
22009 && (code == LE || code == GE
22010 || code == UNEQ || code == LTGT
22011 || code == UNGT || code == UNLT))
22012 {
22013 enum rtx_code or1, or2;
22014 rtx or1_rtx, or2_rtx, compare2_rtx;
22015 rtx or_result = gen_reg_rtx (CCEQmode);
22016
22017 switch (code)
22018 {
22019 case LE: or1 = LT; or2 = EQ; break;
22020 case GE: or1 = GT; or2 = EQ; break;
22021 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22022 case LTGT: or1 = LT; or2 = GT; break;
22023 case UNGT: or1 = UNORDERED; or2 = GT; break;
22024 case UNLT: or1 = UNORDERED; or2 = LT; break;
22025 default: gcc_unreachable ();
22026 }
22027 validate_condition_mode (or1, comp_mode);
22028 validate_condition_mode (or2, comp_mode);
22029 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22030 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22031 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22032 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22033 const_true_rtx);
22034 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22035
22036 compare_result = or_result;
22037 code = EQ;
22038 }
22039
22040 validate_condition_mode (code, GET_MODE (compare_result));
22041
22042 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22043 }
22044
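/* For illustration only (not part of the original source): a minimal C
   sketch of the check_nan path above, for an unordered comparison such as
   UNGT on KFmode.  __unordkf2 and __gekf2 are the libgcc helpers the
   unord_optab/ge_optab libcalls conventionally resolve to; treat the names
   and the three-way return convention as assumptions.  */
#if 0
extern int __unordkf2 (__float128, __float128); /* nonzero if unordered  */
extern int __gekf2 (__float128, __float128);	/* <0, 0, >0 three-way	 */

static int
ungt_kf_sketch (__float128 a, __float128 b)
{
  int dest = 1;			/* Preset 1: UNGT is true for NaNs.	 */
  if (__unordkf2 (a, b) != 0)	/* Either operand a NaN?		 */
    goto join;			/* Skip the ordered compare.		 */
  dest = __gekf2 (a, b) > 0;	/* Ordered: GT done via the GE helper.	 */
 join:
  return dest != 0;		/* The final test is always dest NE 0.	 */
}
#endif
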
22045 \f
22046 /* Return the diagnostic message string if the binary operation OP is
22047 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22048
22049 static const char*
22050 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22051 const_tree type1,
22052 const_tree type2)
22053 {
22054 machine_mode mode1 = TYPE_MODE (type1);
22055 machine_mode mode2 = TYPE_MODE (type2);
22056
22057 /* For complex modes, use the inner type. */
22058 if (COMPLEX_MODE_P (mode1))
22059 mode1 = GET_MODE_INNER (mode1);
22060
22061 if (COMPLEX_MODE_P (mode2))
22062 mode2 = GET_MODE_INNER (mode2);
22063
22064 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22065 double to intermix unless -mfloat128-convert is in effect. */
22066 if (mode1 == mode2)
22067 return NULL;
22068
22069 if (!TARGET_FLOAT128_CVT)
22070 {
22071 if ((mode1 == KFmode && mode2 == IFmode)
22072 || (mode1 == IFmode && mode2 == KFmode))
22073 return N_("__float128 and __ibm128 cannot be used in the same "
22074 "expression");
22075
22076 if (TARGET_IEEEQUAD
22077 && ((mode1 == IFmode && mode2 == TFmode)
22078 || (mode1 == TFmode && mode2 == IFmode)))
22079 return N_("__ibm128 and long double cannot be used in the same "
22080 "expression");
22081
22082 if (!TARGET_IEEEQUAD
22083 && ((mode1 == KFmode && mode2 == TFmode)
22084 || (mode1 == TFmode && mode2 == KFmode)))
22085 return N_("__float128 and long double cannot be used in the same "
22086 "expression");
22087 }
22088
22089 return NULL;
22090 }
22091
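/* For illustration (hypothetical user code, not part of this file): the
   kind of expression the hook above rejects when -mfloat128-convert is not
   in effect, since KFmode and IFmode values may not intermix.  */
#if 0
__float128 q;
__ibm128 d;

__float128
mixed (void)
{
  return q + d;   /* error: __float128 and __ibm128 cannot be used in the
		     same expression */
}
#endif
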
22092 \f
22093 /* Expand floating point conversion to/from __float128 and __ibm128. */
22094
22095 void
22096 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22097 {
22098 machine_mode dest_mode = GET_MODE (dest);
22099 machine_mode src_mode = GET_MODE (src);
22100 convert_optab cvt = unknown_optab;
22101 bool do_move = false;
22102 rtx libfunc = NULL_RTX;
22103 rtx dest2;
22104 typedef rtx (*rtx_2func_t) (rtx, rtx);
22105 rtx_2func_t hw_convert = (rtx_2func_t)0;
22106 size_t kf_or_tf;
22107
22108 struct hw_conv_t {
22109 rtx_2func_t from_df;
22110 rtx_2func_t from_sf;
22111 rtx_2func_t from_si_sign;
22112 rtx_2func_t from_si_uns;
22113 rtx_2func_t from_di_sign;
22114 rtx_2func_t from_di_uns;
22115 rtx_2func_t to_df;
22116 rtx_2func_t to_sf;
22117 rtx_2func_t to_si_sign;
22118 rtx_2func_t to_si_uns;
22119 rtx_2func_t to_di_sign;
22120 rtx_2func_t to_di_uns;
22121 } hw_conversions[2] = {
22122 /* conversions to/from KFmode */
22123 {
22124 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22125 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22126 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22127 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22128 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22129 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22130 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22131 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22132 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22133 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22134 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22135 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22136 },
22137
22138 /* conversions to/from TFmode */
22139 {
22140 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22141 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22142 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22143 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22144 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22145 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22146 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22147 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22148 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22149 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22150 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22151 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22152 },
22153 };
22154
22155 if (dest_mode == src_mode)
22156 gcc_unreachable ();
22157
22158 /* Eliminate memory operations. */
22159 if (MEM_P (src))
22160 src = force_reg (src_mode, src);
22161
22162 if (MEM_P (dest))
22163 {
22164 rtx tmp = gen_reg_rtx (dest_mode);
22165 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22166 rs6000_emit_move (dest, tmp, dest_mode);
22167 return;
22168 }
22169
22170 /* Convert to IEEE 128-bit floating point. */
22171 if (FLOAT128_IEEE_P (dest_mode))
22172 {
22173 if (dest_mode == KFmode)
22174 kf_or_tf = 0;
22175 else if (dest_mode == TFmode)
22176 kf_or_tf = 1;
22177 else
22178 gcc_unreachable ();
22179
22180 switch (src_mode)
22181 {
22182 case E_DFmode:
22183 cvt = sext_optab;
22184 hw_convert = hw_conversions[kf_or_tf].from_df;
22185 break;
22186
22187 case E_SFmode:
22188 cvt = sext_optab;
22189 hw_convert = hw_conversions[kf_or_tf].from_sf;
22190 break;
22191
22192 case E_KFmode:
22193 case E_IFmode:
22194 case E_TFmode:
22195 if (FLOAT128_IBM_P (src_mode))
22196 cvt = sext_optab;
22197 else
22198 do_move = true;
22199 break;
22200
22201 case E_SImode:
22202 if (unsigned_p)
22203 {
22204 cvt = ufloat_optab;
22205 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22206 }
22207 else
22208 {
22209 cvt = sfloat_optab;
22210 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22211 }
22212 break;
22213
22214 case E_DImode:
22215 if (unsigned_p)
22216 {
22217 cvt = ufloat_optab;
22218 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22219 }
22220 else
22221 {
22222 cvt = sfloat_optab;
22223 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22224 }
22225 break;
22226
22227 default:
22228 gcc_unreachable ();
22229 }
22230 }
22231
22232 /* Convert from IEEE 128-bit floating point. */
22233 else if (FLOAT128_IEEE_P (src_mode))
22234 {
22235 if (src_mode == KFmode)
22236 kf_or_tf = 0;
22237 else if (src_mode == TFmode)
22238 kf_or_tf = 1;
22239 else
22240 gcc_unreachable ();
22241
22242 switch (dest_mode)
22243 {
22244 case E_DFmode:
22245 cvt = trunc_optab;
22246 hw_convert = hw_conversions[kf_or_tf].to_df;
22247 break;
22248
22249 case E_SFmode:
22250 cvt = trunc_optab;
22251 hw_convert = hw_conversions[kf_or_tf].to_sf;
22252 break;
22253
22254 case E_KFmode:
22255 case E_IFmode:
22256 case E_TFmode:
22257 if (FLOAT128_IBM_P (dest_mode))
22258 cvt = trunc_optab;
22259 else
22260 do_move = true;
22261 break;
22262
22263 case E_SImode:
22264 if (unsigned_p)
22265 {
22266 cvt = ufix_optab;
22267 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22268 }
22269 else
22270 {
22271 cvt = sfix_optab;
22272 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22273 }
22274 break;
22275
22276 case E_DImode:
22277 if (unsigned_p)
22278 {
22279 cvt = ufix_optab;
22280 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22281 }
22282 else
22283 {
22284 cvt = sfix_optab;
22285 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22286 }
22287 break;
22288
22289 default:
22290 gcc_unreachable ();
22291 }
22292 }
22293
22294 /* Both IBM format. */
22295 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22296 do_move = true;
22297
22298 else
22299 gcc_unreachable ();
22300
22301 /* Handle conversion between TFmode/KFmode/IFmode. */
22302 if (do_move)
22303 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22304
22305 /* Handle conversion if we have hardware support. */
22306 else if (TARGET_FLOAT128_HW && hw_convert)
22307 emit_insn ((hw_convert) (dest, src));
22308
22309 /* Call an external function to do the conversion. */
22310 else if (cvt != unknown_optab)
22311 {
22312 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22313 gcc_assert (libfunc != NULL_RTX);
22314
22315 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22316 src, src_mode);
22317
22318 gcc_assert (dest2 != NULL_RTX);
22319 if (!rtx_equal_p (dest, dest2))
22320 emit_move_insn (dest, dest2);
22321 }
22322
22323 else
22324 gcc_unreachable ();
22325
22326 return;
22327 }
22328
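/* A sketch of the dispatch order implemented above, for one concrete case,
   DFmode -> KFmode.  The instruction and libcall names are the conventional
   ones (xscvdpqp for the power9 hardware pattern, __extenddfkf2 for the
   sext_optab libcall) and should be read as assumptions, not guarantees.  */
#if 0
double d;
__float128 q;

void
convert_sketch (void)
{
  q = d;   /* 1. both modes IEEE 128-bit?  plain move (not the case here)
	      2. TARGET_FLOAT128_HW?	   gen_extenddfkf2_hw (xscvdpqp)
	      3. otherwise		   libcall, e.g. __extenddfkf2	*/
}
#endif
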
22329 \f
22330 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22331 can be used as that dest register. Return the dest register. */
22332
22333 rtx
22334 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22335 {
22336 if (op2 == const0_rtx)
22337 return op1;
22338
22339 if (GET_CODE (scratch) == SCRATCH)
22340 scratch = gen_reg_rtx (mode);
22341
22342 if (logical_operand (op2, mode))
22343 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22344 else
22345 emit_insn (gen_rtx_SET (scratch,
22346 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22347
22348 return scratch;
22349 }
22350
22351 void
22352 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22353 {
22354 rtx condition_rtx;
22355 machine_mode op_mode;
22356 enum rtx_code cond_code;
22357 rtx result = operands[0];
22358
22359 condition_rtx = rs6000_generate_compare (operands[1], mode);
22360 cond_code = GET_CODE (condition_rtx);
22361
22362 if (cond_code == NE
22363 || cond_code == GE || cond_code == LE
22364 || cond_code == GEU || cond_code == LEU
22365 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22366 {
22367 rtx not_result = gen_reg_rtx (CCEQmode);
22368 rtx not_op, rev_cond_rtx;
22369 machine_mode cc_mode;
22370
22371 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22372
22373 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22374 SImode, XEXP (condition_rtx, 0), const0_rtx);
22375 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22376 emit_insn (gen_rtx_SET (not_result, not_op));
22377 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22378 }
22379
22380 op_mode = GET_MODE (XEXP (operands[1], 0));
22381 if (op_mode == VOIDmode)
22382 op_mode = GET_MODE (XEXP (operands[1], 1));
22383
22384 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22385 {
22386 PUT_MODE (condition_rtx, DImode);
22387 convert_move (result, condition_rtx, 0);
22388 }
22389 else
22390 {
22391 PUT_MODE (condition_rtx, SImode);
22392 emit_insn (gen_rtx_SET (result, condition_rtx));
22393 }
22394 }
22395
22396 /* Emit a conditional branch to the label in OPERANDS[3], testing OPERANDS[0]. */
22397
22398 void
22399 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22400 {
22401 rtx condition_rtx, loc_ref;
22402
22403 condition_rtx = rs6000_generate_compare (operands[0], mode);
22404 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22405 emit_jump_insn (gen_rtx_SET (pc_rtx,
22406 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22407 loc_ref, pc_rtx)));
22408 }
22409
22410 /* Return the string to output a conditional branch to LABEL, which is
22411 the operand template of the label, or NULL if the branch is really a
22412 conditional return.
22413
22414 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22415 condition code register and its mode specifies what kind of
22416 comparison we made.
22417
22418 REVERSED is nonzero if we should reverse the sense of the comparison.
22419
22420 INSN is the insn. */
22421
22422 char *
22423 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22424 {
22425 static char string[64];
22426 enum rtx_code code = GET_CODE (op);
22427 rtx cc_reg = XEXP (op, 0);
22428 machine_mode mode = GET_MODE (cc_reg);
22429 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22430 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22431 int really_reversed = reversed ^ need_longbranch;
22432 char *s = string;
22433 const char *ccode;
22434 const char *pred;
22435 rtx note;
22436
22437 validate_condition_mode (code, mode);
22438
22439 /* Work out which way this really branches. We could always use
22440 reverse_condition_maybe_unordered here, but distinguishing the
22441 cases makes the resulting assembler clearer. */
22442 if (really_reversed)
22443 {
22444 /* Reversing an FP compare takes care -- an ordered compare
22445 becomes an unordered compare and vice versa. */
22446 if (mode == CCFPmode)
22447 code = reverse_condition_maybe_unordered (code);
22448 else
22449 code = reverse_condition (code);
22450 }
22451
22452 switch (code)
22453 {
22454 /* Not all of these are actually distinct opcodes, but
22455 we distinguish them for clarity of the resulting assembler. */
22456 case NE: case LTGT:
22457 ccode = "ne"; break;
22458 case EQ: case UNEQ:
22459 ccode = "eq"; break;
22460 case GE: case GEU:
22461 ccode = "ge"; break;
22462 case GT: case GTU: case UNGT:
22463 ccode = "gt"; break;
22464 case LE: case LEU:
22465 ccode = "le"; break;
22466 case LT: case LTU: case UNLT:
22467 ccode = "lt"; break;
22468 case UNORDERED: ccode = "un"; break;
22469 case ORDERED: ccode = "nu"; break;
22470 case UNGE: ccode = "nl"; break;
22471 case UNLE: ccode = "ng"; break;
22472 default:
22473 gcc_unreachable ();
22474 }
22475
22476 /* Maybe we have a guess as to how likely the branch is. */
22477 pred = "";
22478 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22479 if (note != NULL_RTX)
22480 {
22481 /* PROB is the difference from 50%. */
22482 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22483 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22484
22485 /* Only hint for highly probable/improbable branches on newer cpus when
22486 we have real profile data, as static prediction overrides processor
22487 dynamic prediction. For older cpus we may as well always hint, but
22488 assume not taken for branches that are very close to 50% as a
22489 mispredicted taken branch is more expensive than a
22490 mispredicted not-taken branch. */
22491 if (rs6000_always_hint
22492 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22493 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22494 && br_prob_note_reliable_p (note)))
22495 {
22496 if (abs (prob) > REG_BR_PROB_BASE / 20
22497 && ((prob > 0) ^ need_longbranch))
22498 pred = "+";
22499 else
22500 pred = "-";
22501 }
22502 }
22503
22504 if (label == NULL)
22505 s += sprintf (s, "b%slr%s ", ccode, pred);
22506 else
22507 s += sprintf (s, "b%s%s ", ccode, pred);
22508
22509 /* We need to escape any '%' characters in the reg_names string.
22510 Assume they'd only be the first character.... */
22511 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22512 *s++ = '%';
22513 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22514
22515 if (label != NULL)
22516 {
22517 /* If the branch distance was too far, we may have to use an
22518 unconditional branch to go the distance. */
22519 if (need_longbranch)
22520 s += sprintf (s, ",$+8\n\tb %s", label);
22521 else
22522 s += sprintf (s, ",%s", label);
22523 }
22524
22525 return string;
22526 }
22527
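/* For illustration: typical strings the routine above produces.  The
   register and label spellings depend on reg_names and the assembler, so
   the exact text below is hypothetical:

     EQ, hinted taken, near label:   "beq+ cr0,.L10"
     conditional return (no label):  "beqlr cr0"
     EQ, but out of branch range:    "bne cr0,$+8\n\tb .L10"

   The long-branch form inverts the condition around an unconditional
   branch, which is why really_reversed xors REVERSED with
   NEED_LONGBRANCH.  */
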
22528 /* Return insn for VSX or Altivec comparisons. */
22529
22530 static rtx
22531 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22532 {
22533 rtx mask;
22534 machine_mode mode = GET_MODE (op0);
22535
22536 switch (code)
22537 {
22538 default:
22539 break;
22540
22541 case GE:
22542 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22543 return NULL_RTX;
22544 /* FALLTHRU */
22545
22546 case EQ:
22547 case GT:
22548 case GTU:
22549 case ORDERED:
22550 case UNORDERED:
22551 case UNEQ:
22552 case LTGT:
22553 mask = gen_reg_rtx (mode);
22554 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22555 return mask;
22556 }
22557
22558 return NULL_RTX;
22559 }
22560
22561 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22562 DMODE is the expected destination mode. This is a recursive function. */
22563
22564 static rtx
22565 rs6000_emit_vector_compare (enum rtx_code rcode,
22566 rtx op0, rtx op1,
22567 machine_mode dmode)
22568 {
22569 rtx mask;
22570 bool swap_operands = false;
22571 bool try_again = false;
22572
22573 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22574 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22575
22576 /* See if the comparison works as is. */
22577 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22578 if (mask)
22579 return mask;
22580
22581 switch (rcode)
22582 {
22583 case LT:
22584 rcode = GT;
22585 swap_operands = true;
22586 try_again = true;
22587 break;
22588 case LTU:
22589 rcode = GTU;
22590 swap_operands = true;
22591 try_again = true;
22592 break;
22593 case NE:
22594 case UNLE:
22595 case UNLT:
22596 case UNGE:
22597 case UNGT:
22598 /* Invert condition and try again.
22599 e.g., A != B becomes ~(A==B). */
22600 {
22601 enum rtx_code rev_code;
22602 enum insn_code nor_code;
22603 rtx mask2;
22604
22605 rev_code = reverse_condition_maybe_unordered (rcode);
22606 if (rev_code == UNKNOWN)
22607 return NULL_RTX;
22608
22609 nor_code = optab_handler (one_cmpl_optab, dmode);
22610 if (nor_code == CODE_FOR_nothing)
22611 return NULL_RTX;
22612
22613 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22614 if (!mask2)
22615 return NULL_RTX;
22616
22617 mask = gen_reg_rtx (dmode);
22618 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22619 return mask;
22620 }
22621 break;
22622 case GE:
22623 case GEU:
22624 case LE:
22625 case LEU:
22626 /* Try GT/GTU/LT/LTU OR EQ */
22627 {
22628 rtx c_rtx, eq_rtx;
22629 enum insn_code ior_code;
22630 enum rtx_code new_code;
22631
22632 switch (rcode)
22633 {
22634 case GE:
22635 new_code = GT;
22636 break;
22637
22638 case GEU:
22639 new_code = GTU;
22640 break;
22641
22642 case LE:
22643 new_code = LT;
22644 break;
22645
22646 case LEU:
22647 new_code = LTU;
22648 break;
22649
22650 default:
22651 gcc_unreachable ();
22652 }
22653
22654 ior_code = optab_handler (ior_optab, dmode);
22655 if (ior_code == CODE_FOR_nothing)
22656 return NULL_RTX;
22657
22658 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22659 if (!c_rtx)
22660 return NULL_RTX;
22661
22662 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22663 if (!eq_rtx)
22664 return NULL_RTX;
22665
22666 mask = gen_reg_rtx (dmode);
22667 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22668 return mask;
22669 }
22670 break;
22671 default:
22672 return NULL_RTX;
22673 }
22674
22675 if (try_again)
22676 {
22677 if (swap_operands)
22678 std::swap (op0, op1);
22679
22680 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22681 if (mask)
22682 return mask;
22683 }
22684
22685 /* You only get two chances. */
22686 return NULL_RTX;
22687 }
22688
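/* A scalar analogue (a sketch, not part of this file) of the fallback
   above: Altivec/VSX compare only EQ/GT/GTU natively, so GE is built as
   GT OR EQ, and NE as an inverted EQ.  Per-element masks are all-ones or
   all-zeros.  */
#if 0
static int
vec_ge_element_sketch (int a, int b)
{
  int gt = -(a > b);	/* one lane of vcmpgtsw: -1 or 0 */
  int eq = -(a == b);	/* one lane of vcmpequw: -1 or 0 */
  return gt | eq;	/* vor gives the GE mask	 */
}

static int
vec_ne_element_sketch (int a, int b)
{
  return ~(-(a == b));	/* one's complement of the EQ mask */
}
#endif
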
22689 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22690 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22691 operands for the relation operation COND. */
22692
22693 int
22694 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22695 rtx cond, rtx cc_op0, rtx cc_op1)
22696 {
22697 machine_mode dest_mode = GET_MODE (dest);
22698 machine_mode mask_mode = GET_MODE (cc_op0);
22699 enum rtx_code rcode = GET_CODE (cond);
22700 machine_mode cc_mode = CCmode;
22701 rtx mask;
22702 rtx cond2;
22703 bool invert_move = false;
22704
22705 if (VECTOR_UNIT_NONE_P (dest_mode))
22706 return 0;
22707
22708 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22709 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22710
22711 switch (rcode)
22712 {
22713 /* For tests the hardware cannot do directly, invert the condition
22714 and swap the TRUE and FALSE arms of the conditional move. */
22715 case NE:
22716 case UNLE:
22717 case UNLT:
22718 case UNGE:
22719 case UNGT:
22720 /* Invert condition and try again.
22721 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22722 invert_move = true;
22723 rcode = reverse_condition_maybe_unordered (rcode);
22724 if (rcode == UNKNOWN)
22725 return 0;
22726 break;
22727
22728 case GE:
22729 case LE:
22730 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22731 {
22732 /* Invert condition to avoid compound test. */
22733 invert_move = true;
22734 rcode = reverse_condition (rcode);
22735 }
22736 break;
22737
22738 case GTU:
22739 case GEU:
22740 case LTU:
22741 case LEU:
22742 /* Mark unsigned tests with CCUNSmode. */
22743 cc_mode = CCUNSmode;
22744
22745 /* Invert condition to avoid compound test if necessary. */
22746 if (rcode == GEU || rcode == LEU)
22747 {
22748 invert_move = true;
22749 rcode = reverse_condition (rcode);
22750 }
22751 break;
22752
22753 default:
22754 break;
22755 }
22756
22757 /* Get the vector mask for the given relational operations. */
22758 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22759
22760 if (!mask)
22761 return 0;
22762
22763 if (invert_move)
22764 std::swap (op_true, op_false);
22765
22766 /* Optimize constant TRUE/FALSE arms, since the compare mask is -1/0. */
22767 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22768 && (GET_CODE (op_true) == CONST_VECTOR
22769 || GET_CODE (op_false) == CONST_VECTOR))
22770 {
22771 rtx constant_0 = CONST0_RTX (dest_mode);
22772 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22773
22774 if (op_true == constant_m1 && op_false == constant_0)
22775 {
22776 emit_move_insn (dest, mask);
22777 return 1;
22778 }
22779
22780 else if (op_true == constant_0 && op_false == constant_m1)
22781 {
22782 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22783 return 1;
22784 }
22785
22786 /* If we can't use the vector comparison directly, perhaps we can use
22787 the mask for the true or false fields, instead of loading up a
22788 constant. */
22789 if (op_true == constant_m1)
22790 op_true = mask;
22791
22792 if (op_false == constant_0)
22793 op_false = mask;
22794 }
22795
22796 if (!REG_P (op_true) && !SUBREG_P (op_true))
22797 op_true = force_reg (dest_mode, op_true);
22798
22799 if (!REG_P (op_false) && !SUBREG_P (op_false))
22800 op_false = force_reg (dest_mode, op_false);
22801
22802 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22803 CONST0_RTX (dest_mode));
22804 emit_insn (gen_rtx_SET (dest,
22805 gen_rtx_IF_THEN_ELSE (dest_mode,
22806 cond2,
22807 op_true,
22808 op_false)));
22809 return 1;
22810 }
22811
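/* A sketch of the per-bit effect of the final IF_THEN_ELSE emitted above,
   which matches the vsel/xxsel select: each result bit comes from OP_TRUE
   where the mask bit is 1 and from OP_FALSE where it is 0.  Because compare
   masks are -1/0 per element, this selects whole elements.  */
#if 0
static unsigned int
vsel_sketch (unsigned int mask, unsigned int op_true, unsigned int op_false)
{
  return (mask & op_true) | (~mask & op_false);
}
#endif
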
22812 /* ISA 3.0 (power9) min/max subcase to emit an XSMAXCDP or XSMINCDP instruction
22813 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22814 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
22815 0 if the hardware has no such operation. */
22816
22817 static int
22818 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22819 {
22820 enum rtx_code code = GET_CODE (op);
22821 rtx op0 = XEXP (op, 0);
22822 rtx op1 = XEXP (op, 1);
22823 machine_mode compare_mode = GET_MODE (op0);
22824 machine_mode result_mode = GET_MODE (dest);
22825 bool max_p = false;
22826
22827 if (result_mode != compare_mode)
22828 return 0;
22829
22830 if (code == GE || code == GT)
22831 max_p = true;
22832 else if (code == LE || code == LT)
22833 max_p = false;
22834 else
22835 return 0;
22836
22837 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22838 ;
22839
22840 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22841 max_p = !max_p;
22842
22843 else
22844 return 0;
22845
22846 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22847 return 1;
22848 }
22849
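/* For illustration (hypothetical user code): the two source shapes the
   subcase above recognizes.  On power9 these become the C-style min/max
   instructions xsmaxcdp/xsmincdp rather than a compare and select.  */
#if 0
double
max_like (double a, double b)
{
  return a > b ? a : b;		/* operands match: max_p, xsmaxcdp	*/
}

double
min_like (double a, double b)
{
  return a > b ? b : a;		/* swapped arms flip max_p: xsmincdp	*/
}
#endif
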
22850 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22851 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22852 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22853 zero/false. Return 0 if the hardware has no such operation. */
22854
22855 static int
22856 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22857 {
22858 enum rtx_code code = GET_CODE (op);
22859 rtx op0 = XEXP (op, 0);
22860 rtx op1 = XEXP (op, 1);
22861 machine_mode result_mode = GET_MODE (dest);
22862 rtx compare_rtx;
22863 rtx cmove_rtx;
22864 rtx clobber_rtx;
22865
22866 if (!can_create_pseudo_p ())
22867 return 0;
22868
22869 switch (code)
22870 {
22871 case EQ:
22872 case GE:
22873 case GT:
22874 break;
22875
22876 case NE:
22877 case LT:
22878 case LE:
22879 code = swap_condition (code);
22880 std::swap (op0, op1);
22881 break;
22882
22883 default:
22884 return 0;
22885 }
22886
22887 /* Generate: [(parallel [(set (dest)
22888 (if_then_else (op (cmp1) (cmp2))
22889 (true)
22890 (false)))
22891 (clobber (scratch))])]. */
22892
22893 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22894 cmove_rtx = gen_rtx_SET (dest,
22895 gen_rtx_IF_THEN_ELSE (result_mode,
22896 compare_rtx,
22897 true_cond,
22898 false_cond));
22899
22900 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22901 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22902 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22903
22904 return 1;
22905 }
22906
22907 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22908 operands of the last comparison is nonzero/true, FALSE_COND if it
22909 is zero/false. Return 0 if the hardware has no such operation. */
22910
22911 int
22912 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22913 {
22914 enum rtx_code code = GET_CODE (op);
22915 rtx op0 = XEXP (op, 0);
22916 rtx op1 = XEXP (op, 1);
22917 machine_mode compare_mode = GET_MODE (op0);
22918 machine_mode result_mode = GET_MODE (dest);
22919 rtx temp;
22920 bool is_against_zero;
22921
22922 /* These modes should always match. */
22923 if (GET_MODE (op1) != compare_mode
22924 /* In the isel case however, we can use a compare immediate, so
22925 op1 may be a small constant. */
22926 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22927 return 0;
22928 if (GET_MODE (true_cond) != result_mode)
22929 return 0;
22930 if (GET_MODE (false_cond) != result_mode)
22931 return 0;
22932
22933 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22934 if (TARGET_P9_MINMAX
22935 && (compare_mode == SFmode || compare_mode == DFmode)
22936 && (result_mode == SFmode || result_mode == DFmode))
22937 {
22938 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22939 return 1;
22940
22941 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22942 return 1;
22943 }
22944
22945 /* Don't allow using floating point comparisons for integer results for
22946 now. */
22947 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22948 return 0;
22949
22950 /* First, work out if the hardware can do this at all, or
22951 if it's too slow.... */
22952 if (!FLOAT_MODE_P (compare_mode))
22953 {
22954 if (TARGET_ISEL)
22955 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22956 return 0;
22957 }
22958
22959 is_against_zero = op1 == CONST0_RTX (compare_mode);
22960
22961 /* A floating-point subtract might overflow, underflow, or produce
22962 an inexact result, thus changing the floating-point flags, so it
22963 can't be generated if we care about that. It's safe if one side
22964 of the construct is zero, since then no subtract will be
22965 generated. */
22966 if (SCALAR_FLOAT_MODE_P (compare_mode)
22967 && flag_trapping_math && ! is_against_zero)
22968 return 0;
22969
22970 /* Eliminate half of the comparisons by switching operands; this
22971 makes the remaining code simpler. */
22972 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22973 || code == LTGT || code == LT || code == UNLE)
22974 {
22975 code = reverse_condition_maybe_unordered (code);
22976 temp = true_cond;
22977 true_cond = false_cond;
22978 false_cond = temp;
22979 }
22980
22981 /* UNEQ and LTGT take four instructions for a comparison with zero;
22982 it'll probably be faster to use a branch here too. */
22983 if (code == UNEQ && HONOR_NANS (compare_mode))
22984 return 0;
22985
22986 /* We're going to try to implement comparisons by performing
22987 a subtract, then comparing against zero. Unfortunately,
22988 Inf - Inf is NaN which is not zero, and so if we don't
22989 know that the operand is finite and the comparison
22990 would treat EQ differently from UNORDERED, we can't do it. */
22991 if (HONOR_INFINITIES (compare_mode)
22992 && code != GT && code != UNGE
22993 && (GET_CODE (op1) != CONST_DOUBLE
22994 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22995 /* Constructs of the form (a OP b ? a : b) are safe. */
22996 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22997 || (! rtx_equal_p (op0, true_cond)
22998 && ! rtx_equal_p (op1, true_cond))))
22999 return 0;
23000
23001 /* At this point we know we can use fsel. */
23002
23003 /* Reduce the comparison to a comparison against zero. */
23004 if (! is_against_zero)
23005 {
23006 temp = gen_reg_rtx (compare_mode);
23007 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23008 op0 = temp;
23009 op1 = CONST0_RTX (compare_mode);
23010 }
23011
23012 /* If we don't care about NaNs we can reduce some of the comparisons
23013 down to faster ones. */
23014 if (! HONOR_NANS (compare_mode))
23015 switch (code)
23016 {
23017 case GT:
23018 code = LE;
23019 temp = true_cond;
23020 true_cond = false_cond;
23021 false_cond = temp;
23022 break;
23023 case UNGE:
23024 code = GE;
23025 break;
23026 case UNEQ:
23027 code = EQ;
23028 break;
23029 default:
23030 break;
23031 }
23032
23033 /* Now, reduce everything down to a GE. */
23034 switch (code)
23035 {
23036 case GE:
23037 break;
23038
23039 case LE:
23040 temp = gen_reg_rtx (compare_mode);
23041 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23042 op0 = temp;
23043 break;
23044
23045 case ORDERED:
23046 temp = gen_reg_rtx (compare_mode);
23047 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23048 op0 = temp;
23049 break;
23050
23051 case EQ:
23052 temp = gen_reg_rtx (compare_mode);
23053 emit_insn (gen_rtx_SET (temp,
23054 gen_rtx_NEG (compare_mode,
23055 gen_rtx_ABS (compare_mode, op0))));
23056 op0 = temp;
23057 break;
23058
23059 case UNGE:
23060 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23061 temp = gen_reg_rtx (result_mode);
23062 emit_insn (gen_rtx_SET (temp,
23063 gen_rtx_IF_THEN_ELSE (result_mode,
23064 gen_rtx_GE (VOIDmode,
23065 op0, op1),
23066 true_cond, false_cond)));
23067 false_cond = true_cond;
23068 true_cond = temp;
23069
23070 temp = gen_reg_rtx (compare_mode);
23071 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23072 op0 = temp;
23073 break;
23074
23075 case GT:
23076 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23077 temp = gen_reg_rtx (result_mode);
23078 emit_insn (gen_rtx_SET (temp,
23079 gen_rtx_IF_THEN_ELSE (result_mode,
23080 gen_rtx_GE (VOIDmode,
23081 op0, op1),
23082 true_cond, false_cond)));
23083 true_cond = false_cond;
23084 false_cond = temp;
23085
23086 temp = gen_reg_rtx (compare_mode);
23087 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23088 op0 = temp;
23089 break;
23090
23091 default:
23092 gcc_unreachable ();
23093 }
23094
23095 emit_insn (gen_rtx_SET (dest,
23096 gen_rtx_IF_THEN_ELSE (result_mode,
23097 gen_rtx_GE (VOIDmode,
23098 op0, op1),
23099 true_cond, false_cond)));
23100 return 1;
23101 }
23102
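/* A sketch of the fsel lowering above in its simplest form: the comparison
   is first reduced to a test against zero by a subtraction (hence the
   flag_trapping_math restriction), then everything is funneled into a
   single GE-against-zero select.  */
#if 0
double
fsel_sketch (double a, double b, double x, double y)
{
  double t = a - b;		/* reduce the compare to t against 0.0 */
  return t >= 0.0 ? x : y;	/* fsel t,x,y */
}
#endif
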
23103 /* Same as above, but for ints (isel). */
23104
23105 int
23106 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23107 {
23108 rtx condition_rtx, cr;
23109 machine_mode mode = GET_MODE (dest);
23110 enum rtx_code cond_code;
23111 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23112 bool signedp;
23113
23114 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23115 return 0;
23116
23117 /* We still have to do the compare, because isel doesn't do a
23118 compare; it just looks at the CRx bits set by a previous compare
23119 instruction. */
23120 condition_rtx = rs6000_generate_compare (op, mode);
23121 cond_code = GET_CODE (condition_rtx);
23122 cr = XEXP (condition_rtx, 0);
23123 signedp = GET_MODE (cr) == CCmode;
23124
23125 isel_func = (mode == SImode
23126 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23127 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23128
23129 switch (cond_code)
23130 {
23131 case LT: case GT: case LTU: case GTU: case EQ:
23132 /* isel handles these directly. */
23133 break;
23134
23135 default:
23136 /* We need to swap the sense of the comparison. */
23137 {
23138 std::swap (false_cond, true_cond);
23139 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23140 }
23141 break;
23142 }
23143
23144 false_cond = force_reg (mode, false_cond);
23145 if (true_cond != const0_rtx)
23146 true_cond = force_reg (mode, true_cond);
23147
23148 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23149
23150 return 1;
23151 }
23152
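/* For illustration (hypothetical user code): a conditional move that maps
   directly onto isel, since LT is one of the codes handled above without
   reversing the condition.  The assembly in the comment is a sketch.  */
#if 0
int
isel_sketch (int a, int b, int t, int f)
{
  return a < b ? t : f;		/* cmpw; isel dest,t,f,lt -- no branch */
}
#endif
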
23153 void
23154 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23155 {
23156 machine_mode mode = GET_MODE (op0);
23157 enum rtx_code c;
23158 rtx target;
23159
23160 /* VSX/altivec have direct min/max insns. */
23161 if ((code == SMAX || code == SMIN)
23162 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23163 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23164 {
23165 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23166 return;
23167 }
23168
23169 if (code == SMAX || code == SMIN)
23170 c = GE;
23171 else
23172 c = GEU;
23173
23174 if (code == SMAX || code == UMAX)
23175 target = emit_conditional_move (dest, c, op0, op1, mode,
23176 op0, op1, mode, 0);
23177 else
23178 target = emit_conditional_move (dest, c, op0, op1, mode,
23179 op1, op0, mode, 0);
23180 gcc_assert (target);
23181 if (target != dest)
23182 emit_move_insn (dest, target);
23183 }
23184
23185 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23186 COND is true. Mark the jump as unlikely to be taken. */
23187
23188 static void
23189 emit_unlikely_jump (rtx cond, rtx label)
23190 {
23191 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23192 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23193 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23194 }
23195
23196 /* A subroutine of the atomic operation splitters. Emit a load-locked
23197 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23198 the zero_extend operation. */
23199
23200 static void
23201 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23202 {
23203 rtx (*fn) (rtx, rtx) = NULL;
23204
23205 switch (mode)
23206 {
23207 case E_QImode:
23208 fn = gen_load_lockedqi;
23209 break;
23210 case E_HImode:
23211 fn = gen_load_lockedhi;
23212 break;
23213 case E_SImode:
23214 if (GET_MODE (mem) == QImode)
23215 fn = gen_load_lockedqi_si;
23216 else if (GET_MODE (mem) == HImode)
23217 fn = gen_load_lockedhi_si;
23218 else
23219 fn = gen_load_lockedsi;
23220 break;
23221 case E_DImode:
23222 fn = gen_load_lockeddi;
23223 break;
23224 case E_TImode:
23225 fn = gen_load_lockedti;
23226 break;
23227 default:
23228 gcc_unreachable ();
23229 }
23230 emit_insn (fn (reg, mem));
23231 }
23232
23233 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23234 instruction in MODE. */
23235
23236 static void
23237 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23238 {
23239 rtx (*fn) (rtx, rtx, rtx) = NULL;
23240
23241 switch (mode)
23242 {
23243 case E_QImode:
23244 fn = gen_store_conditionalqi;
23245 break;
23246 case E_HImode:
23247 fn = gen_store_conditionalhi;
23248 break;
23249 case E_SImode:
23250 fn = gen_store_conditionalsi;
23251 break;
23252 case E_DImode:
23253 fn = gen_store_conditionaldi;
23254 break;
23255 case E_TImode:
23256 fn = gen_store_conditionalti;
23257 break;
23258 default:
23259 gcc_unreachable ();
23260 }
23261
23262 /* Emit sync before stwcx. to address PPC405 Erratum. */
23263 if (PPC405_ERRATUM77)
23264 emit_insn (gen_hwsync ());
23265
23266 emit_insn (fn (res, mem, val));
23267 }
23268
23269 /* Expand barriers before and after a load_locked/store_cond sequence. */
23270
23271 static rtx
23272 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23273 {
23274 rtx addr = XEXP (mem, 0);
23275
23276 if (!legitimate_indirect_address_p (addr, reload_completed)
23277 && !legitimate_indexed_address_p (addr, reload_completed))
23278 {
23279 addr = force_reg (Pmode, addr);
23280 mem = replace_equiv_address_nv (mem, addr);
23281 }
23282
23283 switch (model)
23284 {
23285 case MEMMODEL_RELAXED:
23286 case MEMMODEL_CONSUME:
23287 case MEMMODEL_ACQUIRE:
23288 break;
23289 case MEMMODEL_RELEASE:
23290 case MEMMODEL_ACQ_REL:
23291 emit_insn (gen_lwsync ());
23292 break;
23293 case MEMMODEL_SEQ_CST:
23294 emit_insn (gen_hwsync ());
23295 break;
23296 default:
23297 gcc_unreachable ();
23298 }
23299 return mem;
23300 }
23301
23302 static void
23303 rs6000_post_atomic_barrier (enum memmodel model)
23304 {
23305 switch (model)
23306 {
23307 case MEMMODEL_RELAXED:
23308 case MEMMODEL_CONSUME:
23309 case MEMMODEL_RELEASE:
23310 break;
23311 case MEMMODEL_ACQUIRE:
23312 case MEMMODEL_ACQ_REL:
23313 case MEMMODEL_SEQ_CST:
23314 emit_insn (gen_isync ());
23315 break;
23316 default:
23317 gcc_unreachable ();
23318 }
23319 }
23320
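/* A summary of the fence placement implemented by the two helpers above,
   around a larx/stcx. loop:

     memory model   before loop   after loop
     relaxed        (none)        (none)
     consume        (none)        (none)
     acquire        (none)        isync
     release        lwsync        (none)
     acq_rel        lwsync        isync
     seq_cst        hwsync        isync  */
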
23321 /* A subroutine of the various atomic expanders. For sub-word operations,
23322 we must adjust things to operate on SImode. Given the original MEM,
23323 return a new aligned memory. Also build and return the quantities by
23324 which to shift and mask. */
23325
23326 static rtx
23327 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23328 {
23329 rtx addr, align, shift, mask, mem;
23330 HOST_WIDE_INT shift_mask;
23331 machine_mode mode = GET_MODE (orig_mem);
23332
23333 /* For smaller modes, we have to implement this via SImode. */
23334 shift_mask = (mode == QImode ? 0x18 : 0x10);
23335
23336 addr = XEXP (orig_mem, 0);
23337 addr = force_reg (GET_MODE (addr), addr);
23338
23339 /* Aligned memory containing subword. Generate a new memory. We
23340 do not want any of the existing MEM_ATTR data, as we're now
23341 accessing memory outside the original object. */
23342 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23343 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23344 mem = gen_rtx_MEM (SImode, align);
23345 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23346 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23347 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23348
23349 /* Shift amount for subword relative to aligned word. */
23350 shift = gen_reg_rtx (SImode);
23351 addr = gen_lowpart (SImode, addr);
23352 rtx tmp = gen_reg_rtx (SImode);
23353 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23354 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23355 if (BYTES_BIG_ENDIAN)
23356 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23357 shift, 1, OPTAB_LIB_WIDEN);
23358 *pshift = shift;
23359
23360 /* Mask for insertion. */
23361 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23362 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23363 *pmask = mask;
23364
23365 return mem;
23366 }
23367
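/* A plain-C sketch (hypothetical helper, not part of this file) of the
   address arithmetic above, assuming a QImode access: align the address
   down to a word, turn the low address bits into a bit shift, flip the
   shift for big-endian, and build the insertion mask.  */
#if 0
static void
subword_shift_mask_sketch (unsigned long addr, int big_endian,
			   unsigned long *aligned,
			   unsigned int *shift, unsigned int *mask)
{
  *aligned = addr & ~3ul;		/* SImode word holding the byte  */
  unsigned int s = (addr << 3) & 0x18;	/* byte offset in bits, 0..24	 */
  if (big_endian)
    s ^= 0x18;				/* BE counts from the other end  */
  *shift = s;
  *mask = 0xffu << s;			/* GET_MODE_MASK (QImode) << s	 */
}
#endif
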
23368 /* A subroutine of the various atomic expanders. For sub-word operands,
23369 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23370
23371 static rtx
23372 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23373 {
23374 rtx x;
23375
23376 x = gen_reg_rtx (SImode);
23377 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23378 gen_rtx_NOT (SImode, mask),
23379 oldval)));
23380
23381 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23382
23383 return x;
23384 }
23385
23386 /* A subroutine of the various atomic expanders. For sub-word operands,
23387 extract WIDE to NARROW via SHIFT. */
23388
23389 static void
23390 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23391 {
23392 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23393 wide, 1, OPTAB_LIB_WIDEN);
23394 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23395 }
23396
23397 /* Expand an atomic compare and swap operation. */
23398
23399 void
23400 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23401 {
23402 rtx boolval, retval, mem, oldval, newval, cond;
23403 rtx label1, label2, x, mask, shift;
23404 machine_mode mode, orig_mode;
23405 enum memmodel mod_s, mod_f;
23406 bool is_weak;
23407
23408 boolval = operands[0];
23409 retval = operands[1];
23410 mem = operands[2];
23411 oldval = operands[3];
23412 newval = operands[4];
23413 is_weak = (INTVAL (operands[5]) != 0);
23414 mod_s = memmodel_base (INTVAL (operands[6]));
23415 mod_f = memmodel_base (INTVAL (operands[7]));
23416 orig_mode = mode = GET_MODE (mem);
23417
23418 mask = shift = NULL_RTX;
23419 if (mode == QImode || mode == HImode)
23420 {
23421 /* Before power8, we didn't have access to lbarx/lharx, so generate
23422 lwarx and shift/mask operations. With power8, we need to do the
23423 comparison in SImode, but the store is still done in QI/HImode. */
23424 oldval = convert_modes (SImode, mode, oldval, 1);
23425
23426 if (!TARGET_SYNC_HI_QI)
23427 {
23428 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23429
23430 /* Shift and mask OLDVAL into position within the word. */
23431 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23432 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23433
23434 /* Shift and mask NEWVAL into position within the word. */
23435 newval = convert_modes (SImode, mode, newval, 1);
23436 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23437 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23438 }
23439
23440 /* Prepare to adjust the return value. */
23441 retval = gen_reg_rtx (SImode);
23442 mode = SImode;
23443 }
23444 else if (reg_overlap_mentioned_p (retval, oldval))
23445 oldval = copy_to_reg (oldval);
23446
23447 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23448 oldval = copy_to_mode_reg (mode, oldval);
23449
23450 if (reg_overlap_mentioned_p (retval, newval))
23451 newval = copy_to_reg (newval);
23452
23453 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23454
23455 label1 = NULL_RTX;
23456 if (!is_weak)
23457 {
23458 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23459 emit_label (XEXP (label1, 0));
23460 }
23461 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23462
23463 emit_load_locked (mode, retval, mem);
23464
23465 x = retval;
23466 if (mask)
23467 x = expand_simple_binop (SImode, AND, retval, mask,
23468 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23469
23470 cond = gen_reg_rtx (CCmode);
23471 /* If we have TImode, synthesize a comparison. */
23472 if (mode != TImode)
23473 x = gen_rtx_COMPARE (CCmode, x, oldval);
23474 else
23475 {
23476 rtx xor1_result = gen_reg_rtx (DImode);
23477 rtx xor2_result = gen_reg_rtx (DImode);
23478 rtx or_result = gen_reg_rtx (DImode);
23479 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23480 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23481 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23482 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23483
23484 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23485 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23486 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23487 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23488 }
23489
23490 emit_insn (gen_rtx_SET (cond, x));
23491
23492 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23493 emit_unlikely_jump (x, label2);
23494
23495 x = newval;
23496 if (mask)
23497 x = rs6000_mask_atomic_subword (retval, newval, mask);
23498
23499 emit_store_conditional (orig_mode, cond, mem, x);
23500
23501 if (!is_weak)
23502 {
23503 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23504 emit_unlikely_jump (x, label1);
23505 }
23506
23507 if (!is_mm_relaxed (mod_f))
23508 emit_label (XEXP (label2, 0));
23509
23510 rs6000_post_atomic_barrier (mod_s);
23511
23512 if (is_mm_relaxed (mod_f))
23513 emit_label (XEXP (label2, 0));
23514
23515 if (shift)
23516 rs6000_finish_atomic_subword (operands[1], retval, shift);
23517 else if (mode != GET_MODE (operands[1]))
23518 convert_move (operands[1], retval, 1);
23519
23520 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23521 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23522 emit_insn (gen_rtx_SET (boolval, x));
23523 }
23524
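/* For illustration: the shape of the sequence emitted above for a strong
   SImode compare-and-swap with seq_cst ordering.  Register numbers and
   local labels are hypothetical:

	hwsync			# pre-barrier for seq_cst
     1:	lwarx   10,0,3		# load-locked the current value
	cmpw    0,10,4		# compare with the expected value
	bne-    0,2f		# mismatch: fail (hinted unlikely)
	stwcx.  5,0,3		# try to store the desired value
	bne-    0,1b		# reservation lost: retry
     2:	isync			# post-barrier; cr0 EQ = success  */
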
23525 /* Expand an atomic exchange operation. */
23526
23527 void
23528 rs6000_expand_atomic_exchange (rtx operands[])
23529 {
23530 rtx retval, mem, val, cond;
23531 machine_mode mode;
23532 enum memmodel model;
23533 rtx label, x, mask, shift;
23534
23535 retval = operands[0];
23536 mem = operands[1];
23537 val = operands[2];
23538 model = memmodel_base (INTVAL (operands[3]));
23539 mode = GET_MODE (mem);
23540
23541 mask = shift = NULL_RTX;
23542 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23543 {
23544 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23545
23546 /* Shift and mask VAL into position within the word. */
23547 val = convert_modes (SImode, mode, val, 1);
23548 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23549 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23550
23551 /* Prepare to adjust the return value. */
23552 retval = gen_reg_rtx (SImode);
23553 mode = SImode;
23554 }
23555
23556 mem = rs6000_pre_atomic_barrier (mem, model);
23557
23558 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23559 emit_label (XEXP (label, 0));
23560
23561 emit_load_locked (mode, retval, mem);
23562
23563 x = val;
23564 if (mask)
23565 x = rs6000_mask_atomic_subword (retval, val, mask);
23566
23567 cond = gen_reg_rtx (CCmode);
23568 emit_store_conditional (mode, cond, mem, x);
23569
23570 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23571 emit_unlikely_jump (x, label);
23572
23573 rs6000_post_atomic_barrier (model);
23574
23575 if (shift)
23576 rs6000_finish_atomic_subword (operands[0], retval, shift);
23577 }
23578
23579 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23580 to perform. MEM is the memory on which to operate. VAL is the second
23581 operand of the binary operator. BEFORE and AFTER are optional locations to
23582 return the value of MEM either before or after the operation. MODEL_RTX
23583 is a CONST_INT containing the memory model to use. */
23584
23585 void
23586 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23587 rtx orig_before, rtx orig_after, rtx model_rtx)
23588 {
23589 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23590 machine_mode mode = GET_MODE (mem);
23591 machine_mode store_mode = mode;
23592 rtx label, x, cond, mask, shift;
23593 rtx before = orig_before, after = orig_after;
23594
23595 mask = shift = NULL_RTX;
23596 /* On power8, we want to use SImode for the operation while accessing
23597 memory in QI/HImode. On previous systems, do the operation on a full
23598 word and shift/mask to get at the proper byte or halfword. */
23599 if (mode == QImode || mode == HImode)
23600 {
23601 if (TARGET_SYNC_HI_QI)
23602 {
23603 val = convert_modes (SImode, mode, val, 1);
23604
23605 /* Prepare to adjust the return value. */
23606 before = gen_reg_rtx (SImode);
23607 if (after)
23608 after = gen_reg_rtx (SImode);
23609 mode = SImode;
23610 }
23611 else
23612 {
23613 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23614
23615 /* Shift and mask VAL into position within the word. */
23616 val = convert_modes (SImode, mode, val, 1);
23617 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23618 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23619
23620 switch (code)
23621 {
23622 case IOR:
23623 case XOR:
23624 /* We've already zero-extended VAL. That is sufficient to
23625 make certain that it does not affect other bits. */
23626 mask = NULL;
23627 break;
23628
23629 case AND:
23630 /* If we make certain that all of the other bits in VAL are
23631 set, that will be sufficient to not affect other bits. */
23632 x = gen_rtx_NOT (SImode, mask);
23633 x = gen_rtx_IOR (SImode, x, val);
23634 emit_insn (gen_rtx_SET (val, x));
23635 mask = NULL;
23636 break;
23637
23638 case NOT:
23639 case PLUS:
23640 case MINUS:
23641 /* These will all affect bits outside the field and need
23642 adjustment via MASK within the loop. */
23643 break;
23644
23645 default:
23646 gcc_unreachable ();
23647 }
23648
23649 /* Prepare to adjust the return value. */
23650 before = gen_reg_rtx (SImode);
23651 if (after)
23652 after = gen_reg_rtx (SImode);
23653 store_mode = mode = SImode;
23654 }
23655 }
23656
23657 mem = rs6000_pre_atomic_barrier (mem, model);
23658
23659 label = gen_label_rtx ();
23660 emit_label (label);
23661 label = gen_rtx_LABEL_REF (VOIDmode, label);
23662
23663 if (before == NULL_RTX)
23664 before = gen_reg_rtx (mode);
23665
23666 emit_load_locked (mode, before, mem);
23667
23668 if (code == NOT)
23669 {
23670 x = expand_simple_binop (mode, AND, before, val,
23671 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23672 after = expand_simple_unop (mode, NOT, x, after, 1);
23673 }
23674 else
23675 {
23676 after = expand_simple_binop (mode, code, before, val,
23677 after, 1, OPTAB_LIB_WIDEN);
23678 }
23679
23680 x = after;
23681 if (mask)
23682 {
23683 x = expand_simple_binop (SImode, AND, after, mask,
23684 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23685 x = rs6000_mask_atomic_subword (before, x, mask);
23686 }
23687 else if (store_mode != mode)
23688 x = convert_modes (store_mode, mode, x, 1);
23689
23690 cond = gen_reg_rtx (CCmode);
23691 emit_store_conditional (store_mode, cond, mem, x);
23692
23693 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23694 emit_unlikely_jump (x, label);
23695
23696 rs6000_post_atomic_barrier (model);
23697
23698 if (shift)
23699 {
23700 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23701 then do the calculations in a SImode register. */
23702 if (orig_before)
23703 rs6000_finish_atomic_subword (orig_before, before, shift);
23704 if (orig_after)
23705 rs6000_finish_atomic_subword (orig_after, after, shift);
23706 }
23707 else if (store_mode != mode)
23708 {
23709 /* QImode/HImode on machines with lbarx/lharx where we do the native
23710 operation and then do the calculations in a SImode register. */
23711 if (orig_before)
23712 convert_move (orig_before, before, 1);
23713 if (orig_after)
23714 convert_move (orig_after, after, 1);
23715 }
23716 else if (orig_after && after != orig_after)
23717 emit_move_insn (orig_after, after);
23718 }
23719
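/* A sketch of the AND special case above: rather than masking the result
   inside the retry loop, preset every bit of VAL outside the target field
   so the full-word AND cannot disturb the neighbouring bytes.  */
#if 0
static unsigned int
atomic_and_operand_sketch (unsigned int val, unsigned int mask)
{
  /* val is the shifted operand, mask covers the sub-word field.  */
  return ~mask | val;		/* all ones outside the field */
}
#endif
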
23720 /* Emit instructions to move SRC to DST. Called by splitters for
23721 multi-register moves. It will emit at most one instruction for
23722 each register that is accessed; that is, it won't emit li/lis pairs
23723 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23724 register. */
23725
23726 void
23727 rs6000_split_multireg_move (rtx dst, rtx src)
23728 {
23729 /* The register number of the first register being moved. */
23730 int reg;
23731 /* The mode that is to be moved. */
23732 machine_mode mode;
23733 /* The mode that the move is being done in, and its size. */
23734 machine_mode reg_mode;
23735 int reg_mode_size;
23736 /* The number of registers that will be moved. */
23737 int nregs;
23738
23739 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23740 mode = GET_MODE (dst);
23741 nregs = hard_regno_nregs (reg, mode);
23742 if (FP_REGNO_P (reg))
23743 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23744 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23745 else if (ALTIVEC_REGNO_P (reg))
23746 reg_mode = V16QImode;
23747 else
23748 reg_mode = word_mode;
23749 reg_mode_size = GET_MODE_SIZE (reg_mode);
23750
23751 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23752
23753 /* TDmode residing in FP registers is special, since the ISA requires that
23754 the lower-numbered word of a register pair is always the most significant
23755 word, even in little-endian mode. This does not match the usual subreg
23756 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23757 the appropriate constituent registers "by hand" in little-endian mode.
23758
23759 Note we do not need to check for destructive overlap here since TDmode
23760 can only reside in even/odd register pairs. */
23761 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23762 {
23763 rtx p_src, p_dst;
23764 int i;
23765
23766 for (i = 0; i < nregs; i++)
23767 {
23768 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23769 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23770 else
23771 p_src = simplify_gen_subreg (reg_mode, src, mode,
23772 i * reg_mode_size);
23773
23774 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23775 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23776 else
23777 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23778 i * reg_mode_size);
23779
23780 emit_insn (gen_rtx_SET (p_dst, p_src));
23781 }
23782
23783 return;
23784 }
23785
23786 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23787 {
23788 /* Move register range backwards, if we might have destructive
23789 overlap. */
23790 int i;
23791 for (i = nregs - 1; i >= 0; i--)
23792 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23793 i * reg_mode_size),
23794 simplify_gen_subreg (reg_mode, src, mode,
23795 i * reg_mode_size)));
23796 }
23797 else
23798 {
23799 int i;
23800 int j = -1;
23801 bool used_update = false;
23802 rtx restore_basereg = NULL_RTX;
23803
23804 if (MEM_P (src) && INT_REGNO_P (reg))
23805 {
23806 rtx breg;
23807
23808 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23809 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23810 {
23811 rtx delta_rtx;
23812 breg = XEXP (XEXP (src, 0), 0);
23813 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23814 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23815 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23816 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23817 src = replace_equiv_address (src, breg);
23818 }
23819 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23820 {
23821 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23822 {
23823 rtx basereg = XEXP (XEXP (src, 0), 0);
23824 if (TARGET_UPDATE)
23825 {
23826 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23827 emit_insn (gen_rtx_SET (ndst,
23828 gen_rtx_MEM (reg_mode,
23829 XEXP (src, 0))));
23830 used_update = true;
23831 }
23832 else
23833 emit_insn (gen_rtx_SET (basereg,
23834 XEXP (XEXP (src, 0), 1)));
23835 src = replace_equiv_address (src, basereg);
23836 }
23837 else
23838 {
23839 rtx basereg = gen_rtx_REG (Pmode, reg);
23840 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23841 src = replace_equiv_address (src, basereg);
23842 }
23843 }
23844
23845 breg = XEXP (src, 0);
23846 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23847 breg = XEXP (breg, 0);
23848
23849 /* If the base register we are using to address memory is
23850 also a destination reg, then change that register last. */
23851 if (REG_P (breg)
23852 && REGNO (breg) >= REGNO (dst)
23853 && REGNO (breg) < REGNO (dst) + nregs)
23854 j = REGNO (breg) - REGNO (dst);
23855 }
23856 else if (MEM_P (dst) && INT_REGNO_P (reg))
23857 {
23858 rtx breg;
23859
23860 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23861 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23862 {
23863 rtx delta_rtx;
23864 breg = XEXP (XEXP (dst, 0), 0);
23865 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23866 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23867 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23868
23869 /* We have to update the breg before doing the store.
23870 Use store with update, if available. */
23871
23872 if (TARGET_UPDATE)
23873 {
23874 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23875 emit_insn (TARGET_32BIT
23876 ? (TARGET_POWERPC64
23877 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23878 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23879 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23880 used_update = true;
23881 }
23882 else
23883 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23884 dst = replace_equiv_address (dst, breg);
23885 }
23886 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23887 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23888 {
23889 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23890 {
23891 rtx basereg = XEXP (XEXP (dst, 0), 0);
23892 if (TARGET_UPDATE)
23893 {
23894 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23895 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23896 XEXP (dst, 0)),
23897 nsrc));
23898 used_update = true;
23899 }
23900 else
23901 emit_insn (gen_rtx_SET (basereg,
23902 XEXP (XEXP (dst, 0), 1)));
23903 dst = replace_equiv_address (dst, basereg);
23904 }
23905 else
23906 {
23907 rtx basereg = XEXP (XEXP (dst, 0), 0);
23908 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23909 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23910 && REG_P (basereg)
23911 && REG_P (offsetreg)
23912 && REGNO (basereg) != REGNO (offsetreg));
23913 if (REGNO (basereg) == 0)
23914 {
23915 rtx tmp = offsetreg;
23916 offsetreg = basereg;
23917 basereg = tmp;
23918 }
23919 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23920 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23921 dst = replace_equiv_address (dst, basereg);
23922 }
23923 }
23924 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23925 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23926 }
23927
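/* Copy the subwords in rotated order starting just after J, so
that subword J (the one that may overlay the base register) is
moved last. */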
23928 for (i = 0; i < nregs; i++)
23929 {
23930 /* Calculate index to next subword. */
23931 ++j;
23932 if (j == nregs)
23933 j = 0;
23934
23935 /* If the compiler already emitted the move of the first word by
23936 a store with update, there is no need to do anything. */
23937 if (j == 0 && used_update)
23938 continue;
23939
23940 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23941 j * reg_mode_size),
23942 simplify_gen_subreg (reg_mode, src, mode,
23943 j * reg_mode_size)));
23944 }
23945 if (restore_basereg != NULL_RTX)
23946 emit_insn (restore_basereg);
23947 }
23948 }
23949
23950 \f
23951 /* This page contains routines that are used to determine what the
23952 function prologue and epilogue code will do, and to write them out. */
23953
23954 /* Determine whether register REG really needs to be saved. */
23955
23956 static bool
23957 save_reg_p (int reg)
23958 {
23959 /* We need to mark the PIC offset register live for the same conditions
23960 as it is set up, or otherwise it won't be saved before we clobber it. */
23961
23962 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23963 {
23964 /* When calling eh_return, we must return true for all the cases
23965 where conditional_register_usage marks the PIC offset reg
23966 call used. */
23967 if (TARGET_TOC && TARGET_MINIMAL_TOC
23968 && (crtl->calls_eh_return
23969 || df_regs_ever_live_p (reg)
23970 || !constant_pool_empty_p ()))
23971 return true;
23972
23973 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23974 && flag_pic && crtl->uses_pic_offset_table)
23975 return true;
23976 }
23977
23978 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23979 }
23980
23981 /* Return the first fixed-point register that is required to be
23982 saved. 32 if none. */
23983
23984 int
23985 first_reg_to_save (void)
23986 {
23987 int first_reg;
23988
23989 /* Find lowest numbered live register. */
23990 for (first_reg = 13; first_reg <= 31; first_reg++)
23991 if (save_reg_p (first_reg))
23992 break;
23993
23994 return first_reg;
23995 }
23996
23997 /* Similar, for FP regs. */
23998
23999 int
24000 first_fp_reg_to_save (void)
24001 {
24002 int first_reg;
24003
24004 /* Find lowest numbered live register. */
24005 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24006 if (save_reg_p (first_reg))
24007 break;
24008
24009 return first_reg;
24010 }
24011
24012 /* Similar, for AltiVec regs. */
24013
24014 static int
24015 first_altivec_reg_to_save (void)
24016 {
24017 int i;
24018
24019 /* Stack frame remains as is unless we are in AltiVec ABI. */
24020 if (! TARGET_ALTIVEC_ABI)
24021 return LAST_ALTIVEC_REGNO + 1;
24022
24023 /* On Darwin, the unwind routines are compiled without
24024 TARGET_ALTIVEC, and use save_world to save/restore the
24025 altivec registers when necessary. */
24026 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24027 && ! TARGET_ALTIVEC)
24028 return FIRST_ALTIVEC_REGNO + 20;
24029
24030 /* Find lowest numbered live register. */
24031 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24032 if (save_reg_p (i))
24033 break;
24034
24035 return i;
24036 }
24037
24038 /* Return a 32-bit mask of the AltiVec registers we need to set in
24039 VRSAVE. Bit n of the return value is 1 if Vn is live; bit 0 is
24040 the MSB of the 32-bit word. */
24041
24042 static unsigned int
24043 compute_vrsave_mask (void)
24044 {
24045 unsigned int i, mask = 0;
24046
24047 /* On Darwin, the unwind routines are compiled without
24048 TARGET_ALTIVEC, and use save_world to save/restore the
24049 call-saved altivec registers when necessary. */
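/* 0xFFF marks V20 .. V31, the call-saved vector registers that
save_world handles (see compute_save_world_info). */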
24050 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24051 && ! TARGET_ALTIVEC)
24052 mask |= 0xFFF;
24053
24054 /* First, find out if we use _any_ altivec registers. */
24055 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24056 if (df_regs_ever_live_p (i))
24057 mask |= ALTIVEC_REG_BIT (i);
24058
24059 if (mask == 0)
24060 return mask;
24061
24062 /* Next, remove the argument registers from the set. These must
24063 be in the VRSAVE mask set by the caller, so we don't need to add
24064 them in again. More importantly, the mask we compute here is
24065 used to generate CLOBBERs in the set_vrsave insn, and we do not
24066 wish the argument registers to die. */
24067 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24068 mask &= ~ALTIVEC_REG_BIT (i);
24069
24070 /* Similarly, remove the return value from the set. */
24071 {
24072 bool yes = false;
24073 diddle_return_value (is_altivec_return_reg, &yes);
24074 if (yes)
24075 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24076 }
24077
24078 return mask;
24079 }
24080
24081 /* For a very restricted set of circumstances, we can cut down the
24082 size of prologues/epilogues by calling our own save/restore-the-world
24083 routines. */
24084
24085 static void
24086 compute_save_world_info (rs6000_stack_t *info)
24087 {
24088 info->world_save_p = 1;
24089 info->world_save_p
24090 = (WORLD_SAVE_P (info)
24091 && DEFAULT_ABI == ABI_DARWIN
24092 && !cfun->has_nonlocal_label
24093 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24094 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24095 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24096 && info->cr_save_p);
24097
24098 /* This will not work in conjunction with sibcalls. Make sure there
24099 are none. (This check is expensive, but seldom executed.) */
24100 if (WORLD_SAVE_P (info))
24101 {
24102 rtx_insn *insn;
24103 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24104 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24105 {
24106 info->world_save_p = 0;
24107 break;
24108 }
24109 }
24110
24111 if (WORLD_SAVE_P (info))
24112 {
24113 /* Even if we're not touching VRsave, make sure there's room on the
24114 stack for it, if it looks like we're calling SAVE_WORLD, which
24115 will attempt to save it. */
24116 info->vrsave_size = 4;
24117
24118 /* If we are going to save the world, we need to save the link register too. */
24119 info->lr_save_p = 1;
24120
24121 /* "Save" the VRsave register too if we're saving the world. */
24122 if (info->vrsave_mask == 0)
24123 info->vrsave_mask = compute_vrsave_mask ();
24124
24125 /* Because the Darwin register save/restore routines only handle
24126 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24127 check. */
24128 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24129 && (info->first_altivec_reg_save
24130 >= FIRST_SAVED_ALTIVEC_REGNO));
24131 }
24132
24133 return;
24134 }
24135
24136
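/* Callback for diddle_return_value. Set *XYES if REG is the
AltiVec return-value register. */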
24137 static void
24138 is_altivec_return_reg (rtx reg, void *xyes)
24139 {
24140 bool *yes = (bool *) xyes;
24141 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24142 *yes = true;
24143 }
24144
24145 \f
24146 /* Return whether REG is a global user reg or has been specified by
24147 -ffixed-REG. We should not restore these, and so cannot use
24148 lmw or out-of-line restore functions if there are any. We also
24149 can't save them (well, emit frame notes for them), because frame
24150 unwinding during exception handling will restore saved registers. */
24151
24152 static bool
24153 fixed_reg_p (int reg)
24154 {
24155 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24156 backend sets it, overriding anything the user might have given. */
24157 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24158 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24159 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24160 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24161 return false;
24162
24163 return fixed_regs[reg];
24164 }
24165
24166 /* Determine the strategy for saving/restoring registers. */
24167
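/* Flags returned by rs6000_savres_strategy. The SAVE_INLINE_* and
REST_INLINE_* bits select inline code over the out-of-line
save/restore routines, the *_MULTIPLE bits select the store/load
multiple instructions, and the remaining bits describe how the
out-of-line routines interact with the link register. */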
24168 enum {
24169 SAVE_MULTIPLE = 0x1,
24170 SAVE_INLINE_GPRS = 0x2,
24171 SAVE_INLINE_FPRS = 0x4,
24172 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24173 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24174 SAVE_INLINE_VRS = 0x20,
24175 REST_MULTIPLE = 0x100,
24176 REST_INLINE_GPRS = 0x200,
24177 REST_INLINE_FPRS = 0x400,
24178 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24179 REST_INLINE_VRS = 0x1000
24180 };
24181
24182 static int
24183 rs6000_savres_strategy (rs6000_stack_t *info,
24184 bool using_static_chain_p)
24185 {
24186 int strategy = 0;
24187
24188 /* Select between in-line and out-of-line save and restore of regs.
24189 First, all the obvious cases where we don't use out-of-line. */
24190 if (crtl->calls_eh_return
24191 || cfun->machine->ra_need_lr)
24192 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24193 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24194 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24195
24196 if (info->first_gp_reg_save == 32)
24197 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24198
24199 if (info->first_fp_reg_save == 64)
24200 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24201
24202 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24203 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24204
24205 /* Define cutoff for using out-of-line functions to save registers. */
24206 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24207 {
24208 if (!optimize_size)
24209 {
24210 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24211 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24212 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24213 }
24214 else
24215 {
24216 /* Prefer out-of-line restore if it will exit. */
24217 if (info->first_fp_reg_save > 61)
24218 strategy |= SAVE_INLINE_FPRS;
24219 if (info->first_gp_reg_save > 29)
24220 {
24221 if (info->first_fp_reg_save == 64)
24222 strategy |= SAVE_INLINE_GPRS;
24223 else
24224 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24225 }
24226 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24227 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24228 }
24229 }
24230 else if (DEFAULT_ABI == ABI_DARWIN)
24231 {
24232 if (info->first_fp_reg_save > 60)
24233 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24234 if (info->first_gp_reg_save > 29)
24235 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24236 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24237 }
24238 else
24239 {
24240 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24241 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24242 || info->first_fp_reg_save > 61)
24243 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24244 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24245 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24246 }
24247
24248 /* Don't bother to try to save things out-of-line if r11 is occupied
24249 by the static chain. It would require too much fiddling and the
24250 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24251 pointer on Darwin, and AIX uses r1 or r12. */
24252 if (using_static_chain_p
24253 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24254 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24255 | SAVE_INLINE_GPRS
24256 | SAVE_INLINE_VRS);
24257
24258 /* Don't ever restore fixed regs. That means we can't use the
24259 out-of-line register restore functions if a fixed reg is in the
24260 range of regs restored. */
24261 if (!(strategy & REST_INLINE_FPRS))
24262 for (int i = info->first_fp_reg_save; i < 64; i++)
24263 if (fixed_regs[i])
24264 {
24265 strategy |= REST_INLINE_FPRS;
24266 break;
24267 }
24268
24269 /* We can only use the out-of-line routines to restore fprs if we've
24270 saved all the registers from first_fp_reg_save in the prologue.
24271 Otherwise, we risk loading garbage. Of course, if we have saved
24272 out-of-line then we know we haven't skipped any fprs. */
24273 if ((strategy & SAVE_INLINE_FPRS)
24274 && !(strategy & REST_INLINE_FPRS))
24275 for (int i = info->first_fp_reg_save; i < 64; i++)
24276 if (!save_reg_p (i))
24277 {
24278 strategy |= REST_INLINE_FPRS;
24279 break;
24280 }
24281
24282 /* Similarly, for altivec regs. */
24283 if (!(strategy & REST_INLINE_VRS))
24284 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24285 if (fixed_regs[i])
24286 {
24287 strategy |= REST_INLINE_VRS;
24288 break;
24289 }
24290
24291 if ((strategy & SAVE_INLINE_VRS)
24292 && !(strategy & REST_INLINE_VRS))
24293 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24294 if (!save_reg_p (i))
24295 {
24296 strategy |= REST_INLINE_VRS;
24297 break;
24298 }
24299
24300 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24301 saved is an out-of-line save or restore. Set up the value for
24302 the next test (excluding out-of-line gprs). */
24303 bool lr_save_p = (info->lr_save_p
24304 || !(strategy & SAVE_INLINE_FPRS)
24305 || !(strategy & SAVE_INLINE_VRS)
24306 || !(strategy & REST_INLINE_FPRS)
24307 || !(strategy & REST_INLINE_VRS));
24308
24309 if (TARGET_MULTIPLE
24310 && !TARGET_POWERPC64
24311 && info->first_gp_reg_save < 31
24312 && !(flag_shrink_wrap
24313 && flag_shrink_wrap_separate
24314 && optimize_function_for_speed_p (cfun)))
24315 {
24316 int count = 0;
24317 for (int i = info->first_gp_reg_save; i < 32; i++)
24318 if (save_reg_p (i))
24319 count++;
24320
24321 if (count <= 1)
24322 /* Don't use store multiple if only one reg needs to be
24323 saved. This can occur for example when the ABI_V4 pic reg
24324 (r30) needs to be saved to make calls, but r31 is not
24325 used. */
24326 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24327 else
24328 {
24329 /* Prefer store multiple for saves over out-of-line
24330 routines, since the store-multiple instruction will
24331 always be smaller. */
24332 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24333
24334 /* The situation is more complicated with load multiple.
24335 We'd prefer to use the out-of-line routines for restores,
24336 since the "exit" out-of-line routines can handle the
24337 restore of LR and the frame teardown. However, it doesn't
24338 make sense to use the out-of-line routine if that is the
24339 only reason we'd need to save LR, and we can't use the
24340 "exit" out-of-line gpr restore if we have saved some
24341 fprs; in those cases it is advantageous to use load
24342 multiple when available. */
24343 if (info->first_fp_reg_save != 64 || !lr_save_p)
24344 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24345 }
24346 }
24347
24348 /* Using the "exit" out-of-line routine does not improve code size
24349 if it would require LR to be saved and only one or two gprs need
24350 saving. */
24351 else if (!lr_save_p && info->first_gp_reg_save > 29)
24352 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24353
24354 /* Don't ever restore fixed regs. */
24355 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24356 for (int i = info->first_gp_reg_save; i < 32; i++)
24357 if (fixed_reg_p (i))
24358 {
24359 strategy |= REST_INLINE_GPRS;
24360 strategy &= ~REST_MULTIPLE;
24361 break;
24362 }
24363
24364 /* We can only use load multiple or the out-of-line routines to
24365 restore gprs if we've saved all the registers from
24366 first_gp_reg_save. Otherwise, we risk loading garbage.
24367 Of course, if we have saved out-of-line or used stmw then we know
24368 we haven't skipped any gprs. */
24369 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24370 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24371 for (int i = info->first_gp_reg_save; i < 32; i++)
24372 if (!save_reg_p (i))
24373 {
24374 strategy |= REST_INLINE_GPRS;
24375 strategy &= ~REST_MULTIPLE;
24376 break;
24377 }
24378
24379 if (TARGET_ELF && TARGET_64BIT)
24380 {
24381 if (!(strategy & SAVE_INLINE_FPRS))
24382 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24383 else if (!(strategy & SAVE_INLINE_GPRS)
24384 && info->first_fp_reg_save == 64)
24385 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24386 }
24387 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24388 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24389
24390 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24391 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24392
24393 return strategy;
24394 }
24395
24396 /* Calculate the stack information for the current function. This is
24397 complicated by having two separate calling sequences, the AIX calling
24398 sequence and the V.4 calling sequence.
24399
24400 AIX (and Darwin/Mac OS X) stack frames look like:
24401 32-bit 64-bit
24402 SP----> +---------------------------------------+
24403 | back chain to caller | 0 0
24404 +---------------------------------------+
24405 | saved CR | 4 8 (8-11)
24406 +---------------------------------------+
24407 | saved LR | 8 16
24408 +---------------------------------------+
24409 | reserved for compilers | 12 24
24410 +---------------------------------------+
24411 | reserved for binders | 16 32
24412 +---------------------------------------+
24413 | saved TOC pointer | 20 40
24414 +---------------------------------------+
24415 | Parameter save area (+padding*) (P) | 24 48
24416 +---------------------------------------+
24417 | Alloca space (A) | 24+P etc.
24418 +---------------------------------------+
24419 | Local variable space (L) | 24+P+A
24420 +---------------------------------------+
24421 | Float/int conversion temporary (X) | 24+P+A+L
24422 +---------------------------------------+
24423 | Save area for AltiVec registers (W) | 24+P+A+L+X
24424 +---------------------------------------+
24425 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24426 +---------------------------------------+
24427 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24428 +---------------------------------------+
24429 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24430 +---------------------------------------+
24431 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24432 +---------------------------------------+
24433 old SP->| back chain to caller's caller |
24434 +---------------------------------------+
24435
24436 * If the alloca area is present, the parameter save area is
24437 padded so that the former starts 16-byte aligned.
24438
24439 The required alignment for AIX configurations is two words (i.e., 8
24440 or 16 bytes).
24441
24442 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24443
24444 SP----> +---------------------------------------+
24445 | Back chain to caller | 0
24446 +---------------------------------------+
24447 | Save area for CR | 8
24448 +---------------------------------------+
24449 | Saved LR | 16
24450 +---------------------------------------+
24451 | Saved TOC pointer | 24
24452 +---------------------------------------+
24453 | Parameter save area (+padding*) (P) | 32
24454 +---------------------------------------+
24455 | Alloca space (A) | 32+P
24456 +---------------------------------------+
24457 | Local variable space (L) | 32+P+A
24458 +---------------------------------------+
24459 | Save area for AltiVec registers (W) | 32+P+A+L
24460 +---------------------------------------+
24461 | AltiVec alignment padding (Y) | 32+P+A+L+W
24462 +---------------------------------------+
24463 | Save area for GP registers (G) | 32+P+A+L+W+Y
24464 +---------------------------------------+
24465 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24466 +---------------------------------------+
24467 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24468 +---------------------------------------+
24469
24470 * If the alloca area is present, the parameter save area is
24471 padded so that the former starts 16-byte aligned.
24472
24473 V.4 stack frames look like:
24474
24475 SP----> +---------------------------------------+
24476 | back chain to caller | 0
24477 +---------------------------------------+
24478 | caller's saved LR | 4
24479 +---------------------------------------+
24480 | Parameter save area (+padding*) (P) | 8
24481 +---------------------------------------+
24482 | Alloca space (A) | 8+P
24483 +---------------------------------------+
24484 | Varargs save area (V) | 8+P+A
24485 +---------------------------------------+
24486 | Local variable space (L) | 8+P+A+V
24487 +---------------------------------------+
24488 | Float/int conversion temporary (X) | 8+P+A+V+L
24489 +---------------------------------------+
24490 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24491 +---------------------------------------+
24492 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24493 +---------------------------------------+
24494 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24495 +---------------------------------------+
24496 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24497 +---------------------------------------+
24498 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24499 +---------------------------------------+
24500 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24501 +---------------------------------------+
24502 old SP->| back chain to caller's caller |
24503 +---------------------------------------+
24504
24505 * If the alloca area is present and the required alignment is
24506 16 bytes, the parameter save area is padded so that the
24507 alloca area starts 16-byte aligned.
24508
24509 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24510 given. (But note below and in sysv4.h that we require only 8 and
24511 may round up the size of our stack frame anyway. The historical
24512 reason is early versions of powerpc-linux which didn't properly
24513 align the stack at program startup. A happy side-effect is that
24514 -mno-eabi libraries can be used with -meabi programs.)
24515
24516 The EABI configuration defaults to the V.4 layout. However,
24517 the stack alignment requirements may differ. If -mno-eabi is not
24518 given, the required stack alignment is 8 bytes; if -mno-eabi is
24519 given, the required alignment is 16 bytes. (But see V.4 comment
24520 above.) */
24521
24522 #ifndef ABI_STACK_BOUNDARY
24523 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24524 #endif
24525
24526 static rs6000_stack_t *
24527 rs6000_stack_info (void)
24528 {
24529 /* We should never be called for thunks, we are not set up for that. */
24530 gcc_assert (!cfun->is_thunk);
24531
24532 rs6000_stack_t *info = &stack_info;
24533 int reg_size = TARGET_32BIT ? 4 : 8;
24534 int ehrd_size;
24535 int ehcr_size;
24536 int save_align;
24537 int first_gp;
24538 HOST_WIDE_INT non_fixed_size;
24539 bool using_static_chain_p;
24540
24541 if (reload_completed && info->reload_completed)
24542 return info;
24543
24544 memset (info, 0, sizeof (*info));
24545 info->reload_completed = reload_completed;
24546
24547 /* Select which calling sequence. */
24548 info->abi = DEFAULT_ABI;
24549
24550 /* Calculate which registers need to be saved & save area size. */
24551 info->first_gp_reg_save = first_reg_to_save ();
24552 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24553 even if it currently looks like we won't. Reload may need it to
24554 get at a constant; if so, it will have already created a constant
24555 pool entry for it. */
24556 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24557 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24558 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24559 && crtl->uses_const_pool
24560 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24561 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24562 else
24563 first_gp = info->first_gp_reg_save;
24564
24565 info->gp_size = reg_size * (32 - first_gp);
24566
24567 info->first_fp_reg_save = first_fp_reg_to_save ();
24568 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24569
24570 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24571 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24572 - info->first_altivec_reg_save);
24573
24574 /* Does this function call anything? */
24575 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24576
24577 /* Determine if we need to save the condition code registers. */
24578 if (save_reg_p (CR2_REGNO)
24579 || save_reg_p (CR3_REGNO)
24580 || save_reg_p (CR4_REGNO))
24581 {
24582 info->cr_save_p = 1;
24583 if (DEFAULT_ABI == ABI_V4)
24584 info->cr_size = reg_size;
24585 }
24586
24587 /* If the current function calls __builtin_eh_return, then we need
24588 to allocate stack space for registers that will hold data for
24589 the exception handler. */
24590 if (crtl->calls_eh_return)
24591 {
24592 unsigned int i;
24593 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24594 continue;
24595
24596 ehrd_size = i * UNITS_PER_WORD;
24597 }
24598 else
24599 ehrd_size = 0;
24600
24601 /* In the ELFv2 ABI, we also need to allocate space for separate
24602 CR field save areas if the function calls __builtin_eh_return. */
24603 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24604 {
24605 /* This hard-codes that we have three call-saved CR fields. */
24606 ehcr_size = 3 * reg_size;
24607 /* We do *not* use the regular CR save mechanism. */
24608 info->cr_save_p = 0;
24609 }
24610 else
24611 ehcr_size = 0;
24612
24613 /* Determine various sizes. */
24614 info->reg_size = reg_size;
24615 info->fixed_size = RS6000_SAVE_AREA;
24616 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24617 if (cfun->calls_alloca)
24618 info->parm_size =
24619 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24620 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24621 else
24622 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24623 TARGET_ALTIVEC ? 16 : 8);
24624 if (FRAME_GROWS_DOWNWARD)
24625 info->vars_size
24626 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24627 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24628 - (info->fixed_size + info->vars_size + info->parm_size);
24629
24630 if (TARGET_ALTIVEC_ABI)
24631 info->vrsave_mask = compute_vrsave_mask ();
24632
24633 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24634 info->vrsave_size = 4;
24635
24636 compute_save_world_info (info);
24637
24638 /* Calculate the offsets. */
24639 switch (DEFAULT_ABI)
24640 {
24641 case ABI_NONE:
24642 default:
24643 gcc_unreachable ();
24644
24645 case ABI_AIX:
24646 case ABI_ELFv2:
24647 case ABI_DARWIN:
24648 info->fp_save_offset = -info->fp_size;
24649 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24650
24651 if (TARGET_ALTIVEC_ABI)
24652 {
24653 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24654
24655 /* Align stack so vector save area is on a quadword boundary.
24656 The padding goes above the vectors. */
24657 if (info->altivec_size != 0)
24658 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24659
24660 info->altivec_save_offset = info->vrsave_save_offset
24661 - info->altivec_padding_size
24662 - info->altivec_size;
24663 gcc_assert (info->altivec_size == 0
24664 || info->altivec_save_offset % 16 == 0);
24665
24666 /* Adjust for AltiVec case. */
24667 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24668 }
24669 else
24670 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24671
24672 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24673 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24674 info->lr_save_offset = 2*reg_size;
24675 break;
24676
24677 case ABI_V4:
24678 info->fp_save_offset = -info->fp_size;
24679 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24680 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24681
24682 if (TARGET_ALTIVEC_ABI)
24683 {
24684 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24685
24686 /* Align stack so vector save area is on a quadword boundary. */
24687 if (info->altivec_size != 0)
24688 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24689
24690 info->altivec_save_offset = info->vrsave_save_offset
24691 - info->altivec_padding_size
24692 - info->altivec_size;
24693
24694 /* Adjust for AltiVec case. */
24695 info->ehrd_offset = info->altivec_save_offset;
24696 }
24697 else
24698 info->ehrd_offset = info->cr_save_offset;
24699
24700 info->ehrd_offset -= ehrd_size;
24701 info->lr_save_offset = reg_size;
24702 }
24703
24704 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24705 info->save_size = RS6000_ALIGN (info->fp_size
24706 + info->gp_size
24707 + info->altivec_size
24708 + info->altivec_padding_size
24709 + ehrd_size
24710 + ehcr_size
24711 + info->cr_size
24712 + info->vrsave_size,
24713 save_align);
24714
24715 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24716
24717 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24718 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24719
24720 /* Determine if we need to save the link register. */
24721 if (info->calls_p
24722 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24723 && crtl->profile
24724 && !TARGET_PROFILE_KERNEL)
24725 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24726 #ifdef TARGET_RELOCATABLE
24727 || (DEFAULT_ABI == ABI_V4
24728 && (TARGET_RELOCATABLE || flag_pic > 1)
24729 && !constant_pool_empty_p ())
24730 #endif
24731 || rs6000_ra_ever_killed ())
24732 info->lr_save_p = 1;
24733
24734 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24735 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24736 && call_used_regs[STATIC_CHAIN_REGNUM]);
24737 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24738
24739 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24740 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24741 || !(info->savres_strategy & SAVE_INLINE_VRS)
24742 || !(info->savres_strategy & REST_INLINE_GPRS)
24743 || !(info->savres_strategy & REST_INLINE_FPRS)
24744 || !(info->savres_strategy & REST_INLINE_VRS))
24745 info->lr_save_p = 1;
24746
24747 if (info->lr_save_p)
24748 df_set_regs_ever_live (LR_REGNO, true);
24749
24750 /* Determine if we need to allocate any stack frame:
24751
24752 For AIX we need to push the stack if a frame pointer is needed
24753 (because the stack might be dynamically adjusted), if we are
24754 debugging, if we make calls, or if the sum of fp_save, gp_save,
24755 and local variables are more than the space needed to save all
24756 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24757 + 18*8 = 288 (GPR13 reserved).
24758
24759 For V.4 we don't have the stack cushion that AIX uses, but assume
24760 that the debugger can handle stackless frames. */
24761
24762 if (info->calls_p)
24763 info->push_p = 1;
24764
24765 else if (DEFAULT_ABI == ABI_V4)
24766 info->push_p = non_fixed_size != 0;
24767
24768 else if (frame_pointer_needed)
24769 info->push_p = 1;
24770
24771 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24772 info->push_p = 1;
24773
24774 else
24775 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24776
24777 return info;
24778 }
24779
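/* Print the stack information computed by rs6000_stack_info (or
INFO, if non-null) to stderr, for debugging. */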
24780 static void
24781 debug_stack_info (rs6000_stack_t *info)
24782 {
24783 const char *abi_string;
24784
24785 if (! info)
24786 info = rs6000_stack_info ();
24787
24788 fprintf (stderr, "\nStack information for function %s:\n",
24789 ((current_function_decl && DECL_NAME (current_function_decl))
24790 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24791 : "<unknown>"));
24792
24793 switch (info->abi)
24794 {
24795 default: abi_string = "Unknown"; break;
24796 case ABI_NONE: abi_string = "NONE"; break;
24797 case ABI_AIX: abi_string = "AIX"; break;
24798 case ABI_ELFv2: abi_string = "ELFv2"; break;
24799 case ABI_DARWIN: abi_string = "Darwin"; break;
24800 case ABI_V4: abi_string = "V.4"; break;
24801 }
24802
24803 fprintf (stderr, "\tABI = %5s\n", abi_string);
24804
24805 if (TARGET_ALTIVEC_ABI)
24806 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24807
24808 if (info->first_gp_reg_save != 32)
24809 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24810
24811 if (info->first_fp_reg_save != 64)
24812 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24813
24814 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24815 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24816 info->first_altivec_reg_save);
24817
24818 if (info->lr_save_p)
24819 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24820
24821 if (info->cr_save_p)
24822 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24823
24824 if (info->vrsave_mask)
24825 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24826
24827 if (info->push_p)
24828 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24829
24830 if (info->calls_p)
24831 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24832
24833 if (info->gp_size)
24834 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24835
24836 if (info->fp_size)
24837 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24838
24839 if (info->altivec_size)
24840 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24841 info->altivec_save_offset);
24842
24843 if (info->vrsave_size)
24844 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24845 info->vrsave_save_offset);
24846
24847 if (info->lr_save_p)
24848 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24849
24850 if (info->cr_save_p)
24851 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24852
24853 if (info->varargs_save_offset)
24854 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24855
24856 if (info->total_size)
24857 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24858 info->total_size);
24859
24860 if (info->vars_size)
24861 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24862 info->vars_size);
24863
24864 if (info->parm_size)
24865 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24866
24867 if (info->fixed_size)
24868 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24869
24870 if (info->gp_size)
24871 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24872
24873 if (info->fp_size)
24874 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24875
24876 if (info->altivec_size)
24877 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24878
24879 if (info->vrsave_size)
24880 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24881
24882 if (info->altivec_padding_size)
24883 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24884 info->altivec_padding_size);
24885
24886 if (info->cr_size)
24887 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24888
24889 if (info->save_size)
24890 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24891
24892 if (info->reg_size != 4)
24893 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24894
24895 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24896
24897 fprintf (stderr, "\n");
24898 }
24899
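/* Return an rtx for the return address COUNT frames back from the
current function, as for __builtin_return_address. FRAME is the
frame pointer of that frame, supplied by the generic code. */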
24900 rtx
24901 rs6000_return_addr (int count, rtx frame)
24902 {
24903 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24904 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24905 if (count != 0
24906 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24907 {
24908 cfun->machine->ra_needs_full_frame = 1;
24909
24910 if (count == 0)
24911 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24912 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24913 frame = stack_pointer_rtx;
24914 rtx prev_frame_addr = memory_address (Pmode, frame);
24915 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24916 rtx lr_save_off = plus_constant (Pmode,
24917 prev_frame, RETURN_ADDRESS_OFFSET);
24918 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24919 return gen_rtx_MEM (Pmode, lr_save_addr);
24920 }
24921
24922 cfun->machine->ra_need_lr = 1;
24923 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24924 }
24925
24926 /* Say whether a function is a candidate for sibcall handling or not. */
24927
24928 static bool
24929 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24930 {
24931 tree fntype;
24932
24933 /* The sibcall epilogue may clobber the static chain register.
24934 ??? We could work harder and avoid that, but it's probably
24935 not worth the hassle in practice. */
24936 if (CALL_EXPR_STATIC_CHAIN (exp))
24937 return false;
24938
24939 if (decl)
24940 fntype = TREE_TYPE (decl);
24941 else
24942 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24943
24944 /* We can't do it if the called function has more vector parameters
24945 than the current function; there's nowhere to put the VRsave code. */
24946 if (TARGET_ALTIVEC_ABI
24947 && TARGET_ALTIVEC_VRSAVE
24948 && !(decl && decl == current_function_decl))
24949 {
24950 function_args_iterator args_iter;
24951 tree type;
24952 int nvreg = 0;
24953
24954 /* Functions with vector parameters are required to have a
24955 prototype, so the argument type info must be available
24956 here. */
24957 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
24958 if (TREE_CODE (type) == VECTOR_TYPE
24959 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24960 nvreg++;
24961
24962 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
24963 if (TREE_CODE (type) == VECTOR_TYPE
24964 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24965 nvreg--;
24966
24967 if (nvreg > 0)
24968 return false;
24969 }
24970
24971 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24972 functions, because the callee may have a different TOC pointer to
24973 the caller and there's no way to ensure we restore the TOC when
24974 we return. With the secure-plt SYSV ABI we can't make non-local
24975 calls when -fpic/PIC because the plt call stubs use r30. */
24976 if (DEFAULT_ABI == ABI_DARWIN
24977 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24978 && decl
24979 && !DECL_EXTERNAL (decl)
24980 && !DECL_WEAK (decl)
24981 && (*targetm.binds_local_p) (decl))
24982 || (DEFAULT_ABI == ABI_V4
24983 && (!TARGET_SECURE_PLT
24984 || !flag_pic
24985 || (decl
24986 && (*targetm.binds_local_p) (decl)))))
24987 {
24988 tree attr_list = TYPE_ATTRIBUTES (fntype);
24989
24990 if (!lookup_attribute ("longcall", attr_list)
24991 || lookup_attribute ("shortcall", attr_list))
24992 return true;
24993 }
24994
24995 return false;
24996 }
24997
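/* Return 1 if the link register is set somewhere other than in the
prologue/epilogue code, so that it must be saved. */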
24998 static int
24999 rs6000_ra_ever_killed (void)
25000 {
25001 rtx_insn *top;
25002 rtx reg;
25003 rtx_insn *insn;
25004
25005 if (cfun->is_thunk)
25006 return 0;
25007
25008 if (cfun->machine->lr_save_state)
25009 return cfun->machine->lr_save_state - 1;
25010
25011 /* regs_ever_live has LR marked as used if any sibcalls are present,
25012 but this should not force saving and restoring in the
25013 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25014 clobbers LR, so that is inappropriate. */
25015
25016 /* Also, the prologue can generate a store into LR that
25017 doesn't really count, like this:
25018
25019 move LR->R0
25020 bcl to set PIC register
25021 move LR->R31
25022 move R0->LR
25023
25024 When we're called from the epilogue, we need to avoid counting
25025 this as a store. */
25026
25027 push_topmost_sequence ();
25028 top = get_insns ();
25029 pop_topmost_sequence ();
25030 reg = gen_rtx_REG (Pmode, LR_REGNO);
25031
25032 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25033 {
25034 if (INSN_P (insn))
25035 {
25036 if (CALL_P (insn))
25037 {
25038 if (!SIBLING_CALL_P (insn))
25039 return 1;
25040 }
25041 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25042 return 1;
25043 else if (set_of (reg, insn) != NULL_RTX
25044 && !prologue_epilogue_contains (insn))
25045 return 1;
25046 }
25047 }
25048 return 0;
25049 }
25050 \f
25051 /* Emit instructions needed to load the TOC register.
25052 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25053 a constant pool; or for SVR4 -fpic. */
25054
25055 void
25056 rs6000_emit_load_toc_table (int fromprolog)
25057 {
25058 rtx dest;
25059 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25060
25061 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25062 {
25063 char buf[30];
25064 rtx lab, tmp1, tmp2, got;
25065
25066 lab = gen_label_rtx ();
25067 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25068 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25069 if (flag_pic == 2)
25070 {
25071 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25072 need_toc_init = 1;
25073 }
25074 else
25075 got = rs6000_got_sym ();
25076 tmp1 = tmp2 = dest;
25077 if (!fromprolog)
25078 {
25079 tmp1 = gen_reg_rtx (Pmode);
25080 tmp2 = gen_reg_rtx (Pmode);
25081 }
25082 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25083 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25084 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25085 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25086 }
25087 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25088 {
25089 emit_insn (gen_load_toc_v4_pic_si ());
25090 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25091 }
25092 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25093 {
25094 char buf[30];
25095 rtx temp0 = (fromprolog
25096 ? gen_rtx_REG (Pmode, 0)
25097 : gen_reg_rtx (Pmode));
25098
25099 if (fromprolog)
25100 {
25101 rtx symF, symL;
25102
25103 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25104 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25105
25106 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25107 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25108
25109 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25110 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25111 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25112 }
25113 else
25114 {
25115 rtx tocsym, lab;
25116
25117 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25118 need_toc_init = 1;
25119 lab = gen_label_rtx ();
25120 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25121 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25122 if (TARGET_LINK_STACK)
25123 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25124 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25125 }
25126 emit_insn (gen_addsi3 (dest, temp0, dest));
25127 }
25128 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25129 {
25130 /* This is for AIX code running in non-PIC ELF32. */
25131 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25132
25133 need_toc_init = 1;
25134 emit_insn (gen_elf_high (dest, realsym));
25135 emit_insn (gen_elf_low (dest, dest, realsym));
25136 }
25137 else
25138 {
25139 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25140
25141 if (TARGET_32BIT)
25142 emit_insn (gen_load_toc_aix_si (dest));
25143 else
25144 emit_insn (gen_load_toc_aix_di (dest));
25145 }
25146 }
25147
25148 /* Emit instructions to restore the link register after determining where
25149 its value has been stored. */
25150
25151 void
25152 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25153 {
25154 rs6000_stack_t *info = rs6000_stack_info ();
25155 rtx operands[2];
25156
25157 operands[0] = source;
25158 operands[1] = scratch;
25159
25160 if (info->lr_save_p)
25161 {
25162 rtx frame_rtx = stack_pointer_rtx;
25163 HOST_WIDE_INT sp_offset = 0;
25164 rtx tmp;
25165
25166 if (frame_pointer_needed
25167 || cfun->calls_alloca
25168 || info->total_size > 32767)
25169 {
25170 tmp = gen_frame_mem (Pmode, frame_rtx);
25171 emit_move_insn (operands[1], tmp);
25172 frame_rtx = operands[1];
25173 }
25174 else if (info->push_p)
25175 sp_offset = info->total_size;
25176
25177 tmp = plus_constant (Pmode, frame_rtx,
25178 info->lr_save_offset + sp_offset);
25179 tmp = gen_frame_mem (Pmode, tmp);
25180 emit_move_insn (tmp, operands[0]);
25181 }
25182 else
25183 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25184
25185 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25186 state of lr_save_p so any change from here on would be a bug. In
25187 particular, stop rs6000_ra_ever_killed from considering the SET
25188 of lr we may have added just above. */
25189 cfun->machine->lr_save_state = info->lr_save_p + 1;
25190 }
25191
25192 static GTY(()) alias_set_type set = -1;
25193
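/* Return the alias set used for TOC references, creating it on
first use. */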
25194 alias_set_type
25195 get_TOC_alias_set (void)
25196 {
25197 if (set == -1)
25198 set = new_alias_set ();
25199 return set;
25200 }
25201
25202 /* This returns nonzero if the current function uses the TOC. This is
25203 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25204 is generated by the ABI_V4 load_toc_* patterns.
25205 Return 2 instead of 1 if the load_toc_* pattern is in the function
25206 partition that doesn't start the function. */
25207 #if TARGET_ELF
25208 static int
25209 uses_TOC (void)
25210 {
25211 rtx_insn *insn;
25212 int ret = 1;
25213
25214 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25215 {
25216 if (INSN_P (insn))
25217 {
25218 rtx pat = PATTERN (insn);
25219 int i;
25220
25221 if (GET_CODE (pat) == PARALLEL)
25222 for (i = 0; i < XVECLEN (pat, 0); i++)
25223 {
25224 rtx sub = XVECEXP (pat, 0, i);
25225 if (GET_CODE (sub) == USE)
25226 {
25227 sub = XEXP (sub, 0);
25228 if (GET_CODE (sub) == UNSPEC
25229 && XINT (sub, 1) == UNSPEC_TOC)
25230 return ret;
25231 }
25232 }
25233 }
25234 else if (crtl->has_bb_partition
25235 && NOTE_P (insn)
25236 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25237 ret = 2;
25238 }
25239 return 0;
25240 }
25241 #endif
25242
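/* Create a TOC-relative reference to SYMBOL. With -mcmodel=small,
or while we can still create pseudos, this is a bare UNSPEC_TOCREL;
otherwise generate a HIGH/LO_SUM pair, using LARGETOC_REG for the
high part if it is non-null. */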
25243 rtx
25244 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25245 {
25246 rtx tocrel, tocreg, hi;
25247
25248 if (TARGET_DEBUG_ADDR)
25249 {
25250 if (GET_CODE (symbol) == SYMBOL_REF)
25251 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25252 XSTR (symbol, 0));
25253 else
25254 {
25255 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25256 GET_RTX_NAME (GET_CODE (symbol)));
25257 debug_rtx (symbol);
25258 }
25259 }
25260
25261 if (!can_create_pseudo_p ())
25262 df_set_regs_ever_live (TOC_REGISTER, true);
25263
25264 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25265 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25266 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25267 return tocrel;
25268
25269 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25270 if (largetoc_reg != NULL)
25271 {
25272 emit_move_insn (largetoc_reg, hi);
25273 hi = largetoc_reg;
25274 }
25275 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25276 }
25277
25278 /* Issue assembly directives that create a reference to the given DWARF
25279 FRAME_TABLE_LABEL from the current function section. */
25280 void
25281 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25282 {
25283 fprintf (asm_out_file, "\t.ref %s\n",
25284 (* targetm.strip_name_encoding) (frame_table_label));
25285 }
25286 \f
25287 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25288 and the change to the stack pointer. */
25289
25290 static void
25291 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25292 {
25293 rtvec p;
25294 int i;
25295 rtx regs[3];
25296
25297 i = 0;
25298 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25299 if (hard_frame_needed)
25300 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25301 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25302 || (hard_frame_needed
25303 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25304 regs[i++] = fp;
25305
25306 p = rtvec_alloc (i);
25307 while (--i >= 0)
25308 {
25309 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25310 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25311 }
25312
25313 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25314 }
25315
25316 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25317 and set the appropriate attributes for the generated insn. Return the
25318 first insn which adjusts the stack pointer or the last insn before
25319 the stack adjustment loop.
25320
25321 SIZE_INT is used to create the CFI note for the allocation.
25322
25323 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25324 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25325
25326 ORIG_SP contains the backchain value that must be stored at *sp. */
25327
25328 static rtx_insn *
25329 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25330 {
25331 rtx_insn *insn;
25332
25333 rtx size_rtx = GEN_INT (-size_int);
25334 if (size_int > 32767)
25335 {
25336 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25337 /* Need a note here so that try_split doesn't get confused. */
25338 if (get_last_insn () == NULL_RTX)
25339 emit_note (NOTE_INSN_DELETED);
25340 insn = emit_move_insn (tmp_reg, size_rtx);
25341 try_split (PATTERN (insn), insn, 0);
25342 size_rtx = tmp_reg;
25343 }
25344
25345 if (Pmode == SImode)
25346 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25347 stack_pointer_rtx,
25348 size_rtx,
25349 orig_sp));
25350 else
25351 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25352 stack_pointer_rtx,
25353 size_rtx,
25354 orig_sp));
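/* The mov*_update_stack patterns emit a PARALLEL whose first SET
stores the backchain; fetch that MEM so we can mark it non-trapping
and give it the frame alias set. */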
25355 rtx par = PATTERN (insn);
25356 gcc_assert (GET_CODE (par) == PARALLEL);
25357 rtx set = XVECEXP (par, 0, 0);
25358 gcc_assert (GET_CODE (set) == SET);
25359 rtx mem = SET_DEST (set);
25360 gcc_assert (MEM_P (mem));
25361 MEM_NOTRAP_P (mem) = 1;
25362 set_mem_alias_set (mem, get_frame_alias_set ());
25363
25364 RTX_FRAME_RELATED_P (insn) = 1;
25365 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25366 gen_rtx_SET (stack_pointer_rtx,
25367 gen_rtx_PLUS (Pmode,
25368 stack_pointer_rtx,
25369 GEN_INT (-size_int))));
25370
25371 /* Emit a blockage to ensure the allocation/probing insns are
25372 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25373 note for similar reasons. */
25374 if (flag_stack_clash_protection)
25375 {
25376 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25377 emit_insn (gen_blockage ());
25378 }
25379
25380 return insn;
25381 }
25382
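/* Return the probe interval, in bytes, for -fstack-clash-protection. */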
25383 static HOST_WIDE_INT
25384 get_stack_clash_protection_probe_interval (void)
25385 {
25386 return (HOST_WIDE_INT_1U
25387 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25388 }
25389
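/* Return the guard size, in bytes, for -fstack-clash-protection. */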
25390 static HOST_WIDE_INT
25391 get_stack_clash_protection_guard_size (void)
25392 {
25393 return (HOST_WIDE_INT_1U
25394 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25395 }
25396
25397 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25398 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25399
25400 COPY_REG, if non-null, should contain a copy of the original
25401 stack pointer at exit from this function.
25402
25403 This is subtly different from the Ada probing in that it tries hard to
25404 prevent attacks that jump the stack guard. Thus it is never allowed to
25405 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25406 space without a suitable probe. */
25407 static rtx_insn *
25408 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25409 rtx copy_reg)
25410 {
25411 rtx orig_sp = copy_reg;
25412
25413 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25414
25415 /* Round the size down to a multiple of PROBE_INTERVAL. */
25416 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25417
25418 /* If explicitly requested,
25419 or the rounded size is not the same as the original size,
25420 or the rounded size is greater than a page,
25421 then we will need a copy of the original stack pointer. */
25422 if (rounded_size != orig_size
25423 || rounded_size > probe_interval
25424 || copy_reg)
25425 {
25426 /* If the caller did not request a copy of the incoming stack
25427 pointer, then we use r0 to hold the copy. */
25428 if (!copy_reg)
25429 orig_sp = gen_rtx_REG (Pmode, 0);
25430 emit_move_insn (orig_sp, stack_pointer_rtx);
25431 }
25432
25433 /* There are three cases here.
25434
25435 One is a single probe, which is the most common and the most
25436 efficient, as it does not need a copy of the original stack
25437 pointer if there are no residuals.
25438
25439 Second is unrolled allocation/probes, which we use if there are
25440 just a few of them. It needs to save the original stack pointer into a
25441 temporary for use as a source register in the allocation/probe.
25442
25443 Last is a loop. This is the most uncommon case and least efficient. */
25444 rtx_insn *retval = NULL;
25445 if (rounded_size == probe_interval)
25446 {
25447 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25448
25449 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25450 }
25451 else if (rounded_size <= 8 * probe_interval)
25452 {
25453 /* The ABI requires using the store with update insns to allocate
25454 space and store the backchain into the stack.
25455
25456 So we save the current stack pointer into a temporary, then
25457 emit the store-with-update insns to store the saved stack pointer
25458 into the right location in each new page. */
25459 for (int i = 0; i < rounded_size; i += probe_interval)
25460 {
25461 rtx_insn *insn
25462 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25463
25464 /* Save the first stack adjustment in RETVAL. */
25465 if (i == 0)
25466 retval = insn;
25467 }
25468
25469 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25470 }
25471 else
25472 {
25473 /* Compute the ending address. */
25474 rtx end_addr
25475 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25476 rtx rs = GEN_INT (-rounded_size);
25477 rtx_insn *insn;
25478 if (add_operand (rs, Pmode))
25479 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25480 else
25481 {
25482 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25483 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25484 stack_pointer_rtx));
25485 /* Describe the effect of INSN to the CFI engine. */
25486 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25487 gen_rtx_SET (end_addr,
25488 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25489 rs)));
25490 }
25491 RTX_FRAME_RELATED_P (insn) = 1;
25492
25493 /* Emit the loop. */
25494 if (TARGET_64BIT)
25495 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25496 stack_pointer_rtx, orig_sp,
25497 end_addr));
25498 else
25499 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25500 stack_pointer_rtx, orig_sp,
25501 end_addr));
25502 RTX_FRAME_RELATED_P (retval) = 1;
25503 /* Describe the effect of INSN to the CFI engine. */
25504 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25505 gen_rtx_SET (stack_pointer_rtx, end_addr));
25506
25507 /* Emit a blockage to ensure the allocation/probing insns are
25508 not optimized, combined, removed, etc. Other cases handle this
25509 within their call to rs6000_emit_allocate_stack_1. */
25510 emit_insn (gen_blockage ());
25511
25512 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25513 }
25514
25515 if (orig_size != rounded_size)
25516 {
25517 /* Allocate (and implicitly probe) any residual space. */
25518 HOST_WIDE_INT residual = orig_size - rounded_size;
25519
25520 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25521
25522 /* If the residual was the only allocation, then we can return the
25523 allocating insn. */
25524 if (!retval)
25525 retval = insn;
25526 }
25527
25528 return retval;
25529 }
25530
25531 /* Emit the correct code for allocating stack space, as insns.
25532 If COPY_REG, make sure a copy of the old frame is left there.
25533 The generated code may use hard register 0 as a temporary. */
25534
25535 static rtx_insn *
25536 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25537 {
25538 rtx_insn *insn;
25539 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25540 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25541 rtx todec = gen_int_mode (-size, Pmode);
25542
25543 if (INTVAL (todec) != -size)
25544 {
25545 warning (0, "stack frame too large");
25546 emit_insn (gen_trap ());
25547 return 0;
25548 }
25549
25550 if (crtl->limit_stack)
25551 {
25552 if (REG_P (stack_limit_rtx)
25553 && REGNO (stack_limit_rtx) > 1
25554 && REGNO (stack_limit_rtx) <= 31)
25555 {
25556 rtx_insn *insn
25557 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25558 gcc_assert (insn);
25559 emit_insn (insn);
25560 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25561 }
25562 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
25563 && TARGET_32BIT
25564 && DEFAULT_ABI == ABI_V4
25565 && !flag_pic)
25566 {
25567 rtx toload = gen_rtx_CONST (VOIDmode,
25568 gen_rtx_PLUS (Pmode,
25569 stack_limit_rtx,
25570 GEN_INT (size)));
25571
25572 emit_insn (gen_elf_high (tmp_reg, toload));
25573 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25574 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25575 const0_rtx));
25576 }
25577 else
25578 warning (0, "stack limit expression is not supported");
25579 }
25580
25581 if (flag_stack_clash_protection)
25582 {
25583 if (size < get_stack_clash_protection_guard_size ())
25584 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25585 else
25586 {
25587 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25588 copy_reg);
25589
25590 /* If we asked for a copy with an offset, then we still need to add
25591 in the offset. */
25592 if (copy_reg && copy_off)
25593 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25594 return insn;
25595 }
25596 }
25597
25598 if (copy_reg)
25599 {
25600 if (copy_off != 0)
25601 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25602 else
25603 emit_move_insn (copy_reg, stack_reg);
25604 }
25605
25606 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25607 it now and set the alias set/attributes. The gen_*_update calls
25608 made by rs6000_emit_allocate_stack_1 will generate a PARALLEL
25609 with the MEM set being the first operation. */
25610 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25611 return insn;
25612 }
25613
25614 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25615
25616 #if PROBE_INTERVAL > 32768
25617 #error Cannot use indexed addressing mode for stack probing
25618 #endif
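/* The probes emitted below address the stack with 16-bit signed D-form
   displacements (e.g. "stw 0,-4096(1)"), which is why the interval must
   not exceed 32768 bytes.  */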
25619
25620 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25621 inclusive. These are offsets from the current stack pointer. */
25622
25623 static void
25624 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25625 {
25626 /* See if we have a constant small number of probes to generate. If so,
25627 that's the easy case. */
25628 if (first + size <= 32768)
25629 {
25630 HOST_WIDE_INT i;
25631
25632 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25633 it exceeds SIZE. If only one probe is needed, this will not
25634 generate any code. Then probe at FIRST + SIZE. */
25635 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25636 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25637 -(first + i)));
25638
25639 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25640 -(first + size)));
25641 }
25642
25643 /* Otherwise, do the same as above, but in a loop. Note that we must be
25644 extra careful with variables wrapping around because we might be at
25645 the very top (or the very bottom) of the address space and we have
25646 to be able to handle this case properly; in particular, we use an
25647 equality test for the loop condition. */
25648 else
25649 {
25650 HOST_WIDE_INT rounded_size;
25651 rtx r12 = gen_rtx_REG (Pmode, 12);
25652 rtx r0 = gen_rtx_REG (Pmode, 0);
25653
25654 /* Sanity check for the addressing mode we're going to use. */
25655 gcc_assert (first <= 32768);
25656
25657 /* Step 1: round SIZE to the previous multiple of the interval. */
25658
25659 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25660
25661
25662 /* Step 2: compute initial and final value of the loop counter. */
25663
25664 /* TEST_ADDR = SP + FIRST. */
25665 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25666 -first)));
25667
25668 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25669 if (rounded_size > 32768)
25670 {
25671 emit_move_insn (r0, GEN_INT (-rounded_size));
25672 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25673 }
25674 else
25675 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25676 -rounded_size)));
25677
25678
25679 /* Step 3: the loop
25680
25681 do
25682 {
25683 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25684 probe at TEST_ADDR
25685 }
25686 while (TEST_ADDR != LAST_ADDR)
25687
25688 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25689 until N * PROBE_INTERVAL is equal to ROUNDED_SIZE. */
25690
25691 if (TARGET_64BIT)
25692 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25693 else
25694 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25695
25696
25697 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25698 that SIZE is equal to ROUNDED_SIZE. */
25699
25700 if (size != rounded_size)
25701 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25702 }
25703 }
25704
25705 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25706 addresses, not offsets. */
25707
25708 static const char *
25709 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25710 {
25711 static int labelno = 0;
25712 char loop_lab[32];
25713 rtx xops[2];
25714
25715 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25716
25717 /* Loop. */
25718 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25719
25720 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25721 xops[0] = reg1;
25722 xops[1] = GEN_INT (-PROBE_INTERVAL);
25723 output_asm_insn ("addi %0,%0,%1", xops);
25724
25725 /* Probe at TEST_ADDR. */
25726 xops[1] = gen_rtx_REG (Pmode, 0);
25727 output_asm_insn ("stw %1,0(%0)", xops);
25728
25729 /* Test if TEST_ADDR == LAST_ADDR. */
25730 xops[1] = reg2;
25731 if (TARGET_64BIT)
25732 output_asm_insn ("cmpd 0,%0,%1", xops);
25733 else
25734 output_asm_insn ("cmpw 0,%0,%1", xops);
25735
25736 /* Branch. */
25737 fputs ("\tbne 0,", asm_out_file);
25738 assemble_name_raw (asm_out_file, loop_lab);
25739 fputc ('\n', asm_out_file);
25740
25741 return "";
25742 }
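/* A sketch of the loop this prints on a 32-bit ELF target, assuming
   REG1 is r12, REG2 is r0 and the default 4 KiB PROBE_INTERVAL (the
   label spelling is target-dependent):

       .LPSRL0:
               addi 12,12,-4096
               stw 0,0(12)
               cmpw 0,12,0
               bne 0,.LPSRL0  */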
25743
25744 /* This function is called when rs6000_frame_related is processing
25745 SETs within a PARALLEL, and returns whether the REGNO save ought to
25746 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25747 for out-of-line register save functions, store multiple, and the
25748 Darwin world_save. They may contain registers that don't really
25749 need saving. */
25750
25751 static bool
25752 interesting_frame_related_regno (unsigned int regno)
25753 {
25754 /* Saves apparently of r0 are in fact saves of LR. It doesn't make
25755 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25756 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25757 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25758 as frame related. */
25759 if (regno == 0)
25760 return true;
25761 /* If we see CR2 then we are here on a Darwin world save. Saves of
25762 CR2 signify the whole CR is being saved. This is a long-standing
25763 ABI wart fixed by ELFv2. As with r0/LR, there is no need to check
25764 that CR needs to be saved. */
25765 if (regno == CR2_REGNO)
25766 return true;
25767 /* Omit frame info for any user-defined global regs. If frame info
25768 is supplied for them, frame unwinding will restore a user reg.
25769 Also omit frame info for any reg we don't need to save, as that
25770 bloats frame info and can cause problems with shrink wrapping.
25771 Since global regs won't be seen as needing to be saved, both of
25772 these conditions are covered by save_reg_p. */
25773 return save_reg_p (regno);
25774 }
25775
25776 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25777 addresses, not offsets.
25778
25779 REG2 contains the backchain that must be stored into *sp at each allocation.
25780
25781 This is subtly different from the Ada probing above in that it tries hard
25782 to prevent attacks that jump the stack guard. Thus, it is never allowed
25783 to allocate more than PROBE_INTERVAL bytes of stack space without a
25784 suitable probe. */
25785
25786 static const char *
25787 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25788 {
25789 static int labelno = 0;
25790 char loop_lab[32];
25791 rtx xops[3];
25792
25793 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25794
25795 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25796
25797 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25798
25799 /* This allocates and probes. */
25800 xops[0] = reg1;
25801 xops[1] = reg2;
25802 xops[2] = GEN_INT (-probe_interval);
25803 if (TARGET_64BIT)
25804 output_asm_insn ("stdu %1,%2(%0)", xops);
25805 else
25806 output_asm_insn ("stwu %1,%2(%0)", xops);
25807
25808 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25809 xops[0] = reg1;
25810 xops[1] = reg3;
25811 if (TARGET_64BIT)
25812 output_asm_insn ("cmpd 0,%0,%1", xops);
25813 else
25814 output_asm_insn ("cmpw 0,%0,%1", xops);
25815
25816 fputs ("\tbne 0,", asm_out_file);
25817 assemble_name_raw (asm_out_file, loop_lab);
25818 fputc ('\n', asm_out_file);
25819
25820 return "";
25821 }
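/* A sketch of the loop this prints on a 64-bit target, assuming REG1 is
   the stack pointer, REG2 (the backchain) is r0, REG3 is r12, and a
   4 KiB probe interval (illustrative defaults):

       .LPSRL0:
               stdu 0,-4096(1)
               cmpd 0,1,12
               bne 0,.LPSRL0

   Every stdu both allocates one interval and stores the backchain, so
   no interval is skipped without being written.  */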
25822
25823 /* Wrapper around the output_probe_stack_range routines. */
25824 const char *
25825 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25826 {
25827 if (flag_stack_clash_protection)
25828 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25829 else
25830 return output_probe_stack_range_1 (reg1, reg3);
25831 }
25832
25833 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25834 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25835 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25836 deduce these equivalences by itself so it wasn't necessary to hold
25837 its hand so much. Don't be tempted to always supply d2_f_d_e with
25838 the actual CFA register, i.e. r31 when we are using a hard frame
25839 pointer. That fails when saving regs off r1, and sched moves the
25840 r31 setup past the reg saves. */
25841
25842 static rtx_insn *
25843 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25844 rtx reg2, rtx repl2)
25845 {
25846 rtx repl;
25847
25848 if (REGNO (reg) == STACK_POINTER_REGNUM)
25849 {
25850 gcc_checking_assert (val == 0);
25851 repl = NULL_RTX;
25852 }
25853 else
25854 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25855 GEN_INT (val));
25856
25857 rtx pat = PATTERN (insn);
25858 if (!repl && !reg2)
25859 {
25860 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25861 if (GET_CODE (pat) == PARALLEL)
25862 for (int i = 0; i < XVECLEN (pat, 0); i++)
25863 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25864 {
25865 rtx set = XVECEXP (pat, 0, i);
25866
25867 if (!REG_P (SET_SRC (set))
25868 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25869 RTX_FRAME_RELATED_P (set) = 1;
25870 }
25871 RTX_FRAME_RELATED_P (insn) = 1;
25872 return insn;
25873 }
25874
25875 /* We expect that 'pat' is either a SET or a PARALLEL containing
25876 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25877 are important so they all have to be marked RTX_FRAME_RELATED_P.
25878 Call simplify_replace_rtx on the SETs rather than the whole insn
25879 so as to leave the other stuff alone (for example USE of r12). */
25880
25881 set_used_flags (pat);
25882 if (GET_CODE (pat) == SET)
25883 {
25884 if (repl)
25885 pat = simplify_replace_rtx (pat, reg, repl);
25886 if (reg2)
25887 pat = simplify_replace_rtx (pat, reg2, repl2);
25888 }
25889 else if (GET_CODE (pat) == PARALLEL)
25890 {
25891 pat = shallow_copy_rtx (pat);
25892 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25893
25894 for (int i = 0; i < XVECLEN (pat, 0); i++)
25895 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25896 {
25897 rtx set = XVECEXP (pat, 0, i);
25898
25899 if (repl)
25900 set = simplify_replace_rtx (set, reg, repl);
25901 if (reg2)
25902 set = simplify_replace_rtx (set, reg2, repl2);
25903 XVECEXP (pat, 0, i) = set;
25904
25905 if (!REG_P (SET_SRC (set))
25906 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25907 RTX_FRAME_RELATED_P (set) = 1;
25908 }
25909 }
25910 else
25911 gcc_unreachable ();
25912
25913 RTX_FRAME_RELATED_P (insn) = 1;
25914 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25915
25916 return insn;
25917 }
25918
25919 /* Returns an insn that has a vrsave set operation with the
25920 appropriate CLOBBERs. */
25921
25922 static rtx
25923 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25924 {
25925 int nclobs, i;
25926 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25927 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25928
25929 clobs[0]
25930 = gen_rtx_SET (vrsave,
25931 gen_rtx_UNSPEC_VOLATILE (SImode,
25932 gen_rtvec (2, reg, vrsave),
25933 UNSPECV_SET_VRSAVE));
25934
25935 nclobs = 1;
25936
25937 /* We need to clobber the registers in the mask so the scheduler
25938 does not move sets to VRSAVE before sets of AltiVec registers.
25939
25940 However, if the function receives nonlocal gotos, reload will set
25941 all call saved registers live. We will end up with:
25942
25943 (set (reg 999) (mem))
25944 (parallel [ (set (reg vrsave) (unspec blah))
25945 (clobber (reg 999))])
25946
25947 The clobber will cause the store into reg 999 to be dead, and
25948 flow will attempt to delete an epilogue insn. In this case, we
25949 need an unspec use/set of the register. */
25950
25951 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25952 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25953 {
25954 if (!epiloguep || call_used_regs [i])
25955 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
25956 else
25957 {
25958 rtx reg = gen_rtx_REG (V4SImode, i);
25959
25960 clobs[nclobs++]
25961 = gen_rtx_SET (reg,
25962 gen_rtx_UNSPEC (V4SImode,
25963 gen_rtvec (1, reg), 27));
25964 }
25965 }
25966
25967 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25968
25969 for (i = 0; i < nclobs; ++i)
25970 XVECEXP (insn, 0, i) = clobs[i];
25971
25972 return insn;
25973 }
25974
25975 static rtx
25976 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25977 {
25978 rtx addr, mem;
25979
25980 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25981 mem = gen_frame_mem (GET_MODE (reg), addr);
25982 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25983 }
25984
25985 static rtx
25986 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25987 {
25988 return gen_frame_set (reg, frame_reg, offset, false);
25989 }
25990
25991 static rtx
25992 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25993 {
25994 return gen_frame_set (reg, frame_reg, offset, true);
25995 }
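/* For example, given REG = (reg:DI 31) and FRAME_REG = (reg:DI 1),
   gen_frame_store (reg, frame_reg, -8) builds
   (set (mem:DI (plus:DI (reg:DI 1) (const_int -8))) (reg:DI 31)),
   with the frame alias set attached by gen_frame_mem (modes shown are
   illustrative).  */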
25996
25997 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25998 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25999
26000 static rtx_insn *
26001 emit_frame_save (rtx frame_reg, machine_mode mode,
26002 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26003 {
26004 rtx reg;
26005
26006 /* Reject modes that would need register-indexed addressing here. */
26007 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26008 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26009
26010 reg = gen_rtx_REG (mode, regno);
26011 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26012 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26013 NULL_RTX, NULL_RTX);
26014 }
26015
26016 /* Emit an offset memory reference suitable for a frame store, while
26017 converting to a valid addressing mode. */
26018
26019 static rtx
26020 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26021 {
26022 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26023 }
26024
26025 #ifndef TARGET_FIX_AND_CONTINUE
26026 #define TARGET_FIX_AND_CONTINUE 0
26027 #endif
26028
26029 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
26030 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26031 #define LAST_SAVRES_REGISTER 31
26032 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26033
26034 enum {
26035 SAVRES_LR = 0x1,
26036 SAVRES_SAVE = 0x2,
26037 SAVRES_REG = 0x0c,
26038 SAVRES_GPR = 0,
26039 SAVRES_FPR = 4,
26040 SAVRES_VR = 8
26041 };
26042
26043 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26044
26045 /* Temporary holding space for an out-of-line register save/restore
26046 routine name. */
26047 static char savres_routine_name[30];
26048
26049 /* Return the name for an out-of-line register save/restore routine.
26050 SEL selects between saving and restoring, and the register class. */
26051
26052 static char *
26053 rs6000_savres_routine_name (int regno, int sel)
26054 {
26055 const char *prefix = "";
26056 const char *suffix = "";
26057
26058 /* Different targets are supposed to define
26059 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26060 routine name could be defined with:
26061
26062 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26063
26064 This is a nice idea in principle, but in practice, things are
26065 complicated in several ways:
26066
26067 - ELF targets have save/restore routines for GPRs.
26068
26069 - PPC64 ELF targets have routines for save/restore of GPRs that
26070 differ in what they do with the link register, so having a set
26071 prefix doesn't work. (We only use one of the save routines at
26072 the moment, though.)
26073
26074 - PPC32 elf targets have "exit" versions of the restore routines
26075 that restore the link register and can save some extra space.
26076 These require an extra suffix. (There are also "tail" versions
26077 of the restore routines and "GOT" versions of the save routines,
26078 but we don't generate those at present. Same problems apply,
26079 though.)
26080
26081 We deal with all this by synthesizing our own prefix/suffix and
26082 using that for the simple sprintf call shown above. */
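/* For example, the 32-bit SVR4 case below synthesizes names such as
   "_restgpr_29_x" (restore r29..r31, then return) or "_savefpr_14",
   while the AIX/ELFv2 case yields e.g. "_savegpr0_14" and
   "_restgpr1_29".  */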
26083 if (DEFAULT_ABI == ABI_V4)
26084 {
26085 if (TARGET_64BIT)
26086 goto aix_names;
26087
26088 if ((sel & SAVRES_REG) == SAVRES_GPR)
26089 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26090 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26091 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26092 else if ((sel & SAVRES_REG) == SAVRES_VR)
26093 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26094 else
26095 abort ();
26096
26097 if ((sel & SAVRES_LR))
26098 suffix = "_x";
26099 }
26100 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26101 {
26102 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26103 /* No out-of-line save/restore routines for GPRs on AIX. */
26104 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26105 #endif
26106
26107 aix_names:
26108 if ((sel & SAVRES_REG) == SAVRES_GPR)
26109 prefix = ((sel & SAVRES_SAVE)
26110 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26111 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26112 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26113 {
26114 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26115 if ((sel & SAVRES_LR))
26116 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26117 else
26118 #endif
26119 {
26120 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26121 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26122 }
26123 }
26124 else if ((sel & SAVRES_REG) == SAVRES_VR)
26125 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26126 else
26127 abort ();
26128 }
26129
26130 if (DEFAULT_ABI == ABI_DARWIN)
26131 {
26132 /* The Darwin approach is (slightly) different, in order to be
26133 compatible with code generated by the system toolchain. There is a
26134 single symbol for the start of the save sequence, and the code here
26135 embeds an offset into that code on the basis of the first register
26136 to be saved. */
26137 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26138 if ((sel & SAVRES_REG) == SAVRES_GPR)
26139 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26140 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26141 (regno - 13) * 4, prefix, regno);
26142 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26143 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26144 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26145 else if ((sel & SAVRES_REG) == SAVRES_VR)
26146 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26147 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26148 else
26149 abort ();
26150 }
26151 else
26152 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26153
26154 return savres_routine_name;
26155 }
26156
26157 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26158 SEL selects between saving and restoring, and the register class. */
26159
26160 static rtx
26161 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26162 {
26163 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26164 ? info->first_gp_reg_save
26165 : (sel & SAVRES_REG) == SAVRES_FPR
26166 ? info->first_fp_reg_save - 32
26167 : (sel & SAVRES_REG) == SAVRES_VR
26168 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26169 : -1);
26170 rtx sym;
26171 int select = sel;
26172
26173 /* Don't generate bogus routine names. */
26174 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26175 && regno <= LAST_SAVRES_REGISTER
26176 && select >= 0 && select <= 12);
26177
26178 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26179
26180 if (sym == NULL)
26181 {
26182 char *name;
26183
26184 name = rs6000_savres_routine_name (regno, sel);
26185
26186 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26187 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26188 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26189 }
26190
26191 return sym;
26192 }
26193
26194 /* Emit a sequence of insns, including a stack tie if needed, for
26195 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26196 reset the stack pointer, but move the base of the frame into
26197 reg UPDT_REGNO for use by out-of-line register restore routines. */
26198
26199 static rtx
26200 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26201 unsigned updt_regno)
26202 {
26203 /* If there is nothing to do, don't do anything. */
26204 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26205 return NULL_RTX;
26206
26207 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26208
26209 /* This blockage is needed so that sched doesn't decide to move
26210 the sp change before the register restores. */
26211 if (DEFAULT_ABI == ABI_V4)
26212 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26213 GEN_INT (frame_off)));
26214
26215 /* If we are restoring registers out-of-line, we will be using the
26216 "exit" variants of the restore routines, which will reset the
26217 stack for us. But we do need to point updt_reg into the
26218 right place for those routines. */
26219 if (frame_off != 0)
26220 return emit_insn (gen_add3_insn (updt_reg_rtx,
26221 frame_reg_rtx, GEN_INT (frame_off)));
26222 else
26223 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26226 }
26227
26228 /* Return the register number used as a pointer by out-of-line
26229 save/restore functions. */
26230
26231 static inline unsigned
26232 ptr_regno_for_savres (int sel)
26233 {
26234 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26235 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26236 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26237 }
26238
26239 /* Construct a parallel rtx describing the effect of a call to an
26240 out-of-line register save/restore routine, and emit the insn
26241 or jump_insn as appropriate. */
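/* In order, the PARALLEL built below contains: a return rtx (only for
   restores that also restore LR), a clobber of LR, a USE of the
   routine's SYMBOL_REF, a USE of the pointer register (for vector
   routines, a clobber of it plus a USE of r0, since those use [reg+reg]
   addressing), one frame load/store per register, and finally the LR
   save-slot store for the save-with-LR case.  */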
26242
26243 static rtx_insn *
26244 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26245 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26246 machine_mode reg_mode, int sel)
26247 {
26248 int i;
26249 int offset, start_reg, end_reg, n_regs, use_reg;
26250 int reg_size = GET_MODE_SIZE (reg_mode);
26251 rtx sym;
26252 rtvec p;
26253 rtx par;
26254 rtx_insn *insn;
26255
26256 offset = 0;
26257 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26258 ? info->first_gp_reg_save
26259 : (sel & SAVRES_REG) == SAVRES_FPR
26260 ? info->first_fp_reg_save
26261 : (sel & SAVRES_REG) == SAVRES_VR
26262 ? info->first_altivec_reg_save
26263 : -1);
26264 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26265 ? 32
26266 : (sel & SAVRES_REG) == SAVRES_FPR
26267 ? 64
26268 : (sel & SAVRES_REG) == SAVRES_VR
26269 ? LAST_ALTIVEC_REGNO + 1
26270 : -1);
26271 n_regs = end_reg - start_reg;
26272 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26273 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26274 + n_regs);
26275
26276 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26277 RTVEC_ELT (p, offset++) = ret_rtx;
26278
26279 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26280
26281 sym = rs6000_savres_routine_sym (info, sel);
26282 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26283
26284 use_reg = ptr_regno_for_savres (sel);
26285 if ((sel & SAVRES_REG) == SAVRES_VR)
26286 {
26287 /* Vector regs are saved/restored using [reg+reg] addressing. */
26288 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26289 RTVEC_ELT (p, offset++)
26290 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26291 }
26292 else
26293 RTVEC_ELT (p, offset++)
26294 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26295
26296 for (i = 0; i < end_reg - start_reg; i++)
26297 RTVEC_ELT (p, i + offset)
26298 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26299 frame_reg_rtx, save_area_offset + reg_size * i,
26300 (sel & SAVRES_SAVE) != 0);
26301
26302 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26303 RTVEC_ELT (p, i + offset)
26304 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26305
26306 par = gen_rtx_PARALLEL (VOIDmode, p);
26307
26308 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26309 {
26310 insn = emit_jump_insn (par);
26311 JUMP_LABEL (insn) = ret_rtx;
26312 }
26313 else
26314 insn = emit_insn (par);
26315 return insn;
26316 }
26317
26318 /* Emit prologue code to store CR fields that need to be saved into REG. This
26319 function should only be called when moving the non-volatile CRs to REG; it
26320 is not a general-purpose routine to move the entire set of CRs to REG.
26321 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26322 volatile CRs. */
26323
26324 static void
26325 rs6000_emit_prologue_move_from_cr (rtx reg)
26326 {
26327 /* Only the ELFv2 ABI lets us store just the selected fields. */
26328 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26329 {
26330 int i, cr_reg[8], count = 0;
26331
26332 /* Collect CR fields that must be saved. */
26333 for (i = 0; i < 8; i++)
26334 if (save_reg_p (CR0_REGNO + i))
26335 cr_reg[count++] = i;
26336
26337 /* If it's just a single one, use mfcrf. */
26338 if (count == 1)
26339 {
26340 rtvec p = rtvec_alloc (1);
26341 rtvec r = rtvec_alloc (2);
26342 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26343 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26344 RTVEC_ELT (p, 0)
26345 = gen_rtx_SET (reg,
26346 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26347
26348 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26349 return;
26350 }
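/* In the single-field case above, e.g. CR2 alone, cr_reg[0] == 2 and
   the field mask is 1 << (7 - 2) == 0x20, selecting just CR2
   (illustrative).  */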
26351
26352 /* ??? It might be better to handle count == 2 / 3 cases here
26353 as well, using logical operations to combine the values. */
26354 }
26355
26356 emit_insn (gen_prologue_movesi_from_cr (reg));
26357 }
26358
26359 /* Return whether the split-stack arg pointer (r12) is used. */
26360
26361 static bool
26362 split_stack_arg_pointer_used_p (void)
26363 {
26364 /* If the pseudo holding the arg pointer is no longer a pseudo,
26365 then the arg pointer is used. */
26366 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26367 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26368 || (REGNO (cfun->machine->split_stack_arg_pointer)
26369 < FIRST_PSEUDO_REGISTER)))
26370 return true;
26371
26372 /* Unfortunately we also need to do some code scanning, since
26373 r12 may have been substituted for the pseudo. */
26374 rtx_insn *insn;
26375 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26376 FOR_BB_INSNS (bb, insn)
26377 if (NONDEBUG_INSN_P (insn))
26378 {
26379 /* A call destroys r12. */
26380 if (CALL_P (insn))
26381 return false;
26382
26383 df_ref use;
26384 FOR_EACH_INSN_USE (use, insn)
26385 {
26386 rtx x = DF_REF_REG (use);
26387 if (REG_P (x) && REGNO (x) == 12)
26388 return true;
26389 }
26390 df_ref def;
26391 FOR_EACH_INSN_DEF (def, insn)
26392 {
26393 rtx x = DF_REF_REG (def);
26394 if (REG_P (x) && REGNO (x) == 12)
26395 return false;
26396 }
26397 }
26398 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26399 }
26400
26401 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26402
26403 static bool
26404 rs6000_global_entry_point_needed_p (void)
26405 {
26406 /* Only needed for the ELFv2 ABI. */
26407 if (DEFAULT_ABI != ABI_ELFv2)
26408 return false;
26409
26410 /* With -msingle-pic-base, we assume the whole program shares the same
26411 TOC, so no global entry point prologues are needed anywhere. */
26412 if (TARGET_SINGLE_PIC_BASE)
26413 return false;
26414
26415 /* Ensure we have a global entry point for thunks. ??? We could
26416 avoid that if the target routine doesn't need a global entry point,
26417 but we do not know whether this is the case at this point. */
26418 if (cfun->is_thunk)
26419 return true;
26420
26421 /* For regular functions, rs6000_emit_prologue sets this flag if the
26422 routine ever uses the TOC pointer. */
26423 return cfun->machine->r2_setup_needed;
26424 }
26425
26426 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26427 static sbitmap
26428 rs6000_get_separate_components (void)
26429 {
26430 rs6000_stack_t *info = rs6000_stack_info ();
26431
26432 if (WORLD_SAVE_P (info))
26433 return NULL;
26434
26435 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26436 && !(info->savres_strategy & REST_MULTIPLE));
26437
26438 /* Component 0 is the save/restore of LR (done via GPR0).
26439 Component 2 is the save of the TOC (GPR2).
26440 Components 13..31 are the save/restore of GPR13..GPR31.
26441 Components 46..63 are the save/restore of FPR14..FPR31. */
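/* FPR14..FPR31 are GCC hard registers 46..63 (FPR0 is hard register 32),
   so an FPR component number is simply its hard register number.  */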
26442
26443 cfun->machine->n_components = 64;
26444
26445 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26446 bitmap_clear (components);
26447
26448 int reg_size = TARGET_32BIT ? 4 : 8;
26449 int fp_reg_size = 8;
26450
26451 /* The GPRs we need saved to the frame. */
26452 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26453 && (info->savres_strategy & REST_INLINE_GPRS))
26454 {
26455 int offset = info->gp_save_offset;
26456 if (info->push_p)
26457 offset += info->total_size;
26458
26459 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26460 {
26461 if (IN_RANGE (offset, -0x8000, 0x7fff)
26462 && save_reg_p (regno))
26463 bitmap_set_bit (components, regno);
26464
26465 offset += reg_size;
26466 }
26467 }
26468
26469 /* Don't mess with the hard frame pointer. */
26470 if (frame_pointer_needed)
26471 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26472
26473 /* Don't mess with the fixed TOC register. */
26474 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26475 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26476 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26477 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26478
26479 /* The FPRs we need saved to the frame. */
26480 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26481 && (info->savres_strategy & REST_INLINE_FPRS))
26482 {
26483 int offset = info->fp_save_offset;
26484 if (info->push_p)
26485 offset += info->total_size;
26486
26487 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26488 {
26489 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26490 bitmap_set_bit (components, regno);
26491
26492 offset += fp_reg_size;
26493 }
26494 }
26495
26496 /* Optimize LR save and restore if we can. This is component 0. Any
26497 out-of-line register save/restore routines need LR. */
26498 if (info->lr_save_p
26499 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26500 && (info->savres_strategy & SAVE_INLINE_GPRS)
26501 && (info->savres_strategy & REST_INLINE_GPRS)
26502 && (info->savres_strategy & SAVE_INLINE_FPRS)
26503 && (info->savres_strategy & REST_INLINE_FPRS)
26504 && (info->savres_strategy & SAVE_INLINE_VRS)
26505 && (info->savres_strategy & REST_INLINE_VRS))
26506 {
26507 int offset = info->lr_save_offset;
26508 if (info->push_p)
26509 offset += info->total_size;
26510 if (IN_RANGE (offset, -0x8000, 0x7fff))
26511 bitmap_set_bit (components, 0);
26512 }
26513
26514 /* Optimize saving the TOC. This is component 2. */
26515 if (cfun->machine->save_toc_in_prologue)
26516 bitmap_set_bit (components, 2);
26517
26518 return components;
26519 }
26520
26521 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26522 static sbitmap
26523 rs6000_components_for_bb (basic_block bb)
26524 {
26525 rs6000_stack_t *info = rs6000_stack_info ();
26526
26527 bitmap in = DF_LIVE_IN (bb);
26528 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26529 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26530
26531 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26532 bitmap_clear (components);
26533
26534 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26535
26536 /* GPRs. */
26537 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26538 if (bitmap_bit_p (in, regno)
26539 || bitmap_bit_p (gen, regno)
26540 || bitmap_bit_p (kill, regno))
26541 bitmap_set_bit (components, regno);
26542
26543 /* FPRs. */
26544 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26545 if (bitmap_bit_p (in, regno)
26546 || bitmap_bit_p (gen, regno)
26547 || bitmap_bit_p (kill, regno))
26548 bitmap_set_bit (components, regno);
26549
26550 /* The link register. */
26551 if (bitmap_bit_p (in, LR_REGNO)
26552 || bitmap_bit_p (gen, LR_REGNO)
26553 || bitmap_bit_p (kill, LR_REGNO))
26554 bitmap_set_bit (components, 0);
26555
26556 /* The TOC save. */
26557 if (bitmap_bit_p (in, TOC_REGNUM)
26558 || bitmap_bit_p (gen, TOC_REGNUM)
26559 || bitmap_bit_p (kill, TOC_REGNUM))
26560 bitmap_set_bit (components, 2);
26561
26562 return components;
26563 }
26564
26565 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26566 static void
26567 rs6000_disqualify_components (sbitmap components, edge e,
26568 sbitmap edge_components, bool /*is_prologue*/)
26569 {
26570 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26571 live where we want to place that code. */
26572 if (bitmap_bit_p (edge_components, 0)
26573 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26574 {
26575 if (dump_file)
26576 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26577 "on entry to bb %d\n", e->dest->index);
26578 bitmap_clear_bit (components, 0);
26579 }
26580 }
26581
26582 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26583 static void
26584 rs6000_emit_prologue_components (sbitmap components)
26585 {
26586 rs6000_stack_t *info = rs6000_stack_info ();
26587 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26588 ? HARD_FRAME_POINTER_REGNUM
26589 : STACK_POINTER_REGNUM);
26590
26591 machine_mode reg_mode = Pmode;
26592 int reg_size = TARGET_32BIT ? 4 : 8;
26593 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26594 int fp_reg_size = 8;
26595
26596 /* Prologue for LR. */
26597 if (bitmap_bit_p (components, 0))
26598 {
26599 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26600 rtx reg = gen_rtx_REG (reg_mode, 0);
26601 rtx_insn *insn = emit_move_insn (reg, lr);
26602 RTX_FRAME_RELATED_P (insn) = 1;
26603 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26604
26605 int offset = info->lr_save_offset;
26606 if (info->push_p)
26607 offset += info->total_size;
26608
26609 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26610 RTX_FRAME_RELATED_P (insn) = 1;
26611 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26612 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26613 }
26614
26615 /* Prologue for TOC. */
26616 if (bitmap_bit_p (components, 2))
26617 {
26618 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26619 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26620 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26621 }
26622
26623 /* Prologue for the GPRs. */
26624 int offset = info->gp_save_offset;
26625 if (info->push_p)
26626 offset += info->total_size;
26627
26628 for (int i = info->first_gp_reg_save; i < 32; i++)
26629 {
26630 if (bitmap_bit_p (components, i))
26631 {
26632 rtx reg = gen_rtx_REG (reg_mode, i);
26633 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26634 RTX_FRAME_RELATED_P (insn) = 1;
26635 rtx set = copy_rtx (single_set (insn));
26636 add_reg_note (insn, REG_CFA_OFFSET, set);
26637 }
26638
26639 offset += reg_size;
26640 }
26641
26642 /* Prologue for the FPRs. */
26643 offset = info->fp_save_offset;
26644 if (info->push_p)
26645 offset += info->total_size;
26646
26647 for (int i = info->first_fp_reg_save; i < 64; i++)
26648 {
26649 if (bitmap_bit_p (components, i))
26650 {
26651 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26652 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26653 RTX_FRAME_RELATED_P (insn) = 1;
26654 rtx set = copy_rtx (single_set (insn));
26655 add_reg_note (insn, REG_CFA_OFFSET, set);
26656 }
26657
26658 offset += fp_reg_size;
26659 }
26660 }
26661
26662 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26663 static void
26664 rs6000_emit_epilogue_components (sbitmap components)
26665 {
26666 rs6000_stack_t *info = rs6000_stack_info ();
26667 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26668 ? HARD_FRAME_POINTER_REGNUM
26669 : STACK_POINTER_REGNUM);
26670
26671 machine_mode reg_mode = Pmode;
26672 int reg_size = TARGET_32BIT ? 4 : 8;
26673
26674 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26675 int fp_reg_size = 8;
26676
26677 /* Epilogue for the FPRs. */
26678 int offset = info->fp_save_offset;
26679 if (info->push_p)
26680 offset += info->total_size;
26681
26682 for (int i = info->first_fp_reg_save; i < 64; i++)
26683 {
26684 if (bitmap_bit_p (components, i))
26685 {
26686 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26687 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26688 RTX_FRAME_RELATED_P (insn) = 1;
26689 add_reg_note (insn, REG_CFA_RESTORE, reg);
26690 }
26691
26692 offset += fp_reg_size;
26693 }
26694
26695 /* Epilogue for the GPRs. */
26696 offset = info->gp_save_offset;
26697 if (info->push_p)
26698 offset += info->total_size;
26699
26700 for (int i = info->first_gp_reg_save; i < 32; i++)
26701 {
26702 if (bitmap_bit_p (components, i))
26703 {
26704 rtx reg = gen_rtx_REG (reg_mode, i);
26705 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26706 RTX_FRAME_RELATED_P (insn) = 1;
26707 add_reg_note (insn, REG_CFA_RESTORE, reg);
26708 }
26709
26710 offset += reg_size;
26711 }
26712
26713 /* Epilogue for LR. */
26714 if (bitmap_bit_p (components, 0))
26715 {
26716 int offset = info->lr_save_offset;
26717 if (info->push_p)
26718 offset += info->total_size;
26719
26720 rtx reg = gen_rtx_REG (reg_mode, 0);
26721 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26722
26723 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26724 insn = emit_move_insn (lr, reg);
26725 RTX_FRAME_RELATED_P (insn) = 1;
26726 add_reg_note (insn, REG_CFA_RESTORE, lr);
26727 }
26728 }
26729
26730 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26731 static void
26732 rs6000_set_handled_components (sbitmap components)
26733 {
26734 rs6000_stack_t *info = rs6000_stack_info ();
26735
26736 for (int i = info->first_gp_reg_save; i < 32; i++)
26737 if (bitmap_bit_p (components, i))
26738 cfun->machine->gpr_is_wrapped_separately[i] = true;
26739
26740 for (int i = info->first_fp_reg_save; i < 64; i++)
26741 if (bitmap_bit_p (components, i))
26742 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26743
26744 if (bitmap_bit_p (components, 0))
26745 cfun->machine->lr_is_wrapped_separately = true;
26746
26747 if (bitmap_bit_p (components, 2))
26748 cfun->machine->toc_is_wrapped_separately = true;
26749 }
26750
26751 /* VRSAVE is a bit vector representing which AltiVec registers
26752 are used. The OS uses this to determine which vector
26753 registers to save on a context switch. We need to save the old
26754 VRSAVE in the stack frame, OR in the bits for the AltiVec
26755 registers this function uses, and undo the change in the
26756 epilogue. */
26757 static void
26758 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26759 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26760 {
26761 /* Get VRSAVE into a GPR. */
26762 rtx reg = gen_rtx_REG (SImode, save_regno);
26763 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26764 if (TARGET_MACHO)
26765 emit_insn (gen_get_vrsave_internal (reg));
26766 else
26767 emit_insn (gen_rtx_SET (reg, vrsave));
26768
26769 /* Save VRSAVE. */
26770 int offset = info->vrsave_save_offset + frame_off;
26771 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26772
26773 /* Include the registers in the mask. */
26774 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26775
26776 emit_insn (generate_set_vrsave (reg, info, 0));
26777 }
26778
26779 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26780 called, it left the arg pointer to the old stack in r29. Otherwise, the
26781 arg pointer is the top of the current frame. */
26782 static void
26783 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26784 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26785 {
26786 cfun->machine->split_stack_argp_used = true;
26787
26788 if (sp_adjust)
26789 {
26790 rtx r12 = gen_rtx_REG (Pmode, 12);
26791 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26792 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26793 emit_insn_before (set_r12, sp_adjust);
26794 }
26795 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26796 {
26797 rtx r12 = gen_rtx_REG (Pmode, 12);
26798 if (frame_off == 0)
26799 emit_move_insn (r12, frame_reg_rtx);
26800 else
26801 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26802 }
26803
26804 if (info->push_p)
26805 {
26806 rtx r12 = gen_rtx_REG (Pmode, 12);
26807 rtx r29 = gen_rtx_REG (Pmode, 29);
26808 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26809 rtx not_more = gen_label_rtx ();
26810 rtx jump;
26811
26812 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26813 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26814 gen_rtx_LABEL_REF (VOIDmode, not_more),
26815 pc_rtx);
26816 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26817 JUMP_LABEL (jump) = not_more;
26818 LABEL_NUSES (not_more) += 1;
26819 emit_move_insn (r12, r29);
26820 emit_label (not_more);
26821 }
26822 }
26823
26824 /* Emit function prologue as insns. */
26825
26826 void
26827 rs6000_emit_prologue (void)
26828 {
26829 rs6000_stack_t *info = rs6000_stack_info ();
26830 machine_mode reg_mode = Pmode;
26831 int reg_size = TARGET_32BIT ? 4 : 8;
26832 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26833 int fp_reg_size = 8;
26834 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26835 rtx frame_reg_rtx = sp_reg_rtx;
26836 unsigned int cr_save_regno;
26837 rtx cr_save_rtx = NULL_RTX;
26838 rtx_insn *insn;
26839 int strategy;
26840 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26841 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26842 && call_used_regs[STATIC_CHAIN_REGNUM]);
26843 int using_split_stack = (flag_split_stack
26844 && (lookup_attribute ("no_split_stack",
26845 DECL_ATTRIBUTES (cfun->decl))
26846 == NULL));
26847
26848 /* Offset to top of frame for frame_reg and sp respectively. */
26849 HOST_WIDE_INT frame_off = 0;
26850 HOST_WIDE_INT sp_off = 0;
26851 /* sp_adjust is the stack adjusting instruction, tracked so that the
26852 insn setting up the split-stack arg pointer can be emitted just
26853 prior to it, when r12 is not used here for other purposes. */
26854 rtx_insn *sp_adjust = 0;
26855
26856 #if CHECKING_P
26857 /* Track and check usage of r0, r11, r12. */
26858 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26859 #define START_USE(R) do \
26860 { \
26861 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26862 reg_inuse |= 1 << (R); \
26863 } while (0)
26864 #define END_USE(R) do \
26865 { \
26866 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26867 reg_inuse &= ~(1 << (R)); \
26868 } while (0)
26869 #define NOT_INUSE(R) do \
26870 { \
26871 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26872 } while (0)
26873 #else
26874 #define START_USE(R) do {} while (0)
26875 #define END_USE(R) do {} while (0)
26876 #define NOT_INUSE(R) do {} while (0)
26877 #endif
26878
26879 if (DEFAULT_ABI == ABI_ELFv2
26880 && !TARGET_SINGLE_PIC_BASE)
26881 {
26882 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26883
26884 /* With -mminimal-toc we may generate an extra use of r2 below. */
26885 if (TARGET_TOC && TARGET_MINIMAL_TOC
26886 && !constant_pool_empty_p ())
26887 cfun->machine->r2_setup_needed = true;
26888 }
26889
26890
26891 if (flag_stack_usage_info)
26892 current_function_static_stack_size = info->total_size;
26893
26894 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26895 {
26896 HOST_WIDE_INT size = info->total_size;
26897
26898 if (crtl->is_leaf && !cfun->calls_alloca)
26899 {
26900 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26901 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26902 size - get_stack_check_protect ());
26903 }
26904 else if (size > 0)
26905 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26906 }
26907
26908 if (TARGET_FIX_AND_CONTINUE)
26909 {
26910 /* GDB on Darwin arranges to forward a function from the old
26911 address by modifying the first 5 instructions of the function
26912 to branch to the overriding function. This is necessary to
26913 permit function pointers that point to the old function to
26914 actually forward to the new function. */
26915 emit_insn (gen_nop ());
26916 emit_insn (gen_nop ());
26917 emit_insn (gen_nop ());
26918 emit_insn (gen_nop ());
26919 emit_insn (gen_nop ());
26920 }
26921
26922 /* Handle world saves specially here. */
26923 if (WORLD_SAVE_P (info))
26924 {
26925 int i, j, sz;
26926 rtx treg;
26927 rtvec p;
26928 rtx reg0;
26929
26930 /* save_world expects lr in r0. */
26931 reg0 = gen_rtx_REG (Pmode, 0);
26932 if (info->lr_save_p)
26933 {
26934 insn = emit_move_insn (reg0,
26935 gen_rtx_REG (Pmode, LR_REGNO));
26936 RTX_FRAME_RELATED_P (insn) = 1;
26937 }
26938
26939 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26940 assumptions about the offsets of various bits of the stack
26941 frame. */
26942 gcc_assert (info->gp_save_offset == -220
26943 && info->fp_save_offset == -144
26944 && info->lr_save_offset == 8
26945 && info->cr_save_offset == 4
26946 && info->push_p
26947 && info->lr_save_p
26948 && (!crtl->calls_eh_return
26949 || info->ehrd_offset == -432)
26950 && info->vrsave_save_offset == -224
26951 && info->altivec_save_offset == -416);
26952
26953 treg = gen_rtx_REG (SImode, 11);
26954 emit_move_insn (treg, GEN_INT (-info->total_size));
26955
26956 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26957 in R11. It also clobbers R12, so beware! */
26958
26959 /* Preserve CR2 for save_world prologues. */
26960 sz = 5;
26961 sz += 32 - info->first_gp_reg_save;
26962 sz += 64 - info->first_fp_reg_save;
26963 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26964 p = rtvec_alloc (sz);
26965 j = 0;
26966 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
26967 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26968 gen_rtx_SYMBOL_REF (Pmode,
26969 "*save_world"));
26970 /* We do floats first so that the instruction pattern matches
26971 properly. */
26972 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26973 RTVEC_ELT (p, j++)
26974 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26975 info->first_fp_reg_save + i),
26976 frame_reg_rtx,
26977 info->fp_save_offset + frame_off + 8 * i);
26978 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26979 RTVEC_ELT (p, j++)
26980 = gen_frame_store (gen_rtx_REG (V4SImode,
26981 info->first_altivec_reg_save + i),
26982 frame_reg_rtx,
26983 info->altivec_save_offset + frame_off + 16 * i);
26984 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26985 RTVEC_ELT (p, j++)
26986 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26987 frame_reg_rtx,
26988 info->gp_save_offset + frame_off + reg_size * i);
26989
26990 /* CR register traditionally saved as CR2. */
26991 RTVEC_ELT (p, j++)
26992 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26993 frame_reg_rtx, info->cr_save_offset + frame_off);
26994 /* Explain the use of R0. */
26995 if (info->lr_save_p)
26996 RTVEC_ELT (p, j++)
26997 = gen_frame_store (reg0,
26998 frame_reg_rtx, info->lr_save_offset + frame_off);
26999 /* Explain what happens to the stack pointer. */
27000 {
27001 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27002 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27003 }
27004
27005 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27006 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27007 treg, GEN_INT (-info->total_size));
27008 sp_off = frame_off = info->total_size;
27009 }
27010
27011 strategy = info->savres_strategy;
27012
27013 /* For V.4, update stack before we do any saving and set back pointer. */
27014 if (! WORLD_SAVE_P (info)
27015 && info->push_p
27016 && (DEFAULT_ABI == ABI_V4
27017 || crtl->calls_eh_return))
27018 {
27019 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27020 || !(strategy & SAVE_INLINE_GPRS)
27021 || !(strategy & SAVE_INLINE_VRS));
27022 int ptr_regno = -1;
27023 rtx ptr_reg = NULL_RTX;
27024 int ptr_off = 0;
27025
27026 if (info->total_size < 32767)
27027 frame_off = info->total_size;
27028 else if (need_r11)
27029 ptr_regno = 11;
27030 else if (info->cr_save_p
27031 || info->lr_save_p
27032 || info->first_fp_reg_save < 64
27033 || info->first_gp_reg_save < 32
27034 || info->altivec_size != 0
27035 || info->vrsave_size != 0
27036 || crtl->calls_eh_return)
27037 ptr_regno = 12;
27038 else
27039 {
27040 /* The prologue won't be saving any regs so there is no need
27041 to set up a frame register to access any frame save area.
27042 We also won't be using frame_off anywhere below, but set
27043 the correct value anyway to protect against future
27044 changes to this function. */
27045 frame_off = info->total_size;
27046 }
27047 if (ptr_regno != -1)
27048 {
27049 /* Set up the frame offset to that needed by the first
27050 out-of-line save function. */
27051 START_USE (ptr_regno);
27052 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27053 frame_reg_rtx = ptr_reg;
27054 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27055 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27056 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27057 ptr_off = info->gp_save_offset + info->gp_size;
27058 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27059 ptr_off = info->altivec_save_offset + info->altivec_size;
27060 frame_off = -ptr_off;
27061 }
27062 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27063 ptr_reg, ptr_off);
27064 if (REGNO (frame_reg_rtx) == 12)
27065 sp_adjust = 0;
27066 sp_off = info->total_size;
27067 if (frame_reg_rtx != sp_reg_rtx)
27068 rs6000_emit_stack_tie (frame_reg_rtx, false);
27069 }
27070
27071 /* If we need to save the link register, get it into r0. */
27072 if (!WORLD_SAVE_P (info) && info->lr_save_p
27073 && !cfun->machine->lr_is_wrapped_separately)
27074 {
27075 rtx addr, reg, mem;
27076
27077 reg = gen_rtx_REG (Pmode, 0);
27078 START_USE (0);
27079 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27080 RTX_FRAME_RELATED_P (insn) = 1;
27081
27082 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27083 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27084 {
27085 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27086 GEN_INT (info->lr_save_offset + frame_off));
27087 mem = gen_rtx_MEM (Pmode, addr);
27088 /* This should not be of rs6000_sr_alias_set, because of
27089 __builtin_return_address. */
27090
27091 insn = emit_move_insn (mem, reg);
27092 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27093 NULL_RTX, NULL_RTX);
27094 END_USE (0);
27095 }
27096 }
27097
27098 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27099 r12 will be needed by the out-of-line GPR save. */
27100 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27101 && !(strategy & (SAVE_INLINE_GPRS
27102 | SAVE_NOINLINE_GPRS_SAVES_LR))
27103 ? 11 : 12);
27104 if (!WORLD_SAVE_P (info)
27105 && info->cr_save_p
27106 && REGNO (frame_reg_rtx) != cr_save_regno
27107 && !(using_static_chain_p && cr_save_regno == 11)
27108 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27109 {
27110 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27111 START_USE (cr_save_regno);
27112 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27113 }
27114
27115 /* Do any required saving of FPRs. If the strategy says to save them
27116 inline, do it ourselves. Otherwise, call an out-of-line routine. */
27117 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27118 {
27119 int offset = info->fp_save_offset + frame_off;
27120 for (int i = info->first_fp_reg_save; i < 64; i++)
27121 {
27122 if (save_reg_p (i)
27123 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27124 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27125 sp_off - frame_off);
27126
27127 offset += fp_reg_size;
27128 }
27129 }
27130 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27131 {
27132 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27133 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27134 unsigned ptr_regno = ptr_regno_for_savres (sel);
27135 rtx ptr_reg = frame_reg_rtx;
27136
27137 if (REGNO (frame_reg_rtx) == ptr_regno)
27138 gcc_checking_assert (frame_off == 0);
27139 else
27140 {
27141 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27142 NOT_INUSE (ptr_regno);
27143 emit_insn (gen_add3_insn (ptr_reg,
27144 frame_reg_rtx, GEN_INT (frame_off)));
27145 }
27146 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27147 info->fp_save_offset,
27148 info->lr_save_offset,
27149 DFmode, sel);
27150 rs6000_frame_related (insn, ptr_reg, sp_off,
27151 NULL_RTX, NULL_RTX);
27152 if (lr)
27153 END_USE (0);
27154 }
27155
27156 /* Save GPRs. This is done as a PARALLEL if we are using
27157 the store-multiple instructions. */
27158 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27159 {
27160 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27161 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27162 unsigned ptr_regno = ptr_regno_for_savres (sel);
27163 rtx ptr_reg = frame_reg_rtx;
27164 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27165 int end_save = info->gp_save_offset + info->gp_size;
27166 int ptr_off;
27167
27168 if (ptr_regno == 12)
27169 sp_adjust = 0;
27170 if (!ptr_set_up)
27171 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27172
27173 /* Need to adjust r11 (r12) if we saved any FPRs. */
27174 if (end_save + frame_off != 0)
27175 {
27176 rtx offset = GEN_INT (end_save + frame_off);
27177
27178 if (ptr_set_up)
27179 frame_off = -end_save;
27180 else
27181 NOT_INUSE (ptr_regno);
27182 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27183 }
27184 else if (!ptr_set_up)
27185 {
27186 NOT_INUSE (ptr_regno);
27187 emit_move_insn (ptr_reg, frame_reg_rtx);
27188 }
27189 ptr_off = -end_save;
27190 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27191 info->gp_save_offset + ptr_off,
27192 info->lr_save_offset + ptr_off,
27193 reg_mode, sel);
27194 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27195 NULL_RTX, NULL_RTX);
27196 if (lr)
27197 END_USE (0);
27198 }
27199 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27200 {
27201 rtvec p;
27202 int i;
27203 p = rtvec_alloc (32 - info->first_gp_reg_save);
27204 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27205 RTVEC_ELT (p, i)
27206 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27207 frame_reg_rtx,
27208 info->gp_save_offset + frame_off + reg_size * i);
27209 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27210 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27211 NULL_RTX, NULL_RTX);
27212 }
27213 else if (!WORLD_SAVE_P (info))
27214 {
27215 int offset = info->gp_save_offset + frame_off;
27216 for (int i = info->first_gp_reg_save; i < 32; i++)
27217 {
27218 if (save_reg_p (i)
27219 && !cfun->machine->gpr_is_wrapped_separately[i])
27220 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27221 sp_off - frame_off);
27222
27223 offset += reg_size;
27224 }
27225 }
27226
27227 if (crtl->calls_eh_return)
27228 {
27229 unsigned int i;
27230 rtvec p;
27231
27232 for (i = 0; ; ++i)
27233 {
27234 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27235 if (regno == INVALID_REGNUM)
27236 break;
27237 }
27238
27239 p = rtvec_alloc (i);
27240
27241 for (i = 0; ; ++i)
27242 {
27243 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27244 if (regno == INVALID_REGNUM)
27245 break;
27246
27247 rtx set
27248 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27249 sp_reg_rtx,
27250 info->ehrd_offset + sp_off + reg_size * (int) i);
27251 RTVEC_ELT (p, i) = set;
27252 RTX_FRAME_RELATED_P (set) = 1;
27253 }
27254
27255 insn = emit_insn (gen_blockage ());
27256 RTX_FRAME_RELATED_P (insn) = 1;
27257 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27258 }
27259
27260 /* In the AIX ABI we need to make sure r2 is really saved. */
27261 if (TARGET_AIX && crtl->calls_eh_return)
27262 {
27263 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27264 rtx join_insn, note;
27265 rtx_insn *save_insn;
27266 long toc_restore_insn;
27267
27268 tmp_reg = gen_rtx_REG (Pmode, 11);
27269 tmp_reg_si = gen_rtx_REG (SImode, 11);
27270 if (using_static_chain_p)
27271 {
27272 START_USE (0);
27273 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27274 }
27275 else
27276 START_USE (11);
27277 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27278 /* Peek at the instruction to which this function returns. If it's
27279 restoring r2, then we know we've already saved r2. We can't
27280 unconditionally save r2 because the value we have will already
27281 be updated if we arrived at this function via a PLT call or
27282 TOC adjusting stub. */
27283 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27284 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27285 + RS6000_TOC_SAVE_SLOT);
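 /* 0x80410000 is the fixed opcode/RT/RA part of "lwz 2,d(1)" and
    0xE8410000 that of "ld 2,d(1)"; adding RS6000_TOC_SAVE_SLOT fills
    in the displacement, giving the exact instruction a TOC-restoring
    return site would contain.  */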
27286 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27287 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27288 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27289 validate_condition_mode (EQ, CCUNSmode);
27290 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27291 emit_insn (gen_rtx_SET (compare_result,
27292 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
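 /* Xor-ing away the high half first and then comparing against the
    low half tests full 32-bit equality while keeping each immediate
    within the 16 bits that the xoris/cmplwi pair can encode.  */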
27293 toc_save_done = gen_label_rtx ();
27294 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27295 gen_rtx_EQ (VOIDmode, compare_result,
27296 const0_rtx),
27297 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27298 pc_rtx);
27299 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27300 JUMP_LABEL (jump) = toc_save_done;
27301 LABEL_NUSES (toc_save_done) += 1;
27302
27303 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27304 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27305 sp_off - frame_off);
27306
27307 emit_label (toc_save_done);
27308
27309 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27310 have a CFG that has different saves along different paths.
27311 Move the note to a dummy blockage insn, which describes that
27312 R2 is unconditionally saved after the label. */
27313 /* ??? An alternate representation might be a special insn pattern
27314 containing both the branch and the store. That might give the
27315 code that minimizes the number of DW_CFA_advance opcodes greater
27316 freedom in placing the annotations. */
27317 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27318 if (note)
27319 remove_note (save_insn, note);
27320 else
27321 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27322 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27323 RTX_FRAME_RELATED_P (save_insn) = 0;
27324
27325 join_insn = emit_insn (gen_blockage ());
27326 REG_NOTES (join_insn) = note;
27327 RTX_FRAME_RELATED_P (join_insn) = 1;
27328
27329 if (using_static_chain_p)
27330 {
27331 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27332 END_USE (0);
27333 }
27334 else
27335 END_USE (11);
27336 }
27337
27338 /* Save CR if we use any that must be preserved. */
27339 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27340 {
27341 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27342 GEN_INT (info->cr_save_offset + frame_off));
27343 rtx mem = gen_frame_mem (SImode, addr);
27344
27345 /* If we didn't copy cr before, do so now using r0. */
27346 if (cr_save_rtx == NULL_RTX)
27347 {
27348 START_USE (0);
27349 cr_save_rtx = gen_rtx_REG (SImode, 0);
27350 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27351 }
27352
27353 /* Saving CR requires a two-instruction sequence: one instruction
27354 to move the CR to a general-purpose register, and a second
27355 instruction that stores the GPR to memory.
27356
27357 We do not emit any DWARF CFI records for the first of these,
27358 because we cannot properly represent the fact that CR is saved in
27359 a register. One reason is that we cannot express that multiple
27360 CR fields are saved; another reason is that on 64-bit, the size
27361 of the CR register in DWARF (4 bytes) differs from the size of
27362 a general-purpose register.
27363
27364 This means if any intervening instruction were to clobber one of
27365 the call-saved CR fields, we'd have incorrect CFI. To prevent
27366 this from happening, we mark the store to memory as a use of
27367 those CR fields, which prevents any such instruction from being
27368 scheduled between the two instructions. */
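 /* As a sketch (hypothetical register and offset), the sequence is
    "mfcr 12" followed by "stw 12,8(1)", with the USEs below pinning
    the store to the move from CR.  */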
27369 rtx crsave_v[9];
27370 int n_crsave = 0;
27371 int i;
27372
27373 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27374 for (i = 0; i < 8; i++)
27375 if (save_reg_p (CR0_REGNO + i))
27376 crsave_v[n_crsave++]
27377 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27378
27379 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27380 gen_rtvec_v (n_crsave, crsave_v)));
27381 END_USE (REGNO (cr_save_rtx));
27382
27383 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27384 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27385 so we need to construct a frame expression manually. */
27386 RTX_FRAME_RELATED_P (insn) = 1;
27387
27388 /* Update address to be stack-pointer relative, like
27389 rs6000_frame_related would do. */
27390 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27391 GEN_INT (info->cr_save_offset + sp_off));
27392 mem = gen_frame_mem (SImode, addr);
27393
27394 if (DEFAULT_ABI == ABI_ELFv2)
27395 {
27396 /* In the ELFv2 ABI we generate separate CFI records for each
27397 CR field that was actually saved. They all point to the
27398 same 32-bit stack slot. */
27399 rtx crframe[8];
27400 int n_crframe = 0;
27401
27402 for (i = 0; i < 8; i++)
27403 if (save_reg_p (CR0_REGNO + i))
27404 {
27405 crframe[n_crframe]
27406 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27407
27408 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27409 n_crframe++;
27410 }
27411
27412 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27413 gen_rtx_PARALLEL (VOIDmode,
27414 gen_rtvec_v (n_crframe, crframe)));
27415 }
27416 else
27417 {
27418 /* In other ABIs, by convention, we use a single CR regnum to
27419 represent the fact that all call-saved CR fields are saved.
27420 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27421 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27422 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27423 }
27424 }
27425
27426 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27427 *separate* slots if the routine calls __builtin_eh_return, so
27428 that they can be independently restored by the unwinder. */
27429 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27430 {
27431 int i, cr_off = info->ehcr_offset;
27432 rtx crsave;
27433
27434 /* ??? We might get better performance by using multiple mfocrf
27435 instructions. */
27436 crsave = gen_rtx_REG (SImode, 0);
27437 emit_insn (gen_prologue_movesi_from_cr (crsave));
27438
27439 for (i = 0; i < 8; i++)
27440 if (!call_used_regs[CR0_REGNO + i])
27441 {
27442 rtvec p = rtvec_alloc (2);
27443 RTVEC_ELT (p, 0)
27444 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27445 RTVEC_ELT (p, 1)
27446 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27447
27448 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27449
27450 RTX_FRAME_RELATED_P (insn) = 1;
27451 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27452 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27453 sp_reg_rtx, cr_off + sp_off));
27454
27455 cr_off += reg_size;
27456 }
27457 }
27458
27459 /* If we are emitting stack probes, but allocate no stack, then
27460 just note that in the dump file. */
27461 if (flag_stack_clash_protection
27462 && dump_file
27463 && !info->push_p)
27464 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27465
27466 /* Update stack and set back pointer unless this is V.4,
27467 for which it was done previously. */
27468 if (!WORLD_SAVE_P (info) && info->push_p
27469 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27470 {
27471 rtx ptr_reg = NULL;
27472 int ptr_off = 0;
27473
27474 /* If saving AltiVec regs we need to be able to address all save
27475 locations using a 16-bit offset. */
27476 if ((strategy & SAVE_INLINE_VRS) == 0
27477 || (info->altivec_size != 0
27478 && (info->altivec_save_offset + info->altivec_size - 16
27479 + info->total_size - frame_off) > 32767)
27480 || (info->vrsave_size != 0
27481 && (info->vrsave_save_offset
27482 + info->total_size - frame_off) > 32767))
27483 {
27484 int sel = SAVRES_SAVE | SAVRES_VR;
27485 unsigned ptr_regno = ptr_regno_for_savres (sel);
27486
27487 if (using_static_chain_p
27488 && ptr_regno == STATIC_CHAIN_REGNUM)
27489 ptr_regno = 12;
27490 if (REGNO (frame_reg_rtx) != ptr_regno)
27491 START_USE (ptr_regno);
27492 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27493 frame_reg_rtx = ptr_reg;
27494 ptr_off = info->altivec_save_offset + info->altivec_size;
27495 frame_off = -ptr_off;
27496 }
27497 else if (REGNO (frame_reg_rtx) == 1)
27498 frame_off = info->total_size;
27499 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27500 ptr_reg, ptr_off);
27501 if (REGNO (frame_reg_rtx) == 12)
27502 sp_adjust = 0;
27503 sp_off = info->total_size;
27504 if (frame_reg_rtx != sp_reg_rtx)
27505 rs6000_emit_stack_tie (frame_reg_rtx, false);
27506 }
27507
27508 /* Set frame pointer, if needed. */
27509 if (frame_pointer_needed)
27510 {
27511 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27512 sp_reg_rtx);
27513 RTX_FRAME_RELATED_P (insn) = 1;
27514 }
27515
27516 /* Save AltiVec registers if needed. Save here because the red zone does
27517 not always include AltiVec registers. */
27518 if (!WORLD_SAVE_P (info)
27519 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27520 {
27521 int end_save = info->altivec_save_offset + info->altivec_size;
27522 int ptr_off;
27523 /* Oddly, the vector save/restore functions point r0 at the end
27524 of the save area, then use r11 or r12 to load offsets for
27525 [reg+reg] addressing. */
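 /* As a sketch (hypothetical offsets), an out-of-line vector save
    thus looks roughly like "li 12,-192 ; stvx 20,12,0 ; li 12,-176 ;
    stvx 21,12,0 ; ..." with r0 holding the end-of-area address.  */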
27526 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27527 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27528 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27529
27530 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27531 NOT_INUSE (0);
27532 if (scratch_regno == 12)
27533 sp_adjust = 0;
27534 if (end_save + frame_off != 0)
27535 {
27536 rtx offset = GEN_INT (end_save + frame_off);
27537
27538 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27539 }
27540 else
27541 emit_move_insn (ptr_reg, frame_reg_rtx);
27542
27543 ptr_off = -end_save;
27544 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27545 info->altivec_save_offset + ptr_off,
27546 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27547 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27548 NULL_RTX, NULL_RTX);
27549 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27550 {
27551 /* The oddity mentioned above clobbered our frame reg. */
27552 emit_move_insn (frame_reg_rtx, ptr_reg);
27553 frame_off = ptr_off;
27554 }
27555 }
27556 else if (!WORLD_SAVE_P (info)
27557 && info->altivec_size != 0)
27558 {
27559 int i;
27560
27561 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27562 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27563 {
27564 rtx areg, savereg, mem;
27565 HOST_WIDE_INT offset;
27566
27567 offset = (info->altivec_save_offset + frame_off
27568 + 16 * (i - info->first_altivec_reg_save));
27569
27570 savereg = gen_rtx_REG (V4SImode, i);
27571
27572 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27573 {
27574 mem = gen_frame_mem (V4SImode,
27575 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27576 GEN_INT (offset)));
27577 insn = emit_insn (gen_rtx_SET (mem, savereg));
27578 areg = NULL_RTX;
27579 }
27580 else
27581 {
27582 NOT_INUSE (0);
27583 areg = gen_rtx_REG (Pmode, 0);
27584 emit_move_insn (areg, GEN_INT (offset));
27585
27586 /* AltiVec addressing mode is [reg+reg]. */
27587 mem = gen_frame_mem (V4SImode,
27588 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27589
27590 /* Rather than emitting a generic move, force use of the stvx
27591 instruction, which we always want on ISA 2.07 (power8) systems.
27592 In particular we don't want xxpermdi/stxvd2x for little
27593 endian. */
27594 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27595 }
27596
27597 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27598 areg, GEN_INT (offset));
27599 }
27600 }
27601
27602 /* VRSAVE is a bit vector representing which AltiVec registers
27603 are used. The OS uses this to determine which vector
27604 registers to save on a context switch. We need to save
27605 VRSAVE on the stack frame, add whatever AltiVec registers we
27606 used in this function, and do the corresponding magic in the
27607 epilogue. */
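 /* As a sketch (hypothetical register, offset and mask),
    emit_vrsave_prologue produces roughly "mfspr 12,256 ;
    stw 12,off(1) ; oris 12,12,mask ; mtspr 256,12", VRSAVE being
    SPR 256.  */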
27608
27609 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27610 {
27611 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27612 be using r12 as frame_reg_rtx and r11 as the static chain
27613 pointer for nested functions. */
27614 int save_regno = 12;
27615 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27616 && !using_static_chain_p)
27617 save_regno = 11;
27618 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27619 {
27620 save_regno = 11;
27621 if (using_static_chain_p)
27622 save_regno = 0;
27623 }
27624 NOT_INUSE (save_regno);
27625
27626 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27627 }
27628
27629 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27630 if (!TARGET_SINGLE_PIC_BASE
27631 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27632 && !constant_pool_empty_p ())
27633 || (DEFAULT_ABI == ABI_V4
27634 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27635 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27636 {
27637 /* If emit_load_toc_table will use the link register, we need to save
27638 it. We use R12 for this purpose because emit_load_toc_table
27639 can use register 0. This allows us to use a plain 'blr' to return
27640 from the procedure more often. */
27641 int save_LR_around_toc_setup = (TARGET_ELF
27642 && DEFAULT_ABI == ABI_V4
27643 && flag_pic
27644 && ! info->lr_save_p
27645 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27646 if (save_LR_around_toc_setup)
27647 {
27648 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27649 rtx tmp = gen_rtx_REG (Pmode, 12);
27650
27651 sp_adjust = 0;
27652 insn = emit_move_insn (tmp, lr);
27653 RTX_FRAME_RELATED_P (insn) = 1;
27654
27655 rs6000_emit_load_toc_table (TRUE);
27656
27657 insn = emit_move_insn (lr, tmp);
27658 add_reg_note (insn, REG_CFA_RESTORE, lr);
27659 RTX_FRAME_RELATED_P (insn) = 1;
27660 }
27661 else
27662 rs6000_emit_load_toc_table (TRUE);
27663 }
27664
27665 #if TARGET_MACHO
27666 if (!TARGET_SINGLE_PIC_BASE
27667 && DEFAULT_ABI == ABI_DARWIN
27668 && flag_pic && crtl->uses_pic_offset_table)
27669 {
27670 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27671 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27672
27673 /* Save and restore LR locally around this call (in R0). */
27674 if (!info->lr_save_p)
27675 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27676
27677 emit_insn (gen_load_macho_picbase (src));
27678
27679 emit_move_insn (gen_rtx_REG (Pmode,
27680 RS6000_PIC_OFFSET_TABLE_REGNUM),
27681 lr);
27682
27683 if (!info->lr_save_p)
27684 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27685 }
27686 #endif
27687
27688 /* If we need to, save the TOC register after doing the stack setup.
27689 Do not emit eh frame info for this save. The unwinder wants info,
27690 conceptually attached to instructions in this function, about
27691 register values in the caller of this function. This R2 may have
27692 already been changed from the value in the caller.
27693 We don't attempt to write accurate DWARF EH frame info for R2
27694 because code emitted by gcc for a (non-pointer) function call
27695 doesn't save and restore R2. Instead, R2 is managed out-of-line
27696 by a linker generated plt call stub when the function resides in
27697 a shared library. This behavior is costly to describe in DWARF,
27698 both in terms of the size of DWARF info and the time taken in the
27699 unwinder to interpret it. R2 changes, apart from the
27700 calls_eh_return case earlier in this function, are handled by
27701 linux-unwind.h frob_update_context. */
27702 if (rs6000_save_toc_in_prologue_p ()
27703 && !cfun->machine->toc_is_wrapped_separately)
27704 {
27705 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27706 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27707 }
27708
27709 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27710 if (using_split_stack && split_stack_arg_pointer_used_p ())
27711 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27712 }
27713
27714 /* Output .extern statements for the save/restore routines we use. */
27715
27716 static void
27717 rs6000_output_savres_externs (FILE *file)
27718 {
27719 rs6000_stack_t *info = rs6000_stack_info ();
27720
27721 if (TARGET_DEBUG_STACK)
27722 debug_stack_info (info);
27723
27724 /* Write .extern for any function we will call to save and restore
27725 fp values. */
27726 if (info->first_fp_reg_save < 64
27727 && !TARGET_MACHO
27728 && !TARGET_ELF)
27729 {
27730 char *name;
27731 int regno = info->first_fp_reg_save - 32;
27732
27733 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27734 {
27735 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27736 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27737 name = rs6000_savres_routine_name (regno, sel);
27738 fprintf (file, "\t.extern %s\n", name);
27739 }
27740 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27741 {
27742 bool lr = (info->savres_strategy
27743 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27744 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27745 name = rs6000_savres_routine_name (regno, sel);
27746 fprintf (file, "\t.extern %s\n", name);
27747 }
27748 }
27749 }
27750
27751 /* Write function prologue. */
27752
27753 static void
27754 rs6000_output_function_prologue (FILE *file)
27755 {
27756 if (!cfun->is_thunk)
27757 rs6000_output_savres_externs (file);
27758
27759 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27760 immediately after the global entry point label. */
27761 if (rs6000_global_entry_point_needed_p ())
27762 {
27763 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27764
27765 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27766
27767 if (TARGET_CMODEL != CMODEL_LARGE)
27768 {
27769 /* In the small and medium code models, we assume the TOC is less
27770 than 2 GB away from the text section, so it can be computed via
27771 the following two-instruction sequence. */
27772 char buf[256];
27773
27774 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27775 fprintf (file, "0:\taddis 2,12,.TOC.-");
27776 assemble_name (file, buf);
27777 fprintf (file, "@ha\n");
27778 fprintf (file, "\taddi 2,2,.TOC.-");
27779 assemble_name (file, buf);
27780 fprintf (file, "@l\n");
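 /* For a label named e.g. .LCF0 this emits
        0: addis 2,12,.TOC.-.LCF0@ha
           addi 2,2,.TOC.-.LCF0@l
    computing the TOC pointer from the entry address in r12.  */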
27781 }
27782 else
27783 {
27784 /* In the large code model, we allow arbitrary offsets between the
27785 TOC and the text section, so we have to load the offset from
27786 memory. The data field is emitted directly before the global
27787 entry point in rs6000_elf_declare_function_name. */
27788 char buf[256];
27789
27790 #ifdef HAVE_AS_ENTRY_MARKERS
27791 /* If supported by the linker, emit a marker relocation. If the
27792 total code size of the final executable or shared library
27793 happens to fit into 2 GB after all, the linker will replace
27794 this code sequence with the sequence for the small or medium
27795 code model. */
27796 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27797 #endif
27798 fprintf (file, "\tld 2,");
27799 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27800 assemble_name (file, buf);
27801 fprintf (file, "-");
27802 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27803 assemble_name (file, buf);
27804 fprintf (file, "(12)\n");
27805 fprintf (file, "\tadd 2,2,12\n");
27806 }
27807
27808 fputs ("\t.localentry\t", file);
27809 assemble_name (file, name);
27810 fputs (",.-", file);
27811 assemble_name (file, name);
27812 fputs ("\n", file);
27813 }
27814
27815 /* Output -mprofile-kernel code. This needs to be done here instead of
27816 in output_function_profile since it must go after the ELFv2 ABI
27817 local entry point. */
27818 if (TARGET_PROFILE_KERNEL && crtl->profile)
27819 {
27820 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27821 gcc_assert (!TARGET_32BIT);
27822
27823 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27824
27825 /* In the ELFv2 ABI we have no compiler stack word. It must be
27826 the responsibility of _mcount to preserve the static chain
27827 register if required. */
27828 if (DEFAULT_ABI != ABI_ELFv2
27829 && cfun->static_chain_decl != NULL)
27830 {
27831 asm_fprintf (file, "\tstd %s,24(%s)\n",
27832 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27833 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27834 asm_fprintf (file, "\tld %s,24(%s)\n",
27835 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27836 }
27837 else
27838 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27839 }
27840
27841 rs6000_pic_labelno++;
27842 }
27843
27844 /* -mprofile-kernel code calls mcount before the function prologue,
27845 so a profiled leaf function should stay a leaf function. */
27846 static bool
27847 rs6000_keep_leaf_when_profiled ()
27848 {
27849 return TARGET_PROFILE_KERNEL;
27850 }
27851
27852 /* Non-zero if vmx regs are restored before the frame pop, zero if
27853 we restore after the pop when possible. */
27854 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27855
27856 /* Restoring cr is a two-step process: loading a reg from the frame
27857 save, then moving the reg to cr. For ABI_V4 we must let the
27858 unwinder know that the stack location is no longer valid at or
27859 before the stack deallocation, but we can't emit a cfa_restore for
27860 cr at the stack deallocation like we do for other registers.
27861 The trouble is that it is possible for the move to cr to be
27862 scheduled after the stack deallocation. So say exactly where cr
27863 is located on each of the two insns. */
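 /* As a sketch (hypothetical register and offset), the two insns are
    "lwz 12,8(1)" then "mtcrf 0xff,12"; the REG_CFA_REGISTER note
    below records that cr lives in r12 from the load onwards, so the
    unwind info stays correct even if the mtcrf is scheduled past the
    stack deallocation.  */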
27864
27865 static rtx
27866 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27867 {
27868 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27869 rtx reg = gen_rtx_REG (SImode, regno);
27870 rtx_insn *insn = emit_move_insn (reg, mem);
27871
27872 if (!exit_func && DEFAULT_ABI == ABI_V4)
27873 {
27874 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27875 rtx set = gen_rtx_SET (reg, cr);
27876
27877 add_reg_note (insn, REG_CFA_REGISTER, set);
27878 RTX_FRAME_RELATED_P (insn) = 1;
27879 }
27880 return reg;
27881 }
27882
27883 /* Reload CR from REG. */
27884
27885 static void
27886 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27887 {
27888 int count = 0;
27889 int i;
27890
27891 if (using_mfcr_multiple)
27892 {
27893 for (i = 0; i < 8; i++)
27894 if (save_reg_p (CR0_REGNO + i))
27895 count++;
27896 gcc_assert (count);
27897 }
27898
27899 if (using_mfcr_multiple && count > 1)
27900 {
27901 rtx_insn *insn;
27902 rtvec p;
27903 int ndx;
27904
27905 p = rtvec_alloc (count);
27906
27907 ndx = 0;
27908 for (i = 0; i < 8; i++)
27909 if (save_reg_p (CR0_REGNO + i))
27910 {
27911 rtvec r = rtvec_alloc (2);
27912 RTVEC_ELT (r, 0) = reg;
27913 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
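 /* The mtcrf field mask numbers CR0 as the most significant of
    eight bits, so field i is selected by 1 << (7-i),
    e.g. CR2 -> 0x20.  */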
27914 RTVEC_ELT (p, ndx) =
27915 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27916 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27917 ndx++;
27918 }
27919 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27920 gcc_assert (ndx == count);
27921
27922 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27923 CR field separately. */
27924 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27925 {
27926 for (i = 0; i < 8; i++)
27927 if (save_reg_p (CR0_REGNO + i))
27928 add_reg_note (insn, REG_CFA_RESTORE,
27929 gen_rtx_REG (SImode, CR0_REGNO + i));
27930
27931 RTX_FRAME_RELATED_P (insn) = 1;
27932 }
27933 }
27934 else
27935 for (i = 0; i < 8; i++)
27936 if (save_reg_p (CR0_REGNO + i))
27937 {
27938 rtx insn = emit_insn (gen_movsi_to_cr_one
27939 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27940
27941 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27942 CR field separately, attached to the insn that in fact
27943 restores this particular CR field. */
27944 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27945 {
27946 add_reg_note (insn, REG_CFA_RESTORE,
27947 gen_rtx_REG (SImode, CR0_REGNO + i));
27948
27949 RTX_FRAME_RELATED_P (insn) = 1;
27950 }
27951 }
27952
27953 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27954 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27955 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27956 {
27957 rtx_insn *insn = get_last_insn ();
27958 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27959
27960 add_reg_note (insn, REG_CFA_RESTORE, cr);
27961 RTX_FRAME_RELATED_P (insn) = 1;
27962 }
27963 }
27964
27965 /* Like cr, the move to lr instruction can be scheduled after the
27966 stack deallocation, but unlike cr, its stack frame save is still
27967 valid. So we only need to emit the cfa_restore on the correct
27968 instruction. */
27969
27970 static void
27971 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27972 {
27973 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27974 rtx reg = gen_rtx_REG (Pmode, regno);
27975
27976 emit_move_insn (reg, mem);
27977 }
27978
27979 static void
27980 restore_saved_lr (int regno, bool exit_func)
27981 {
27982 rtx reg = gen_rtx_REG (Pmode, regno);
27983 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27984 rtx_insn *insn = emit_move_insn (lr, reg);
27985
27986 if (!exit_func && flag_shrink_wrap)
27987 {
27988 add_reg_note (insn, REG_CFA_RESTORE, lr);
27989 RTX_FRAME_RELATED_P (insn) = 1;
27990 }
27991 }
27992
27993 static rtx
27994 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27995 {
27996 if (DEFAULT_ABI == ABI_ELFv2)
27997 {
27998 int i;
27999 for (i = 0; i < 8; i++)
28000 if (save_reg_p (CR0_REGNO + i))
28001 {
28002 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28003 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28004 cfa_restores);
28005 }
28006 }
28007 else if (info->cr_save_p)
28008 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28009 gen_rtx_REG (SImode, CR2_REGNO),
28010 cfa_restores);
28011
28012 if (info->lr_save_p)
28013 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28014 gen_rtx_REG (Pmode, LR_REGNO),
28015 cfa_restores);
28016 return cfa_restores;
28017 }
28018
28019 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
28020 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
28021 below the stack pointer that are not clobbered by signals. */
28022
28023 static inline bool
28024 offset_below_red_zone_p (HOST_WIDE_INT offset)
28025 {
28026 return offset < (DEFAULT_ABI == ABI_V4
28027 ? 0
28028 : TARGET_32BIT ? -220 : -288);
28029 }
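 /* For example, on 64-bit AIX/ELF targets offset_below_red_zone_p (-100)
    is false (within the 288-byte cushion, safe across signals) while
    offset_below_red_zone_p (-300) is true (may be clobbered).  */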
28030
28031 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28032
28033 static void
28034 emit_cfa_restores (rtx cfa_restores)
28035 {
28036 rtx_insn *insn = get_last_insn ();
28037 rtx *loc = &REG_NOTES (insn);
28038
28039 while (*loc)
28040 loc = &XEXP (*loc, 1);
28041 *loc = cfa_restores;
28042 RTX_FRAME_RELATED_P (insn) = 1;
28043 }
28044
28045 /* Emit function epilogue as insns. */
28046
28047 void
28048 rs6000_emit_epilogue (int sibcall)
28049 {
28050 rs6000_stack_t *info;
28051 int restoring_GPRs_inline;
28052 int restoring_FPRs_inline;
28053 int using_load_multiple;
28054 int using_mtcr_multiple;
28055 int use_backchain_to_restore_sp;
28056 int restore_lr;
28057 int strategy;
28058 HOST_WIDE_INT frame_off = 0;
28059 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28060 rtx frame_reg_rtx = sp_reg_rtx;
28061 rtx cfa_restores = NULL_RTX;
28062 rtx insn;
28063 rtx cr_save_reg = NULL_RTX;
28064 machine_mode reg_mode = Pmode;
28065 int reg_size = TARGET_32BIT ? 4 : 8;
28066 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28067 int fp_reg_size = 8;
28068 int i;
28069 bool exit_func;
28070 unsigned ptr_regno;
28071
28072 info = rs6000_stack_info ();
28073
28074 strategy = info->savres_strategy;
28075 using_load_multiple = strategy & REST_MULTIPLE;
28076 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28077 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28078 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28079 || rs6000_tune == PROCESSOR_PPC603
28080 || rs6000_tune == PROCESSOR_PPC750
28081 || optimize_size);
28082 /* Restore via the backchain when we have a large frame, since this
28083 is more efficient than an addis, addi pair. The second condition
28084 here will not trigger at the moment; we don't actually need a
28085 frame pointer for alloca, but the generic parts of the compiler
28086 give us one anyway. */
28087 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28088 ? info->lr_save_offset
28089 : 0) > 32767
28090 || (cfun->calls_alloca
28091 && !frame_pointer_needed));
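 /* The backchain restore is a single load of the saved SP from
    offset 0 of the current frame, e.g. "ld 1,0(1)", rather than
    rebuilding a >32767 displacement with addis/addi.  */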
28092 restore_lr = (info->lr_save_p
28093 && (restoring_FPRs_inline
28094 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28095 && (restoring_GPRs_inline
28096 || info->first_fp_reg_save < 64)
28097 && !cfun->machine->lr_is_wrapped_separately);
28098
28099
28100 if (WORLD_SAVE_P (info))
28101 {
28102 int i, j;
28103 char rname[30];
28104 const char *alloc_rname;
28105 rtvec p;
28106
28107 /* eh_rest_world_r10 will return to the location saved in the LR
28108 stack slot (which is not likely to be our caller).
28109 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28110 rest_world is similar, except any R10 parameter is ignored.
28111 The exception-handling stuff that was here in 2.95 is no
28112 longer necessary. */
28113
28114 p = rtvec_alloc (9
28115 + 32 - info->first_gp_reg_save
28116 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28117 + 63 + 1 - info->first_fp_reg_save);
28118
28119 strcpy (rname, ((crtl->calls_eh_return) ?
28120 "*eh_rest_world_r10" : "*rest_world"));
28121 alloc_rname = ggc_strdup (rname);
28122
28123 j = 0;
28124 RTVEC_ELT (p, j++) = ret_rtx;
28125 RTVEC_ELT (p, j++)
28126 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28127 /* The instruction pattern requires a clobber here;
28128 it is shared with the restVEC helper. */
28129 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
28130
28131 {
28132 /* CR register traditionally saved as CR2. */
28133 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28134 RTVEC_ELT (p, j++)
28135 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28136 if (flag_shrink_wrap)
28137 {
28138 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28139 gen_rtx_REG (Pmode, LR_REGNO),
28140 cfa_restores);
28141 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28142 }
28143 }
28144
28145 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28146 {
28147 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28148 RTVEC_ELT (p, j++)
28149 = gen_frame_load (reg,
28150 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28151 if (flag_shrink_wrap
28152 && save_reg_p (info->first_gp_reg_save + i))
28153 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28154 }
28155 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28156 {
28157 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28158 RTVEC_ELT (p, j++)
28159 = gen_frame_load (reg,
28160 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28161 if (flag_shrink_wrap
28162 && save_reg_p (info->first_altivec_reg_save + i))
28163 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28164 }
28165 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28166 {
28167 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28168 info->first_fp_reg_save + i);
28169 RTVEC_ELT (p, j++)
28170 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28171 if (flag_shrink_wrap
28172 && save_reg_p (info->first_fp_reg_save + i))
28173 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28174 }
28175 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28176 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28177 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28178 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28179 RTVEC_ELT (p, j++)
28180 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28181 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28182
28183 if (flag_shrink_wrap)
28184 {
28185 REG_NOTES (insn) = cfa_restores;
28186 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28187 RTX_FRAME_RELATED_P (insn) = 1;
28188 }
28189 return;
28190 }
28191
28192 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28193 if (info->push_p)
28194 frame_off = info->total_size;
28195
28196 /* Restore AltiVec registers if we must do so before adjusting the
28197 stack. */
28198 if (info->altivec_size != 0
28199 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28200 || (DEFAULT_ABI != ABI_V4
28201 && offset_below_red_zone_p (info->altivec_save_offset))))
28202 {
28203 int i;
28204 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28205
28206 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28207 if (use_backchain_to_restore_sp)
28208 {
28209 int frame_regno = 11;
28210
28211 if ((strategy & REST_INLINE_VRS) == 0)
28212 {
28213 /* Of r11 and r12, select the one not clobbered by an
28214 out-of-line restore function for the frame register. */
28215 frame_regno = 11 + 12 - scratch_regno;
28216 }
28217 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28218 emit_move_insn (frame_reg_rtx,
28219 gen_rtx_MEM (Pmode, sp_reg_rtx));
28220 frame_off = 0;
28221 }
28222 else if (frame_pointer_needed)
28223 frame_reg_rtx = hard_frame_pointer_rtx;
28224
28225 if ((strategy & REST_INLINE_VRS) == 0)
28226 {
28227 int end_save = info->altivec_save_offset + info->altivec_size;
28228 int ptr_off;
28229 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28230 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28231
28232 if (end_save + frame_off != 0)
28233 {
28234 rtx offset = GEN_INT (end_save + frame_off);
28235
28236 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28237 }
28238 else
28239 emit_move_insn (ptr_reg, frame_reg_rtx);
28240
28241 ptr_off = -end_save;
28242 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28243 info->altivec_save_offset + ptr_off,
28244 0, V4SImode, SAVRES_VR);
28245 }
28246 else
28247 {
28248 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28249 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28250 {
28251 rtx addr, areg, mem, insn;
28252 rtx reg = gen_rtx_REG (V4SImode, i);
28253 HOST_WIDE_INT offset
28254 = (info->altivec_save_offset + frame_off
28255 + 16 * (i - info->first_altivec_reg_save));
28256
28257 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28258 {
28259 mem = gen_frame_mem (V4SImode,
28260 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28261 GEN_INT (offset)));
28262 insn = gen_rtx_SET (reg, mem);
28263 }
28264 else
28265 {
28266 areg = gen_rtx_REG (Pmode, 0);
28267 emit_move_insn (areg, GEN_INT (offset));
28268
28269 /* AltiVec addressing mode is [reg+reg]. */
28270 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28271 mem = gen_frame_mem (V4SImode, addr);
28272
28273 /* Rather than emitting a generic move, force use of the
28274 lvx instruction, which we always want. In particular we
28275 don't want lxvd2x/xxpermdi for little endian. */
28276 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28277 }
28278
28279 (void) emit_insn (insn);
28280 }
28281 }
28282
28283 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28284 if (((strategy & REST_INLINE_VRS) == 0
28285 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28286 && (flag_shrink_wrap
28287 || (offset_below_red_zone_p
28288 (info->altivec_save_offset
28289 + 16 * (i - info->first_altivec_reg_save))))
28290 && save_reg_p (i))
28291 {
28292 rtx reg = gen_rtx_REG (V4SImode, i);
28293 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28294 }
28295 }
28296
28297 /* Restore VRSAVE if we must do so before adjusting the stack. */
28298 if (info->vrsave_size != 0
28299 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28300 || (DEFAULT_ABI != ABI_V4
28301 && offset_below_red_zone_p (info->vrsave_save_offset))))
28302 {
28303 rtx reg;
28304
28305 if (frame_reg_rtx == sp_reg_rtx)
28306 {
28307 if (use_backchain_to_restore_sp)
28308 {
28309 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28310 emit_move_insn (frame_reg_rtx,
28311 gen_rtx_MEM (Pmode, sp_reg_rtx));
28312 frame_off = 0;
28313 }
28314 else if (frame_pointer_needed)
28315 frame_reg_rtx = hard_frame_pointer_rtx;
28316 }
28317
28318 reg = gen_rtx_REG (SImode, 12);
28319 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28320 info->vrsave_save_offset + frame_off));
28321
28322 emit_insn (generate_set_vrsave (reg, info, 1));
28323 }
28324
28325 insn = NULL_RTX;
28326 /* If we have a large stack frame, restore the old stack pointer
28327 using the backchain. */
28328 if (use_backchain_to_restore_sp)
28329 {
28330 if (frame_reg_rtx == sp_reg_rtx)
28331 {
28332 /* Under V.4, don't reset the stack pointer until after we're done
28333 loading the saved registers. */
28334 if (DEFAULT_ABI == ABI_V4)
28335 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28336
28337 insn = emit_move_insn (frame_reg_rtx,
28338 gen_rtx_MEM (Pmode, sp_reg_rtx));
28339 frame_off = 0;
28340 }
28341 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28342 && DEFAULT_ABI == ABI_V4)
28343 /* frame_reg_rtx has been set up by the altivec restore. */
28344 ;
28345 else
28346 {
28347 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28348 frame_reg_rtx = sp_reg_rtx;
28349 }
28350 }
28351 /* If we have a frame pointer, we can restore the old stack pointer
28352 from it. */
28353 else if (frame_pointer_needed)
28354 {
28355 frame_reg_rtx = sp_reg_rtx;
28356 if (DEFAULT_ABI == ABI_V4)
28357 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28358 /* Prevent reordering memory accesses against stack pointer restore. */
28359 else if (cfun->calls_alloca
28360 || offset_below_red_zone_p (-info->total_size))
28361 rs6000_emit_stack_tie (frame_reg_rtx, true);
28362
28363 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28364 GEN_INT (info->total_size)));
28365 frame_off = 0;
28366 }
28367 else if (info->push_p
28368 && DEFAULT_ABI != ABI_V4
28369 && !crtl->calls_eh_return)
28370 {
28371 /* Prevent reordering memory accesses against stack pointer restore. */
28372 if (cfun->calls_alloca
28373 || offset_below_red_zone_p (-info->total_size))
28374 rs6000_emit_stack_tie (frame_reg_rtx, false);
28375 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28376 GEN_INT (info->total_size)));
28377 frame_off = 0;
28378 }
28379 if (insn && frame_reg_rtx == sp_reg_rtx)
28380 {
28381 if (cfa_restores)
28382 {
28383 REG_NOTES (insn) = cfa_restores;
28384 cfa_restores = NULL_RTX;
28385 }
28386 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28387 RTX_FRAME_RELATED_P (insn) = 1;
28388 }
28389
28390 /* Restore AltiVec registers if we have not done so already. */
28391 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28392 && info->altivec_size != 0
28393 && (DEFAULT_ABI == ABI_V4
28394 || !offset_below_red_zone_p (info->altivec_save_offset)))
28395 {
28396 int i;
28397
28398 if ((strategy & REST_INLINE_VRS) == 0)
28399 {
28400 int end_save = info->altivec_save_offset + info->altivec_size;
28401 int ptr_off;
28402 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28403 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28404 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28405
28406 if (end_save + frame_off != 0)
28407 {
28408 rtx offset = GEN_INT (end_save + frame_off);
28409
28410 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28411 }
28412 else
28413 emit_move_insn (ptr_reg, frame_reg_rtx);
28414
28415 ptr_off = -end_save;
28416 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28417 info->altivec_save_offset + ptr_off,
28418 0, V4SImode, SAVRES_VR);
28419 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28420 {
28421 /* Frame reg was clobbered by out-of-line save. Restore it
28422 from ptr_reg, and if we are calling an out-of-line gpr or
28423 fpr restore, set up the correct pointer and offset. */
28424 unsigned newptr_regno = 1;
28425 if (!restoring_GPRs_inline)
28426 {
28427 bool lr = info->gp_save_offset + info->gp_size == 0;
28428 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28429 newptr_regno = ptr_regno_for_savres (sel);
28430 end_save = info->gp_save_offset + info->gp_size;
28431 }
28432 else if (!restoring_FPRs_inline)
28433 {
28434 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28435 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28436 newptr_regno = ptr_regno_for_savres (sel);
28437 end_save = info->fp_save_offset + info->fp_size;
28438 }
28439
28440 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28441 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28442
28443 if (end_save + ptr_off != 0)
28444 {
28445 rtx offset = GEN_INT (end_save + ptr_off);
28446
28447 frame_off = -end_save;
28448 if (TARGET_32BIT)
28449 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28450 ptr_reg, offset));
28451 else
28452 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28453 ptr_reg, offset));
28454 }
28455 else
28456 {
28457 frame_off = ptr_off;
28458 emit_move_insn (frame_reg_rtx, ptr_reg);
28459 }
28460 }
28461 }
28462 else
28463 {
28464 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28465 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28466 {
28467 rtx addr, areg, mem, insn;
28468 rtx reg = gen_rtx_REG (V4SImode, i);
28469 HOST_WIDE_INT offset
28470 = (info->altivec_save_offset + frame_off
28471 + 16 * (i - info->first_altivec_reg_save));
28472
28473 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28474 {
28475 mem = gen_frame_mem (V4SImode,
28476 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28477 GEN_INT (offset)));
28478 insn = gen_rtx_SET (reg, mem);
28479 }
28480 else
28481 {
28482 areg = gen_rtx_REG (Pmode, 0);
28483 emit_move_insn (areg, GEN_INT (offset));
28484
28485 /* AltiVec addressing mode is [reg+reg]. */
28486 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28487 mem = gen_frame_mem (V4SImode, addr);
28488
28489 /* Rather than emitting a generic move, force use of the
28490 lvx instruction, which we always want. In particular we
28491 don't want lxvd2x/xxpermdi for little endian. */
28492 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28493 }
28494
28495 (void) emit_insn (insn);
28496 }
28497 }
28498
28499 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28500 if (((strategy & REST_INLINE_VRS) == 0
28501 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28502 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28503 && save_reg_p (i))
28504 {
28505 rtx reg = gen_rtx_REG (V4SImode, i);
28506 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28507 }
28508 }
28509
28510 /* Restore VRSAVE if we have not done so already. */
28511 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28512 && info->vrsave_size != 0
28513 && (DEFAULT_ABI == ABI_V4
28514 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28515 {
28516 rtx reg;
28517
28518 reg = gen_rtx_REG (SImode, 12);
28519 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28520 info->vrsave_save_offset + frame_off));
28521
28522 emit_insn (generate_set_vrsave (reg, info, 1));
28523 }
28524
28525 /* If we exit by an out-of-line restore function on ABI_V4 then that
28526 function will deallocate the stack, so we don't need to worry
28527 about the unwinder restoring cr from an invalid stack frame
28528 location. */
28529 exit_func = (!restoring_FPRs_inline
28530 || (!restoring_GPRs_inline
28531 && info->first_fp_reg_save == 64));
28532
28533 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28534 *separate* slots if the routine calls __builtin_eh_return, so
28535 that they can be independently restored by the unwinder. */
28536 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28537 {
28538 int i, cr_off = info->ehcr_offset;
28539
28540 for (i = 0; i < 8; i++)
28541 if (!call_used_regs[CR0_REGNO + i])
28542 {
28543 rtx reg = gen_rtx_REG (SImode, 0);
28544 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28545 cr_off + frame_off));
28546
28547 insn = emit_insn (gen_movsi_to_cr_one
28548 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28549
28550 if (!exit_func && flag_shrink_wrap)
28551 {
28552 add_reg_note (insn, REG_CFA_RESTORE,
28553 gen_rtx_REG (SImode, CR0_REGNO + i));
28554
28555 RTX_FRAME_RELATED_P (insn) = 1;
28556 }
28557
28558 cr_off += reg_size;
28559 }
28560 }
28561
28562 /* Get the old lr if we saved it. If we are restoring registers
28563 out-of-line, then the out-of-line routines can do this for us. */
28564 if (restore_lr && restoring_GPRs_inline)
28565 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28566
28567 /* Get the old cr if we saved it. */
28568 if (info->cr_save_p)
28569 {
28570 unsigned cr_save_regno = 12;
28571
28572 if (!restoring_GPRs_inline)
28573 {
28574 /* Ensure we don't use the register used by the out-of-line
28575 gpr register restore below. */
28576 bool lr = info->gp_save_offset + info->gp_size == 0;
28577 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28578 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28579
28580 if (gpr_ptr_regno == 12)
28581 cr_save_regno = 11;
28582 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28583 }
28584 else if (REGNO (frame_reg_rtx) == 12)
28585 cr_save_regno = 11;
28586
28587 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28588 info->cr_save_offset + frame_off,
28589 exit_func);
28590 }
28591
28592 /* Set LR here to try to overlap restores below. */
28593 if (restore_lr && restoring_GPRs_inline)
28594 restore_saved_lr (0, exit_func);
28595
28596 /* Load exception handler data registers, if needed. */
28597 if (crtl->calls_eh_return)
28598 {
28599 unsigned int i, regno;
28600
28601 if (TARGET_AIX)
28602 {
28603 rtx reg = gen_rtx_REG (reg_mode, 2);
28604 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28605 frame_off + RS6000_TOC_SAVE_SLOT));
28606 }
28607
28608 for (i = 0; ; ++i)
28609 {
28610 rtx mem;
28611
28612 regno = EH_RETURN_DATA_REGNO (i);
28613 if (regno == INVALID_REGNUM)
28614 break;
28615
28616 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28617 info->ehrd_offset + frame_off
28618 + reg_size * (int) i);
28619
28620 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28621 }
28622 }
28623
28624 /* Restore GPRs. This is done as a PARALLEL if we are using
28625 the load-multiple instructions. */
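 /* For illustration: with REST_MULTIPLE the PARALLEL matches the lmw
    pattern, e.g. "lmw 26,-24(1)" reloads r26..r31 in one instruction
    (32-bit only; the offset is hypothetical).  */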
28626 if (!restoring_GPRs_inline)
28627 {
28628 /* We are jumping to an out-of-line function. */
28629 rtx ptr_reg;
28630 int end_save = info->gp_save_offset + info->gp_size;
28631 bool can_use_exit = end_save == 0;
28632 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28633 int ptr_off;
28634
28635 /* Emit stack reset code if we need it. */
28636 ptr_regno = ptr_regno_for_savres (sel);
28637 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28638 if (can_use_exit)
28639 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28640 else if (end_save + frame_off != 0)
28641 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28642 GEN_INT (end_save + frame_off)));
28643 else if (REGNO (frame_reg_rtx) != ptr_regno)
28644 emit_move_insn (ptr_reg, frame_reg_rtx);
28645 if (REGNO (frame_reg_rtx) == ptr_regno)
28646 frame_off = -end_save;
28647
28648 if (can_use_exit && info->cr_save_p)
28649 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28650
28651 ptr_off = -end_save;
28652 rs6000_emit_savres_rtx (info, ptr_reg,
28653 info->gp_save_offset + ptr_off,
28654 info->lr_save_offset + ptr_off,
28655 reg_mode, sel);
28656 }
28657 else if (using_load_multiple)
28658 {
28659 rtvec p;
28660 p = rtvec_alloc (32 - info->first_gp_reg_save);
28661 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28662 RTVEC_ELT (p, i)
28663 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28664 frame_reg_rtx,
28665 info->gp_save_offset + frame_off + reg_size * i);
28666 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28667 }
28668 else
28669 {
28670 int offset = info->gp_save_offset + frame_off;
28671 for (i = info->first_gp_reg_save; i < 32; i++)
28672 {
28673 if (save_reg_p (i)
28674 && !cfun->machine->gpr_is_wrapped_separately[i])
28675 {
28676 rtx reg = gen_rtx_REG (reg_mode, i);
28677 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28678 }
28679
28680 offset += reg_size;
28681 }
28682 }
28683
28684 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28685 {
28686 /* If the frame pointer was used then we can't delay emitting
28687 a REG_CFA_DEF_CFA note. This must happen on the insn that
28688 restores the frame pointer, r31. We may have already emitted
28689 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28690 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28691 be harmless if emitted. */
28692 if (frame_pointer_needed)
28693 {
28694 insn = get_last_insn ();
28695 add_reg_note (insn, REG_CFA_DEF_CFA,
28696 plus_constant (Pmode, frame_reg_rtx, frame_off));
28697 RTX_FRAME_RELATED_P (insn) = 1;
28698 }
28699
28700 /* Set up cfa_restores. We always need these when
28701 shrink-wrapping. If not shrink-wrapping then we only need
28702 the cfa_restore when the stack location is no longer valid.
28703 The cfa_restores must be emitted on or before the insn that
28704 invalidates the stack, and of course must not be emitted
28705 before the insn that actually does the restore. The latter
28706 is why it is a bad idea to emit the cfa_restores as a group
28707 on the last instruction here that actually does a restore:
28708 That insn may be reordered with respect to others doing
28709 restores. */
28710 if (flag_shrink_wrap
28711 && !restoring_GPRs_inline
28712 && info->first_fp_reg_save == 64)
28713 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28714
28715 for (i = info->first_gp_reg_save; i < 32; i++)
28716 if (save_reg_p (i)
28717 && !cfun->machine->gpr_is_wrapped_separately[i])
28718 {
28719 rtx reg = gen_rtx_REG (reg_mode, i);
28720 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28721 }
28722 }
28723
28724 if (!restoring_GPRs_inline
28725 && info->first_fp_reg_save == 64)
28726 {
28727 /* We are jumping to an out-of-line function. */
28728 if (cfa_restores)
28729 emit_cfa_restores (cfa_restores);
28730 return;
28731 }
28732
28733 if (restore_lr && !restoring_GPRs_inline)
28734 {
28735 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28736 restore_saved_lr (0, exit_func);
28737 }
28738
28739 /* Restore FPRs if we need to do it without calling a function. */
28740 if (restoring_FPRs_inline)
28741 {
28742 int offset = info->fp_save_offset + frame_off;
28743 for (i = info->first_fp_reg_save; i < 64; i++)
28744 {
28745 if (save_reg_p (i)
28746 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28747 {
28748 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28749 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28750 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28751 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28752 cfa_restores);
28753 }
28754
28755 offset += fp_reg_size;
28756 }
28757 }
28758
28759 /* If we saved cr, restore it here. Just those that were used. */
28760 if (info->cr_save_p)
28761 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28762
28763 /* If this is V.4, unwind the stack pointer after all of the loads
28764 have been done, or set up r11 if we are restoring fp out of line. */
28765 ptr_regno = 1;
28766 if (!restoring_FPRs_inline)
28767 {
28768 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28769 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28770 ptr_regno = ptr_regno_for_savres (sel);
28771 }
28772
28773 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28774 if (REGNO (frame_reg_rtx) == ptr_regno)
28775 frame_off = 0;
28776
28777 if (insn && restoring_FPRs_inline)
28778 {
28779 if (cfa_restores)
28780 {
28781 REG_NOTES (insn) = cfa_restores;
28782 cfa_restores = NULL_RTX;
28783 }
28784 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28785 RTX_FRAME_RELATED_P (insn) = 1;
28786 }
28787
28788 if (crtl->calls_eh_return)
28789 {
28790 rtx sa = EH_RETURN_STACKADJ_RTX;
28791 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28792 }
28793
28794 if (!sibcall && restoring_FPRs_inline)
28795 {
28796 if (cfa_restores)
28797 {
28798 /* We can't hang the cfa_restores off a simple return,
28799 since the shrink-wrap code sometimes uses an existing
28800 return. This means there might be a path from
28801 pre-prologue code to this return, and dwarf2cfi code
28802 wants the eh_frame unwinder state to be the same on
28803 all paths to any point. So we need to emit the
28804 cfa_restores before the return. For -m64 we really
28805 don't need epilogue cfa_restores at all, except for
28806 this irritating dwarf2cfi-with-shrink-wrap
28807 requirement; the stack red-zone means eh_frame info
28808 from the prologue telling the unwinder to restore
28809 from the stack is perfectly good right to the end of
28810 the function. */
28811 emit_insn (gen_blockage ());
28812 emit_cfa_restores (cfa_restores);
28813 cfa_restores = NULL_RTX;
28814 }
28815
28816 emit_jump_insn (targetm.gen_simple_return ());
28817 }
28818
28819 if (!sibcall && !restoring_FPRs_inline)
28820 {
28821 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28822 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28823 int elt = 0;
28824 RTVEC_ELT (p, elt++) = ret_rtx;
28825 if (lr)
28826 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28827
28828 /* We have to restore more than two FP registers, so branch to the
28829 restore function. It will return to our caller. */
28830 int i;
28831 int reg;
28832 rtx sym;
28833
28834 if (flag_shrink_wrap)
28835 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28836
28837 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28838 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28839 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28840 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28841
28842 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28843 {
28844 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28845
28846 RTVEC_ELT (p, elt++)
28847 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28848 if (flag_shrink_wrap
28849 && save_reg_p (info->first_fp_reg_save + i))
28850 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28851 }
28852
28853 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28854 }
28855
28856 if (cfa_restores)
28857 {
28858 if (sibcall)
28859 /* Ensure the cfa_restores are hung off an insn that won't
28860 be reordered above other restores. */
28861 emit_insn (gen_blockage ());
28862
28863 emit_cfa_restores (cfa_restores);
28864 }
28865 }
28866
28867 /* Write function epilogue. */
28868
28869 static void
28870 rs6000_output_function_epilogue (FILE *file)
28871 {
28872 #if TARGET_MACHO
28873 macho_branch_islands ();
28874
28875 {
28876 rtx_insn *insn = get_last_insn ();
28877 rtx_insn *deleted_debug_label = NULL;
28878
28879 /* Mach-O doesn't support labels at the end of objects, so if
28880 it looks like we might want one, take special action.
28881
28882 First, collect any sequence of deleted debug labels. */
28883 while (insn
28884 && NOTE_P (insn)
28885 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28886 {
28887 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes (and only those),
28888 don't insert a nop; instead set their CODE_LABEL_NUMBER
28889 to -1, otherwise there would be code generation differences
28890 between -g and -g0.  */
28891 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28892 deleted_debug_label = insn;
28893 insn = PREV_INSN (insn);
28894 }
28895
28896 /* Second, if we have:
28897 label:
28898 barrier
28899 then this needs to be detected, so skip past the barrier. */
28900
28901 if (insn && BARRIER_P (insn))
28902 insn = PREV_INSN (insn);
28903
28904 /* Up to now we've only seen notes or barriers. */
28905 if (insn)
28906 {
28907 if (LABEL_P (insn)
28908 || (NOTE_P (insn)
28909 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28910 /* Trailing label: <barrier>. */
28911 fputs ("\tnop\n", file);
28912 else
28913 {
28914 /* Lastly, see if we have a completely empty function body. */
28915 while (insn && ! INSN_P (insn))
28916 insn = PREV_INSN (insn);
28917 /* If we don't find any insns, we've got an empty function body;
28918 i.e. completely empty - without a return or branch. This is
28919 taken as the case where a function body has been removed
28920 because it contains an inline __builtin_unreachable(). GCC
28921 states that reaching __builtin_unreachable() means UB, so we're
28922 not obliged to do anything special; however, we want
28923 non-zero-sized function bodies. To meet this, and help the
28924 user out, let's trap the case. */
28925 if (insn == NULL)
28926 fputs ("\ttrap\n", file);
28927 }
28928 }
28929 else if (deleted_debug_label)
28930 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28931 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28932 CODE_LABEL_NUMBER (insn) = -1;
28933 }
28934 #endif
28935
28936 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28937 on its format.
28938
28939 We don't output a traceback table if -finhibit-size-directive was
28940 used. The documentation for -finhibit-size-directive reads
28941 ``don't output a @code{.size} assembler directive, or anything
28942 else that would cause trouble if the function is split in the
28943 middle, and the two halves are placed at locations far apart in
28944 memory.'' The traceback table has this property, since it
28945 includes the offset from the start of the function to the
28946 traceback table itself.
28947
28948 System V.4 PowerPC targets (and the embedded ABI derived from
28949 it) use a different traceback table.  */
28950 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28951 && ! flag_inhibit_size_directive
28952 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28953 {
28954 const char *fname = NULL;
28955 const char *language_string = lang_hooks.name;
28956 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28957 int i;
28958 int optional_tbtab;
28959 rs6000_stack_t *info = rs6000_stack_info ();
28960
28961 if (rs6000_traceback == traceback_full)
28962 optional_tbtab = 1;
28963 else if (rs6000_traceback == traceback_part)
28964 optional_tbtab = 0;
28965 else
28966 optional_tbtab = !optimize_size && !TARGET_ELF;
28967
28968 if (optional_tbtab)
28969 {
28970 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28971 while (*fname == '.') /* V.4 encodes . in the name */
28972 fname++;
28973
28974 /* Need label immediately before tbtab, so we can compute
28975 its offset from the function start. */
28976 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28977 ASM_OUTPUT_LABEL (file, fname);
28978 }
28979
28980 /* The .tbtab pseudo-op can only be used for the first eight
28981 expressions, since it can't handle the possibly variable
28982 length fields that follow. However, if you omit the optional
28983 fields, the assembler outputs zeros for all optional fields
28984 anyway, giving each variable length field its minimum length
28985 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28986 pseudo-op at all. */
28987
28988 /* An all-zero word flags the start of the tbtab, for debuggers
28989 that have to find it by searching forward from the entry
28990 point or from the current pc. */
28991 fputs ("\t.long 0\n", file);
28992
28993 /* Tbtab format type. Use format type 0. */
28994 fputs ("\t.byte 0,", file);
28995
28996 /* Language type. Unfortunately, there does not seem to be any
28997 official way to discover the language being compiled, so we
28998 use language_string.
28999 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29000 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29001 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29002 numbers either, so for now use 0. */
29003 if (lang_GNU_C ()
29004 || ! strcmp (language_string, "GNU GIMPLE")
29005 || ! strcmp (language_string, "GNU Go")
29006 || ! strcmp (language_string, "GNU D")
29007 || ! strcmp (language_string, "libgccjit"))
29008 i = 0;
29009 else if (! strcmp (language_string, "GNU F77")
29010 || lang_GNU_Fortran ())
29011 i = 1;
29012 else if (! strcmp (language_string, "GNU Ada"))
29013 i = 3;
29014 else if (lang_GNU_CXX ()
29015 || ! strcmp (language_string, "GNU Objective-C++"))
29016 i = 9;
29017 else if (! strcmp (language_string, "GNU Java"))
29018 i = 13;
29019 else if (! strcmp (language_string, "GNU Objective-C"))
29020 i = 14;
29021 else
29022 gcc_unreachable ();
29023 fprintf (file, "%d,", i);
29024
29025 /* 8 single bit fields: global linkage (not set for C extern linkage,
29026 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29027 from start of procedure stored in tbtab, internal function, function
29028 has controlled storage, function has no toc, function uses fp,
29029 function logs/aborts fp operations. */
29030 /* Assume that fp operations are used if any fp reg must be saved. */
29031 fprintf (file, "%d,",
29032 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
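/* For example, a full traceback table (optional_tbtab == 1) for a
   function that saves FPRs emits (1 << 5) | (1 << 1), i.e. "34,":
   bit 5 says the tbtab offset field is present, bit 1 that the
   function uses floating point.  */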
29033
29034 /* 6 bitfields: function is interrupt handler, name present in
29035 proc table, function calls alloca, on condition directives
29036 (controls stack walks, 3 bits), saves condition reg, saves
29037 link reg. */
29038 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29039 set up as a frame pointer, even when there is no alloca call. */
29040 fprintf (file, "%d,",
29041 ((optional_tbtab << 6)
29042 | ((optional_tbtab & frame_pointer_needed) << 5)
29043 | (info->cr_save_p << 1)
29044 | (info->lr_save_p)));
29045
29046 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29047 (6 bits). */
29048 fprintf (file, "%d,",
29049 (info->push_p << 7) | (64 - info->first_fp_reg_save));
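/* E.g. with a frame pushed and f58..f63 saved this emits
   (1 << 7) | (64 - 58), i.e. "134,".  */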
29050
29051 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29052 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29053
29054 if (optional_tbtab)
29055 {
29056 /* Compute the parameter info from the function decl argument
29057 list. */
29058 tree decl;
29059 int next_parm_info_bit = 31;
29060
29061 for (decl = DECL_ARGUMENTS (current_function_decl);
29062 decl; decl = DECL_CHAIN (decl))
29063 {
29064 rtx parameter = DECL_INCOMING_RTL (decl);
29065 machine_mode mode = GET_MODE (parameter);
29066
29067 if (GET_CODE (parameter) == REG)
29068 {
29069 if (SCALAR_FLOAT_MODE_P (mode))
29070 {
29071 int bits;
29072
29073 float_parms++;
29074
29075 switch (mode)
29076 {
29077 case E_SFmode:
29078 case E_SDmode:
29079 bits = 0x2;
29080 break;
29081
29082 case E_DFmode:
29083 case E_DDmode:
29084 case E_TFmode:
29085 case E_TDmode:
29086 case E_IFmode:
29087 case E_KFmode:
29088 bits = 0x3;
29089 break;
29090
29091 default:
29092 gcc_unreachable ();
29093 }
29094
29095 /* If only one bit will fit, don't or in this entry. */
29096 if (next_parm_info_bit > 0)
29097 parm_info |= (bits << (next_parm_info_bit - 1));
29098 next_parm_info_bit -= 2;
29099 }
29100 else
29101 {
29102 fixed_parms += ((GET_MODE_SIZE (mode)
29103 + (UNITS_PER_WORD - 1))
29104 / UNITS_PER_WORD);
29105 next_parm_info_bit -= 1;
29106 }
29107 }
29108 }
29109 }
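/* As a worked example, for f (int i, double d) with both arguments
   in registers, the int consumes one parm_info bit (left as 0) and
   the double the next two (set to 0b11), giving
   parm_info == 0x60000000.  */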
29110
29111 /* Number of fixed point parameters. */
29112 /* This is actually the number of words of fixed point parameters; thus
29113 an 8-byte struct counts as 2, and the maximum value is 8.  */
29114 fprintf (file, "%d,", fixed_parms);
29115
29116 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29117 all on stack. */
29118 /* This is actually the number of fp registers that hold parameters;
29119 and thus the maximum value is 13. */
29120 /* Set parameters on stack bit if parameters are not in their original
29121 registers, regardless of whether they are on the stack? Xlc
29122 seems to set the bit when not optimizing. */
29123 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29124
29125 if (optional_tbtab)
29126 {
29127 /* Optional fields follow. Some are variable length. */
29128
29129 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29130 float, 11 double float. */
29131 /* There is an entry for each parameter in a register, in the order
29132 that they occur in the parameter list. Any intervening arguments
29133 on the stack are ignored. If the list overflows a long (max
29134 possible length 34 bits) then completely leave off all elements
29135 that don't fit. */
29136 /* Only emit this long if there was at least one parameter. */
29137 if (fixed_parms || float_parms)
29138 fprintf (file, "\t.long %d\n", parm_info);
29139
29140 /* Offset from start of code to tb table. */
29141 fputs ("\t.long ", file);
29142 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29143 RS6000_OUTPUT_BASENAME (file, fname);
29144 putc ('-', file);
29145 rs6000_output_function_entry (file, fname);
29146 putc ('\n', file);
29147
29148 /* Interrupt handler mask. */
29149 /* Omit this long, since we never set the interrupt handler bit
29150 above. */
29151
29152 /* Number of CTL (controlled storage) anchors. */
29153 /* Omit this long, since the has_ctl bit is never set above. */
29154
29155 /* Displacement into stack of each CTL anchor. */
29156 /* Omit this list of longs, because there are no CTL anchors. */
29157
29158 /* Length of function name. */
29159 if (*fname == '*')
29160 ++fname;
29161 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29162
29163 /* Function name. */
29164 assemble_string (fname, strlen (fname));
29165
29166 /* Register for alloca automatic storage; this is always reg 31.
29167 Only emit this if the alloca bit was set above. */
29168 if (frame_pointer_needed)
29169 fputs ("\t.byte 31\n", file);
29170
29171 fputs ("\t.align 2\n", file);
29172 }
29173 }
29174
29175 /* Arrange to define .LCTOC1 label, if not already done. */
29176 if (need_toc_init)
29177 {
29178 need_toc_init = 0;
29179 if (!toc_initialized)
29180 {
29181 switch_to_section (toc_section);
29182 switch_to_section (current_function_section ());
29183 }
29184 }
29185 }
29186
29187 /* -fsplit-stack support. */
29188
29189 /* A SYMBOL_REF for __morestack. */
29190 static GTY(()) rtx morestack_ref;
29191
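/* Generate RT = RA + C using the add insn appropriate to the word size.  */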
29192 static rtx
29193 gen_add3_const (rtx rt, rtx ra, long c)
29194 {
29195 if (TARGET_64BIT)
29196 return gen_adddi3 (rt, ra, GEN_INT (c));
29197 else
29198 return gen_addsi3 (rt, ra, GEN_INT (c));
29199 }
29200
29201 /* Emit -fsplit-stack prologue, which goes before the regular function
29202 prologue (at local entry point in the case of ELFv2). */
29203
29204 void
29205 rs6000_expand_split_stack_prologue (void)
29206 {
29207 rs6000_stack_t *info = rs6000_stack_info ();
29208 unsigned HOST_WIDE_INT allocate;
29209 long alloc_hi, alloc_lo;
29210 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29211 rtx_insn *insn;
29212
29213 gcc_assert (flag_split_stack && reload_completed);
29214
29215 if (!info->push_p)
29216 return;
29217
29218 if (global_regs[29])
29219 {
29220 error ("%qs uses register r29", "-fsplit-stack");
29221 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29222 "conflicts with %qD", global_regs_decl[29]);
29223 }
29224
29225 allocate = info->total_size;
29226 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29227 {
29228 sorry ("stack frame larger than 2G is not supported for -fsplit-stack");
29229 return;
29230 }
29231 if (morestack_ref == NULL_RTX)
29232 {
29233 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29234 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29235 | SYMBOL_FLAG_FUNCTION);
29236 }
29237
29238 r0 = gen_rtx_REG (Pmode, 0);
29239 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29240 r12 = gen_rtx_REG (Pmode, 12);
29241 emit_insn (gen_load_split_stack_limit (r0));
29242 /* Always emit two insns here to calculate the requested stack,
29243 so that the linker can edit them when adjusting size for calling
29244 non-split-stack code. */
29245 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29246 alloc_lo = -allocate - alloc_hi;
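/* E.g. for allocate == 0x11000 this yields alloc_hi == -0x10000 and
   alloc_lo == -0x1000, roughly "addis r12,r1,-1; addi r12,r12,-4096".
   The +0x8000 rounding guarantees that alloc_lo fits in a signed
   16-bit immediate.  */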
29247 if (alloc_hi != 0)
29248 {
29249 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29250 if (alloc_lo != 0)
29251 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29252 else
29253 emit_insn (gen_nop ());
29254 }
29255 else
29256 {
29257 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29258 emit_insn (gen_nop ());
29259 }
29260
29261 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29262 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29263 ok_label = gen_label_rtx ();
29264 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29265 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29266 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29267 pc_rtx);
29268 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29269 JUMP_LABEL (insn) = ok_label;
29270 /* Mark the jump as very likely to be taken. */
29271 add_reg_br_prob_note (insn, profile_probability::very_likely ());
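/* When the branch is not taken, execution falls into the slow path
   below, which saves LR and calls __morestack.  */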
29272
29273 lr = gen_rtx_REG (Pmode, LR_REGNO);
29274 insn = emit_move_insn (r0, lr);
29275 RTX_FRAME_RELATED_P (insn) = 1;
29276 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29277 RTX_FRAME_RELATED_P (insn) = 1;
29278
29279 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29280 const0_rtx, const0_rtx));
29281 call_fusage = NULL_RTX;
29282 use_reg (&call_fusage, r12);
29283 /* Say the call uses r0, even though it doesn't, to stop regrename
29284 from twiddling with the insns saving lr, trashing args for cfun.
29285 The insns restoring lr are similarly protected by making
29286 split_stack_return use r0. */
29287 use_reg (&call_fusage, r0);
29288 add_function_usage_to (insn, call_fusage);
29289 /* Indicate that this function can't jump to non-local gotos. */
29290 make_reg_eh_region_note_nothrow_nononlocal (insn);
29291 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29292 insn = emit_move_insn (lr, r0);
29293 add_reg_note (insn, REG_CFA_RESTORE, lr);
29294 RTX_FRAME_RELATED_P (insn) = 1;
29295 emit_insn (gen_split_stack_return ());
29296
29297 emit_label (ok_label);
29298 LABEL_NUSES (ok_label) = 1;
29299 }
29300
29301 /* Return the internal arg pointer used for function incoming
29302 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29303 to copy it to a pseudo in order for it to be preserved over calls
29304 and suchlike. We'd really like to use a pseudo here for the
29305 internal arg pointer but data-flow analysis is not prepared to
29306 accept pseudos as live at the beginning of a function. */
29307
29308 static rtx
29309 rs6000_internal_arg_pointer (void)
29310 {
29311 if (flag_split_stack
29312 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29313 == NULL))
29315 {
29316 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29317 {
29318 rtx pat;
29319
29320 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29321 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29322
29323 /* Put the pseudo initialization right after the note at the
29324 beginning of the function. */
29325 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29326 gen_rtx_REG (Pmode, 12));
29327 push_topmost_sequence ();
29328 emit_insn_after (pat, get_insns ());
29329 pop_topmost_sequence ();
29330 }
29331 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29332 FIRST_PARM_OFFSET (current_function_decl));
29333 return copy_to_reg (ret);
29334 }
29335 return virtual_incoming_args_rtx;
29336 }
29337
29338 /* We may have to tell the dataflow pass that the split stack prologue
29339 is initializing a register. */
29340
29341 static void
29342 rs6000_live_on_entry (bitmap regs)
29343 {
29344 if (flag_split_stack)
29345 bitmap_set_bit (regs, 12);
29346 }
29347
29348 /* Emit -fsplit-stack dynamic stack allocation space check. */
29349
29350 void
29351 rs6000_split_stack_space_check (rtx size, rtx label)
29352 {
29353 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29354 rtx limit = gen_reg_rtx (Pmode);
29355 rtx requested = gen_reg_rtx (Pmode);
29356 rtx cmp = gen_reg_rtx (CCUNSmode);
29357 rtx jump;
29358
29359 emit_insn (gen_load_split_stack_limit (limit));
29360 if (CONST_INT_P (size))
29361 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29362 else
29363 {
29364 size = force_reg (Pmode, size);
29365 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29366 }
29367 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29368 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29369 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29370 gen_rtx_LABEL_REF (VOIDmode, label),
29371 pc_rtx);
29372 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29373 JUMP_LABEL (jump) = label;
29374 }
29375 \f
29376 /* A C compound statement that outputs the assembler code for a thunk
29377 function, used to implement C++ virtual function calls with
29378 multiple inheritance. The thunk acts as a wrapper around a virtual
29379 function, adjusting the implicit object parameter before handing
29380 control off to the real function.
29381
29382 First, emit code to add the integer DELTA to the location that
29383 contains the incoming first argument. Assume that this argument
29384 contains a pointer, and is the one used to pass the `this' pointer
29385 in C++. This is the incoming argument *before* the function
29386 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29387 values of all other incoming arguments.
29388
29389 After the addition, emit code to jump to FUNCTION, which is a
29390 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29391 not touch the return address. Hence returning from FUNCTION will
29392 return to whoever called the current `thunk'.
29393
29394 The effect must be as if FUNCTION had been called directly with the
29395 adjusted first argument. This macro is responsible for emitting
29396 all of the code for a thunk function; output_function_prologue()
29397 and output_function_epilogue() are not invoked.
29398
29399 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29400 been extracted from it.) It might possibly be useful on some
29401 targets, but probably not.
29402
29403 If you do not define this macro, the target-independent code in the
29404 C++ frontend will generate a less efficient heavyweight thunk that
29405 calls FUNCTION instead of jumping to it. The generic approach does
29406 not support varargs. */
29407
29408 static void
29409 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29410 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29411 tree function)
29412 {
29413 rtx this_rtx, funexp;
29414 rtx_insn *insn;
29415
29416 reload_completed = 1;
29417 epilogue_completed = 1;
29418
29419 /* Mark the end of the (empty) prologue. */
29420 emit_note (NOTE_INSN_PROLOGUE_END);
29421
29422 /* Find the "this" pointer. If the function returns a structure,
29423 the structure return pointer is in r3. */
29424 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29425 this_rtx = gen_rtx_REG (Pmode, 4);
29426 else
29427 this_rtx = gen_rtx_REG (Pmode, 3);
29428
29429 /* Apply the constant offset, if required. */
29430 if (delta)
29431 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29432
29433 /* Apply the offset from the vtable, if required. */
29434 if (vcall_offset)
29435 {
29436 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29437 rtx tmp = gen_rtx_REG (Pmode, 12);
29438
29439 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29440 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29441 {
29442 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29443 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29444 }
29445 else
29446 {
29447 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29448
29449 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29450 }
29451 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29452 }
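/* At this point this_rtx holds
   this + delta + *(*(this + delta) + vcall_offset).  */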
29453
29454 /* Generate a tail call to the target function. */
29455 if (!TREE_USED (function))
29456 {
29457 assemble_external (function);
29458 TREE_USED (function) = 1;
29459 }
29460 funexp = XEXP (DECL_RTL (function), 0);
29461 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29462
29463 #if TARGET_MACHO
29464 if (MACHOPIC_INDIRECT)
29465 funexp = machopic_indirect_call_target (funexp);
29466 #endif
29467
29468 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29469 generate sibcall RTL explicitly. */
29470 insn = emit_call_insn (
29471 gen_rtx_PARALLEL (VOIDmode,
29472 gen_rtvec (3,
29473 gen_rtx_CALL (VOIDmode,
29474 funexp, const0_rtx),
29475 gen_rtx_USE (VOIDmode, const0_rtx),
29476 simple_return_rtx)));
29477 SIBLING_CALL_P (insn) = 1;
29478 emit_barrier ();
29479
29480 /* Run just enough of rest_of_compilation to get the insns emitted.
29481 There's not really enough bulk here to make other passes such as
29482 instruction scheduling worth while. Note that use_thunk calls
29483 assemble_start_function and assemble_end_function. */
29484 insn = get_insns ();
29485 shorten_branches (insn);
29486 final_start_function (insn, file, 1);
29487 final (insn, file, 1);
29488 final_end_function ();
29489
29490 reload_completed = 0;
29491 epilogue_completed = 0;
29492 }
29493 \f
29494 /* A quick summary of the various types of 'constant-pool tables'
29495 under PowerPC:
29496
29497 Target Flags Name One table per
29498 AIX (none) AIX TOC object file
29499 AIX -mfull-toc AIX TOC object file
29500 AIX -mminimal-toc AIX minimal TOC translation unit
29501 SVR4/EABI (none) SVR4 SDATA object file
29502 SVR4/EABI -fpic SVR4 pic object file
29503 SVR4/EABI -fPIC SVR4 PIC translation unit
29504 SVR4/EABI -mrelocatable EABI TOC function
29505 SVR4/EABI -maix AIX TOC object file
29506 SVR4/EABI -maix -mminimal-toc
29507 AIX minimal TOC translation unit
29508
29509 Name Reg. Set by entries contains:
29510 made by addrs? fp? sum?
29511
29512 AIX TOC 2 crt0 as Y option option
29513 AIX minimal TOC 30 prolog gcc Y Y option
29514 SVR4 SDATA 13 crt0 gcc N Y N
29515 SVR4 pic 30 prolog ld Y not yet N
29516 SVR4 PIC 30 prolog gcc Y option option
29517 EABI TOC 30 prolog gcc Y option option
29518
29519 */
29520
29521 /* Hash functions for the hash table. */
29522
29523 static unsigned
29524 rs6000_hash_constant (rtx k)
29525 {
29526 enum rtx_code code = GET_CODE (k);
29527 machine_mode mode = GET_MODE (k);
29528 unsigned result = (code << 3) ^ mode;
29529 const char *format;
29530 int flen, fidx;
29531
29532 format = GET_RTX_FORMAT (code);
29533 flen = strlen (format);
29534 fidx = 0;
29535
29536 switch (code)
29537 {
29538 case LABEL_REF:
29539 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29540
29541 case CONST_WIDE_INT:
29542 {
29543 int i;
29544 flen = CONST_WIDE_INT_NUNITS (k);
29545 for (i = 0; i < flen; i++)
29546 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29547 return result;
29548 }
29549
29550 case CONST_DOUBLE:
29551 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29552
29553 case CODE_LABEL:
29554 fidx = 3;
29555 break;
29556
29557 default:
29558 break;
29559 }
29560
29561 for (; fidx < flen; fidx++)
29562 switch (format[fidx])
29563 {
29564 case 's':
29565 {
29566 unsigned i, len;
29567 const char *str = XSTR (k, fidx);
29568 len = strlen (str);
29569 result = result * 613 + len;
29570 for (i = 0; i < len; i++)
29571 result = result * 613 + (unsigned) str[i];
29572 break;
29573 }
29574 case 'u':
29575 case 'e':
29576 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29577 break;
29578 case 'i':
29579 case 'n':
29580 result = result * 613 + (unsigned) XINT (k, fidx);
29581 break;
29582 case 'w':
29583 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29584 result = result * 613 + (unsigned) XWINT (k, fidx);
29585 else
29586 {
29587 size_t i;
29588 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29589 result = result * 613 + (unsigned) (XWINT (k, fidx)
29590 >> CHAR_BIT * i);
29591 }
29592 break;
29593 case '0':
29594 break;
29595 default:
29596 gcc_unreachable ();
29597 }
29598
29599 return result;
29600 }
29601
29602 hashval_t
29603 toc_hasher::hash (toc_hash_struct *thc)
29604 {
29605 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29606 }
29607
29608 /* Compare H1 and H2 for equivalence. */
29609
29610 bool
29611 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29612 {
29613 rtx r1 = h1->key;
29614 rtx r2 = h2->key;
29615
29616 if (h1->key_mode != h2->key_mode)
29617 return 0;
29618
29619 return rtx_equal_p (r1, r2);
29620 }
29621
29622 /* These are the names given by the C++ front-end to vtables, and
29623 vtable-like objects. Ideally, this logic should not be here;
29624 instead, there should be some programmatic way of inquiring as
29625 to whether or not an object is a vtable. */
29626
29627 #define VTABLE_NAME_P(NAME) \
29628 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29629 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29630 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29631 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29632 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29633
29634 #ifdef NO_DOLLAR_IN_LABEL
29635 /* Return a GGC-allocated character string translating dollar signs in
29636 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
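/* For example, "f$g$h" becomes "f_g_h"; a name that starts with '$'
   or contains no '$' at all is returned unchanged.  */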
29637
29638 const char *
29639 rs6000_xcoff_strip_dollar (const char *name)
29640 {
29641 char *strip, *p;
29642 const char *q;
29643 size_t len;
29644
29645 q = (const char *) strchr (name, '$');
29646
29647 if (q == 0 || q == name)
29648 return name;
29649
29650 len = strlen (name);
29651 strip = XALLOCAVEC (char, len + 1);
29652 strcpy (strip, name);
29653 p = strip + (q - name);
29654 while (p)
29655 {
29656 *p = '_';
29657 p = strchr (p + 1, '$');
29658 }
29659
29660 return ggc_alloc_string (strip, len);
29661 }
29662 #endif
29663
29664 void
29665 rs6000_output_symbol_ref (FILE *file, rtx x)
29666 {
29667 const char *name = XSTR (x, 0);
29668
29669 /* Currently C++ TOC references to vtables can be emitted before it
29670 is decided whether the vtable is public or private. If this is
29671 the case, then the linker will eventually complain that there is
29672 a reference to an unknown section. Thus, for vtables only,
29673 we emit the TOC reference to reference the identifier and not the
29674 symbol. */
29675 if (VTABLE_NAME_P (name))
29676 {
29677 RS6000_OUTPUT_BASENAME (file, name);
29678 }
29679 else
29680 assemble_name (file, name);
29681 }
29682
29683 /* Output a TOC entry. We derive the entry name from what is being
29684 written. */
29685
29686 void
29687 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29688 {
29689 char buf[256];
29690 const char *name = buf;
29691 rtx base = x;
29692 HOST_WIDE_INT offset = 0;
29693
29694 gcc_assert (!TARGET_NO_TOC);
29695
29696 /* When the linker won't eliminate them, don't output duplicate
29697 TOC entries (this happens on AIX if there is any kind of TOC,
29698 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29699 CODE_LABELs. */
29700 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29701 {
29702 struct toc_hash_struct *h;
29703
29704 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29705 time because GGC is not initialized at that point. */
29706 if (toc_hash_table == NULL)
29707 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29708
29709 h = ggc_alloc<toc_hash_struct> ();
29710 h->key = x;
29711 h->key_mode = mode;
29712 h->labelno = labelno;
29713
29714 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29715 if (*found == NULL)
29716 *found = h;
29717 else /* This is indeed a duplicate.
29718 Set this label equal to that label. */
29719 {
29720 fputs ("\t.set ", file);
29721 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29722 fprintf (file, "%d,", labelno);
29723 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29724 fprintf (file, "%d\n", ((*found)->labelno));
29725
29726 #ifdef HAVE_AS_TLS
29727 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
29728 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29729 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29730 {
29731 fputs ("\t.set ", file);
29732 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29733 fprintf (file, "%d,", labelno);
29734 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29735 fprintf (file, "%d\n", ((*found)->labelno));
29736 }
29737 #endif
29738 return;
29739 }
29740 }
29741
29742 /* If we're going to put a double constant in the TOC, make sure it's
29743 aligned properly when strict alignment is on. */
29744 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29745 && STRICT_ALIGNMENT
29746 && GET_MODE_BITSIZE (mode) >= 64
29747 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29748 ASM_OUTPUT_ALIGN (file, 3);
29750
29751 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29752
29753 /* Handle FP constants specially. Note that if we have a minimal
29754 TOC, things we put here aren't actually in the TOC, so we can allow
29755 FP constants. */
29756 if (CONST_DOUBLE_P (x)
29757 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29758 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29759 {
29760 long k[4];
29761
29762 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29763 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29764 else
29765 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29766
29767 if (TARGET_64BIT)
29768 {
29769 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29770 fputs (DOUBLE_INT_ASM_OP, file);
29771 else
29772 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29773 k[0] & 0xffffffff, k[1] & 0xffffffff,
29774 k[2] & 0xffffffff, k[3] & 0xffffffff);
29775 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29776 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29777 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29778 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29779 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29780 return;
29781 }
29782 else
29783 {
29784 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29785 fputs ("\t.long ", file);
29786 else
29787 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29788 k[0] & 0xffffffff, k[1] & 0xffffffff,
29789 k[2] & 0xffffffff, k[3] & 0xffffffff);
29790 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29791 k[0] & 0xffffffff, k[1] & 0xffffffff,
29792 k[2] & 0xffffffff, k[3] & 0xffffffff);
29793 return;
29794 }
29795 }
29796 else if (CONST_DOUBLE_P (x)
29797 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29798 {
29799 long k[2];
29800
29801 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29802 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29803 else
29804 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
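/* E.g. DFmode 1.0 yields k[0] == 0x3ff00000 (high word) and
   k[1] == 0, so on a 64-bit big-endian target the code below emits
   the doubleword 0x3ff0000000000000.  */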
29805
29806 if (TARGET_64BIT)
29807 {
29808 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29809 fputs (DOUBLE_INT_ASM_OP, file);
29810 else
29811 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29812 k[0] & 0xffffffff, k[1] & 0xffffffff);
29813 fprintf (file, "0x%lx%08lx\n",
29814 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29815 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29816 return;
29817 }
29818 else
29819 {
29820 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29821 fputs ("\t.long ", file);
29822 else
29823 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29824 k[0] & 0xffffffff, k[1] & 0xffffffff);
29825 fprintf (file, "0x%lx,0x%lx\n",
29826 k[0] & 0xffffffff, k[1] & 0xffffffff);
29827 return;
29828 }
29829 }
29830 else if (CONST_DOUBLE_P (x)
29831 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29832 {
29833 long l;
29834
29835 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29836 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29837 else
29838 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29839
29840 if (TARGET_64BIT)
29841 {
29842 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29843 fputs (DOUBLE_INT_ASM_OP, file);
29844 else
29845 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29846 if (WORDS_BIG_ENDIAN)
29847 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29848 else
29849 fprintf (file, "0x%lx\n", l & 0xffffffff);
29850 return;
29851 }
29852 else
29853 {
29854 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29855 fputs ("\t.long ", file);
29856 else
29857 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29858 fprintf (file, "0x%lx\n", l & 0xffffffff);
29859 return;
29860 }
29861 }
29862 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
29863 {
29864 unsigned HOST_WIDE_INT low;
29865 HOST_WIDE_INT high;
29866
29867 low = INTVAL (x) & 0xffffffff;
29868 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29869
29870 /* TOC entries are always Pmode-sized, so when big-endian,
29871 smaller integer constants in the TOC need to be padded.
29872 (This is still a win over putting the constants in
29873 a separate constant pool, because then we'd have
29874 to have both a TOC entry _and_ the actual constant.)
29875
29876 For a 32-bit target, CONST_INT values are loaded and shifted
29877 entirely within `low' and can be stored in one TOC entry. */
29878
29879 /* It would be easy to make this work, but it doesn't now. */
29880 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29881
29882 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29883 {
29884 low |= high << 32;
29885 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29886 high = (HOST_WIDE_INT) low >> 32;
29887 low &= 0xffffffff;
29888 }
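/* E.g. an SImode constant 5 on a 64-bit big-endian target becomes
   high == 5, low == 0 here, so the emitted doubleword entry is
   0x0000000500000000, with the value in the most significant word.  */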
29889
29890 if (TARGET_64BIT)
29891 {
29892 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29893 fputs (DOUBLE_INT_ASM_OP, file);
29894 else
29895 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29896 (long) high & 0xffffffff, (long) low & 0xffffffff);
29897 fprintf (file, "0x%lx%08lx\n",
29898 (long) high & 0xffffffff, (long) low & 0xffffffff);
29899 return;
29900 }
29901 else
29902 {
29903 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29904 {
29905 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29906 fputs ("\t.long ", file);
29907 else
29908 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29909 (long) high & 0xffffffff, (long) low & 0xffffffff);
29910 fprintf (file, "0x%lx,0x%lx\n",
29911 (long) high & 0xffffffff, (long) low & 0xffffffff);
29912 }
29913 else
29914 {
29915 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29916 fputs ("\t.long ", file);
29917 else
29918 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29919 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29920 }
29921 return;
29922 }
29923 }
29924
29925 if (GET_CODE (x) == CONST)
29926 {
29927 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29928 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
29929
29930 base = XEXP (XEXP (x, 0), 0);
29931 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29932 }
29933
29934 switch (GET_CODE (base))
29935 {
29936 case SYMBOL_REF:
29937 name = XSTR (base, 0);
29938 break;
29939
29940 case LABEL_REF:
29941 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29942 CODE_LABEL_NUMBER (XEXP (base, 0)));
29943 break;
29944
29945 case CODE_LABEL:
29946 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29947 break;
29948
29949 default:
29950 gcc_unreachable ();
29951 }
29952
29953 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29954 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29955 else
29956 {
29957 fputs ("\t.tc ", file);
29958 RS6000_OUTPUT_BASENAME (file, name);
29959
29960 if (offset < 0)
29961 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29962 else if (offset)
29963 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29964
29965 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29966 after other TOC symbols, reducing overflow of small TOC access
29967 to [TC] symbols. */
29968 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29969 ? "[TE]," : "[TC],", file);
29970 }
29971
29972 /* Currently C++ TOC references to vtables can be emitted before it
29973 is decided whether the vtable is public or private. If this is
29974 the case, then the linker will eventually complain that there is
29975 a TOC reference to an unknown section. Thus, for vtables only,
29976 we emit the TOC reference to reference the symbol and not the
29977 section. */
29978 if (VTABLE_NAME_P (name))
29979 {
29980 RS6000_OUTPUT_BASENAME (file, name);
29981 if (offset < 0)
29982 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29983 else if (offset > 0)
29984 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29985 }
29986 else
29987 output_addr_const (file, x);
29988
29989 #if HAVE_AS_TLS
29990 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
29991 {
29992 switch (SYMBOL_REF_TLS_MODEL (base))
29993 {
29994 case 0:
29995 break;
29996 case TLS_MODEL_LOCAL_EXEC:
29997 fputs ("@le", file);
29998 break;
29999 case TLS_MODEL_INITIAL_EXEC:
30000 fputs ("@ie", file);
30001 break;
30002 /* Use global-dynamic for local-dynamic. */
30003 case TLS_MODEL_GLOBAL_DYNAMIC:
30004 case TLS_MODEL_LOCAL_DYNAMIC:
30005 putc ('\n', file);
30006 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30007 fputs ("\t.tc .", file);
30008 RS6000_OUTPUT_BASENAME (file, name);
30009 fputs ("[TC],", file);
30010 output_addr_const (file, x);
30011 fputs ("@m", file);
30012 break;
30013 default:
30014 gcc_unreachable ();
30015 }
30016 }
30017 #endif
30018
30019 putc ('\n', file);
30020 }
30021 \f
30022 /* Output an assembler pseudo-op to write an ASCII string of N characters
30023 starting at P to FILE.
30024
30025 On the RS/6000, we have to do this using the .byte operation and
30026 write out special characters outside the quoted string.
30027 Also, the assembler is broken; very long strings are truncated,
30028 so we must artificially break them up early. */
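/* For example, output_ascii (file, "Hi\n", 3) emits
	.byte "Hi"
	.byte 10
   printable characters go into a quoted string, everything else is
   written as a decimal .byte.  */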
30029
30030 void
30031 output_ascii (FILE *file, const char *p, int n)
30032 {
30033 char c;
30034 int i, count_string;
30035 const char *for_string = "\t.byte \"";
30036 const char *for_decimal = "\t.byte ";
30037 const char *to_close = NULL;
30038
30039 count_string = 0;
30040 for (i = 0; i < n; i++)
30041 {
30042 c = *p++;
30043 if (c >= ' ' && c < 0177)
30044 {
30045 if (for_string)
30046 fputs (for_string, file);
30047 putc (c, file);
30048
30049 /* Write two quotes to get one. */
30050 if (c == '"')
30051 {
30052 putc (c, file);
30053 ++count_string;
30054 }
30055
30056 for_string = NULL;
30057 for_decimal = "\"\n\t.byte ";
30058 to_close = "\"\n";
30059 ++count_string;
30060
30061 if (count_string >= 512)
30062 {
30063 fputs (to_close, file);
30064
30065 for_string = "\t.byte \"";
30066 for_decimal = "\t.byte ";
30067 to_close = NULL;
30068 count_string = 0;
30069 }
30070 }
30071 else
30072 {
30073 if (for_decimal)
30074 fputs (for_decimal, file);
30075 fprintf (file, "%d", c);
30076
30077 for_string = "\n\t.byte \"";
30078 for_decimal = ", ";
30079 to_close = "\n";
30080 count_string = 0;
30081 }
30082 }
30083
30084 /* Now close the string if we have written one. Then end the line. */
30085 if (to_close)
30086 fputs (to_close, file);
30087 }
30088 \f
30089 /* Generate a unique section name for FILENAME for a section type
30090 represented by SECTION_DESC. Output goes into BUF.
30091
30092 SECTION_DESC can be any string, as long as it is different for each
30093 possible section type.
30094
30095 We name the section in the same manner as xlc. The name begins with an
30096 underscore followed by the filename (after stripping any leading directory
30097 names) with the last period replaced by the string SECTION_DESC. If
30098 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30099 the name. */
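/* E.g. rs6000_gen_section_name (&buf, "src/foo.c", "data") sets
   *buf to "_foodata".  */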
30100
30101 void
30102 rs6000_gen_section_name (char **buf, const char *filename,
30103 const char *section_desc)
30104 {
30105 const char *q, *after_last_slash, *last_period = 0;
30106 char *p;
30107 int len;
30108
30109 after_last_slash = filename;
30110 for (q = filename; *q; q++)
30111 {
30112 if (*q == '/')
30113 after_last_slash = q + 1;
30114 else if (*q == '.')
30115 last_period = q;
30116 }
30117
30118 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30119 *buf = (char *) xmalloc (len);
30120
30121 p = *buf;
30122 *p++ = '_';
30123
30124 for (q = after_last_slash; *q; q++)
30125 {
30126 if (q == last_period)
30127 {
30128 strcpy (p, section_desc);
30129 p += strlen (section_desc);
30130 break;
30131 }
30132
30133 else if (ISALNUM (*q))
30134 *p++ = *q;
30135 }
30136
30137 if (last_period == 0)
30138 strcpy (p, section_desc);
30139 else
30140 *p = '\0';
30141 }
30142 \f
30143 /* Emit profile function. */
30144
30145 void
30146 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30147 {
30148 /* Non-standard profiling for kernels, which just saves LR then calls
30149 _mcount without worrying about arg saves. The idea is to change
30150 the function prologue as little as possible as it isn't easy to
30151 account for arg save/restore code added just for _mcount. */
30152 if (TARGET_PROFILE_KERNEL)
30153 return;
30154
30155 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30156 {
30157 #ifndef NO_PROFILE_COUNTERS
30158 # define NO_PROFILE_COUNTERS 0
30159 #endif
30160 if (NO_PROFILE_COUNTERS)
30161 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30162 LCT_NORMAL, VOIDmode);
30163 else
30164 {
30165 char buf[30];
30166 const char *label_name;
30167 rtx fun;
30168
30169 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30170 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30171 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30172
30173 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30174 LCT_NORMAL, VOIDmode, fun, Pmode);
30175 }
30176 }
30177 else if (DEFAULT_ABI == ABI_DARWIN)
30178 {
30179 const char *mcount_name = RS6000_MCOUNT;
30180 int caller_addr_regno = LR_REGNO;
30181
30182 /* Be conservative and always set this, at least for now. */
30183 crtl->uses_pic_offset_table = 1;
30184
30185 #if TARGET_MACHO
30186 /* For PIC code, set up a stub and collect the caller's address
30187 from r0, which is where the prologue puts it. */
30188 if (MACHOPIC_INDIRECT
30189 && crtl->uses_pic_offset_table)
30190 caller_addr_regno = 0;
30191 #endif
30192 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30193 LCT_NORMAL, VOIDmode,
30194 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30195 }
30196 }
30197
30198 /* Write function profiler code. */
30199
30200 void
30201 output_function_profiler (FILE *file, int labelno)
30202 {
30203 char buf[100];
30204
30205 switch (DEFAULT_ABI)
30206 {
30207 default:
30208 gcc_unreachable ();
30209
30210 case ABI_V4:
30211 if (!TARGET_32BIT)
30212 {
30213 warning (0, "no profiling of 64-bit code for this ABI");
30214 return;
30215 }
30216 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30217 fprintf (file, "\tmflr %s\n", reg_names[0]);
30218 if (NO_PROFILE_COUNTERS)
30219 {
30220 asm_fprintf (file, "\tstw %s,4(%s)\n",
30221 reg_names[0], reg_names[1]);
30222 }
30223 else if (TARGET_SECURE_PLT && flag_pic)
30224 {
30225 if (TARGET_LINK_STACK)
30226 {
30227 char name[32];
30228 get_ppc476_thunk_name (name);
30229 asm_fprintf (file, "\tbl %s\n", name);
30230 }
30231 else
30232 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30233 asm_fprintf (file, "\tstw %s,4(%s)\n",
30234 reg_names[0], reg_names[1]);
30235 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30236 asm_fprintf (file, "\taddis %s,%s,",
30237 reg_names[12], reg_names[12]);
30238 assemble_name (file, buf);
30239 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30240 assemble_name (file, buf);
30241 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30242 }
30243 else if (flag_pic == 1)
30244 {
30245 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30246 asm_fprintf (file, "\tstw %s,4(%s)\n",
30247 reg_names[0], reg_names[1]);
30248 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30249 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30250 assemble_name (file, buf);
30251 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30252 }
30253 else if (flag_pic > 1)
30254 {
30255 asm_fprintf (file, "\tstw %s,4(%s)\n",
30256 reg_names[0], reg_names[1]);
30257 /* Now, we need to get the address of the label. */
30258 if (TARGET_LINK_STACK)
30259 {
30260 char name[32];
30261 get_ppc476_thunk_name (name);
30262 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30263 assemble_name (file, buf);
30264 fputs ("-.\n1:", file);
30265 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30266 asm_fprintf (file, "\taddi %s,%s,4\n",
30267 reg_names[11], reg_names[11]);
30268 }
30269 else
30270 {
30271 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30272 assemble_name (file, buf);
30273 fputs ("-.\n1:", file);
30274 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30275 }
30276 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30277 reg_names[0], reg_names[11]);
30278 asm_fprintf (file, "\tadd %s,%s,%s\n",
30279 reg_names[0], reg_names[0], reg_names[11]);
30280 }
30281 else
30282 {
30283 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30284 assemble_name (file, buf);
30285 fputs ("@ha\n", file);
30286 asm_fprintf (file, "\tstw %s,4(%s)\n",
30287 reg_names[0], reg_names[1]);
30288 asm_fprintf (file, "\tla %s,", reg_names[0]);
30289 assemble_name (file, buf);
30290 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30291 }
30292
30293 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30294 fprintf (file, "\tbl %s%s\n",
30295 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30296 break;
30297
30298 case ABI_AIX:
30299 case ABI_ELFv2:
30300 case ABI_DARWIN:
30301 /* Don't do anything, done in output_profile_hook (). */
30302 break;
30303 }
30304 }
30305
30306 \f
30307
30308 /* The following variable holds the last issued insn.  */
30309
30310 static rtx_insn *last_scheduled_insn;
30311
30312 /* The following variable helps to balance issuing of load and
30313 store instructions.  */
30314
30315 static int load_store_pendulum;
30316
30317 /* The following variable helps pair divide insns during scheduling. */
30318 static int divide_cnt;
30319 /* The following variable helps pair and alternate vector and vector load
30320 insns during scheduling. */
30321 static int vec_pairing;
30322
30323
30324 /* Power4 load update and store update instructions are cracked into a
30325 load or store and an integer insn which are executed in the same cycle.
30326 Branches have their own dispatch slot which does not count against the
30327 GCC issue rate, but it changes the program flow so there are no other
30328 instructions to issue in this cycle. */
30329
30330 static int
30331 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30332 {
30333 last_scheduled_insn = insn;
30334 if (GET_CODE (PATTERN (insn)) == USE
30335 || GET_CODE (PATTERN (insn)) == CLOBBER)
30336 {
30337 cached_can_issue_more = more;
30338 return cached_can_issue_more;
30339 }
30340
30341 if (insn_terminates_group_p (insn, current_group))
30342 {
30343 cached_can_issue_more = 0;
30344 return cached_can_issue_more;
30345 }
30346
30347 /* If the insn has no reservation but we reach here anyway, keep the default.  */
30348 if (recog_memoized (insn) < 0)
30349 return more;
30350
30351 if (rs6000_sched_groups)
30352 {
30353 if (is_microcoded_insn (insn))
30354 cached_can_issue_more = 0;
30355 else if (is_cracked_insn (insn))
30356 cached_can_issue_more = more > 2 ? more - 2 : 0;
30357 else
30358 cached_can_issue_more = more - 1;
30359
30360 return cached_can_issue_more;
30361 }
30362
30363 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30364 return 0;
30365
30366 cached_can_issue_more = more - 1;
30367 return cached_can_issue_more;
30368 }
30369
30370 static int
30371 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30372 {
30373 int r = rs6000_variable_issue_1 (insn, more);
30374 if (verbose)
30375 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30376 return r;
30377 }
30378
30379 /* Adjust the cost of a scheduling dependency. Return the new cost of
30380 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30381
30382 static int
30383 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30384 unsigned int)
30385 {
30386 enum attr_type attr_type;
30387
30388 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30389 return cost;
30390
30391 switch (dep_type)
30392 {
30393 case REG_DEP_TRUE:
30394 {
30395 /* Data dependency; DEP_INSN writes a register that INSN reads
30396 some cycles later. */
30397
30398 /* Separate a load from a narrower, dependent store. */
30399 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30400 && GET_CODE (PATTERN (insn)) == SET
30401 && GET_CODE (PATTERN (dep_insn)) == SET
30402 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
30403 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
30404 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30405 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30406 return cost + 14;
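/* The extra 14 cycles approximate the pipeline flush the load
   suffers when it overlaps a narrower store whose data cannot be
   forwarded.  */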
30407
30408 attr_type = get_attr_type (insn);
30409
30410 switch (attr_type)
30411 {
30412 case TYPE_JMPREG:
30413 /* Tell the first scheduling pass about the latency between
30414 a mtctr and bctr (and mtlr and br/blr). The first
30415 scheduling pass will not know about this latency since
30416 the mtctr instruction, which has the latency associated
30417 to it, will be generated by reload. */
30418 return 4;
30419 case TYPE_BRANCH:
30420 /* Leave some extra cycles between a compare and its
30421 dependent branch, to inhibit expensive mispredicts. */
30422 if ((rs6000_tune == PROCESSOR_PPC603
30423 || rs6000_tune == PROCESSOR_PPC604
30424 || rs6000_tune == PROCESSOR_PPC604e
30425 || rs6000_tune == PROCESSOR_PPC620
30426 || rs6000_tune == PROCESSOR_PPC630
30427 || rs6000_tune == PROCESSOR_PPC750
30428 || rs6000_tune == PROCESSOR_PPC7400
30429 || rs6000_tune == PROCESSOR_PPC7450
30430 || rs6000_tune == PROCESSOR_PPCE5500
30431 || rs6000_tune == PROCESSOR_PPCE6500
30432 || rs6000_tune == PROCESSOR_POWER4
30433 || rs6000_tune == PROCESSOR_POWER5
30434 || rs6000_tune == PROCESSOR_POWER7
30435 || rs6000_tune == PROCESSOR_POWER8
30436 || rs6000_tune == PROCESSOR_POWER9
30437 || rs6000_tune == PROCESSOR_CELL)
30438 && recog_memoized (dep_insn)
30439 && (INSN_CODE (dep_insn) >= 0))
30441 switch (get_attr_type (dep_insn))
30442 {
30443 case TYPE_CMP:
30444 case TYPE_FPCOMPARE:
30445 case TYPE_CR_LOGICAL:
30446 return cost + 2;
30447 case TYPE_EXTS:
30448 case TYPE_MUL:
30449 if (get_attr_dot (dep_insn) == DOT_YES)
30450 return cost + 2;
30451 else
30452 break;
30453 case TYPE_SHIFT:
30454 if (get_attr_dot (dep_insn) == DOT_YES
30455 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30456 return cost + 2;
30457 else
30458 break;
30459 default:
30460 break;
30461 }
30462 break;
30463
30464 case TYPE_STORE:
30465 case TYPE_FPSTORE:
30466 if ((rs6000_tune == PROCESSOR_POWER6)
30467 && recog_memoized (dep_insn)
30468 && (INSN_CODE (dep_insn) >= 0))
30469 {
30471 if (GET_CODE (PATTERN (insn)) != SET)
30472 /* If this happens, we have to extend this to schedule
30473 optimally. Return default for now. */
30474 return cost;
30475
30476 /* Adjust the cost for the case where the value written
30477 by a fixed point operation is used as the address
30478 gen value on a store. */
30479 switch (get_attr_type (dep_insn))
30480 {
30481 case TYPE_LOAD:
30482 case TYPE_CNTLZ:
30483 {
30484 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30485 return get_attr_sign_extend (dep_insn)
30486 == SIGN_EXTEND_YES ? 6 : 4;
30487 break;
30488 }
30489 case TYPE_SHIFT:
30490 {
30491 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30492 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30493 6 : 3;
30494 break;
30495 }
30496 case TYPE_INTEGER:
30497 case TYPE_ADD:
30498 case TYPE_LOGICAL:
30499 case TYPE_EXTS:
30500 case TYPE_INSERT:
30501 {
30502 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30503 return 3;
30504 break;
30505 }
30506 case TYPE_STORE:
30507 case TYPE_FPLOAD:
30508 case TYPE_FPSTORE:
30509 {
30510 if (get_attr_update (dep_insn) == UPDATE_YES
30511 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30512 return 3;
30513 break;
30514 }
30515 case TYPE_MUL:
30516 {
30517 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30518 return 17;
30519 break;
30520 }
30521 case TYPE_DIV:
30522 {
30523 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30524 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30525 break;
30526 }
30527 default:
30528 break;
30529 }
30530 }
30531 break;
30532
30533 case TYPE_LOAD:
30534 if ((rs6000_tune == PROCESSOR_POWER6)
30535 && recog_memoized (dep_insn)
30536 && (INSN_CODE (dep_insn) >= 0))
30537 {
30539 /* Adjust the cost for the case where the value written
30540 by a fixed point instruction is used within the address
30541 gen portion of a subsequent load(u)(x) */
30542 switch (get_attr_type (dep_insn))
30543 {
30544 case TYPE_LOAD:
30545 case TYPE_CNTLZ:
30546 {
30547 if (set_to_load_agen (dep_insn, insn))
30548 return get_attr_sign_extend (dep_insn)
30549 == SIGN_EXTEND_YES ? 6 : 4;
30550 break;
30551 }
30552 case TYPE_SHIFT:
30553 {
30554 if (set_to_load_agen (dep_insn, insn))
30555 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30556 6 : 3;
30557 break;
30558 }
30559 case TYPE_INTEGER:
30560 case TYPE_ADD:
30561 case TYPE_LOGICAL:
30562 case TYPE_EXTS:
30563 case TYPE_INSERT:
30564 {
30565 if (set_to_load_agen (dep_insn, insn))
30566 return 3;
30567 break;
30568 }
30569 case TYPE_STORE:
30570 case TYPE_FPLOAD:
30571 case TYPE_FPSTORE:
30572 {
30573 if (get_attr_update (dep_insn) == UPDATE_YES
30574 && set_to_load_agen (dep_insn, insn))
30575 return 3;
30576 break;
30577 }
30578 case TYPE_MUL:
30579 {
30580 if (set_to_load_agen (dep_insn, insn))
30581 return 17;
30582 break;
30583 }
30584 case TYPE_DIV:
30585 {
30586 if (set_to_load_agen (dep_insn, insn))
30587 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30588 break;
30589 }
30590 default:
30591 break;
30592 }
30593 }
30594 break;
30595
30596 case TYPE_FPLOAD:
30597 if ((rs6000_tune == PROCESSOR_POWER6)
30598 && get_attr_update (insn) == UPDATE_NO
30599 && recog_memoized (dep_insn)
30600 && (INSN_CODE (dep_insn) >= 0)
30601 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30602 return 2;
30603
30604 default:
30605 break;
30606 }
30607
30608 /* Fall out to return default cost. */
30609 }
30610 break;
30611
30612 case REG_DEP_OUTPUT:
30613 /* Output dependency; DEP_INSN writes a register that INSN writes some
30614 cycles later. */
30615 if ((rs6000_tune == PROCESSOR_POWER6)
30616 && recog_memoized (dep_insn)
30617 && (INSN_CODE (dep_insn) >= 0))
30618 {
30619 attr_type = get_attr_type (insn);
30620
30621 switch (attr_type)
30622 {
30623 case TYPE_FP:
30624 case TYPE_FPSIMPLE:
30625 if (get_attr_type (dep_insn) == TYPE_FP
30626 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30627 return 1;
30628 break;
30629 case TYPE_FPLOAD:
30630 if (get_attr_update (insn) == UPDATE_NO
30631 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30632 return 2;
30633 break;
30634 default:
30635 break;
30636 }
30637 }
30638 /* Fall through, no cost for output dependency. */
30639 /* FALLTHRU */
30640
30641 case REG_DEP_ANTI:
30642 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30643 cycles later. */
30644 return 0;
30645
30646 default:
30647 gcc_unreachable ();
30648 }
30649
30650 return cost;
30651 }
30652
30653 /* Debug version of rs6000_adjust_cost. */
30654
30655 static int
30656 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30657 int cost, unsigned int dw)
30658 {
30659 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30660
30661 if (ret != cost)
30662 {
30663 const char *dep;
30664
30665 switch (dep_type)
30666 {
30667 default: dep = "unknown dependency"; break;
30668 case REG_DEP_TRUE: dep = "data dependency"; break;
30669 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30670 case REG_DEP_ANTI: dep = "anti dependency"; break;
30671 }
30672
30673 fprintf (stderr,
30674 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30675 "%s, insn:\n", ret, cost, dep);
30676
30677 debug_rtx (insn);
30678 }
30679
30680 return ret;
30681 }
30682
30683 /* Return true if INSN is microcoded,
30684 false otherwise. */
30685
30686 static bool
30687 is_microcoded_insn (rtx_insn *insn)
30688 {
30689 if (!insn || !NONDEBUG_INSN_P (insn)
30690 || GET_CODE (PATTERN (insn)) == USE
30691 || GET_CODE (PATTERN (insn)) == CLOBBER)
30692 return false;
30693
30694 if (rs6000_tune == PROCESSOR_CELL)
30695 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30696
30697 if (rs6000_sched_groups
30698 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30699 {
30700 enum attr_type type = get_attr_type (insn);
30701 if ((type == TYPE_LOAD
30702 && get_attr_update (insn) == UPDATE_YES
30703 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30704 || ((type == TYPE_LOAD || type == TYPE_STORE)
30705 && get_attr_update (insn) == UPDATE_YES
30706 && get_attr_indexed (insn) == INDEXED_YES)
30707 || type == TYPE_MFCR)
30708 return true;
30709 }
30710
30711 return false;
30712 }
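
/* Illustrative examples (added; mnemonics for illustration only): on
   POWER4/POWER5 the conditions above match, e.g., a sign-extending
   load with update such as "lhau r9,2(r10)", an update-form indexed
   access such as "stwux r9,r10,r11", and "mfcr".  */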
30713
30714 /* The function returns true if INSN is cracked into 2 instructions
30715 by the processor (and therefore occupies 2 issue slots). */
30716
30717 static bool
30718 is_cracked_insn (rtx_insn *insn)
30719 {
30720 if (!insn || !NONDEBUG_INSN_P (insn)
30721 || GET_CODE (PATTERN (insn)) == USE
30722 || GET_CODE (PATTERN (insn)) == CLOBBER)
30723 return false;
30724
30725 if (rs6000_sched_groups
30726 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30727 {
30728 enum attr_type type = get_attr_type (insn);
30729 if ((type == TYPE_LOAD
30730 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30731 && get_attr_update (insn) == UPDATE_NO)
30732 || (type == TYPE_LOAD
30733 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30734 && get_attr_update (insn) == UPDATE_YES
30735 && get_attr_indexed (insn) == INDEXED_NO)
30736 || (type == TYPE_STORE
30737 && get_attr_update (insn) == UPDATE_YES
30738 && get_attr_indexed (insn) == INDEXED_NO)
30739 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30740 && get_attr_update (insn) == UPDATE_YES)
30741 || (type == TYPE_CR_LOGICAL
30742 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30743 || (type == TYPE_EXTS
30744 && get_attr_dot (insn) == DOT_YES)
30745 || (type == TYPE_SHIFT
30746 && get_attr_dot (insn) == DOT_YES
30747 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30748 || (type == TYPE_MUL
30749 && get_attr_dot (insn) == DOT_YES)
30750 || type == TYPE_DIV
30751 || (type == TYPE_INSERT
30752 && get_attr_size (insn) == SIZE_32))
30753 return true;
30754 }
30755
30756 return false;
30757 }
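
/* Illustrative examples (added; mnemonics for illustration only): on
   POWER4/POWER5 a sign-extending load such as "lha r9,0(r10)" or a
   record-form immediate shift such as "slwi. r9,r10,2" matches the
   conditions above and is cracked into two internal operations,
   occupying two issue slots.  */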
30758
30759 /* The function returns true if INSN can be issued only from
30760 the branch slot. */
30761
30762 static bool
30763 is_branch_slot_insn (rtx_insn *insn)
30764 {
30765 if (!insn || !NONDEBUG_INSN_P (insn)
30766 || GET_CODE (PATTERN (insn)) == USE
30767 || GET_CODE (PATTERN (insn)) == CLOBBER)
30768 return false;
30769
30770 if (rs6000_sched_groups)
30771 {
30772 enum attr_type type = get_attr_type (insn);
30773 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30774 return true;
30775 return false;
30776 }
30777
30778 return false;
30779 }
30780
30781 /* The function returns true if OUT_INSN sets a value that is
30782 used in the address generation computation of IN_INSN. */
30783 static bool
30784 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30785 {
30786 rtx out_set, in_set;
30787
30788 /* For performance reasons, only handle the simple case where
30789 both loads are a single_set. */
30790 out_set = single_set (out_insn);
30791 if (out_set)
30792 {
30793 in_set = single_set (in_insn);
30794 if (in_set)
30795 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30796 }
30797
30798 return false;
30799 }
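
/* For example (illustration only), in the sequence

     addi r9,r9,8      <- OUT_INSN sets r9
     lwz  r3,0(r9)     <- IN_INSN uses r9 to form its address

   the addi result feeds the load's address generation, so the
   function above returns true for that pair.  */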
30800
30801 /* Try to determine base/offset/size parts of the given MEM.
30802 Return true if successful, false if the values could not
30803 all be determined.
30804
30805 This function only looks for REG or REG+CONST address forms.
30806 REG+REG address form will return false. */
30807
30808 static bool
30809 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30810 HOST_WIDE_INT *size)
30811 {
30812 rtx addr_rtx;
30813 if (MEM_SIZE_KNOWN_P (mem))
30814 *size = MEM_SIZE (mem);
30815 else
30816 return false;
30817
30818 addr_rtx = (XEXP (mem, 0));
30819 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30820 addr_rtx = XEXP (addr_rtx, 1);
30821
30822 *offset = 0;
30823 while (GET_CODE (addr_rtx) == PLUS
30824 && CONST_INT_P (XEXP (addr_rtx, 1)))
30825 {
30826 *offset += INTVAL (XEXP (addr_rtx, 1));
30827 addr_rtx = XEXP (addr_rtx, 0);
30828 }
30829 if (!REG_P (addr_rtx))
30830 return false;
30831
30832 *base = addr_rtx;
30833 return true;
30834 }
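
/* For example (illustration only), a MEM such as

     (mem:DI (plus:DI (reg:DI 9) (const_int 16)) [... S8 ...])

   yields *BASE = r9, *OFFSET = 16 and *SIZE = 8, whereas a REG+REG
   address such as (plus (reg 9) (reg 10)) makes the function above
   return false.  */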
30835
30836 /* Return true if the target storage location of MEM1 is
30837 adjacent to the target storage location of MEM2. */
30839
30840 static bool
30841 adjacent_mem_locations (rtx mem1, rtx mem2)
30842 {
30843 rtx reg1, reg2;
30844 HOST_WIDE_INT off1, size1, off2, size2;
30845
30846 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30847 && get_memref_parts (mem2, &reg2, &off2, &size2))
30848 return ((REGNO (reg1) == REGNO (reg2))
30849 && ((off1 + size1 == off2)
30850 || (off2 + size2 == off1)));
30851
30852 return false;
30853 }
30854
30855 /* This function returns true if it can be determined that the two MEM
30856 locations overlap by at least 1 byte based on base reg/offset/size. */
30857
30858 static bool
30859 mem_locations_overlap (rtx mem1, rtx mem2)
30860 {
30861 rtx reg1, reg2;
30862 HOST_WIDE_INT off1, size1, off2, size2;
30863
30864 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30865 && get_memref_parts (mem2, &reg2, &off2, &size2))
30866 return ((REGNO (reg1) == REGNO (reg2))
30867 && (((off1 <= off2) && (off1 + size1 > off2))
30868 || ((off2 <= off1) && (off2 + size2 > off1))));
30869
30870 return false;
30871 }
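
/* Worked example (added for clarity): two 8-byte accesses off the same
   base register at offsets 0 and 8 are adjacent (0 + 8 == 8), while
   8-byte accesses at offsets 0 and 4 overlap (0 <= 4 && 0 + 8 > 4).  */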
30872
30873 /* A C statement (sans semicolon) to update the integer scheduling
30874 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30875 INSN earlier, reduce the priority to execute INSN later. Do not
30876 define this macro if you do not need to adjust the scheduling
30877 priorities of insns. */
30878
30879 static int
30880 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30881 {
30882 rtx load_mem, str_mem;
30883 /* On machines (like the 750) which have asymmetric integer units,
30884 where one integer unit can do multiply and divides and the other
30885 can't, reduce the priority of multiply/divide so it is scheduled
30886 before other integer operations. */
30887
30888 #if 0
30889 if (! INSN_P (insn))
30890 return priority;
30891
30892 if (GET_CODE (PATTERN (insn)) == USE)
30893 return priority;
30894
30895 switch (rs6000_tune) {
30896 case PROCESSOR_PPC750:
30897 switch (get_attr_type (insn))
30898 {
30899 default:
30900 break;
30901
30902 case TYPE_MUL:
30903 case TYPE_DIV:
30904 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30905 priority, priority);
30906 if (priority >= 0 && priority < 0x01000000)
30907 priority >>= 3;
30908 break;
30909 }
30910 }
30911 #endif
30912
30913 if (insn_must_be_first_in_group (insn)
30914 && reload_completed
30915 && current_sched_info->sched_max_insns_priority
30916 && rs6000_sched_restricted_insns_priority)
30917 {
30918
30919 /* Prioritize insns that can be dispatched only in the first
30920 dispatch slot. */
30921 if (rs6000_sched_restricted_insns_priority == 1)
30922 /* Attach highest priority to insn. This means that in
30923 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30924 precede 'priority' (critical path) considerations. */
30925 return current_sched_info->sched_max_insns_priority;
30926 else if (rs6000_sched_restricted_insns_priority == 2)
30927 /* Increase priority of insn by a minimal amount. This means that in
30928 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30929 considerations precede dispatch-slot restriction considerations. */
30930 return (priority + 1);
30931 }
30932
30933 if (rs6000_tune == PROCESSOR_POWER6
30934 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30935 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30936 /* Attach highest priority to insn if the scheduler has just issued two
30937 stores and this instruction is a load, or two loads and this instruction
30938 is a store. Power6 wants loads and stores scheduled alternately
30939 when possible. */
30940 return current_sched_info->sched_max_insns_priority;
30941
30942 return priority;
30943 }
30944
30945 /* Return true if the instruction is nonpipelined on the Cell. */
30946 static bool
30947 is_nonpipeline_insn (rtx_insn *insn)
30948 {
30949 enum attr_type type;
30950 if (!insn || !NONDEBUG_INSN_P (insn)
30951 || GET_CODE (PATTERN (insn)) == USE
30952 || GET_CODE (PATTERN (insn)) == CLOBBER)
30953 return false;
30954
30955 type = get_attr_type (insn);
30956 if (type == TYPE_MUL
30957 || type == TYPE_DIV
30958 || type == TYPE_SDIV
30959 || type == TYPE_DDIV
30960 || type == TYPE_SSQRT
30961 || type == TYPE_DSQRT
30962 || type == TYPE_MFCR
30963 || type == TYPE_MFCRF
30964 || type == TYPE_MFJMPR)
30965 {
30966 return true;
30967 }
30968 return false;
30969 }
30970
30971
30972 /* Return how many instructions the machine can issue per cycle. */
30973
30974 static int
30975 rs6000_issue_rate (void)
30976 {
30977 /* Unless scheduling for register pressure, use issue rate of 1 for
30978 first scheduling pass to decrease degradation. */
30979 if (!reload_completed && !flag_sched_pressure)
30980 return 1;
30981
30982 switch (rs6000_tune) {
30983 case PROCESSOR_RS64A:
30984 case PROCESSOR_PPC601: /* ? */
30985 case PROCESSOR_PPC7450:
30986 return 3;
30987 case PROCESSOR_PPC440:
30988 case PROCESSOR_PPC603:
30989 case PROCESSOR_PPC750:
30990 case PROCESSOR_PPC7400:
30991 case PROCESSOR_PPC8540:
30992 case PROCESSOR_PPC8548:
30993 case PROCESSOR_CELL:
30994 case PROCESSOR_PPCE300C2:
30995 case PROCESSOR_PPCE300C3:
30996 case PROCESSOR_PPCE500MC:
30997 case PROCESSOR_PPCE500MC64:
30998 case PROCESSOR_PPCE5500:
30999 case PROCESSOR_PPCE6500:
31000 case PROCESSOR_TITAN:
31001 return 2;
31002 case PROCESSOR_PPC476:
31003 case PROCESSOR_PPC604:
31004 case PROCESSOR_PPC604e:
31005 case PROCESSOR_PPC620:
31006 case PROCESSOR_PPC630:
31007 return 4;
31008 case PROCESSOR_POWER4:
31009 case PROCESSOR_POWER5:
31010 case PROCESSOR_POWER6:
31011 case PROCESSOR_POWER7:
31012 return 5;
31013 case PROCESSOR_POWER8:
31014 return 7;
31015 case PROCESSOR_POWER9:
31016 return 6;
31017 default:
31018 return 1;
31019 }
31020 }
31021
31022 /* Return how many instructions to look ahead for better insn
31023 scheduling. */
31024
31025 static int
31026 rs6000_use_sched_lookahead (void)
31027 {
31028 switch (rs6000_tune)
31029 {
31030 case PROCESSOR_PPC8540:
31031 case PROCESSOR_PPC8548:
31032 return 4;
31033
31034 case PROCESSOR_CELL:
31035 return (reload_completed ? 8 : 0);
31036
31037 default:
31038 return 0;
31039 }
31040 }
31041
31042 /* We are choosing insn from the ready queue. Return zero if INSN can be
31043 chosen. */
31044 static int
31045 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31046 {
31047 if (ready_index == 0)
31048 return 0;
31049
31050 if (rs6000_tune != PROCESSOR_CELL)
31051 return 0;
31052
31053 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31054
31055 if (!reload_completed
31056 || is_nonpipeline_insn (insn)
31057 || is_microcoded_insn (insn))
31058 return 1;
31059
31060 return 0;
31061 }
31062
31063 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31064 and return true. */
31065
31066 static bool
31067 find_mem_ref (rtx pat, rtx *mem_ref)
31068 {
31069 const char * fmt;
31070 int i, j;
31071
31072 /* stack_tie does not produce any real memory traffic. */
31073 if (tie_operand (pat, VOIDmode))
31074 return false;
31075
31076 if (GET_CODE (pat) == MEM)
31077 {
31078 *mem_ref = pat;
31079 return true;
31080 }
31081
31082 /* Recursively process the pattern. */
31083 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31084
31085 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31086 {
31087 if (fmt[i] == 'e')
31088 {
31089 if (find_mem_ref (XEXP (pat, i), mem_ref))
31090 return true;
31091 }
31092 else if (fmt[i] == 'E')
31093 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31094 {
31095 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31096 return true;
31097 }
31098 }
31099
31100 return false;
31101 }
31102
31103 /* Determine if PAT is a PATTERN of a load insn. */
31104
31105 static bool
31106 is_load_insn1 (rtx pat, rtx *load_mem)
31107 {
31108 if (!pat)
31109 return false;
31110
31111 if (GET_CODE (pat) == SET)
31112 return find_mem_ref (SET_SRC (pat), load_mem);
31113
31114 if (GET_CODE (pat) == PARALLEL)
31115 {
31116 int i;
31117
31118 for (i = 0; i < XVECLEN (pat, 0); i++)
31119 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31120 return true;
31121 }
31122
31123 return false;
31124 }
31125
31126 /* Determine if INSN loads from memory. */
31127
31128 static bool
31129 is_load_insn (rtx insn, rtx *load_mem)
31130 {
31131 if (!insn || !INSN_P (insn))
31132 return false;
31133
31134 if (CALL_P (insn))
31135 return false;
31136
31137 return is_load_insn1 (PATTERN (insn), load_mem);
31138 }
31139
31140 /* Determine if PAT is a PATTERN of a store insn. */
31141
31142 static bool
31143 is_store_insn1 (rtx pat, rtx *str_mem)
31144 {
31145 if (!pat)
31146 return false;
31147
31148 if (GET_CODE (pat) == SET)
31149 return find_mem_ref (SET_DEST (pat), str_mem);
31150
31151 if (GET_CODE (pat) == PARALLEL)
31152 {
31153 int i;
31154
31155 for (i = 0; i < XVECLEN (pat, 0); i++)
31156 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31157 return true;
31158 }
31159
31160 return false;
31161 }
31162
31163 /* Determine if INSN stores to memory. */
31164
31165 static bool
31166 is_store_insn (rtx insn, rtx *str_mem)
31167 {
31168 if (!insn || !INSN_P (insn))
31169 return false;
31170
31171 return is_store_insn1 (PATTERN (insn), str_mem);
31172 }
31173
31174 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31175
31176 static bool
31177 is_power9_pairable_vec_type (enum attr_type type)
31178 {
31179 switch (type)
31180 {
31181 case TYPE_VECSIMPLE:
31182 case TYPE_VECCOMPLEX:
31183 case TYPE_VECDIV:
31184 case TYPE_VECCMP:
31185 case TYPE_VECPERM:
31186 case TYPE_VECFLOAT:
31187 case TYPE_VECFDIV:
31188 case TYPE_VECDOUBLE:
31189 return true;
31190 default:
31191 break;
31192 }
31193 return false;
31194 }
31195
31196 /* Returns whether the dependence between INSN and NEXT is considered
31197 costly by the given target. */
31198
31199 static bool
31200 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31201 {
31202 rtx insn;
31203 rtx next;
31204 rtx load_mem, str_mem;
31205
31206 /* If the flag is not enabled - no dependence is considered costly;
31207 allow all dependent insns in the same group.
31208 This is the most aggressive option. */
31209 if (rs6000_sched_costly_dep == no_dep_costly)
31210 return false;
31211
31212 /* If the flag is set to 1 - a dependence is always considered costly;
31213 do not allow dependent instructions in the same group.
31214 This is the most conservative option. */
31215 if (rs6000_sched_costly_dep == all_deps_costly)
31216 return true;
31217
31218 insn = DEP_PRO (dep);
31219 next = DEP_CON (dep);
31220
31221 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31222 && is_load_insn (next, &load_mem)
31223 && is_store_insn (insn, &str_mem))
31224 /* Prevent load after store in the same group. */
31225 return true;
31226
31227 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31228 && is_load_insn (next, &load_mem)
31229 && is_store_insn (insn, &str_mem)
31230 && DEP_TYPE (dep) == REG_DEP_TRUE
31231 && mem_locations_overlap(str_mem, load_mem))
31232 /* Prevent load after store in the same group if it is a true
31233 dependence. */
31234 return true;
31235
31236 /* The flag is set to X; dependences with latency >= X are considered costly,
31237 and will not be scheduled in the same group. */
31238 if (rs6000_sched_costly_dep <= max_dep_latency
31239 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31240 return true;
31241
31242 return false;
31243 }
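
/* A sketch of the option behavior implemented above (assuming the
   -msched-costly-dep option spellings): "store_to_load" keeps any
   dependent store/load pair in separate groups, "true_store_to_load"
   does so only when the locations overlap, and a numeric value N
   treats a dependence as costly when its remaining latency
   (cost - distance) is at least N.  */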
31244
31245 /* Return the next insn after INSN that is found before TAIL is reached,
31246 skipping any "non-active" insns - insns that will not actually occupy
31247 an issue slot. Return NULL_RTX if such an insn is not found. */
31248
31249 static rtx_insn *
31250 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31251 {
31252 if (insn == NULL_RTX || insn == tail)
31253 return NULL;
31254
31255 while (1)
31256 {
31257 insn = NEXT_INSN (insn);
31258 if (insn == NULL_RTX || insn == tail)
31259 return NULL;
31260
31261 if (CALL_P (insn)
31262 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31263 || (NONJUMP_INSN_P (insn)
31264 && GET_CODE (PATTERN (insn)) != USE
31265 && GET_CODE (PATTERN (insn)) != CLOBBER
31266 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31267 break;
31268 }
31269 return insn;
31270 }
31271
31272 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31273
31274 static int
31275 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31276 {
31277 int pos;
31278 int i;
31279 rtx_insn *tmp;
31280 enum attr_type type, type2;
31281
31282 type = get_attr_type (last_scheduled_insn);
31283
31284 /* Try to issue fixed point divides back-to-back in pairs so they will be
31285 routed to separate execution units and execute in parallel. */
31286 if (type == TYPE_DIV && divide_cnt == 0)
31287 {
31288 /* First divide has been scheduled. */
31289 divide_cnt = 1;
31290
31291 /* Scan the ready list looking for another divide, if found move it
31292 to the end of the list so it is chosen next. */
31293 pos = lastpos;
31294 while (pos >= 0)
31295 {
31296 if (recog_memoized (ready[pos]) >= 0
31297 && get_attr_type (ready[pos]) == TYPE_DIV)
31298 {
31299 tmp = ready[pos];
31300 for (i = pos; i < lastpos; i++)
31301 ready[i] = ready[i + 1];
31302 ready[lastpos] = tmp;
31303 break;
31304 }
31305 pos--;
31306 }
31307 }
31308 else
31309 {
31310 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31311 divide_cnt = 0;
31312
31313 /* The best dispatch throughput for vector and vector load insns can be
31314 achieved by interleaving a vector and vector load such that they'll
31315 dispatch to the same superslice. If this pairing cannot be achieved
31316 then it is best to pair vector insns together and vector load insns
31317 together.
31318
31319 To aid in this pairing, vec_pairing maintains the current state with
31320 the following values:
31321
31322 0 : Initial state, no vecload/vector pairing has been started.
31323
31324 1 : A vecload or vector insn has been issued and a candidate for
31325 pairing has been found and moved to the end of the ready
31326 list. */
31327 if (type == TYPE_VECLOAD)
31328 {
31329 /* Issued a vecload. */
31330 if (vec_pairing == 0)
31331 {
31332 int vecload_pos = -1;
31333 /* We issued a single vecload, look for a vector insn to pair it
31334 with. If one isn't found, try to pair another vecload. */
31335 pos = lastpos;
31336 while (pos >= 0)
31337 {
31338 if (recog_memoized (ready[pos]) >= 0)
31339 {
31340 type2 = get_attr_type (ready[pos]);
31341 if (is_power9_pairable_vec_type (type2))
31342 {
31343 /* Found a vector insn to pair with, move it to the
31344 end of the ready list so it is scheduled next. */
31345 tmp = ready[pos];
31346 for (i = pos; i < lastpos; i++)
31347 ready[i] = ready[i + 1];
31348 ready[lastpos] = tmp;
31349 vec_pairing = 1;
31350 return cached_can_issue_more;
31351 }
31352 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31353 /* Remember position of first vecload seen. */
31354 vecload_pos = pos;
31355 }
31356 pos--;
31357 }
31358 if (vecload_pos >= 0)
31359 {
31360 /* Didn't find a vector to pair with but did find a vecload,
31361 move it to the end of the ready list. */
31362 tmp = ready[vecload_pos];
31363 for (i = vecload_pos; i < lastpos; i++)
31364 ready[i] = ready[i + 1];
31365 ready[lastpos] = tmp;
31366 vec_pairing = 1;
31367 return cached_can_issue_more;
31368 }
31369 }
31370 }
31371 else if (is_power9_pairable_vec_type (type))
31372 {
31373 /* Issued a vector operation. */
31374 if (vec_pairing == 0)
31375 {
31376 int vec_pos = -1;
31377 /* We issued a single vector insn, look for a vecload to pair it
31378 with. If one isn't found, try to pair another vector. */
31379 pos = lastpos;
31380 while (pos >= 0)
31381 {
31382 if (recog_memoized (ready[pos]) >= 0)
31383 {
31384 type2 = get_attr_type (ready[pos]);
31385 if (type2 == TYPE_VECLOAD)
31386 {
31387 /* Found a vecload insn to pair with, move it to the
31388 end of the ready list so it is scheduled next. */
31389 tmp = ready[pos];
31390 for (i = pos; i < lastpos; i++)
31391 ready[i] = ready[i + 1];
31392 ready[lastpos] = tmp;
31393 vec_pairing = 1;
31394 return cached_can_issue_more;
31395 }
31396 else if (is_power9_pairable_vec_type (type2)
31397 && vec_pos == -1)
31398 /* Remember position of first vector insn seen. */
31399 vec_pos = pos;
31400 }
31401 pos--;
31402 }
31403 if (vec_pos >= 0)
31404 {
31405 /* Didn't find a vecload to pair with but did find a vector
31406 insn, move it to the end of the ready list. */
31407 tmp = ready[vec_pos];
31408 for (i = vec_pos; i < lastpos; i++)
31409 ready[i] = ready[i + 1];
31410 ready[lastpos] = tmp;
31411 vec_pairing = 1;
31412 return cached_can_issue_more;
31413 }
31414 }
31415 }
31416
31417 /* We've either finished a vec/vecload pair, couldn't find an insn to
31418 continue the current pair, or the last insn had nothing to do
31419 with pairing. In any case, reset the state. */
31420 vec_pairing = 0;
31421 }
31422
31423 return cached_can_issue_more;
31424 }
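
/* Note (added for clarity): the scheduler keeps the ready list sorted
   so that its last element, ready[lastpos], is the insn issued next;
   that is why the loops above rotate a chosen candidate to the end of
   the array.  */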
31425
31426 /* We are about to begin issuing insns for this clock cycle. */
31427
31428 static int
31429 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31430 rtx_insn **ready ATTRIBUTE_UNUSED,
31431 int *pn_ready ATTRIBUTE_UNUSED,
31432 int clock_var ATTRIBUTE_UNUSED)
31433 {
31434 int n_ready = *pn_ready;
31435
31436 if (sched_verbose)
31437 fprintf (dump, "// rs6000_sched_reorder :\n");
31438
31439 /* Reorder the ready list, if the insn to be issued next
31440 is a nonpipelined insn. */
31441 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31442 {
31443 if (is_nonpipeline_insn (ready[n_ready - 1])
31444 && (recog_memoized (ready[n_ready - 2]) > 0))
31445 /* Simply swap first two insns. */
31446 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31447 }
31448
31449 if (rs6000_tune == PROCESSOR_POWER6)
31450 load_store_pendulum = 0;
31451
31452 return rs6000_issue_rate ();
31453 }
31454
31455 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31456
31457 static int
31458 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31459 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31460 {
31461 if (sched_verbose)
31462 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31463
31464 /* For Power6, we need to handle some special cases to try and keep the
31465 store queue from overflowing and triggering expensive flushes.
31466
31467 This code monitors how load and store instructions are being issued
31468 and skews the ready list one way or the other to increase the likelihood
31469 that a desired instruction is issued at the proper time.
31470
31471 A couple of things are done. First, we maintain a "load_store_pendulum"
31472 to track the current state of load/store issue.
31473
31474 - If the pendulum is at zero, then no loads or stores have been
31475 issued in the current cycle so we do nothing.
31476
31477 - If the pendulum is 1, then a single load has been issued in this
31478 cycle and we attempt to locate another load in the ready list to
31479 issue with it.
31480
31481 - If the pendulum is -2, then two stores have already been
31482 issued in this cycle, so we increase the priority of the first load
31483 in the ready list to increase its likelihood of being chosen first
31484 in the next cycle.
31485
31486 - If the pendulum is -1, then a single store has been issued in this
31487 cycle and we attempt to locate another store in the ready list to
31488 issue with it, preferring a store to an adjacent memory location to
31489 facilitate store pairing in the store queue.
31490
31491 - If the pendulum is 2, then two loads have already been
31492 issued in this cycle, so we increase the priority of the first store
31493 in the ready list to increase its likelihood of being chosen first
31494 in the next cycle.
31495
31496 - If the pendulum < -2 or > 2, then do nothing.
31497
31498 Note: This code covers the most common scenarios. There exist
31499 non-load/store instructions which make use of the LSU and which
31500 would need to be accounted for to strictly model the behavior
31501 of the machine. Those instructions are currently unaccounted
31502 for to help minimize compile time overhead of this code.
31503 */
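/* For example (illustration only), starting from 0: issuing a store
   swings the pendulum to -1 and we look for a second store to pair
   with it; a second store swings it to -2, at which point the first
   load on the ready list gets a priority boost so that the next
   cycle begins with a load.  */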
31504 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31505 {
31506 int pos;
31507 int i;
31508 rtx_insn *tmp;
31509 rtx load_mem, str_mem;
31510
31511 if (is_store_insn (last_scheduled_insn, &str_mem))
31512 /* Issuing a store, swing the load_store_pendulum to the left */
31513 load_store_pendulum--;
31514 else if (is_load_insn (last_scheduled_insn, &load_mem))
31515 /* Issuing a load, swing the load_store_pendulum to the right */
31516 load_store_pendulum++;
31517 else
31518 return cached_can_issue_more;
31519
31520 /* If the pendulum is balanced, or there is only one instruction on
31521 the ready list, then all is well, so return. */
31522 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31523 return cached_can_issue_more;
31524
31525 if (load_store_pendulum == 1)
31526 {
31527 /* A load has been issued in this cycle. Scan the ready list
31528 for another load to issue with it. */
31529 pos = *pn_ready - 1;
31530
31531 while (pos >= 0)
31532 {
31533 if (is_load_insn (ready[pos], &load_mem))
31534 {
31535 /* Found a load. Move it to the head of the ready list,
31536 and adjust its priority so that it is more likely to
31537 stay there. */
31538 tmp = ready[pos];
31539 for (i = pos; i < *pn_ready - 1; i++)
31540 ready[i] = ready[i + 1];
31541 ready[*pn_ready - 1] = tmp;
31542
31543 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31544 INSN_PRIORITY (tmp)++;
31545 break;
31546 }
31547 pos--;
31548 }
31549 }
31550 else if (load_store_pendulum == -2)
31551 {
31552 /* Two stores have been issued in this cycle. Increase the
31553 priority of the first load in the ready list to favor it for
31554 issuing in the next cycle. */
31555 pos = *pn_ready - 1;
31556
31557 while (pos >= 0)
31558 {
31559 if (is_load_insn (ready[pos], &load_mem)
31560 && !sel_sched_p ()
31561 && INSN_PRIORITY_KNOWN (ready[pos]))
31562 {
31563 INSN_PRIORITY (ready[pos])++;
31564
31565 /* Adjust the pendulum to account for the fact that a load
31566 was found and increased in priority. This is to prevent
31567 increasing the priority of multiple loads. */
31568 load_store_pendulum--;
31569
31570 break;
31571 }
31572 pos--;
31573 }
31574 }
31575 else if (load_store_pendulum == -1)
31576 {
31577 /* A store has been issued in this cycle. Scan the ready list for
31578 another store to issue with it, preferring a store to an adjacent
31579 memory location. */
31580 int first_store_pos = -1;
31581
31582 pos = *pn_ready - 1;
31583
31584 while (pos >= 0)
31585 {
31586 if (is_store_insn (ready[pos], &str_mem))
31587 {
31588 rtx str_mem2;
31589 /* Maintain the index of the first store found on the
31590 list */
31591 if (first_store_pos == -1)
31592 first_store_pos = pos;
31593
31594 if (is_store_insn (last_scheduled_insn, &str_mem2)
31595 && adjacent_mem_locations (str_mem, str_mem2))
31596 {
31597 /* Found an adjacent store. Move it to the head of the
31598 ready list, and adjust its priority so that it is
31599 more likely to stay there. */
31600 tmp = ready[pos];
31601 for (i = pos; i < *pn_ready - 1; i++)
31602 ready[i] = ready[i + 1];
31603 ready[*pn_ready - 1] = tmp;
31604
31605 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31606 INSN_PRIORITY (tmp)++;
31607
31608 first_store_pos = -1;
31609
31610 break;
31611 }
31612 }
31613 pos--;
31614 }
31615
31616 if (first_store_pos >= 0)
31617 {
31618 /* An adjacent store wasn't found, but a non-adjacent store was,
31619 so move the non-adjacent store to the front of the ready
31620 list, and adjust its priority so that it is more likely to
31621 stay there. */
31622 tmp = ready[first_store_pos];
31623 for (i = first_store_pos; i < *pn_ready - 1; i++)
31624 ready[i] = ready[i + 1];
31625 ready[*pn_ready - 1] = tmp;
31626 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31627 INSN_PRIORITY (tmp)++;
31628 }
31629 }
31630 else if (load_store_pendulum == 2)
31631 {
31632 /* Two loads have been issued in this cycle. Increase the priority
31633 of the first store in the ready list to favor it for issuing in
31634 the next cycle. */
31635 pos = *pn_ready - 1;
31636
31637 while (pos >= 0)
31638 {
31639 if (is_store_insn (ready[pos], &str_mem)
31640 && !sel_sched_p ()
31641 && INSN_PRIORITY_KNOWN (ready[pos]))
31642 {
31643 INSN_PRIORITY (ready[pos])++;
31644
31645 /* Adjust the pendulum to account for the fact that a store
31646 was found and increased in priority. This is to prevent
31647 increasing the priority of multiple stores. */
31648 load_store_pendulum++;
31649
31650 break;
31651 }
31652 pos--;
31653 }
31654 }
31655 }
31656
31657 /* Do Power9 dependent reordering if necessary. */
31658 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31659 && recog_memoized (last_scheduled_insn) >= 0)
31660 return power9_sched_reorder2 (ready, *pn_ready - 1);
31661
31662 return cached_can_issue_more;
31663 }
31664
31665 /* Return whether the presence of INSN causes a dispatch group termination
31666 of group WHICH_GROUP.
31667
31668 If WHICH_GROUP == current_group, this function will return true if INSN
31669 causes the termination of the current group (i.e., the dispatch group to
31670 which INSN belongs). This means that INSN will be the last insn in the
31671 group it belongs to.
31672
31673 If WHICH_GROUP == previous_group, this function will return true if INSN
31674 causes the termination of the previous group (i.e., the dispatch group that
31675 precedes the group to which INSN belongs). This means that INSN will be
31676 the first insn in the group it belongs to. */
31677
31678 static bool
31679 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31680 {
31681 bool first, last;
31682
31683 if (! insn)
31684 return false;
31685
31686 first = insn_must_be_first_in_group (insn);
31687 last = insn_must_be_last_in_group (insn);
31688
31689 if (first && last)
31690 return true;
31691
31692 if (which_group == current_group)
31693 return last;
31694 else if (which_group == previous_group)
31695 return first;
31696
31697 return false;
31698 }
31699
31700
31701 static bool
31702 insn_must_be_first_in_group (rtx_insn *insn)
31703 {
31704 enum attr_type type;
31705
31706 if (!insn
31707 || NOTE_P (insn)
31708 || DEBUG_INSN_P (insn)
31709 || GET_CODE (PATTERN (insn)) == USE
31710 || GET_CODE (PATTERN (insn)) == CLOBBER)
31711 return false;
31712
31713 switch (rs6000_tune)
31714 {
31715 case PROCESSOR_POWER5:
31716 if (is_cracked_insn (insn))
31717 return true;
31718 /* FALLTHRU */
31719 case PROCESSOR_POWER4:
31720 if (is_microcoded_insn (insn))
31721 return true;
31722
31723 if (!rs6000_sched_groups)
31724 return false;
31725
31726 type = get_attr_type (insn);
31727
31728 switch (type)
31729 {
31730 case TYPE_MFCR:
31731 case TYPE_MFCRF:
31732 case TYPE_MTCR:
31733 case TYPE_CR_LOGICAL:
31734 case TYPE_MTJMPR:
31735 case TYPE_MFJMPR:
31736 case TYPE_DIV:
31737 case TYPE_LOAD_L:
31738 case TYPE_STORE_C:
31739 case TYPE_ISYNC:
31740 case TYPE_SYNC:
31741 return true;
31742 default:
31743 break;
31744 }
31745 break;
31746 case PROCESSOR_POWER6:
31747 type = get_attr_type (insn);
31748
31749 switch (type)
31750 {
31751 case TYPE_EXTS:
31752 case TYPE_CNTLZ:
31753 case TYPE_TRAP:
31754 case TYPE_MUL:
31755 case TYPE_INSERT:
31756 case TYPE_FPCOMPARE:
31757 case TYPE_MFCR:
31758 case TYPE_MTCR:
31759 case TYPE_MFJMPR:
31760 case TYPE_MTJMPR:
31761 case TYPE_ISYNC:
31762 case TYPE_SYNC:
31763 case TYPE_LOAD_L:
31764 case TYPE_STORE_C:
31765 return true;
31766 case TYPE_SHIFT:
31767 if (get_attr_dot (insn) == DOT_NO
31768 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31769 return true;
31770 else
31771 break;
31772 case TYPE_DIV:
31773 if (get_attr_size (insn) == SIZE_32)
31774 return true;
31775 else
31776 break;
31777 case TYPE_LOAD:
31778 case TYPE_STORE:
31779 case TYPE_FPLOAD:
31780 case TYPE_FPSTORE:
31781 if (get_attr_update (insn) == UPDATE_YES)
31782 return true;
31783 else
31784 break;
31785 default:
31786 break;
31787 }
31788 break;
31789 case PROCESSOR_POWER7:
31790 type = get_attr_type (insn);
31791
31792 switch (type)
31793 {
31794 case TYPE_CR_LOGICAL:
31795 case TYPE_MFCR:
31796 case TYPE_MFCRF:
31797 case TYPE_MTCR:
31798 case TYPE_DIV:
31799 case TYPE_ISYNC:
31800 case TYPE_LOAD_L:
31801 case TYPE_STORE_C:
31802 case TYPE_MFJMPR:
31803 case TYPE_MTJMPR:
31804 return true;
31805 case TYPE_MUL:
31806 case TYPE_SHIFT:
31807 case TYPE_EXTS:
31808 if (get_attr_dot (insn) == DOT_YES)
31809 return true;
31810 else
31811 break;
31812 case TYPE_LOAD:
31813 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31814 || get_attr_update (insn) == UPDATE_YES)
31815 return true;
31816 else
31817 break;
31818 case TYPE_STORE:
31819 case TYPE_FPLOAD:
31820 case TYPE_FPSTORE:
31821 if (get_attr_update (insn) == UPDATE_YES)
31822 return true;
31823 else
31824 break;
31825 default:
31826 break;
31827 }
31828 break;
31829 case PROCESSOR_POWER8:
31830 type = get_attr_type (insn);
31831
31832 switch (type)
31833 {
31834 case TYPE_CR_LOGICAL:
31835 case TYPE_MFCR:
31836 case TYPE_MFCRF:
31837 case TYPE_MTCR:
31838 case TYPE_SYNC:
31839 case TYPE_ISYNC:
31840 case TYPE_LOAD_L:
31841 case TYPE_STORE_C:
31842 case TYPE_VECSTORE:
31843 case TYPE_MFJMPR:
31844 case TYPE_MTJMPR:
31845 return true;
31846 case TYPE_SHIFT:
31847 case TYPE_EXTS:
31848 case TYPE_MUL:
31849 if (get_attr_dot (insn) == DOT_YES)
31850 return true;
31851 else
31852 break;
31853 case TYPE_LOAD:
31854 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31855 || get_attr_update (insn) == UPDATE_YES)
31856 return true;
31857 else
31858 break;
31859 case TYPE_STORE:
31860 if (get_attr_update (insn) == UPDATE_YES
31861 && get_attr_indexed (insn) == INDEXED_YES)
31862 return true;
31863 else
31864 break;
31865 default:
31866 break;
31867 }
31868 break;
31869 default:
31870 break;
31871 }
31872
31873 return false;
31874 }
31875
31876 static bool
31877 insn_must_be_last_in_group (rtx_insn *insn)
31878 {
31879 enum attr_type type;
31880
31881 if (!insn
31882 || NOTE_P (insn)
31883 || DEBUG_INSN_P (insn)
31884 || GET_CODE (PATTERN (insn)) == USE
31885 || GET_CODE (PATTERN (insn)) == CLOBBER)
31886 return false;
31887
31888 switch (rs6000_tune) {
31889 case PROCESSOR_POWER4:
31890 case PROCESSOR_POWER5:
31891 if (is_microcoded_insn (insn))
31892 return true;
31893
31894 if (is_branch_slot_insn (insn))
31895 return true;
31896
31897 break;
31898 case PROCESSOR_POWER6:
31899 type = get_attr_type (insn);
31900
31901 switch (type)
31902 {
31903 case TYPE_EXTS:
31904 case TYPE_CNTLZ:
31905 case TYPE_TRAP:
31906 case TYPE_MUL:
31907 case TYPE_FPCOMPARE:
31908 case TYPE_MFCR:
31909 case TYPE_MTCR:
31910 case TYPE_MFJMPR:
31911 case TYPE_MTJMPR:
31912 case TYPE_ISYNC:
31913 case TYPE_SYNC:
31914 case TYPE_LOAD_L:
31915 case TYPE_STORE_C:
31916 return true;
31917 case TYPE_SHIFT:
31918 if (get_attr_dot (insn) == DOT_NO
31919 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31920 return true;
31921 else
31922 break;
31923 case TYPE_DIV:
31924 if (get_attr_size (insn) == SIZE_32)
31925 return true;
31926 else
31927 break;
31928 default:
31929 break;
31930 }
31931 break;
31932 case PROCESSOR_POWER7:
31933 type = get_attr_type (insn);
31934
31935 switch (type)
31936 {
31937 case TYPE_ISYNC:
31938 case TYPE_SYNC:
31939 case TYPE_LOAD_L:
31940 case TYPE_STORE_C:
31941 return true;
31942 case TYPE_LOAD:
31943 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31944 && get_attr_update (insn) == UPDATE_YES)
31945 return true;
31946 else
31947 break;
31948 case TYPE_STORE:
31949 if (get_attr_update (insn) == UPDATE_YES
31950 && get_attr_indexed (insn) == INDEXED_YES)
31951 return true;
31952 else
31953 break;
31954 default:
31955 break;
31956 }
31957 break;
31958 case PROCESSOR_POWER8:
31959 type = get_attr_type (insn);
31960
31961 switch (type)
31962 {
31963 case TYPE_MFCR:
31964 case TYPE_MTCR:
31965 case TYPE_ISYNC:
31966 case TYPE_SYNC:
31967 case TYPE_LOAD_L:
31968 case TYPE_STORE_C:
31969 return true;
31970 case TYPE_LOAD:
31971 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31972 && get_attr_update (insn) == UPDATE_YES)
31973 return true;
31974 else
31975 break;
31976 case TYPE_STORE:
31977 if (get_attr_update (insn) == UPDATE_YES
31978 && get_attr_indexed (insn) == INDEXED_YES)
31979 return true;
31980 else
31981 break;
31982 default:
31983 break;
31984 }
31985 break;
31986 default:
31987 break;
31988 }
31989
31990 return false;
31991 }
31992
31993 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31994 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31995
31996 static bool
31997 is_costly_group (rtx *group_insns, rtx next_insn)
31998 {
31999 int i;
32000 int issue_rate = rs6000_issue_rate ();
32001
32002 for (i = 0; i < issue_rate; i++)
32003 {
32004 sd_iterator_def sd_it;
32005 dep_t dep;
32006 rtx insn = group_insns[i];
32007
32008 if (!insn)
32009 continue;
32010
32011 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32012 {
32013 rtx next = DEP_CON (dep);
32014
32015 if (next == next_insn
32016 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32017 return true;
32018 }
32019 }
32020
32021 return false;
32022 }
32023
32024 /* Helper for the function redefine_groups.
32025 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32026 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32027 to keep it "far" (in a separate group) from GROUP_INSNS, following
32028 one of the following schemes, depending on the value of the flag
32029 -minsert-sched-nops = X:
32030 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32031 in order to force NEXT_INSN into a separate group.
32032 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32033 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32034 insertion (has a group just ended, how many vacant issue slots remain in the
32035 last group, and how many dispatch groups were encountered so far). */
32036
32037 static int
32038 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32039 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32040 int *group_count)
32041 {
32042 rtx nop;
32043 bool force;
32044 int issue_rate = rs6000_issue_rate ();
32045 bool end = *group_end;
32046 int i;
32047
32048 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32049 return can_issue_more;
32050
32051 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32052 return can_issue_more;
32053
32054 force = is_costly_group (group_insns, next_insn);
32055 if (!force)
32056 return can_issue_more;
32057
32058 if (sched_verbose > 6)
32059 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
32060 *group_count ,can_issue_more);
32061
32062 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32063 {
32064 if (*group_end)
32065 can_issue_more = 0;
32066
32067 /* Since only a branch can be issued in the last issue_slot, it is
32068 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32069 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32070 in this case the last nop will start a new group and the branch
32071 will be forced to the new group. */
32072 if (can_issue_more && !is_branch_slot_insn (next_insn))
32073 can_issue_more--;
32074
32075 /* Do we have a special group ending nop? */
32076 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32077 || rs6000_tune == PROCESSOR_POWER8)
32078 {
32079 nop = gen_group_ending_nop ();
32080 emit_insn_before (nop, next_insn);
32081 can_issue_more = 0;
32082 }
32083 else
32084 while (can_issue_more > 0)
32085 {
32086 nop = gen_nop ();
32087 emit_insn_before (nop, next_insn);
32088 can_issue_more--;
32089 }
32090
32091 *group_end = true;
32092 return 0;
32093 }
32094
32095 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32096 {
32097 int n_nops = rs6000_sched_insert_nops;
32098
32099 /* Nops can't be issued from the branch slot, so the effective
32100 issue_rate for nops is 'issue_rate - 1'. */
32101 if (can_issue_more == 0)
32102 can_issue_more = issue_rate;
32103 can_issue_more--;
32104 if (can_issue_more == 0)
32105 {
32106 can_issue_more = issue_rate - 1;
32107 (*group_count)++;
32108 end = true;
32109 for (i = 0; i < issue_rate; i++)
32110 {
32111 group_insns[i] = 0;
32112 }
32113 }
32114
32115 while (n_nops > 0)
32116 {
32117 nop = gen_nop ();
32118 emit_insn_before (nop, next_insn);
32119 if (can_issue_more == issue_rate - 1) /* new group begins */
32120 end = false;
32121 can_issue_more--;
32122 if (can_issue_more == 0)
32123 {
32124 can_issue_more = issue_rate - 1;
32125 (*group_count)++;
32126 end = true;
32127 for (i = 0; i < issue_rate; i++)
32128 {
32129 group_insns[i] = 0;
32130 }
32131 }
32132 n_nops--;
32133 }
32134
32135 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32136 can_issue_more++;
32137
32138 /* Is next_insn going to start a new group? */
32139 *group_end
32140 = (end
32141 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32142 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32143 || (can_issue_more < issue_rate &&
32144 insn_terminates_group_p (next_insn, previous_group)));
32145 if (*group_end && end)
32146 (*group_count)--;
32147
32148 if (sched_verbose > 6)
32149 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32150 *group_count, can_issue_more);
32151 return can_issue_more;
32152 }
32153
32154 return can_issue_more;
32155 }
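
/* For example (illustration only, assuming the documented option
   spellings): "-minsert-sched-nops=2" emits at most two nops in front
   of a costly NEXT_INSN, while "-minsert-sched-nops=regroup_exact"
   emits exactly as many nops as are needed to push NEXT_INSN into a
   fresh dispatch group.  */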
32156
32157 /* This function tries to synchronize the dispatch groups that the compiler "sees"
32158 with the dispatch groups that the processor dispatcher is expected to
32159 form in practice. It tries to achieve this synchronization by forcing the
32160 estimated processor grouping on the compiler (as opposed to the function
32161 'pad_groups' which tries to force the scheduler's grouping on the processor).
32162
32163 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32164 examines the (estimated) dispatch groups that will be formed by the processor
32165 dispatcher. It marks these group boundaries to reflect the estimated
32166 processor grouping, overriding the grouping that the scheduler had marked.
32167 Depending on the value of the flag '-minsert-sched-nops' this function can
32168 force certain insns into separate groups or force a certain distance between
32169 them by inserting nops, for example, if there exists a "costly dependence"
32170 between the insns.
32171
32172 The function estimates the group boundaries that the processor will form as
32173 follows: It keeps track of how many vacant issue slots are available after
32174 each insn. A subsequent insn will start a new group if one of the following
32175 4 cases applies:
32176 - no more vacant issue slots remain in the current dispatch group.
32177 - only the last issue slot, which is the branch slot, is vacant, but the next
32178 insn is not a branch.
32179 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32180 which means that a cracked insn (which occupies two issue slots) can't be
32181 issued in this group.
32182 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32183 start a new group. */
32184
32185 static int
32186 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32187 rtx_insn *tail)
32188 {
32189 rtx_insn *insn, *next_insn;
32190 int issue_rate;
32191 int can_issue_more;
32192 int slot, i;
32193 bool group_end;
32194 int group_count = 0;
32195 rtx *group_insns;
32196
32197 /* Initialize. */
32198 issue_rate = rs6000_issue_rate ();
32199 group_insns = XALLOCAVEC (rtx, issue_rate);
32200 for (i = 0; i < issue_rate; i++)
32201 {
32202 group_insns[i] = 0;
32203 }
32204 can_issue_more = issue_rate;
32205 slot = 0;
32206 insn = get_next_active_insn (prev_head_insn, tail);
32207 group_end = false;
32208
32209 while (insn != NULL_RTX)
32210 {
32211 slot = (issue_rate - can_issue_more);
32212 group_insns[slot] = insn;
32213 can_issue_more =
32214 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32215 if (insn_terminates_group_p (insn, current_group))
32216 can_issue_more = 0;
32217
32218 next_insn = get_next_active_insn (insn, tail);
32219 if (next_insn == NULL_RTX)
32220 return group_count + 1;
32221
32222 /* Is next_insn going to start a new group? */
32223 group_end
32224 = (can_issue_more == 0
32225 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32226 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32227 || (can_issue_more < issue_rate &&
32228 insn_terminates_group_p (next_insn, previous_group)));
32229
32230 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32231 next_insn, &group_end, can_issue_more,
32232 &group_count);
32233
32234 if (group_end)
32235 {
32236 group_count++;
32237 can_issue_more = 0;
32238 for (i = 0; i < issue_rate; i++)
32239 {
32240 group_insns[i] = 0;
32241 }
32242 }
32243
32244 if (GET_MODE (next_insn) == TImode && can_issue_more)
32245 PUT_MODE (next_insn, VOIDmode);
32246 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32247 PUT_MODE (next_insn, TImode);
32248
32249 insn = next_insn;
32250 if (can_issue_more == 0)
32251 can_issue_more = issue_rate;
32252 } /* while */
32253
32254 return group_count;
32255 }
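
/* Note (added for clarity): group boundaries are encoded by giving the
   first insn of each estimated dispatch group TImode via the PUT_MODE
   calls above, while insns inside a group keep VOIDmode; pad_groups
   below reads these marks back via GET_MODE.  */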
32256
32257 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32258 dispatch group boundaries that the scheduler had marked. Pad with nops
32259 any dispatch groups which have vacant issue slots, in order to force the
32260 scheduler's grouping on the processor dispatcher. The function
32261 returns the number of dispatch groups found. */
32262
32263 static int
32264 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32265 rtx_insn *tail)
32266 {
32267 rtx_insn *insn, *next_insn;
32268 rtx nop;
32269 int issue_rate;
32270 int can_issue_more;
32271 int group_end;
32272 int group_count = 0;
32273
32274 /* Initialize issue_rate. */
32275 issue_rate = rs6000_issue_rate ();
32276 can_issue_more = issue_rate;
32277
32278 insn = get_next_active_insn (prev_head_insn, tail);
32279 next_insn = get_next_active_insn (insn, tail);
32280
32281 while (insn != NULL_RTX)
32282 {
32283 can_issue_more =
32284 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32285
32286 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32287
32288 if (next_insn == NULL_RTX)
32289 break;
32290
32291 if (group_end)
32292 {
32293 /* If the scheduler had marked group termination at this location
32294 (between insn and next_insn), and neither insn nor next_insn will
32295 force group termination, pad the group with nops to force group
32296 termination. */
32297 if (can_issue_more
32298 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32299 && !insn_terminates_group_p (insn, current_group)
32300 && !insn_terminates_group_p (next_insn, previous_group))
32301 {
32302 if (!is_branch_slot_insn (next_insn))
32303 can_issue_more--;
32304
32305 while (can_issue_more)
32306 {
32307 nop = gen_nop ();
32308 emit_insn_before (nop, next_insn);
32309 can_issue_more--;
32310 }
32311 }
32312
32313 can_issue_more = issue_rate;
32314 group_count++;
32315 }
32316
32317 insn = next_insn;
32318 next_insn = get_next_active_insn (insn, tail);
32319 }
32320
32321 return group_count;
32322 }
32323
32324 /* We're beginning a new block. Initialize data structures as necessary. */
32325
32326 static void
32327 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32328 int sched_verbose ATTRIBUTE_UNUSED,
32329 int max_ready ATTRIBUTE_UNUSED)
32330 {
32331 last_scheduled_insn = NULL;
32332 load_store_pendulum = 0;
32333 divide_cnt = 0;
32334 vec_pairing = 0;
32335 }
32336
32337 /* The following function is called at the end of scheduling BB.
32338 After reload, it inserts nops to enforce insn group bundling. */
32339
32340 static void
32341 rs6000_sched_finish (FILE *dump, int sched_verbose)
32342 {
32343 int n_groups;
32344
32345 if (sched_verbose)
32346 fprintf (dump, "=== Finishing schedule.\n");
32347
32348 if (reload_completed && rs6000_sched_groups)
32349 {
32350 /* Do not run sched_finish hook when selective scheduling enabled. */
32351 if (sel_sched_p ())
32352 return;
32353
32354 if (rs6000_sched_insert_nops == sched_finish_none)
32355 return;
32356
32357 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32358 n_groups = pad_groups (dump, sched_verbose,
32359 current_sched_info->prev_head,
32360 current_sched_info->next_tail);
32361 else
32362 n_groups = redefine_groups (dump, sched_verbose,
32363 current_sched_info->prev_head,
32364 current_sched_info->next_tail);
32365
32366 if (sched_verbose >= 6)
32367 {
32368 fprintf (dump, "ngroups = %d\n", n_groups);
32369 print_rtl (dump, current_sched_info->prev_head);
32370 fprintf (dump, "Done finish_sched\n");
32371 }
32372 }
32373 }
32374
32375 struct rs6000_sched_context
32376 {
32377 short cached_can_issue_more;
32378 rtx_insn *last_scheduled_insn;
32379 int load_store_pendulum;
32380 int divide_cnt;
32381 int vec_pairing;
32382 };
32383
32384 typedef struct rs6000_sched_context rs6000_sched_context_def;
32385 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32386
32387 /* Allocate store for new scheduling context. */
32388 static void *
32389 rs6000_alloc_sched_context (void)
32390 {
32391 return xmalloc (sizeof (rs6000_sched_context_def));
32392 }
32393
32394 /* If CLEAN_P is true, initialize _SC with clean data;
32395 otherwise, initialize it from the global context. */
32396 static void
32397 rs6000_init_sched_context (void *_sc, bool clean_p)
32398 {
32399 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32400
32401 if (clean_p)
32402 {
32403 sc->cached_can_issue_more = 0;
32404 sc->last_scheduled_insn = NULL;
32405 sc->load_store_pendulum = 0;
32406 sc->divide_cnt = 0;
32407 sc->vec_pairing = 0;
32408 }
32409 else
32410 {
32411 sc->cached_can_issue_more = cached_can_issue_more;
32412 sc->last_scheduled_insn = last_scheduled_insn;
32413 sc->load_store_pendulum = load_store_pendulum;
32414 sc->divide_cnt = divide_cnt;
32415 sc->vec_pairing = vec_pairing;
32416 }
32417 }
32418
32419 /* Sets the global scheduling context to the one pointed to by _SC. */
32420 static void
32421 rs6000_set_sched_context (void *_sc)
32422 {
32423 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32424
32425 gcc_assert (sc != NULL);
32426
32427 cached_can_issue_more = sc->cached_can_issue_more;
32428 last_scheduled_insn = sc->last_scheduled_insn;
32429 load_store_pendulum = sc->load_store_pendulum;
32430 divide_cnt = sc->divide_cnt;
32431 vec_pairing = sc->vec_pairing;
32432 }
32433
32434 /* Free _SC. */
32435 static void
32436 rs6000_free_sched_context (void *_sc)
32437 {
32438 gcc_assert (_sc != NULL);
32439
32440 free (_sc);
32441 }
32442
32443 static bool
32444 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32445 {
32446 switch (get_attr_type (insn))
32447 {
32448 case TYPE_DIV:
32449 case TYPE_SDIV:
32450 case TYPE_DDIV:
32451 case TYPE_VECDIV:
32452 case TYPE_SSQRT:
32453 case TYPE_DSQRT:
32454 return false;
32455
32456 default:
32457 return true;
32458 }
32459 }
32460 \f
32461 /* Length in units of the trampoline for entering a nested function. */
32462
32463 int
32464 rs6000_trampoline_size (void)
32465 {
32466 int ret = 0;
32467
32468 switch (DEFAULT_ABI)
32469 {
32470 default:
32471 gcc_unreachable ();
32472
32473 case ABI_AIX:
32474 ret = (TARGET_32BIT) ? 12 : 24;
32475 break;
32476
32477 case ABI_ELFv2:
32478 gcc_assert (!TARGET_32BIT);
32479 ret = 32;
32480 break;
32481
32482 case ABI_DARWIN:
32483 case ABI_V4:
32484 ret = (TARGET_32BIT) ? 40 : 48;
32485 break;
32486 }
32487
32488 return ret;
32489 }
32490
32491 /* Emit RTL insns to initialize the variable parts of a trampoline.
32492 FNADDR is an RTX for the address of the function's pure code.
32493 CXT is an RTX for the static chain value for the function. */
32494
32495 static void
32496 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32497 {
32498 int regsize = (TARGET_32BIT) ? 4 : 8;
32499 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32500 rtx ctx_reg = force_reg (Pmode, cxt);
32501 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32502
32503 switch (DEFAULT_ABI)
32504 {
32505 default:
32506 gcc_unreachable ();
32507
32508 /* Under AIX, just build the 3-word function descriptor. */
32509 case ABI_AIX:
32510 {
32511 rtx fnmem, fn_reg, toc_reg;
32512
32513 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32514 error ("you cannot take the address of a nested function if you use "
32515 "the %qs option", "-mno-pointers-to-nested-functions");
32516
32517 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32518 fn_reg = gen_reg_rtx (Pmode);
32519 toc_reg = gen_reg_rtx (Pmode);
32520
32521 /* Macro to shorten the code expansions below. */
32522 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32523
32524 m_tramp = replace_equiv_address (m_tramp, addr);
32525
32526 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32527 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32528 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32529 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32530 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32531
32532 # undef MEM_PLUS
32533 }
32534 break;
32535
32536 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32537 case ABI_ELFv2:
32538 case ABI_DARWIN:
32539 case ABI_V4:
32540 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32541 LCT_NORMAL, VOIDmode,
32542 addr, Pmode,
32543 GEN_INT (rs6000_trampoline_size ()), SImode,
32544 fnaddr, Pmode,
32545 ctx_reg, Pmode);
32546 break;
32547 }
32548 }
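
/* For reference (illustration only): the AIX-style trampoline built
   above is a three-word (three-doubleword on 64-bit) function
   descriptor laid out as { code address, TOC pointer, static chain },
   matching the three MEM_PLUS stores in the ABI_AIX case.  */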
32549
32550 \f
32551 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32552 identifier as an argument, so the front end shouldn't look it up. */
32553
32554 static bool
32555 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32556 {
32557 return is_attribute_p ("altivec", attr_id);
32558 }
32559
32560 /* Handle the "altivec" attribute. The attribute may have
32561 arguments as follows:
32562
32563 __attribute__((altivec(vector__)))
32564 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32565 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32566
32567 and may appear more than once (e.g., 'vector bool char') in a
32568 given declaration. */
32569
32570 static tree
32571 rs6000_handle_altivec_attribute (tree *node,
32572 tree name ATTRIBUTE_UNUSED,
32573 tree args,
32574 int flags ATTRIBUTE_UNUSED,
32575 bool *no_add_attrs)
32576 {
32577 tree type = *node, result = NULL_TREE;
32578 machine_mode mode;
32579 int unsigned_p;
32580 char altivec_type
32581 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32582 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32583 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32584 : '?');
32585
32586 while (POINTER_TYPE_P (type)
32587 || TREE_CODE (type) == FUNCTION_TYPE
32588 || TREE_CODE (type) == METHOD_TYPE
32589 || TREE_CODE (type) == ARRAY_TYPE)
32590 type = TREE_TYPE (type);
32591
32592 mode = TYPE_MODE (type);
32593
32594 /* Check for invalid AltiVec type qualifiers. */
32595 if (type == long_double_type_node)
32596 error ("use of %<long double%> in AltiVec types is invalid");
32597 else if (type == boolean_type_node)
32598 error ("use of boolean types in AltiVec types is invalid");
32599 else if (TREE_CODE (type) == COMPLEX_TYPE)
32600 error ("use of %<complex%> in AltiVec types is invalid");
32601 else if (DECIMAL_FLOAT_MODE_P (mode))
32602 error ("use of decimal floating point types in AltiVec types is invalid");
32603 else if (!TARGET_VSX)
32604 {
32605 if (type == long_unsigned_type_node || type == long_integer_type_node)
32606 {
32607 if (TARGET_64BIT)
32608 error ("use of %<long%> in AltiVec types is invalid for "
32609 "64-bit code without %qs", "-mvsx");
32610 else if (rs6000_warn_altivec_long)
32611 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32612 "use %<int%>");
32613 }
32614 else if (type == long_long_unsigned_type_node
32615 || type == long_long_integer_type_node)
32616 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32617 "-mvsx");
32618 else if (type == double_type_node)
32619 error ("use of %<double%> in AltiVec types is invalid without %qs",
32620 "-mvsx");
32621 }
32622
32623 switch (altivec_type)
32624 {
32625 case 'v':
32626 unsigned_p = TYPE_UNSIGNED (type);
32627 switch (mode)
32628 {
32629 case E_TImode:
32630 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32631 break;
32632 case E_DImode:
32633 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32634 break;
32635 case E_SImode:
32636 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32637 break;
32638 case E_HImode:
32639 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32640 break;
32641 case E_QImode:
32642 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32643 break;
32644 case E_SFmode: result = V4SF_type_node; break;
32645 case E_DFmode: result = V2DF_type_node; break;
32646 /* If the user says 'vector int bool', we may be handed the 'bool'
32647 attribute _before_ the 'vector' attribute, and so select the
32648 proper type in the 'b' case below. */
32649 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32650 case E_V2DImode: case E_V2DFmode:
32651 result = type;
32652 default: break;
32653 }
32654 break;
32655 case 'b':
32656 switch (mode)
32657 {
32658 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32659 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32660 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32661 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32662 default: break;
32663 }
32664 break;
32665 case 'p':
32666 switch (mode)
32667 {
32668 case E_V8HImode: result = pixel_V8HI_type_node;
32669 default: break;
32670 }
32671 default: break;
32672 }
32673
32674 /* Propagate qualifiers attached to the element type
32675 onto the vector type. */
32676 if (result && result != type && TYPE_QUALS (type))
32677 result = build_qualified_type (result, TYPE_QUALS (type));
32678
32679 *no_add_attrs = true; /* No need to hang on to the attribute. */
32680
32681 if (result)
32682 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32683
32684 return NULL_TREE;
32685 }
32686
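/* A hedged usage sketch: the C front end rewrites the AltiVec keywords
   into these attributes, so a declaration such as

	vector unsigned int v;

   reaches this handler roughly as

	unsigned int v __attribute__ ((altivec (vector__)));

   and the E_SImode case above, with unsigned_p set, rewrites the type to
   unsigned_V4SI_type_node. */
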
32687 /* AltiVec defines five built-in scalar types that serve as vector
32688 elements; we must teach the compiler how to mangle them. The 128-bit
32689 floating point mangling is target-specific as well. */
32690
32691 static const char *
32692 rs6000_mangle_type (const_tree type)
32693 {
32694 type = TYPE_MAIN_VARIANT (type);
32695
32696 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32697 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32698 return NULL;
32699
32700 if (type == bool_char_type_node) return "U6__boolc";
32701 if (type == bool_short_type_node) return "U6__bools";
32702 if (type == pixel_type_node) return "u7__pixel";
32703 if (type == bool_int_type_node) return "U6__booli";
32704 if (type == bool_long_long_type_node) return "U6__boolx";
32705
32706 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32707 return "g";
32708 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32709 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32710
32711 /* For all other types, use the default mangling. */
32712 return NULL;
32713 }
32714
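/* Worked examples of the mangling above (illustrative): the element type
   of 'vector bool int' mangles as "U6__booli", '__pixel' as "u7__pixel",
   and IBM 128-bit long double as "g". Everything else falls through to
   the default mangling. */
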
32715 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32716 struct attribute_spec.handler. */
32717
32718 static tree
32719 rs6000_handle_longcall_attribute (tree *node, tree name,
32720 tree args ATTRIBUTE_UNUSED,
32721 int flags ATTRIBUTE_UNUSED,
32722 bool *no_add_attrs)
32723 {
32724 if (TREE_CODE (*node) != FUNCTION_TYPE
32725 && TREE_CODE (*node) != FIELD_DECL
32726 && TREE_CODE (*node) != TYPE_DECL)
32727 {
32728 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32729 name);
32730 *no_add_attrs = true;
32731 }
32732
32733 return NULL_TREE;
32734 }
32735
32736 /* Set longcall attributes on all functions declared when
32737 rs6000_default_long_calls is true. */
32738 static void
32739 rs6000_set_default_type_attributes (tree type)
32740 {
32741 if (rs6000_default_long_calls
32742 && (TREE_CODE (type) == FUNCTION_TYPE
32743 || TREE_CODE (type) == METHOD_TYPE))
32744 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32745 NULL_TREE,
32746 TYPE_ATTRIBUTES (type));
32747
32748 #if TARGET_MACHO
32749 darwin_set_default_type_attributes (type);
32750 #endif
32751 }
32752
32753 /* Return a reference suitable for calling a function with the
32754 longcall attribute. */
32755
32756 static rtx
32757 rs6000_longcall_ref (rtx call_ref, rtx arg)
32758 {
32759 /* System V prepends '.' to the internal name, so skip any leading dots. */
32760 const char *call_name = XSTR (call_ref, 0);
32761 if (*call_name == '.')
32762 {
32763 while (*call_name == '.')
32764 call_name++;
32765
32766 tree node = get_identifier (call_name);
32767 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32768 }
32769
32770 if (HAVE_AS_PLTSEQ
32771 && TARGET_TLS_MARKERS
32772 && (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4))
32773 {
32774 rtx base = const0_rtx;
32775 int regno;
32776 if (DEFAULT_ABI == ABI_ELFv2)
32777 {
32778 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32779 regno = 12;
32780 }
32781 else
32782 {
32783 if (flag_pic)
32784 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32785 regno = 11;
32786 }
32787 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32788 may be used by a function global entry point. For SysV4, r11
32789 is used by __glink_PLTresolve lazy resolver entry. */
32790 rtx reg = gen_rtx_REG (Pmode, regno);
32791 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32792 UNSPEC_PLT16_HA);
32793 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32794 UNSPEC_PLT16_LO);
32795 emit_insn (gen_rtx_SET (reg, hi));
32796 emit_insn (gen_rtx_SET (reg, lo));
32797 return reg;
32798 }
32799
32800 return force_reg (Pmode, call_ref);
32801 }
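
/* A rough sketch of the PLTSEQ sequence requested above (the exact
   mnemonics come from the corresponding .md patterns, so treat this as
   an assumption): for ELFv2 the HA/LO unspec pair is expected to become
   something like

	addis r12,r2,func@plt@ha
	ld r12,func@plt@l(r12)

   leaving the target in the register the linker's PLT stubs require. */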
32802 \f
32803 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32804 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32805 #endif
32806
32807 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32808 struct attribute_spec.handler. */
32809 static tree
32810 rs6000_handle_struct_attribute (tree *node, tree name,
32811 tree args ATTRIBUTE_UNUSED,
32812 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32813 {
32814 tree *type = NULL;
32815 if (DECL_P (*node))
32816 {
32817 if (TREE_CODE (*node) == TYPE_DECL)
32818 type = &TREE_TYPE (*node);
32819 }
32820 else
32821 type = node;
32822
32823 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32824 || TREE_CODE (*type) == UNION_TYPE)))
32825 {
32826 warning (OPT_Wattributes, "%qE attribute ignored", name);
32827 *no_add_attrs = true;
32828 }
32829
32830 else if ((is_attribute_p ("ms_struct", name)
32831 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32832 || ((is_attribute_p ("gcc_struct", name)
32833 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32834 {
32835 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32836 name);
32837 *no_add_attrs = true;
32838 }
32839
32840 return NULL_TREE;
32841 }
32842
32843 static bool
32844 rs6000_ms_bitfield_layout_p (const_tree record_type)
32845 {
32846 return (TARGET_USE_MS_BITFIELD_LAYOUT
32847 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32848 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32849 }
32850 \f
32851 #ifdef USING_ELFOS_H
32852
32853 /* A get_unnamed_section callback, used for switching to toc_section. */
32854
32855 static void
32856 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32857 {
32858 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32859 && TARGET_MINIMAL_TOC)
32860 {
32861 if (!toc_initialized)
32862 {
32863 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32864 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32865 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32866 fprintf (asm_out_file, "\t.tc ");
32867 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32868 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32869 fprintf (asm_out_file, "\n");
32870
32871 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32872 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32873 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32874 fprintf (asm_out_file, " = .+32768\n");
32875 toc_initialized = 1;
32876 }
32877 else
32878 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32879 }
32880 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32881 {
32882 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32883 if (!toc_initialized)
32884 {
32885 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32886 toc_initialized = 1;
32887 }
32888 }
32889 else
32890 {
32891 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32892 if (!toc_initialized)
32893 {
32894 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32895 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32896 fprintf (asm_out_file, " = .+32768\n");
32897 toc_initialized = 1;
32898 }
32899 }
32900 }
32901
32902 /* Implement TARGET_ASM_INIT_SECTIONS. */
32903
32904 static void
32905 rs6000_elf_asm_init_sections (void)
32906 {
32907 toc_section
32908 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32909
32910 sdata2_section
32911 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32912 SDATA2_SECTION_ASM_OP);
32913 }
32914
32915 /* Implement TARGET_SELECT_RTX_SECTION. */
32916
32917 static section *
32918 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32919 unsigned HOST_WIDE_INT align)
32920 {
32921 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32922 return toc_section;
32923 else
32924 return default_elf_select_rtx_section (mode, x, align);
32925 }
32926 \f
32927 /* For a SYMBOL_REF, set generic flags and then perform some
32928 target-specific processing.
32929
32930 When the AIX ABI is requested on a non-AIX system, replace the
32931 function name with the real name (with a leading .) rather than the
32932 function descriptor name. This saves a lot of overriding code to
32933 read the prefixes. */
32934
32935 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32936 static void
32937 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32938 {
32939 default_encode_section_info (decl, rtl, first);
32940
32941 if (first
32942 && TREE_CODE (decl) == FUNCTION_DECL
32943 && !TARGET_AIX
32944 && DEFAULT_ABI == ABI_AIX)
32945 {
32946 rtx sym_ref = XEXP (rtl, 0);
32947 size_t len = strlen (XSTR (sym_ref, 0));
32948 char *str = XALLOCAVEC (char, len + 2);
32949 str[0] = '.';
32950 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32951 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32952 }
32953 }
32954
32955 static inline bool
32956 compare_section_name (const char *section, const char *templ)
32957 {
32958 int len;
32959
32960 len = strlen (templ);
32961 return (strncmp (section, templ, len) == 0
32962 && (section[len] == 0 || section[len] == '.'));
32963 }
32964
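/* For example, compare_section_name (".sdata.foo", ".sdata") is true,
   while compare_section_name (".sdata2", ".sdata") is false because the
   character after the prefix is '2' rather than '\0' or '.'; this is why
   ".sdata" and ".sdata2" are both checked explicitly below. */
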
32965 bool
32966 rs6000_elf_in_small_data_p (const_tree decl)
32967 {
32968 if (rs6000_sdata == SDATA_NONE)
32969 return false;
32970
32971 /* We want to merge strings, so we never consider them small data. */
32972 if (TREE_CODE (decl) == STRING_CST)
32973 return false;
32974
32975 /* Functions are never in the small data area. */
32976 if (TREE_CODE (decl) == FUNCTION_DECL)
32977 return false;
32978
32979 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32980 {
32981 const char *section = DECL_SECTION_NAME (decl);
32982 if (compare_section_name (section, ".sdata")
32983 || compare_section_name (section, ".sdata2")
32984 || compare_section_name (section, ".gnu.linkonce.s")
32985 || compare_section_name (section, ".sbss")
32986 || compare_section_name (section, ".sbss2")
32987 || compare_section_name (section, ".gnu.linkonce.sb")
32988 || strcmp (section, ".PPC.EMB.sdata0") == 0
32989 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32990 return true;
32991 }
32992 else
32993 {
32994 /* If we are told not to put readonly data in sdata, then don't. */
32995 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32996 && !rs6000_readonly_in_sdata)
32997 return false;
32998
32999 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33000
33001 if (size > 0
33002 && size <= g_switch_value
33003 /* If it's not public, and we're not going to reference it there,
33004 there's no need to put it in the small data section. */
33005 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33006 return true;
33007 }
33008
33009 return false;
33010 }
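
/* Illustrative example (variable name hypothetical): with -msdata=eabi
   and -G 8, a file-scope definition such as

	int counter = 1;

   is 4 bytes, no larger than g_switch_value, and so is treated as small
   data, while a 1 KiB array fails the size test and stays in the normal
   data sections. */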
33011
33012 #endif /* USING_ELFOS_H */
33013 \f
33014 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33015
33016 static bool
33017 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33018 {
33019 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33020 }
33021
33022 /* Do not place thread-local symbol refs in the object blocks. */
33023
33024 static bool
33025 rs6000_use_blocks_for_decl_p (const_tree decl)
33026 {
33027 return !DECL_THREAD_LOCAL_P (decl);
33028 }
33029 \f
33030 /* Return a REG that occurs in ADDR with coefficient 1.
33031 ADDR can be effectively incremented by incrementing REG.
33032
33033 r0 is special and we must not select it as an address
33034 register by this routine since our caller will try to
33035 increment the returned register via an "la" instruction. */
33036
33037 rtx
33038 find_addr_reg (rtx addr)
33039 {
33040 while (GET_CODE (addr) == PLUS)
33041 {
33042 if (GET_CODE (XEXP (addr, 0)) == REG
33043 && REGNO (XEXP (addr, 0)) != 0)
33044 addr = XEXP (addr, 0);
33045 else if (GET_CODE (XEXP (addr, 1)) == REG
33046 && REGNO (XEXP (addr, 1)) != 0)
33047 addr = XEXP (addr, 1);
33048 else if (CONSTANT_P (XEXP (addr, 0)))
33049 addr = XEXP (addr, 1);
33050 else if (CONSTANT_P (XEXP (addr, 1)))
33051 addr = XEXP (addr, 0);
33052 else
33053 gcc_unreachable ();
33054 }
33055 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
33056 return addr;
33057 }
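
/* For instance, given ADDR = (plus (reg 9) (const_int 16)) this returns
   (reg 9); given (plus (reg 0) (reg 9)) it also returns (reg 9), since
   r0 may not be chosen: "la" treats a base of r0 as the constant 0. */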
33058
33059 void
33060 rs6000_fatal_bad_address (rtx op)
33061 {
33062 fatal_insn ("bad address", op);
33063 }
33064
33065 #if TARGET_MACHO
33066
33067 typedef struct branch_island_d {
33068 tree function_name;
33069 tree label_name;
33070 int line_number;
33071 } branch_island;
33072
33073
33074 static vec<branch_island, va_gc> *branch_islands;
33075
33076 /* Remember to generate a branch island for far calls to the given
33077 function. */
33078
33079 static void
33080 add_compiler_branch_island (tree label_name, tree function_name,
33081 int line_number)
33082 {
33083 branch_island bi = {function_name, label_name, line_number};
33084 vec_safe_push (branch_islands, bi);
33085 }
33086
33087 /* Generate far-jump branch islands for everything recorded in
33088 branch_islands. Invoked immediately after the last instruction of
33089 the epilogue has been emitted; the branch islands must be appended
33090 to, and contiguous with, the function body. Mach-O stubs are
33091 generated in machopic_output_stub(). */
33092
33093 static void
33094 macho_branch_islands (void)
33095 {
33096 char tmp_buf[512];
33097
33098 while (!vec_safe_is_empty (branch_islands))
33099 {
33100 branch_island *bi = &branch_islands->last ();
33101 const char *label = IDENTIFIER_POINTER (bi->label_name);
33102 const char *name = IDENTIFIER_POINTER (bi->function_name);
33103 char name_buf[512];
33104 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33105 if (name[0] == '*' || name[0] == '&')
33106 strcpy (name_buf, name+1);
33107 else
33108 {
33109 name_buf[0] = '_';
33110 strcpy (name_buf+1, name);
33111 }
33112 strcpy (tmp_buf, "\n");
33113 strcat (tmp_buf, label);
33114 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33115 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33116 dbxout_stabd (N_SLINE, bi->line_number);
33117 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33118 if (flag_pic)
33119 {
33120 if (TARGET_LINK_STACK)
33121 {
33122 char name[32];
33123 get_ppc476_thunk_name (name);
33124 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33125 strcat (tmp_buf, name);
33126 strcat (tmp_buf, "\n");
33127 strcat (tmp_buf, label);
33128 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33129 }
33130 else
33131 {
33132 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33133 strcat (tmp_buf, label);
33134 strcat (tmp_buf, "_pic\n");
33135 strcat (tmp_buf, label);
33136 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33137 }
33138
33139 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33140 strcat (tmp_buf, name_buf);
33141 strcat (tmp_buf, " - ");
33142 strcat (tmp_buf, label);
33143 strcat (tmp_buf, "_pic)\n");
33144
33145 strcat (tmp_buf, "\tmtlr r0\n");
33146
33147 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33148 strcat (tmp_buf, name_buf);
33149 strcat (tmp_buf, " - ");
33150 strcat (tmp_buf, label);
33151 strcat (tmp_buf, "_pic)\n");
33152
33153 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33154 }
33155 else
33156 {
33157 strcat (tmp_buf, ":\nlis r12,hi16(");
33158 strcat (tmp_buf, name_buf);
33159 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33160 strcat (tmp_buf, name_buf);
33161 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33162 }
33163 output_asm_insn (tmp_buf, 0);
33164 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33165 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33166 dbxout_stabd (N_SLINE, bi->line_number);
33167 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33168 branch_islands->pop ();
33169 }
33170 }
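
/* Sketch of the non-PIC island built above for a function _foo with
   island label L42 (note the first instruction really is emitted without
   a leading tab by the strcat chain):

	L42:
	lis r12,hi16(_foo)
		ori r12,r12,lo16(_foo)
		mtctr r12
		bctr

   The PIC variant instead materializes the target PC-relatively via the
   bcl 20,31 / mflr r11 idiom. */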
33171
33172 /* NO_PREVIOUS_DEF checks whether the function name is already present
33173 in the branch island list. */
33174
33175 static int
33176 no_previous_def (tree function_name)
33177 {
33178 branch_island *bi;
33179 unsigned ix;
33180
33181 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33182 if (function_name == bi->function_name)
33183 return 0;
33184 return 1;
33185 }
33186
33187 /* GET_PREV_LABEL gets the label name from the previous definition of
33188 the function. */
33189
33190 static tree
33191 get_prev_label (tree function_name)
33192 {
33193 branch_island *bi;
33194 unsigned ix;
33195
33196 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33197 if (function_name == bi->function_name)
33198 return bi->label_name;
33199 return NULL_TREE;
33200 }
33201
33202 /* Generate PIC and indirect symbol stubs. */
33203
33204 void
33205 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33206 {
33207 unsigned int length;
33208 char *symbol_name, *lazy_ptr_name;
33209 char *local_label_0;
33210 static int label = 0;
33211
33212 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33213 symb = (*targetm.strip_name_encoding) (symb);
33214
33215
33216 length = strlen (symb);
33217 symbol_name = XALLOCAVEC (char, length + 32);
33218 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33219
33220 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33221 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33222
33223 if (flag_pic == 2)
33224 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33225 else
33226 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33227
33228 if (flag_pic == 2)
33229 {
33230 fprintf (file, "\t.align 5\n");
33231
33232 fprintf (file, "%s:\n", stub);
33233 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33234
33235 label++;
33236 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33237 sprintf (local_label_0, "\"L%011d$spb\"", label);
33238
33239 fprintf (file, "\tmflr r0\n");
33240 if (TARGET_LINK_STACK)
33241 {
33242 char name[32];
33243 get_ppc476_thunk_name (name);
33244 fprintf (file, "\tbl %s\n", name);
33245 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33246 }
33247 else
33248 {
33249 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33250 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33251 }
33252 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33253 lazy_ptr_name, local_label_0);
33254 fprintf (file, "\tmtlr r0\n");
33255 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33256 (TARGET_64BIT ? "ldu" : "lwzu"),
33257 lazy_ptr_name, local_label_0);
33258 fprintf (file, "\tmtctr r12\n");
33259 fprintf (file, "\tbctr\n");
33260 }
33261 else
33262 {
33263 fprintf (file, "\t.align 4\n");
33264
33265 fprintf (file, "%s:\n", stub);
33266 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33267
33268 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33269 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33270 (TARGET_64BIT ? "ldu" : "lwzu"),
33271 lazy_ptr_name);
33272 fprintf (file, "\tmtctr r12\n");
33273 fprintf (file, "\tbctr\n");
33274 }
33275
33276 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33277 fprintf (file, "%s:\n", lazy_ptr_name);
33278 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33279 fprintf (file, "%sdyld_stub_binding_helper\n",
33280 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33281 }
33282
33283 /* Legitimize PIC addresses. If the address is already
33284 position-independent, we return ORIG. Newly generated
33285 position-independent addresses go into a reg. This is REG if nonzero;
33286 otherwise we allocate register(s) as necessary. */
33287
33288 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
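
/* SMALL_INT relies on unsigned wraparound: UINTVAL (X) + 0x8000 fits in
   16 bits exactly when -0x8000 <= INTVAL (X) <= 0x7fff, i.e. when X is
   a valid signed 16-bit displacement. */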
33289
33290 rtx
33291 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33292 rtx reg)
33293 {
33294 rtx base, offset;
33295
33296 if (reg == NULL && !reload_completed)
33297 reg = gen_reg_rtx (Pmode);
33298
33299 if (GET_CODE (orig) == CONST)
33300 {
33301 rtx reg_temp;
33302
33303 if (GET_CODE (XEXP (orig, 0)) == PLUS
33304 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33305 return orig;
33306
33307 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33308
33309 /* Use a different reg for the intermediate value, as
33310 it will be marked UNCHANGING. */
33311 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33312 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33313 Pmode, reg_temp);
33314 offset =
33315 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33316 Pmode, reg);
33317
33318 if (GET_CODE (offset) == CONST_INT)
33319 {
33320 if (SMALL_INT (offset))
33321 return plus_constant (Pmode, base, INTVAL (offset));
33322 else if (!reload_completed)
33323 offset = force_reg (Pmode, offset);
33324 else
33325 {
33326 rtx mem = force_const_mem (Pmode, orig);
33327 return machopic_legitimize_pic_address (mem, Pmode, reg);
33328 }
33329 }
33330 return gen_rtx_PLUS (Pmode, base, offset);
33331 }
33332
33333 /* Fall back on generic machopic code. */
33334 return machopic_legitimize_pic_address (orig, mode, reg);
33335 }
33336
33337 /* Output a .machine directive for the Darwin assembler, and call
33338 the generic start_file routine. */
33339
33340 static void
33341 rs6000_darwin_file_start (void)
33342 {
33343 static const struct
33344 {
33345 const char *arg;
33346 const char *name;
33347 HOST_WIDE_INT if_set;
33348 } mapping[] = {
33349 { "ppc64", "ppc64", MASK_64BIT },
33350 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33351 { "power4", "ppc970", 0 },
33352 { "G5", "ppc970", 0 },
33353 { "7450", "ppc7450", 0 },
33354 { "7400", "ppc7400", MASK_ALTIVEC },
33355 { "G4", "ppc7400", 0 },
33356 { "750", "ppc750", 0 },
33357 { "740", "ppc750", 0 },
33358 { "G3", "ppc750", 0 },
33359 { "604e", "ppc604e", 0 },
33360 { "604", "ppc604", 0 },
33361 { "603e", "ppc603", 0 },
33362 { "603", "ppc603", 0 },
33363 { "601", "ppc601", 0 },
33364 { NULL, "ppc", 0 } };
33365 const char *cpu_id = "";
33366 size_t i;
33367
33368 rs6000_file_start ();
33369 darwin_file_start ();
33370
33371 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33372
33373 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33374 cpu_id = rs6000_default_cpu;
33375
33376 if (global_options_set.x_rs6000_cpu_index)
33377 cpu_id = processor_target_table[rs6000_cpu_index].name;
33378
33379 /* Look through the mapping array. Pick the first name that either
33380 matches the argument, has a bit set in IF_SET that is also set
33381 in the target flags, or has a NULL name. */
33382
33383 i = 0;
33384 while (mapping[i].arg != NULL
33385 && strcmp (mapping[i].arg, cpu_id) != 0
33386 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33387 i++;
33388
33389 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33390 }
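
/* For example, -mcpu=G5 matches the "G5" row above and emits
   "\t.machine ppc970"; if nothing matches, the NULL sentinel row yields
   "\t.machine ppc". */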
33391
33392 #endif /* TARGET_MACHO */
33393
33394 #if TARGET_ELF
33395 static int
33396 rs6000_elf_reloc_rw_mask (void)
33397 {
33398 if (flag_pic)
33399 return 3;
33400 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33401 return 2;
33402 else
33403 return 0;
33404 }
33405
33406 /* Record an element in the table of global constructors. SYMBOL is
33407 a SYMBOL_REF of the function to be called; PRIORITY is a number
33408 between 0 and MAX_INIT_PRIORITY.
33409
33410 This differs from default_named_section_asm_out_constructor in
33411 that we have special handling for -mrelocatable. */
33412
33413 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33414 static void
33415 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33416 {
33417 const char *section = ".ctors";
33418 char buf[18];
33419
33420 if (priority != DEFAULT_INIT_PRIORITY)
33421 {
33422 sprintf (buf, ".ctors.%.5u",
33423 /* Invert the numbering so the linker puts us in the proper
33424 order; constructors are run from right to left, and the
33425 linker sorts in increasing order. */
33426 MAX_INIT_PRIORITY - priority);
33427 section = buf;
33428 }
33429
33430 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33431 assemble_align (POINTER_SIZE);
33432
33433 if (DEFAULT_ABI == ABI_V4
33434 && (TARGET_RELOCATABLE || flag_pic > 1))
33435 {
33436 fputs ("\t.long (", asm_out_file);
33437 output_addr_const (asm_out_file, symbol);
33438 fputs (")@fixup\n", asm_out_file);
33439 }
33440 else
33441 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33442 }
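
/* For instance, a constructor with priority 65400 lands in section
   ".ctors.00135" (65535 - 65400), so the linker's increasing sort gives
   the right-to-left execution order described above. */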
33443
33444 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33445 static void
33446 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33447 {
33448 const char *section = ".dtors";
33449 char buf[18];
33450
33451 if (priority != DEFAULT_INIT_PRIORITY)
33452 {
33453 sprintf (buf, ".dtors.%.5u",
33454 /* Invert the numbering so the linker puts us in the proper
33455 order; destructors are run from left to right, and the
33456 linker sorts in increasing order. */
33457 MAX_INIT_PRIORITY - priority);
33458 section = buf;
33459 }
33460
33461 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33462 assemble_align (POINTER_SIZE);
33463
33464 if (DEFAULT_ABI == ABI_V4
33465 && (TARGET_RELOCATABLE || flag_pic > 1))
33466 {
33467 fputs ("\t.long (", asm_out_file);
33468 output_addr_const (asm_out_file, symbol);
33469 fputs (")@fixup\n", asm_out_file);
33470 }
33471 else
33472 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33473 }
33474
33475 void
33476 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33477 {
33478 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33479 {
33480 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33481 ASM_OUTPUT_LABEL (file, name);
33482 fputs (DOUBLE_INT_ASM_OP, file);
33483 rs6000_output_function_entry (file, name);
33484 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33485 if (DOT_SYMBOLS)
33486 {
33487 fputs ("\t.size\t", file);
33488 assemble_name (file, name);
33489 fputs (",24\n\t.type\t.", file);
33490 assemble_name (file, name);
33491 fputs (",@function\n", file);
33492 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33493 {
33494 fputs ("\t.globl\t.", file);
33495 assemble_name (file, name);
33496 putc ('\n', file);
33497 }
33498 }
33499 else
33500 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33501 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33502 rs6000_output_function_entry (file, name);
33503 fputs (":\n", file);
33504 return;
33505 }
33506
33507 int uses_toc;
33508 if (DEFAULT_ABI == ABI_V4
33509 && (TARGET_RELOCATABLE || flag_pic > 1)
33510 && !TARGET_SECURE_PLT
33511 && (!constant_pool_empty_p () || crtl->profile)
33512 && (uses_toc = uses_TOC ()))
33513 {
33514 char buf[256];
33515
33516 if (uses_toc == 2)
33517 switch_to_other_text_partition ();
33518 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33519
33520 fprintf (file, "\t.long ");
33521 assemble_name (file, toc_label_name);
33522 need_toc_init = 1;
33523 putc ('-', file);
33524 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33525 assemble_name (file, buf);
33526 putc ('\n', file);
33527 if (uses_toc == 2)
33528 switch_to_other_text_partition ();
33529 }
33530
33531 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33532 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33533
33534 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33535 {
33536 char buf[256];
33537
33538 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33539
33540 fprintf (file, "\t.quad .TOC.-");
33541 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33542 assemble_name (file, buf);
33543 putc ('\n', file);
33544 }
33545
33546 if (DEFAULT_ABI == ABI_AIX)
33547 {
33548 const char *desc_name, *orig_name;
33549
33550 orig_name = (*targetm.strip_name_encoding) (name);
33551 desc_name = orig_name;
33552 while (*desc_name == '.')
33553 desc_name++;
33554
33555 if (TREE_PUBLIC (decl))
33556 fprintf (file, "\t.globl %s\n", desc_name);
33557
33558 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33559 fprintf (file, "%s:\n", desc_name);
33560 fprintf (file, "\t.long %s\n", orig_name);
33561 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33562 fputs ("\t.long 0\n", file);
33563 fprintf (file, "\t.previous\n");
33564 }
33565 ASM_OUTPUT_LABEL (file, name);
33566 }
33567
33568 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33569 static void
33570 rs6000_elf_file_end (void)
33571 {
33572 #ifdef HAVE_AS_GNU_ATTRIBUTE
33573 /* ??? The value emitted depends on options active at file end.
33574 Assume anyone using #pragma or attributes that might change
33575 options knows what they are doing. */
33576 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33577 && rs6000_passes_float)
33578 {
33579 int fp;
33580
33581 if (TARGET_HARD_FLOAT)
33582 fp = 1;
33583 else
33584 fp = 2;
33585 if (rs6000_passes_long_double)
33586 {
33587 if (!TARGET_LONG_DOUBLE_128)
33588 fp |= 2 * 4;
33589 else if (TARGET_IEEEQUAD)
33590 fp |= 3 * 4;
33591 else
33592 fp |= 1 * 4;
33593 }
33594 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33595 }
33596 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33597 {
33598 if (rs6000_passes_vector)
33599 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33600 (TARGET_ALTIVEC_ABI ? 2 : 1));
33601 if (rs6000_returns_struct)
33602 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33603 aix_struct_return ? 2 : 1);
33604 }
33605 #endif
33606 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33607 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33608 file_end_indicate_exec_stack ();
33609 #endif
33610
33611 if (flag_split_stack)
33612 file_end_indicate_split_stack ();
33613
33614 if (cpu_builtin_p)
33615 {
33616 /* We have expanded a CPU builtin, so we need to emit a reference to
33617 the special symbol that LIBC uses to declare it supports the
33618 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33619 switch_to_section (data_section);
33620 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33621 fprintf (asm_out_file, "\t%s %s\n",
33622 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33623 }
33624 }
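
/* Worked example of the FP attribute computed above: hard float with
   IBM 128-bit long double arguments gives fp = 1 | (1 * 4) = 5, i.e.
   "\t.gnu_attribute 4, 5". */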
33625 #endif
33626
33627 #if TARGET_XCOFF
33628
33629 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33630 #define HAVE_XCOFF_DWARF_EXTRAS 0
33631 #endif
33632
33633 static enum unwind_info_type
33634 rs6000_xcoff_debug_unwind_info (void)
33635 {
33636 return UI_NONE;
33637 }
33638
33639 static void
33640 rs6000_xcoff_asm_output_anchor (rtx symbol)
33641 {
33642 char buffer[100];
33643
33644 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33645 SYMBOL_REF_BLOCK_OFFSET (symbol));
33646 fprintf (asm_out_file, "%s", SET_ASM_OP);
33647 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33648 fprintf (asm_out_file, ",");
33649 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33650 fprintf (asm_out_file, "\n");
33651 }
33652
33653 static void
33654 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33655 {
33656 fputs (GLOBAL_ASM_OP, stream);
33657 RS6000_OUTPUT_BASENAME (stream, name);
33658 putc ('\n', stream);
33659 }
33660
33661 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
33662 points to the section string variable. */
33663
33664 static void
33665 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33666 {
33667 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33668 *(const char *const *) directive,
33669 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33670 }
33671
33672 /* Likewise for read-write sections. */
33673
33674 static void
33675 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33676 {
33677 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33678 *(const char *const *) directive,
33679 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33680 }
33681
33682 static void
33683 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33684 {
33685 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33686 *(const char *const *) directive,
33687 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33688 }
33689
33690 /* A get_unnamed_section callback, used for switching to toc_section. */
33691
33692 static void
33693 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33694 {
33695 if (TARGET_MINIMAL_TOC)
33696 {
33697 /* toc_section is always selected at least once from
33698 rs6000_xcoff_file_start, so this is guaranteed to
33699 always be defined once and only once in each file. */
33700 if (!toc_initialized)
33701 {
33702 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33703 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33704 toc_initialized = 1;
33705 }
33706 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33707 (TARGET_32BIT ? "" : ",3"));
33708 }
33709 else
33710 fputs ("\t.toc\n", asm_out_file);
33711 }
33712
33713 /* Implement TARGET_ASM_INIT_SECTIONS. */
33714
33715 static void
33716 rs6000_xcoff_asm_init_sections (void)
33717 {
33718 read_only_data_section
33719 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33720 &xcoff_read_only_section_name);
33721
33722 private_data_section
33723 = get_unnamed_section (SECTION_WRITE,
33724 rs6000_xcoff_output_readwrite_section_asm_op,
33725 &xcoff_private_data_section_name);
33726
33727 tls_data_section
33728 = get_unnamed_section (SECTION_TLS,
33729 rs6000_xcoff_output_tls_section_asm_op,
33730 &xcoff_tls_data_section_name);
33731
33732 tls_private_data_section
33733 = get_unnamed_section (SECTION_TLS,
33734 rs6000_xcoff_output_tls_section_asm_op,
33735 &xcoff_private_data_section_name);
33736
33737 read_only_private_data_section
33738 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33739 &xcoff_private_data_section_name);
33740
33741 toc_section
33742 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33743
33744 readonly_data_section = read_only_data_section;
33745 }
33746
33747 static int
33748 rs6000_xcoff_reloc_rw_mask (void)
33749 {
33750 return 3;
33751 }
33752
33753 static void
33754 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33755 tree decl ATTRIBUTE_UNUSED)
33756 {
33757 int smclass;
33758 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33759
33760 if (flags & SECTION_EXCLUDE)
33761 smclass = 4;
33762 else if (flags & SECTION_DEBUG)
33763 {
33764 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33765 return;
33766 }
33767 else if (flags & SECTION_CODE)
33768 smclass = 0;
33769 else if (flags & SECTION_TLS)
33770 smclass = 3;
33771 else if (flags & SECTION_WRITE)
33772 smclass = 2;
33773 else
33774 smclass = 1;
33775
33776 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33777 (flags & SECTION_CODE) ? "." : "",
33778 name, suffix[smclass], flags & SECTION_ENTSIZE);
33779 }
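
/* For example (illustrative), a writable named section "mydata" whose
   SECTION_ENTSIZE bits encode an alignment of 8 bytes comes out as

	.csect mydata[RW],3

   while a code section instead gets a leading '.' and the "PR" mapping
   class. */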
33780
33781 #define IN_NAMED_SECTION(DECL) \
33782 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33783 && DECL_SECTION_NAME (DECL) != NULL)
33784
33785 static section *
33786 rs6000_xcoff_select_section (tree decl, int reloc,
33787 unsigned HOST_WIDE_INT align)
33788 {
33789 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33790 named section. */
33791 if (align > BIGGEST_ALIGNMENT)
33792 {
33793 resolve_unique_section (decl, reloc, true);
33794 if (IN_NAMED_SECTION (decl))
33795 return get_named_section (decl, NULL, reloc);
33796 }
33797
33798 if (decl_readonly_section (decl, reloc))
33799 {
33800 if (TREE_PUBLIC (decl))
33801 return read_only_data_section;
33802 else
33803 return read_only_private_data_section;
33804 }
33805 else
33806 {
33807 #if HAVE_AS_TLS
33808 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33809 {
33810 if (TREE_PUBLIC (decl))
33811 return tls_data_section;
33812 else if (bss_initializer_p (decl))
33813 {
33814 /* Convert to COMMON to emit in BSS. */
33815 DECL_COMMON (decl) = 1;
33816 return tls_comm_section;
33817 }
33818 else
33819 return tls_private_data_section;
33820 }
33821 else
33822 #endif
33823 if (TREE_PUBLIC (decl))
33824 return data_section;
33825 else
33826 return private_data_section;
33827 }
33828 }
33829
33830 static void
33831 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33832 {
33833 const char *name;
33834
33835 /* Use select_section for private data and uninitialized data with
33836 alignment <= BIGGEST_ALIGNMENT. */
33837 if (!TREE_PUBLIC (decl)
33838 || DECL_COMMON (decl)
33839 || (DECL_INITIAL (decl) == NULL_TREE
33840 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33841 || DECL_INITIAL (decl) == error_mark_node
33842 || (flag_zero_initialized_in_bss
33843 && initializer_zerop (DECL_INITIAL (decl))))
33844 return;
33845
33846 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33847 name = (*targetm.strip_name_encoding) (name);
33848 set_decl_section_name (decl, name);
33849 }
33850
33851 /* Select section for constant in constant pool.
33852
33853 On RS/6000, all constants are in the private read-only data area.
33854 However, if this is being placed in the TOC it must be output as a
33855 toc entry. */
33856
33857 static section *
33858 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33859 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33860 {
33861 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33862 return toc_section;
33863 else
33864 return read_only_private_data_section;
33865 }
33866
33867 /* Remove any trailing [DS] or the like from the symbol name. */
33868
33869 static const char *
33870 rs6000_xcoff_strip_name_encoding (const char *name)
33871 {
33872 size_t len;
33873 if (*name == '*')
33874 name++;
33875 len = strlen (name);
33876 if (name[len - 1] == ']')
33877 return ggc_alloc_string (name, len - 4);
33878 else
33879 return name;
33880 }
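
/* E.g. "foo[DS]" becomes "foo" and "*bar[RW]" becomes "bar". Note this
   assumes the trailing mapping class is always exactly four characters,
   "[XX]". */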
33881
33882 /* Section attributes. AIX is always PIC. */
33883
33884 static unsigned int
33885 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33886 {
33887 unsigned int align;
33888 unsigned int flags = default_section_type_flags (decl, name, reloc);
33889
33890 /* Align to at least UNIT size. */
33891 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33892 align = MIN_UNITS_PER_WORD;
33893 else
33894 /* Increase alignment of large objects if not already stricter. */
33895 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33896 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33897 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33898
33899 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33900 }
33901
33902 /* Output at beginning of assembler file.
33903
33904 Initialize the section names for the RS/6000 at this point.
33905
33906 Specify filename, including full path, to assembler.
33907
33908 We want to go into the TOC section so at least one .toc will be emitted.
33909 Also, in order to output proper .bs/.es pairs, we need at least one static
33910 [RW] section emitted.
33911
33912 Finally, declare mcount when profiling to make the assembler happy. */
33913
33914 static void
33915 rs6000_xcoff_file_start (void)
33916 {
33917 rs6000_gen_section_name (&xcoff_bss_section_name,
33918 main_input_filename, ".bss_");
33919 rs6000_gen_section_name (&xcoff_private_data_section_name,
33920 main_input_filename, ".rw_");
33921 rs6000_gen_section_name (&xcoff_read_only_section_name,
33922 main_input_filename, ".ro_");
33923 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33924 main_input_filename, ".tls_");
33925 rs6000_gen_section_name (&xcoff_tbss_section_name,
33926 main_input_filename, ".tbss_[UL]");
33927
33928 fputs ("\t.file\t", asm_out_file);
33929 output_quoted_string (asm_out_file, main_input_filename);
33930 fputc ('\n', asm_out_file);
33931 if (write_symbols != NO_DEBUG)
33932 switch_to_section (private_data_section);
33933 switch_to_section (toc_section);
33934 switch_to_section (text_section);
33935 if (profile_flag)
33936 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33937 rs6000_file_start ();
33938 }
33939
33940 /* Output at end of assembler file.
33941 On the RS/6000, referencing data should automatically pull in text. */
33942
33943 static void
33944 rs6000_xcoff_file_end (void)
33945 {
33946 switch_to_section (text_section);
33947 fputs ("_section_.text:\n", asm_out_file);
33948 switch_to_section (data_section);
33949 fputs (TARGET_32BIT
33950 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33951 asm_out_file);
33952 }
33953
33954 struct declare_alias_data
33955 {
33956 FILE *file;
33957 bool function_descriptor;
33958 };
33959
33960 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33961
33962 static bool
33963 rs6000_declare_alias (struct symtab_node *n, void *d)
33964 {
33965 struct declare_alias_data *data = (struct declare_alias_data *)d;
33966 /* Main symbol is output specially, because varasm machinery does part of
33967 the job for us - we do not need to declare .globl/lglobs and such. */
33968 if (!n->alias || n->weakref)
33969 return false;
33970
33971 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33972 return false;
33973
33974 /* Prevent assemble_alias from trying to use .set pseudo operation
33975 that does not behave as expected by the middle-end. */
33976 TREE_ASM_WRITTEN (n->decl) = true;
33977
33978 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33979 char *buffer = (char *) alloca (strlen (name) + 2);
33980 char *p;
33981 int dollar_inside = 0;
33982
33983 strcpy (buffer, name);
33984 p = strchr (buffer, '$');
33985 while (p) {
33986 *p = '_';
33987 dollar_inside++;
33988 p = strchr (p + 1, '$');
33989 }
33990 if (TREE_PUBLIC (n->decl))
33991 {
33992 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33993 {
33994 if (dollar_inside) {
33995 if (data->function_descriptor)
33996 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33997 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33998 }
33999 if (data->function_descriptor)
34000 {
34001 fputs ("\t.globl .", data->file);
34002 RS6000_OUTPUT_BASENAME (data->file, buffer);
34003 putc ('\n', data->file);
34004 }
34005 fputs ("\t.globl ", data->file);
34006 RS6000_OUTPUT_BASENAME (data->file, buffer);
34007 putc ('\n', data->file);
34008 }
34009 #ifdef ASM_WEAKEN_DECL
34010 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34011 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34012 #endif
34013 }
34014 else
34015 {
34016 if (dollar_inside)
34017 {
34018 if (data->function_descriptor)
34019 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34020 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34021 }
34022 if (data->function_descriptor)
34023 {
34024 fputs ("\t.lglobl .", data->file);
34025 RS6000_OUTPUT_BASENAME (data->file, buffer);
34026 putc ('\n', data->file);
34027 }
34028 fputs ("\t.lglobl ", data->file);
34029 RS6000_OUTPUT_BASENAME (data->file, buffer);
34030 putc ('\n', data->file);
34031 }
34032 if (data->function_descriptor)
34033 fputs (".", data->file);
34034 RS6000_OUTPUT_BASENAME (data->file, buffer);
34035 fputs (":\n", data->file);
34036 return false;
34037 }
34038
34039
34040 #ifdef HAVE_GAS_HIDDEN
34041 /* Helper function to calculate visibility of a DECL
34042 and return the value as a const string. */
34043
34044 static const char *
34045 rs6000_xcoff_visibility (tree decl)
34046 {
34047 static const char * const visibility_types[] = {
34048 "", ",protected", ",hidden", ",internal"
34049 };
34050
34051 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34052 return visibility_types[vis];
34053 }
34054 #endif
34055
34056
34057 /* This macro produces the initial definition of a function name.
34058 On the RS/6000, we need to place an extra '.' in the function name and
34059 output the function descriptor.
34060 Dollar signs are converted to underscores.
34061
34062 The csect for the function will have already been created when
34063 text_section was selected. We do have to go back to that csect, however.
34064
34065 The third and fourth parameters to the .function pseudo-op (16 and 044)
34066 are placeholders which no longer have any use.
34067
34068 Because AIX assembler's .set command has unexpected semantics, we output
34069 all aliases as alternative labels in front of the definition. */
34070
34071 void
34072 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34073 {
34074 char *buffer = (char *) alloca (strlen (name) + 1);
34075 char *p;
34076 int dollar_inside = 0;
34077 struct declare_alias_data data = {file, false};
34078
34079 strcpy (buffer, name);
34080 p = strchr (buffer, '$');
34081 while (p) {
34082 *p = '_';
34083 dollar_inside++;
34084 p = strchr (p + 1, '$');
34085 }
34086 if (TREE_PUBLIC (decl))
34087 {
34088 if (!RS6000_WEAK || !DECL_WEAK (decl))
34089 {
34090 if (dollar_inside) {
34091 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34092 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34093 }
34094 fputs ("\t.globl .", file);
34095 RS6000_OUTPUT_BASENAME (file, buffer);
34096 #ifdef HAVE_GAS_HIDDEN
34097 fputs (rs6000_xcoff_visibility (decl), file);
34098 #endif
34099 putc ('\n', file);
34100 }
34101 }
34102 else
34103 {
34104 if (dollar_inside) {
34105 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34106 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34107 }
34108 fputs ("\t.lglobl .", file);
34109 RS6000_OUTPUT_BASENAME (file, buffer);
34110 putc ('\n', file);
34111 }
34112 fputs ("\t.csect ", file);
34113 RS6000_OUTPUT_BASENAME (file, buffer);
34114 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34115 RS6000_OUTPUT_BASENAME (file, buffer);
34116 fputs (":\n", file);
34117 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34118 &data, true);
34119 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34120 RS6000_OUTPUT_BASENAME (file, buffer);
34121 fputs (", TOC[tc0], 0\n", file);
34122 in_section = NULL;
34123 switch_to_section (function_section (decl));
34124 putc ('.', file);
34125 RS6000_OUTPUT_BASENAME (file, buffer);
34126 fputs (":\n", file);
34127 data.function_descriptor = true;
34128 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34129 &data, true);
34130 if (!DECL_IGNORED_P (decl))
34131 {
34132 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34133 xcoffout_declare_function (file, decl, buffer);
34134 else if (write_symbols == DWARF2_DEBUG)
34135 {
34136 name = (*targetm.strip_name_encoding) (name);
34137 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34138 }
34139 }
34140 return;
34141 }
34142
34143
34144 /* Output assembly language to globalize a symbol from a DECL,
34145 possibly with visibility. */
34146
34147 void
34148 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34149 {
34150 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34151 fputs (GLOBAL_ASM_OP, stream);
34152 RS6000_OUTPUT_BASENAME (stream, name);
34153 #ifdef HAVE_GAS_HIDDEN
34154 fputs (rs6000_xcoff_visibility (decl), stream);
34155 #endif
34156 putc ('\n', stream);
34157 }
34158
34159 /* Output assembly language to define a symbol as COMMON from a DECL,
34160 possibly with visibility. */
34161
34162 void
34163 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34164 tree decl ATTRIBUTE_UNUSED,
34165 const char *name,
34166 unsigned HOST_WIDE_INT size,
34167 unsigned HOST_WIDE_INT align)
34168 {
34169 unsigned HOST_WIDE_INT align2 = 2;
34170
34171 if (align > 32)
34172 align2 = floor_log2 (align / BITS_PER_UNIT);
34173 else if (size > 4)
34174 align2 = 3;
34175
34176 fputs (COMMON_ASM_OP, stream);
34177 RS6000_OUTPUT_BASENAME (stream, name);
34178
34179 fprintf (stream,
34180 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34181 size, align2);
34182
34183 #ifdef HAVE_GAS_HIDDEN
34184 if (decl != NULL)
34185 fputs (rs6000_xcoff_visibility (decl), stream);
34186 #endif
34187 putc ('\n', stream);
34188 }
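
/* For example, a 16-byte object with 128-bit alignment gets
   align2 = floor_log2 (128 / 8) = 4, producing roughly

	.comm name,16,4

   plus any visibility suffix. */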
34189
34190 /* This macro produces the initial definition of an object (variable) name.
34191 Because AIX assembler's .set command has unexpected semantics, we output
34192 all aliases as alternative labels in front of the definition. */
34193
34194 void
34195 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34196 {
34197 struct declare_alias_data data = {file, false};
34198 RS6000_OUTPUT_BASENAME (file, name);
34199 fputs (":\n", file);
34200 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34201 &data, true);
34202 }
34203
34204 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34205
34206 void
34207 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34208 {
34209 fputs (integer_asm_op (size, FALSE), file);
34210 assemble_name (file, label);
34211 fputs ("-$", file);
34212 }
34213
34214 /* Output a symbol offset relative to the dbase for the current object.
34215 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34216 signed offsets.
34217
34218 __gcc_unwind_dbase is embedded in all executables/libraries through
34219 libgcc/config/rs6000/crtdbase.S. */
34220
34221 void
34222 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34223 {
34224 fputs (integer_asm_op (size, FALSE), file);
34225 assemble_name (file, label);
34226 fputs("-__gcc_unwind_dbase", file);
34227 }
34228
34229 #ifdef HAVE_AS_TLS
34230 static void
34231 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34232 {
34233 rtx symbol;
34234 int flags;
34235 const char *symname;
34236
34237 default_encode_section_info (decl, rtl, first);
34238
34239 /* Careful not to prod global register variables. */
34240 if (!MEM_P (rtl))
34241 return;
34242 symbol = XEXP (rtl, 0);
34243 if (GET_CODE (symbol) != SYMBOL_REF)
34244 return;
34245
34246 flags = SYMBOL_REF_FLAGS (symbol);
34247
34248 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34249 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34250
34251 SYMBOL_REF_FLAGS (symbol) = flags;
34252
34253 /* Append mapping class to extern decls. */
34254 symname = XSTR (symbol, 0);
34255 if (decl /* sync condition with assemble_external () */
34256 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34257 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34258 || TREE_CODE (decl) == FUNCTION_DECL)
34259 && symname[strlen (symname) - 1] != ']')
34260 {
34261 char *newname = (char *) alloca (strlen (symname) + 5);
34262 strcpy (newname, symname);
34263 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34264 ? "[DS]" : "[UA]"));
34265 XSTR (symbol, 0) = ggc_strdup (newname);
34266 }
34267 }
34268 #endif /* HAVE_AS_TLS */
34269 #endif /* TARGET_XCOFF */
34270
34271 void
34272 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34273 const char *name, const char *val)
34274 {
34275 fputs ("\t.weak\t", stream);
34276 RS6000_OUTPUT_BASENAME (stream, name);
34277 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34278 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34279 {
34280 if (TARGET_XCOFF)
34281 fputs ("[DS]", stream);
34282 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34283 if (TARGET_XCOFF)
34284 fputs (rs6000_xcoff_visibility (decl), stream);
34285 #endif
34286 fputs ("\n\t.weak\t.", stream);
34287 RS6000_OUTPUT_BASENAME (stream, name);
34288 }
34289 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34290 if (TARGET_XCOFF)
34291 fputs (rs6000_xcoff_visibility (decl), stream);
34292 #endif
34293 fputc ('\n', stream);
34294 if (val)
34295 {
34296 #ifdef ASM_OUTPUT_DEF
34297 ASM_OUTPUT_DEF (stream, name, val);
34298 #endif
34299 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34300 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34301 {
34302 fputs ("\t.set\t.", stream);
34303 RS6000_OUTPUT_BASENAME (stream, name);
34304 fputs (",.", stream);
34305 RS6000_OUTPUT_BASENAME (stream, val);
34306 fputc ('\n', stream);
34307 }
34308 }
34309 }
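
/* Sketch of the DOT_SYMBOLS path above (names hypothetical): weakening
   function "foo" with VAL "bar" on AIX emits

	.weak foo[DS]
	.weak .foo

   followed by the ASM_OUTPUT_DEF definition of "foo" and finally
   ".set .foo,.bar" for the code symbol. */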
34310
34311
34312 /* Return true if INSN should not be copied. */
34313
34314 static bool
34315 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34316 {
34317 return recog_memoized (insn) >= 0
34318 && get_attr_cannot_copy (insn);
34319 }
34320
34321 /* Compute a (partial) cost for rtx X. Return true if the complete
34322 cost has been computed, and false if subexpressions should be
34323 scanned. In either case, *TOTAL contains the cost result. */
34324
34325 static bool
34326 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34327 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34328 {
34329 int code = GET_CODE (x);
34330
34331 switch (code)
34332 {
34333 /* On the RS/6000, if it is valid in the insn, it is free. */
34334 case CONST_INT:
34335 if (((outer_code == SET
34336 || outer_code == PLUS
34337 || outer_code == MINUS)
34338 && (satisfies_constraint_I (x)
34339 || satisfies_constraint_L (x)))
34340 || (outer_code == AND
34341 && (satisfies_constraint_K (x)
34342 || (mode == SImode
34343 ? satisfies_constraint_L (x)
34344 : satisfies_constraint_J (x))))
34345 || ((outer_code == IOR || outer_code == XOR)
34346 && (satisfies_constraint_K (x)
34347 || (mode == SImode
34348 ? satisfies_constraint_L (x)
34349 : satisfies_constraint_J (x))))
34350 || outer_code == ASHIFT
34351 || outer_code == ASHIFTRT
34352 || outer_code == LSHIFTRT
34353 || outer_code == ROTATE
34354 || outer_code == ROTATERT
34355 || outer_code == ZERO_EXTRACT
34356 || (outer_code == MULT
34357 && satisfies_constraint_I (x))
34358 || ((outer_code == DIV || outer_code == UDIV
34359 || outer_code == MOD || outer_code == UMOD)
34360 && exact_log2 (INTVAL (x)) >= 0)
34361 || (outer_code == COMPARE
34362 && (satisfies_constraint_I (x)
34363 || satisfies_constraint_K (x)))
34364 || ((outer_code == EQ || outer_code == NE)
34365 && (satisfies_constraint_I (x)
34366 || satisfies_constraint_K (x)
34367 || (mode == SImode
34368 ? satisfies_constraint_L (x)
34369 : satisfies_constraint_J (x))))
34370 || (outer_code == GTU
34371 && satisfies_constraint_I (x))
34372 || (outer_code == LTU
34373 && satisfies_constraint_P (x)))
34374 {
34375 *total = 0;
34376 return true;
34377 }
34378 else if ((outer_code == PLUS
34379 && reg_or_add_cint_operand (x, VOIDmode))
34380 || (outer_code == MINUS
34381 && reg_or_sub_cint_operand (x, VOIDmode))
34382 || ((outer_code == SET
34383 || outer_code == IOR
34384 || outer_code == XOR)
34385 && (INTVAL (x)
34386 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34387 {
34388 *total = COSTS_N_INSNS (1);
34389 return true;
34390 }
34391 /* FALLTHRU */
34392
34393 case CONST_DOUBLE:
34394 case CONST_WIDE_INT:
34395 case CONST:
34396 case HIGH:
34397 case SYMBOL_REF:
34398 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34399 return true;
34400
34401 case MEM:
34402 /* When optimizing for size, MEM should be slightly more expensive
34403 than generating the address, e.g., (plus (reg) (const)).
34404 L1 cache latency is about two instructions. */
34405 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34406 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34407 *total += COSTS_N_INSNS (100);
34408 return true;
34409
34410 case LABEL_REF:
34411 *total = 0;
34412 return true;
34413
34414 case PLUS:
34415 case MINUS:
34416 if (FLOAT_MODE_P (mode))
34417 *total = rs6000_cost->fp;
34418 else
34419 *total = COSTS_N_INSNS (1);
34420 return false;
34421
34422 case MULT:
34423 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34424 && satisfies_constraint_I (XEXP (x, 1)))
34425 {
34426 if (INTVAL (XEXP (x, 1)) >= -256
34427 && INTVAL (XEXP (x, 1)) <= 255)
34428 *total = rs6000_cost->mulsi_const9;
34429 else
34430 *total = rs6000_cost->mulsi_const;
34431 }
34432 else if (mode == SFmode)
34433 *total = rs6000_cost->fp;
34434 else if (FLOAT_MODE_P (mode))
34435 *total = rs6000_cost->dmul;
34436 else if (mode == DImode)
34437 *total = rs6000_cost->muldi;
34438 else
34439 *total = rs6000_cost->mulsi;
34440 return false;
34441
34442 case FMA:
34443 if (mode == SFmode)
34444 *total = rs6000_cost->fp;
34445 else
34446 *total = rs6000_cost->dmul;
34447 break;
34448
34449 case DIV:
34450 case MOD:
34451 if (FLOAT_MODE_P (mode))
34452 {
34453 *total = mode == DFmode ? rs6000_cost->ddiv
34454 : rs6000_cost->sdiv;
34455 return false;
34456 }
34457 /* FALLTHRU */
34458
34459 case UDIV:
34460 case UMOD:
34461 if (GET_CODE (XEXP (x, 1)) == CONST_INT
34462 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34463 {
34464 if (code == DIV || code == MOD)
34465 /* Shift, addze */
34466 *total = COSTS_N_INSNS (2);
34467 else
34468 /* Shift */
34469 *total = COSTS_N_INSNS (1);
34470 }
34471 else
34472 {
34473 if (GET_MODE (XEXP (x, 1)) == DImode)
34474 *total = rs6000_cost->divdi;
34475 else
34476 *total = rs6000_cost->divsi;
34477 }
34478 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34479 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34480 *total += COSTS_N_INSNS (2);
34481 return false;
34482
34483 case CTZ:
34484 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34485 return false;
34486
34487 case FFS:
34488 *total = COSTS_N_INSNS (4);
34489 return false;
34490
34491 case POPCOUNT:
34492 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34493 return false;
34494
34495 case PARITY:
34496 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34497 return false;
34498
34499 case NOT:
34500 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34501 *total = 0;
34502 else
34503 *total = COSTS_N_INSNS (1);
34504 return false;
34505
34506 case AND:
34507 if (CONST_INT_P (XEXP (x, 1)))
34508 {
34509 rtx left = XEXP (x, 0);
34510 rtx_code left_code = GET_CODE (left);
34511
34512 /* rotate-and-mask: 1 insn. */
34513 if ((left_code == ROTATE
34514 || left_code == ASHIFT
34515 || left_code == LSHIFTRT)
34516 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34517 {
34518 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34519 if (!CONST_INT_P (XEXP (left, 1)))
34520 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34521 *total += COSTS_N_INSNS (1);
34522 return true;
34523 }
34524
34525 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34526 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34527 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34528 || (val & 0xffff) == val
34529 || (val & 0xffff0000) == val
34530 || ((val & 0xffff) == 0 && mode == SImode))
34531 {
34532 *total = rtx_cost (left, mode, AND, 0, speed);
34533 *total += COSTS_N_INSNS (1);
34534 return true;
34535 }
34536
34537 /* 2 insns. */
34538 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34539 {
34540 *total = rtx_cost (left, mode, AND, 0, speed);
34541 *total += COSTS_N_INSNS (2);
34542 return true;
34543 }
34544 }
34545
34546 *total = COSTS_N_INSNS (1);
34547 return false;
34548
34549 case IOR:
34550 /* FIXME */
34551 *total = COSTS_N_INSNS (1);
34552 return true;
34553
34554 case CLZ:
34555 case XOR:
34556 case ZERO_EXTRACT:
34557 *total = COSTS_N_INSNS (1);
34558 return false;
34559
34560 case ASHIFT:
34561 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34562 the sign extend and shift separately within the insn. */
34563 if (TARGET_EXTSWSLI && mode == DImode
34564 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34565 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34566 {
34567 *total = 0;
34568 return false;
34569 }
34570 /* fall through */
34571
34572 case ASHIFTRT:
34573 case LSHIFTRT:
34574 case ROTATE:
34575 case ROTATERT:
34576 /* Handle mul_highpart. */
34577 if (outer_code == TRUNCATE
34578 && GET_CODE (XEXP (x, 0)) == MULT)
34579 {
34580 if (mode == DImode)
34581 *total = rs6000_cost->muldi;
34582 else
34583 *total = rs6000_cost->mulsi;
34584 return true;
34585 }
34586 else if (outer_code == AND)
34587 *total = 0;
34588 else
34589 *total = COSTS_N_INSNS (1);
34590 return false;
34591
34592 case SIGN_EXTEND:
34593 case ZERO_EXTEND:
34594 if (GET_CODE (XEXP (x, 0)) == MEM)
34595 *total = 0;
34596 else
34597 *total = COSTS_N_INSNS (1);
34598 return false;
34599
34600 case COMPARE:
34601 case NEG:
34602 case ABS:
34603 if (!FLOAT_MODE_P (mode))
34604 {
34605 *total = COSTS_N_INSNS (1);
34606 return false;
34607 }
34608 /* FALLTHRU */
34609
34610 case FLOAT:
34611 case UNSIGNED_FLOAT:
34612 case FIX:
34613 case UNSIGNED_FIX:
34614 case FLOAT_TRUNCATE:
34615 *total = rs6000_cost->fp;
34616 return false;
34617
34618 case FLOAT_EXTEND:
34619 if (mode == DFmode)
34620 *total = rs6000_cost->sfdf_convert;
34621 else
34622 *total = rs6000_cost->fp;
34623 return false;
34624
34625 case UNSPEC:
34626 switch (XINT (x, 1))
34627 {
34628 case UNSPEC_FRSP:
34629 *total = rs6000_cost->fp;
34630 return true;
34631
34632 default:
34633 break;
34634 }
34635 break;
34636
34637 case CALL:
34638 case IF_THEN_ELSE:
34639 if (!speed)
34640 {
34641 *total = COSTS_N_INSNS (1);
34642 return true;
34643 }
34644 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34645 {
34646 *total = rs6000_cost->fp;
34647 return false;
34648 }
34649 break;
34650
34651 case NE:
34652 case EQ:
34653 case GTU:
34654 case LTU:
34655 /* Carry bit requires mode == Pmode.
34656 NEG or PLUS already counted so only add one. */
34657 if (mode == Pmode
34658 && (outer_code == NEG || outer_code == PLUS))
34659 {
34660 *total = COSTS_N_INSNS (1);
34661 return true;
34662 }
34663 /* FALLTHRU */
34664
34665 case GT:
34666 case LT:
34667 case UNORDERED:
34668 if (outer_code == SET)
34669 {
34670 if (XEXP (x, 1) == const0_rtx)
34671 {
34672 *total = COSTS_N_INSNS (2);
34673 return true;
34674 }
34675 else
34676 {
34677 *total = COSTS_N_INSNS (3);
34678 return false;
34679 }
34680 }
34681 /* CC COMPARE. */
34682 if (outer_code == COMPARE)
34683 {
34684 *total = 0;
34685 return true;
34686 }
34687 break;
34688
34689 default:
34690 break;
34691 }
34692
34693 return false;
34694 }
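/* Illustrative example for the CONST_INT handling above: in
   (plus (reg) (const_int 12345)) the constant satisfies constraint I
   (a signed 16-bit immediate), so it is costed at 0 -- it rides along
   in the addi.  A constant that fits none of the free cases but whose
   value fits in 32 bits under SET costs COSTS_N_INSNS (1); anything
   larger falls through to the CONST_DOUBLE/SYMBOL_REF costing.  */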
34695
34696 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34697
34698 static bool
34699 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34700 int opno, int *total, bool speed)
34701 {
34702 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34703
34704 fprintf (stderr,
34705 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34706 "opno = %d, total = %d, speed = %s, x:\n",
34707 ret ? "complete" : "scan inner",
34708 GET_MODE_NAME (mode),
34709 GET_RTX_NAME (outer_code),
34710 opno,
34711 *total,
34712 speed ? "true" : "false");
34713
34714 debug_rtx (x);
34715
34716 return ret;
34717 }
34718
34719 static int
34720 rs6000_insn_cost (rtx_insn *insn, bool speed)
34721 {
34722 if (recog_memoized (insn) < 0)
34723 return 0;
34724
34725 if (!speed)
34726 return get_attr_length (insn);
34727
34728 int cost = get_attr_cost (insn);
34729 if (cost > 0)
34730 return cost;
34731
34732 int n = get_attr_length (insn) / 4;
34733 enum attr_type type = get_attr_type (insn);
34734
34735 switch (type)
34736 {
34737 case TYPE_LOAD:
34738 case TYPE_FPLOAD:
34739 case TYPE_VECLOAD:
34740 cost = COSTS_N_INSNS (n + 1);
34741 break;
34742
34743 case TYPE_MUL:
34744 switch (get_attr_size (insn))
34745 {
34746 case SIZE_8:
34747 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34748 break;
34749 case SIZE_16:
34750 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34751 break;
34752 case SIZE_32:
34753 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34754 break;
34755 case SIZE_64:
34756 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34757 break;
34758 default:
34759 gcc_unreachable ();
34760 }
34761 break;
34762 case TYPE_DIV:
34763 switch (get_attr_size (insn))
34764 {
34765 case SIZE_32:
34766 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34767 break;
34768 case SIZE_64:
34769 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34770 break;
34771 default:
34772 gcc_unreachable ();
34773 }
34774 break;
34775
34776 case TYPE_FP:
34777 cost = n * rs6000_cost->fp;
34778 break;
34779 case TYPE_DMUL:
34780 cost = n * rs6000_cost->dmul;
34781 break;
34782 case TYPE_SDIV:
34783 cost = n * rs6000_cost->sdiv;
34784 break;
34785 case TYPE_DDIV:
34786 cost = n * rs6000_cost->ddiv;
34787 break;
34788
34789 case TYPE_SYNC:
34790 case TYPE_LOAD_L:
34791 case TYPE_MFCR:
34792 case TYPE_MFCRF:
34793 cost = COSTS_N_INSNS (n + 2);
34794 break;
34795
34796 default:
34797 cost = COSTS_N_INSNS (n);
34798 }
34799
34800 return cost;
34801 }
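/* A worked example of the fallback costing above: in speed mode, a
   load insn of length 8 (n = 2) and type TYPE_LOAD is costed
   COSTS_N_INSNS (3), i.e. its two instructions plus one extra unit
   for load latency, while a two-instruction insn of an unlisted type
   gets the default COSTS_N_INSNS (2).  In size mode the raw byte
   length is returned instead.  */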
34802
34803 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34804
34805 static int
34806 rs6000_debug_address_cost (rtx x, machine_mode mode,
34807 addr_space_t as, bool speed)
34808 {
34809 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34810
34811 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34812 ret, speed ? "true" : "false");
34813 debug_rtx (x);
34814
34815 return ret;
34816 }
34817
34818
34819 /* A C expression returning the cost of moving data from a register of class
34820 CLASS1 to one of CLASS2. */
34821
34822 static int
34823 rs6000_register_move_cost (machine_mode mode,
34824 reg_class_t from, reg_class_t to)
34825 {
34826 int ret;
34827
34828 if (TARGET_DEBUG_COST)
34829 dbg_cost_ctrl++;
34830
34831 /* Moves from/to GENERAL_REGS. */
34832 if (reg_classes_intersect_p (to, GENERAL_REGS)
34833 || reg_classes_intersect_p (from, GENERAL_REGS))
34834 {
34835 reg_class_t rclass = from;
34836
34837 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34838 rclass = to;
34839
34840 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34841 ret = (rs6000_memory_move_cost (mode, rclass, false)
34842 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34843
34844 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34845 shift. */
34846 else if (rclass == CR_REGS)
34847 ret = 4;
34848
34849 /* For those processors that have slow LR/CTR moves, make them more
34850 expensive than memory in order to bias spills to memory.  */
34851 else if ((rs6000_tune == PROCESSOR_POWER6
34852 || rs6000_tune == PROCESSOR_POWER7
34853 || rs6000_tune == PROCESSOR_POWER8
34854 || rs6000_tune == PROCESSOR_POWER9)
34855 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34856 ret = 6 * hard_regno_nregs (0, mode);
34857
34858 else
34859 /* A move will cost one instruction per GPR moved. */
34860 ret = 2 * hard_regno_nregs (0, mode);
34861 }
34862
34863 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34864 else if (VECTOR_MEM_VSX_P (mode)
34865 && reg_classes_intersect_p (to, VSX_REGS)
34866 && reg_classes_intersect_p (from, VSX_REGS))
34867 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34868
34869 /* Moving between two similar registers is just one instruction. */
34870 else if (reg_classes_intersect_p (to, from))
34871 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34872
34873 /* Everything else has to go through GENERAL_REGS. */
34874 else
34875 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34876 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34877
34878 if (TARGET_DEBUG_COST)
34879 {
34880 if (dbg_cost_ctrl == 1)
34881 fprintf (stderr,
34882 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34883 ret, GET_MODE_NAME (mode), reg_class_names[from],
34884 reg_class_names[to]);
34885 dbg_cost_ctrl--;
34886 }
34887
34888 return ret;
34889 }
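/* Two illustrative data points for the costs above: moving DFmode
   between FLOAT_REGS and GENERAL_REGS is priced as a full memory
   round trip (a store from one class plus a load into the other),
   since no cheap direct path is modeled here, whereas a DFmode move
   within GENERAL_REGS on a 32-bit target costs 2 * 2 = 4, one
   instruction per GPR moved.  */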
34890
34891 /* A C expression returning the cost of moving data of MODE from a register to
34892 or from memory. */
34893
34894 static int
34895 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34896 bool in ATTRIBUTE_UNUSED)
34897 {
34898 int ret;
34899
34900 if (TARGET_DEBUG_COST)
34901 dbg_cost_ctrl++;
34902
34903 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34904 ret = 4 * hard_regno_nregs (0, mode);
34905 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34906 || reg_classes_intersect_p (rclass, VSX_REGS)))
34907 ret = 4 * hard_regno_nregs (32, mode);
34908 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34909 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34910 else
34911 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34912
34913 if (TARGET_DEBUG_COST)
34914 {
34915 if (dbg_cost_ctrl == 1)
34916 fprintf (stderr,
34917 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34918 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34919 dbg_cost_ctrl--;
34920 }
34921
34922 return ret;
34923 }
34924
34925 /* Returns a code for a target-specific builtin that implements
34926 reciprocal of the function, or NULL_TREE if not available. */
34927
34928 static tree
34929 rs6000_builtin_reciprocal (tree fndecl)
34930 {
34931 switch (DECL_FUNCTION_CODE (fndecl))
34932 {
34933 case VSX_BUILTIN_XVSQRTDP:
34934 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34935 return NULL_TREE;
34936
34937 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34938
34939 case VSX_BUILTIN_XVSQRTSP:
34940 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34941 return NULL_TREE;
34942
34943 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34944
34945 default:
34946 return NULL_TREE;
34947 }
34948 }
34949
34950 /* Load up a constant. If the mode is a vector mode, splat the value across
34951 all of the vector elements. */
34952
34953 static rtx
34954 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34955 {
34956 rtx reg;
34957
34958 if (mode == SFmode || mode == DFmode)
34959 {
34960 rtx d = const_double_from_real_value (dconst, mode);
34961 reg = force_reg (mode, d);
34962 }
34963 else if (mode == V4SFmode)
34964 {
34965 rtx d = const_double_from_real_value (dconst, SFmode);
34966 rtvec v = gen_rtvec (4, d, d, d, d);
34967 reg = gen_reg_rtx (mode);
34968 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34969 }
34970 else if (mode == V2DFmode)
34971 {
34972 rtx d = const_double_from_real_value (dconst, DFmode);
34973 rtvec v = gen_rtvec (2, d, d);
34974 reg = gen_reg_rtx (mode);
34975 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34976 }
34977 else
34978 gcc_unreachable ();
34979
34980 return reg;
34981 }
34982
34983 /* Generate an FMA instruction. */
34984
34985 static void
34986 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34987 {
34988 machine_mode mode = GET_MODE (target);
34989 rtx dst;
34990
34991 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34992 gcc_assert (dst != NULL);
34993
34994 if (dst != target)
34995 emit_move_insn (target, dst);
34996 }
34997
34998 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34999
35000 static void
35001 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35002 {
35003 machine_mode mode = GET_MODE (dst);
35004 rtx r;
35005
35006 /* This is a tad more complicated, since the fnma_optab is for
35007 a different expression: fma(-m1, m2, a), which is the same
35008 thing except in the case of signed zeros.
35009
35010 Fortunately we know that if FMA is supported that FNMSUB is
35011 also supported in the ISA. Just expand it directly. */
35012
35013 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35014
35015 r = gen_rtx_NEG (mode, a);
35016 r = gen_rtx_FMA (mode, m1, m2, r);
35017 r = gen_rtx_NEG (mode, r);
35018 emit_insn (gen_rtx_SET (dst, r));
35019 }
35020
35021 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35022 add a reg_note saying that this was a division. Support both scalar and
35023 vector divide. Assumes no trapping math and finite arguments. */
35024
35025 void
35026 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35027 {
35028 machine_mode mode = GET_MODE (dst);
35029 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35030 int i;
35031
35032 /* Low precision estimates guarantee 5 bits of accuracy. High
35033 precision estimates guarantee 14 bits of accuracy. SFmode
35034 requires 23 bits of accuracy. DFmode requires 52 bits of
35035 accuracy. Each pass at least doubles the accuracy, leading
35036 to the following. */
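/* Concretely: without TARGET_RECIP_PRECISION the estimate refines as
   5 -> 10 -> 20 -> 40 bits, so 3 passes cover SFmode (23 bits) and 4
   passes cover DFmode (52 bits); with TARGET_RECIP_PRECISION,
   14 -> 28 covers SFmode in 1 pass and 14 -> 28 -> 56 covers DFmode
   in 2.  */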
35037 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35038 if (mode == DFmode || mode == V2DFmode)
35039 passes++;
35040
35041 enum insn_code code = optab_handler (smul_optab, mode);
35042 insn_gen_fn gen_mul = GEN_FCN (code);
35043
35044 gcc_assert (code != CODE_FOR_nothing);
35045
35046 one = rs6000_load_constant_and_splat (mode, dconst1);
35047
35048 /* x0 = 1./d estimate */
35049 x0 = gen_reg_rtx (mode);
35050 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35051 UNSPEC_FRES)));
35052
35053 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35054 if (passes > 1)
35055 {
35056 /* e0 = 1. - d * x0 */
35057 e0 = gen_reg_rtx (mode);
35058 rs6000_emit_nmsub (e0, d, x0, one);
35059 
35060 /* x1 = x0 + e0 * x0 */
35061 x1 = gen_reg_rtx (mode);
35062 rs6000_emit_madd (x1, e0, x0, x0);
35063 
35064 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35065 ++i, xprev = xnext, eprev = enext)
35066 {
35067 /* enext = eprev * eprev */
35068 enext = gen_reg_rtx (mode);
35069 emit_insn (gen_mul (enext, eprev, eprev));
35070 
35071 /* xnext = xprev + enext * xprev */
35072 xnext = gen_reg_rtx (mode);
35073 rs6000_emit_madd (xnext, enext, xprev, xprev);
35074 }
35075 }
35076 else
35077 xprev = x0;
35078
35079 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
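/* In other words: u = n*x, v = n - d*u, so dst = v*x + u
   = n*x*(2 - d*x), folding the multiplication by N into the final
   Newton step for free.  */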
35080
35081 /* u = n * xprev */
35082 u = gen_reg_rtx (mode);
35083 emit_insn (gen_mul (u, n, xprev));
35084
35085 /* v = n - (d * u) */
35086 v = gen_reg_rtx (mode);
35087 rs6000_emit_nmsub (v, d, u, n);
35088
35089 /* dst = (v * xprev) + u */
35090 rs6000_emit_madd (dst, v, xprev, u);
35091
35092 if (note_p)
35093 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35094 }
35095
35096 /* Goldschmidt's Algorithm for single/double-precision floating point
35097 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35098
35099 void
35100 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35101 {
35102 machine_mode mode = GET_MODE (src);
35103 rtx e = gen_reg_rtx (mode);
35104 rtx g = gen_reg_rtx (mode);
35105 rtx h = gen_reg_rtx (mode);
35106
35107 /* Low precision estimates guarantee 5 bits of accuracy. High
35108 precision estimates guarantee 14 bits of accuracy. SFmode
35109 requires 23 bits of accuracy. DFmode requires 52 bits of
35110 accuracy. Each pass at least doubles the accuracy, leading
35111 to the following. */
35112 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35113 if (mode == DFmode || mode == V2DFmode)
35114 passes++;
35115
35116 int i;
35117 rtx mhalf;
35118 enum insn_code code = optab_handler (smul_optab, mode);
35119 insn_gen_fn gen_mul = GEN_FCN (code);
35120
35121 gcc_assert (code != CODE_FOR_nothing);
35122
35123 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35124
35125 /* e = rsqrt estimate */
35126 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35127 UNSPEC_RSQRT)));
35128
35129 /* If (src == 0.0), the rsqrt estimate is infinity; replace it with zero so that computing sqrt(0.0) does not produce 0 * inf = NaN. */
35130 if (!recip)
35131 {
35132 rtx zero = force_reg (mode, CONST0_RTX (mode));
35133
35134 if (mode == SFmode)
35135 {
35136 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35137 e, zero, mode, 0);
35138 if (target != e)
35139 emit_move_insn (e, target);
35140 }
35141 else
35142 {
35143 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35144 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35145 }
35146 }
35147
35148 /* g = sqrt estimate. */
35149 emit_insn (gen_mul (g, e, src));
35150 /* h = 1/(2*sqrt) estimate. */
35151 emit_insn (gen_mul (h, e, mhalf));
35152
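/* Goldschmidt's iteration maintains g -> sqrt(src) and
   h -> 1/(2*sqrt(src)), so g*h -> 1/2.  Each pass below computes the
   residual t = 1/2 - g*h and refines g' = g + g*t, h' = h + h*t,
   roughly squaring the relative error; the result is then g for sqrt
   or 2*h for rsqrt.  */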
35153 if (recip)
35154 {
35155 if (passes == 1)
35156 {
35157 rtx t = gen_reg_rtx (mode);
35158 rs6000_emit_nmsub (t, g, h, mhalf);
35159 /* Apply the correction directly to the rsqrt estimate E. */
35160 rs6000_emit_madd (dst, e, t, e);
35161 }
35162 else
35163 {
35164 for (i = 0; i < passes; i++)
35165 {
35166 rtx t1 = gen_reg_rtx (mode);
35167 rtx g1 = gen_reg_rtx (mode);
35168 rtx h1 = gen_reg_rtx (mode);
35169
35170 rs6000_emit_nmsub (t1, g, h, mhalf);
35171 rs6000_emit_madd (g1, g, t1, g);
35172 rs6000_emit_madd (h1, h, t1, h);
35173
35174 g = g1;
35175 h = h1;
35176 }
35177 /* Multiply by 2: h converges to 1/(2*sqrt), so 2*h is the rsqrt. */
35178 emit_insn (gen_add3_insn (dst, h, h));
35179 }
35180 }
35181 else
35182 {
35183 rtx t = gen_reg_rtx (mode);
35184 rs6000_emit_nmsub (t, g, h, mhalf);
35185 rs6000_emit_madd (dst, g, t, g);
35186 }
35187
35188 return;
35189 }
35190
35191 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35192 (Power7) targets. DST is the target, and SRC is the argument operand. */
35193
35194 void
35195 rs6000_emit_popcount (rtx dst, rtx src)
35196 {
35197 machine_mode mode = GET_MODE (dst);
35198 rtx tmp1, tmp2;
35199
35200 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35201 if (TARGET_POPCNTD)
35202 {
35203 if (mode == SImode)
35204 emit_insn (gen_popcntdsi2 (dst, src));
35205 else
35206 emit_insn (gen_popcntddi2 (dst, src));
35207 return;
35208 }
35209
35210 tmp1 = gen_reg_rtx (mode);
35211
35212 if (mode == SImode)
35213 {
35214 emit_insn (gen_popcntbsi2 (tmp1, src));
35215 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35216 NULL_RTX, 0);
35217 tmp2 = force_reg (SImode, tmp2);
35218 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35219 }
35220 else
35221 {
35222 emit_insn (gen_popcntbdi2 (tmp1, src));
35223 tmp2 = expand_mult (DImode, tmp1,
35224 GEN_INT ((HOST_WIDE_INT)
35225 0x01010101 << 32 | 0x01010101),
35226 NULL_RTX, 0);
35227 tmp2 = force_reg (DImode, tmp2);
35228 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35229 }
35230 }
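/* A worked example of the popcntb fallback (SImode): popcntb yields a
   per-byte population count, so src = 0x01020304 gives
   tmp1 = 0x01010201.  Multiplying by 0x01010101 accumulates the sum
   of all byte counts into the most significant byte (counts are at
   most 8 per byte, so no carries overflow), and the shift right by 24
   extracts it: here 1 + 1 + 2 + 1 = 5.  */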
35231
35232
35233 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35234 target, and SRC is the argument operand. */
35235
35236 void
35237 rs6000_emit_parity (rtx dst, rtx src)
35238 {
35239 machine_mode mode = GET_MODE (dst);
35240 rtx tmp;
35241
35242 tmp = gen_reg_rtx (mode);
35243
35244 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35245 if (TARGET_CMPB)
35246 {
35247 if (mode == SImode)
35248 {
35249 emit_insn (gen_popcntbsi2 (tmp, src));
35250 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35251 }
35252 else
35253 {
35254 emit_insn (gen_popcntbdi2 (tmp, src));
35255 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35256 }
35257 return;
35258 }
35259
35260 if (mode == SImode)
35261 {
35262 /* Is mult+shift >= shift+xor+shift+xor? */
35263 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35264 {
35265 rtx tmp1, tmp2, tmp3, tmp4;
35266
35267 tmp1 = gen_reg_rtx (SImode);
35268 emit_insn (gen_popcntbsi2 (tmp1, src));
35269
35270 tmp2 = gen_reg_rtx (SImode);
35271 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35272 tmp3 = gen_reg_rtx (SImode);
35273 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35274
35275 tmp4 = gen_reg_rtx (SImode);
35276 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35277 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35278 }
35279 else
35280 rs6000_emit_popcount (tmp, src);
35281 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35282 }
35283 else
35284 {
35285 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35286 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35287 {
35288 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35289
35290 tmp1 = gen_reg_rtx (DImode);
35291 emit_insn (gen_popcntbdi2 (tmp1, src));
35292
35293 tmp2 = gen_reg_rtx (DImode);
35294 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35295 tmp3 = gen_reg_rtx (DImode);
35296 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35297
35298 tmp4 = gen_reg_rtx (DImode);
35299 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35300 tmp5 = gen_reg_rtx (DImode);
35301 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35302
35303 tmp6 = gen_reg_rtx (DImode);
35304 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35305 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35306 }
35307 else
35308 rs6000_emit_popcount (tmp, src);
35309 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35310 }
35311 }
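/* In the shift/xor fallback above, XORing the high half of the byte
   counts onto the low half preserves overall parity, so folding
   32 -> 16 -> 8 bits (or 64 -> 32 -> 16 -> 8 for DImode) leaves the
   parity of the whole word in the least significant bit of the low
   byte, which the final AND with 1 extracts.  */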
35312
35313 /* Expand an Altivec constant permutation for little endian mode.
35314 OP0 and OP1 are the input vectors and TARGET is the output vector.
35315 SEL specifies the constant permutation vector.
35316
35317 There are two issues: First, the two input operands must be
35318 swapped so that together they form a double-wide array in LE
35319 order. Second, the vperm instruction has surprising behavior
35320 in LE mode: it interprets the elements of the source vectors
35321 in BE mode ("left to right") and interprets the elements of
35322 the destination vector in LE mode ("right to left"). To
35323 correct for this, we must subtract each element of the permute
35324 control vector from 31.
35325
35326 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35327 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35328 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35329 serve as the permute control vector. Then, in BE mode,
35330
35331 vperm 9,10,11,12
35332
35333 places the desired result in vr9. However, in LE mode the
35334 vector contents will be
35335
35336 vr10 = 00000003 00000002 00000001 00000000
35337 vr11 = 00000007 00000006 00000005 00000004
35338
35339 The result of the vperm using the same permute control vector is
35340
35341 vr9 = 05000000 07000000 01000000 03000000
35342
35343 That is, the leftmost 4 bytes of vr10 are interpreted as the
35344 source for the rightmost 4 bytes of vr9, and so on.
35345
35346 If we change the permute control vector to
35347
35348 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35349
35350 and issue
35351
35352 vperm 9,11,10,12
35353
35354 we get the desired
35355
35356 vr9 = 00000006 00000004 00000002 00000000. */
35357
35358 static void
35359 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35360 const vec_perm_indices &sel)
35361 {
35362 unsigned int i;
35363 rtx perm[16];
35364 rtx constv, unspec;
35365
35366 /* Unpack and adjust the constant selector. */
35367 for (i = 0; i < 16; ++i)
35368 {
35369 unsigned int elt = 31 - (sel[i] & 31);
35370 perm[i] = GEN_INT (elt);
35371 }
35372
35373 /* Expand to a permute, swapping the inputs and using the
35374 adjusted selector. */
35375 if (!REG_P (op0))
35376 op0 = force_reg (V16QImode, op0);
35377 if (!REG_P (op1))
35378 op1 = force_reg (V16QImode, op1);
35379
35380 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35381 constv = force_reg (V16QImode, constv);
35382 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35383 UNSPEC_VPERM);
35384 if (!REG_P (target))
35385 {
35386 rtx tmp = gen_reg_rtx (V16QImode);
35387 emit_move_insn (tmp, unspec);
35388 unspec = tmp;
35389 }
35390
35391 emit_move_insn (target, unspec);
35392 }
35393
35394 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35395 permute control vector. But here it's not a constant, so we must
35396 generate a vector NAND or NOR to do the adjustment: vperm reads only
the low five bits of each selector byte, and on those bits the
complement ~elt is exactly 31 - elt. */
35397
35398 void
35399 altivec_expand_vec_perm_le (rtx operands[4])
35400 {
35401 rtx notx, iorx, unspec;
35402 rtx target = operands[0];
35403 rtx op0 = operands[1];
35404 rtx op1 = operands[2];
35405 rtx sel = operands[3];
35406 rtx tmp = target;
35407 rtx norreg = gen_reg_rtx (V16QImode);
35408 machine_mode mode = GET_MODE (target);
35409
35410 /* Get everything in regs so the pattern matches. */
35411 if (!REG_P (op0))
35412 op0 = force_reg (mode, op0);
35413 if (!REG_P (op1))
35414 op1 = force_reg (mode, op1);
35415 if (!REG_P (sel))
35416 sel = force_reg (V16QImode, sel);
35417 if (!REG_P (target))
35418 tmp = gen_reg_rtx (mode);
35419
35420 if (TARGET_P9_VECTOR)
35421 {
35422 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35423 UNSPEC_VPERMR);
35424 }
35425 else
35426 {
35427 /* Invert the selector with a VNAND if available, else a VNOR.
35428 The VNAND is preferred for future fusion opportunities. */
35429 notx = gen_rtx_NOT (V16QImode, sel);
35430 iorx = (TARGET_P8_VECTOR
35431 ? gen_rtx_IOR (V16QImode, notx, notx)
35432 : gen_rtx_AND (V16QImode, notx, notx));
35433 emit_insn (gen_rtx_SET (norreg, iorx));
35434
35435 /* Permute with operands reversed and adjusted selector. */
35436 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35437 UNSPEC_VPERM);
35438 }
35439
35440 /* Copy into target, possibly by way of a register. */
35441 if (!REG_P (target))
35442 {
35443 emit_move_insn (tmp, unspec);
35444 unspec = tmp;
35445 }
35446
35447 emit_move_insn (target, unspec);
35448 }
35449
35450 /* Expand an Altivec constant permutation. Return true if we match
35451 an efficient implementation; false to fall back to VPERM.
35452
35453 OP0 and OP1 are the input vectors and TARGET is the output vector.
35454 SEL specifies the constant permutation vector. */
35455
35456 static bool
35457 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35458 const vec_perm_indices &sel)
35459 {
35460 struct altivec_perm_insn {
35461 HOST_WIDE_INT mask;
35462 enum insn_code impl;
35463 unsigned char perm[16];
35464 };
35465 static const struct altivec_perm_insn patterns[] = {
35466 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35467 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35468 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35469 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35470 { OPTION_MASK_ALTIVEC,
35471 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35472 : CODE_FOR_altivec_vmrglb_direct),
35473 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35474 { OPTION_MASK_ALTIVEC,
35475 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35476 : CODE_FOR_altivec_vmrglh_direct),
35477 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35478 { OPTION_MASK_ALTIVEC,
35479 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35480 : CODE_FOR_altivec_vmrglw_direct),
35481 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35482 { OPTION_MASK_ALTIVEC,
35483 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35484 : CODE_FOR_altivec_vmrghb_direct),
35485 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35486 { OPTION_MASK_ALTIVEC,
35487 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35488 : CODE_FOR_altivec_vmrghh_direct),
35489 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35490 { OPTION_MASK_ALTIVEC,
35491 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35492 : CODE_FOR_altivec_vmrghw_direct),
35493 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35494 { OPTION_MASK_P8_VECTOR,
35495 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35496 : CODE_FOR_p8_vmrgow_v4sf_direct),
35497 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35498 { OPTION_MASK_P8_VECTOR,
35499 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35500 : CODE_FOR_p8_vmrgew_v4sf_direct),
35501 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35502 };
35503
35504 unsigned int i, j, elt, which;
35505 unsigned char perm[16];
35506 rtx x;
35507 bool one_vec;
35508
35509 /* Unpack the constant selector. */
35510 for (i = which = 0; i < 16; ++i)
35511 {
35512 elt = sel[i] & 31;
35513 which |= (elt < 16 ? 1 : 2);
35514 perm[i] = elt;
35515 }
35516
35517 /* Simplify the constant selector based on operands. */
35518 switch (which)
35519 {
35520 default:
35521 gcc_unreachable ();
35522
35523 case 3:
35524 one_vec = false;
35525 if (!rtx_equal_p (op0, op1))
35526 break;
35527 /* FALLTHRU */
35528
35529 case 2:
35530 for (i = 0; i < 16; ++i)
35531 perm[i] &= 15;
35532 op0 = op1;
35533 one_vec = true;
35534 break;
35535
35536 case 1:
35537 op1 = op0;
35538 one_vec = true;
35539 break;
35540 }
35541
35542 /* Look for splat patterns. */
35543 if (one_vec)
35544 {
35545 elt = perm[0];
35546
35547 for (i = 0; i < 16; ++i)
35548 if (perm[i] != elt)
35549 break;
35550 if (i == 16)
35551 {
35552 if (!BYTES_BIG_ENDIAN)
35553 elt = 15 - elt;
35554 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35555 return true;
35556 }
35557
35558 if (elt % 2 == 0)
35559 {
35560 for (i = 0; i < 16; i += 2)
35561 if (perm[i] != elt || perm[i + 1] != elt + 1)
35562 break;
35563 if (i == 16)
35564 {
35565 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35566 x = gen_reg_rtx (V8HImode);
35567 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35568 GEN_INT (field)));
35569 emit_move_insn (target, gen_lowpart (V16QImode, x));
35570 return true;
35571 }
35572 }
35573
35574 if (elt % 4 == 0)
35575 {
35576 for (i = 0; i < 16; i += 4)
35577 if (perm[i] != elt
35578 || perm[i + 1] != elt + 1
35579 || perm[i + 2] != elt + 2
35580 || perm[i + 3] != elt + 3)
35581 break;
35582 if (i == 16)
35583 {
35584 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35585 x = gen_reg_rtx (V4SImode);
35586 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35587 GEN_INT (field)));
35588 emit_move_insn (target, gen_lowpart (V16QImode, x));
35589 return true;
35590 }
35591 }
35592 }
35593
35594 /* Look for merge and pack patterns. */
35595 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35596 {
35597 bool swapped;
35598
35599 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35600 continue;
35601
35602 elt = patterns[j].perm[0];
35603 if (perm[0] == elt)
35604 swapped = false;
35605 else if (perm[0] == elt + 16)
35606 swapped = true;
35607 else
35608 continue;
35609 for (i = 1; i < 16; ++i)
35610 {
35611 elt = patterns[j].perm[i];
35612 if (swapped)
35613 elt = (elt >= 16 ? elt - 16 : elt + 16);
35614 else if (one_vec && elt >= 16)
35615 elt -= 16;
35616 if (perm[i] != elt)
35617 break;
35618 }
35619 if (i == 16)
35620 {
35621 enum insn_code icode = patterns[j].impl;
35622 machine_mode omode = insn_data[icode].operand[0].mode;
35623 machine_mode imode = insn_data[icode].operand[1].mode;
35624
35625 /* For little-endian, don't use vpkuwum and vpkuhum if the
35626 underlying vector type is not V4SI and V8HI, respectively.
35627 For example, using vpkuwum with a V8HI picks up the even
35628 halfwords (BE numbering) when the even halfwords (LE
35629 numbering) are what we need. */
35630 if (!BYTES_BIG_ENDIAN
35631 && icode == CODE_FOR_altivec_vpkuwum_direct
35632 && ((GET_CODE (op0) == REG
35633 && GET_MODE (op0) != V4SImode)
35634 || (GET_CODE (op0) == SUBREG
35635 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35636 continue;
35637 if (!BYTES_BIG_ENDIAN
35638 && icode == CODE_FOR_altivec_vpkuhum_direct
35639 && ((GET_CODE (op0) == REG
35640 && GET_MODE (op0) != V8HImode)
35641 || (GET_CODE (op0) == SUBREG
35642 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35643 continue;
35644
35645 /* For little-endian, the two input operands must be swapped
35646 (or swapped back) to ensure proper right-to-left numbering
35647 from 0 to 2N-1. */
35648 if (swapped ^ !BYTES_BIG_ENDIAN)
35649 std::swap (op0, op1);
35650 if (imode != V16QImode)
35651 {
35652 op0 = gen_lowpart (imode, op0);
35653 op1 = gen_lowpart (imode, op1);
35654 }
35655 if (omode == V16QImode)
35656 x = target;
35657 else
35658 x = gen_reg_rtx (omode);
35659 emit_insn (GEN_FCN (icode) (x, op0, op1));
35660 if (omode != V16QImode)
35661 emit_move_insn (target, gen_lowpart (V16QImode, x));
35662 return true;
35663 }
35664 }
35665
35666 if (!BYTES_BIG_ENDIAN)
35667 {
35668 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35669 return true;
35670 }
35671
35672 return false;
35673 }
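/* Example: a selector that replicates byte 5 of a single input vector
   (sel[i] == 5 for all i) is caught by the splat checks above and
   emitted as a single vspltb, with the element index flipped to
   15 - 5 = 10 for little endian.  */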
35674
35675 /* Expand a VSX Permute Doubleword constant permutation.
35676 Return true if we match an efficient implementation. */
35677
35678 static bool
35679 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35680 unsigned char perm0, unsigned char perm1)
35681 {
35682 rtx x;
35683
35684 /* If both selectors come from the same operand, fold to single op. */
35685 if ((perm0 & 2) == (perm1 & 2))
35686 {
35687 if (perm0 & 2)
35688 op0 = op1;
35689 else
35690 op1 = op0;
35691 }
35692 /* If both operands are equal, fold to simpler permutation. */
35693 if (rtx_equal_p (op0, op1))
35694 {
35695 perm0 = perm0 & 1;
35696 perm1 = (perm1 & 1) + 2;
35697 }
35698 /* If the first selector comes from the second operand, swap. */
35699 else if (perm0 & 2)
35700 {
35701 if (perm1 & 2)
35702 return false;
35703 perm0 -= 2;
35704 perm1 += 2;
35705 std::swap (op0, op1);
35706 }
35707 /* If the second selector does not come from the second operand, fail. */
35708 else if ((perm1 & 2) == 0)
35709 return false;
35710
35711 /* Success! */
35712 if (target != NULL)
35713 {
35714 machine_mode vmode, dmode;
35715 rtvec v;
35716
35717 vmode = GET_MODE (target);
35718 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35719 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35720 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35721 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35722 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35723 emit_insn (gen_rtx_SET (target, x));
35724 }
35725 return true;
35726 }
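/* Illustrative cases: perm0 = 1, perm1 = 2 selects the second
   doubleword of OP0 and the first doubleword of OP1 and is emitted
   directly; perm0 = 3, perm1 = 1 is first normalized by the swap
   branch above (becoming perm0 = 1, perm1 = 3 with OP0/OP1
   exchanged).  */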
35727
35728 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35729
35730 static bool
35731 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35732 rtx op1, const vec_perm_indices &sel)
35733 {
35734 bool testing_p = !target;
35735
35736 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35737 if (TARGET_ALTIVEC && testing_p)
35738 return true;
35739
35740 /* Check for ps_merge* or xxpermdi insns. */
35741 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35742 {
35743 if (testing_p)
35744 {
35745 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35746 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35747 }
35748 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35749 return true;
35750 }
35751
35752 if (TARGET_ALTIVEC)
35753 {
35754 /* Force the target-independent code to lower to V16QImode. */
35755 if (vmode != V16QImode)
35756 return false;
35757 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35758 return true;
35759 }
35760
35761 return false;
35762 }
35763
35764 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35765 OP0 and OP1 are the input vectors and TARGET is the output vector.
35766 PERM specifies the constant permutation vector. */
35767
35768 static void
35769 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35770 machine_mode vmode, const vec_perm_builder &perm)
35771 {
35772 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35773 if (x != target)
35774 emit_move_insn (target, x);
35775 }
35776
35777 /* Expand an extract even operation. */
35778
35779 void
35780 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35781 {
35782 machine_mode vmode = GET_MODE (target);
35783 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35784 vec_perm_builder perm (nelt, nelt, 1);
35785
35786 for (i = 0; i < nelt; i++)
35787 perm.quick_push (i * 2);
35788
35789 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35790 }
35791
35792 /* Expand a vector interleave operation. */
35793
35794 void
35795 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35796 {
35797 machine_mode vmode = GET_MODE (target);
35798 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35799 vec_perm_builder perm (nelt, nelt, 1);
35800
35801 high = (highp ? 0 : nelt / 2);
35802 for (i = 0; i < nelt / 2; i++)
35803 {
35804 perm.quick_push (i + high);
35805 perm.quick_push (i + nelt + high);
35806 }
35807
35808 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35809 }
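/* For example, with V4SImode and HIGHP the selector built here is
   { 0, 4, 1, 5 }, interleaving the high halves of OP0 and OP1; with
   !HIGHP it is { 2, 6, 3, 7 } for the low halves.  */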
35810
35811 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35812 void
35813 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35814 {
35815 HOST_WIDE_INT hwi_scale (scale);
35816 REAL_VALUE_TYPE r_pow;
35817 rtvec v = rtvec_alloc (2);
35818 rtx elt;
35819 rtx scale_vec = gen_reg_rtx (V2DFmode);
35820 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35821 elt = const_double_from_real_value (r_pow, DFmode);
35822 RTVEC_ELT (v, 0) = elt;
35823 RTVEC_ELT (v, 1) = elt;
35824 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35825 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35826 }
35827
35828 /* Return an RTX representing where to find the function value of a
35829 function returning MODE. */
35830 static rtx
35831 rs6000_complex_function_value (machine_mode mode)
35832 {
35833 unsigned int regno;
35834 rtx r1, r2;
35835 machine_mode inner = GET_MODE_INNER (mode);
35836 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35837
35838 if (TARGET_FLOAT128_TYPE
35839 && (mode == KCmode
35840 || (mode == TCmode && TARGET_IEEEQUAD)))
35841 regno = ALTIVEC_ARG_RETURN;
35842
35843 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35844 regno = FP_ARG_RETURN;
35845
35846 else
35847 {
35848 regno = GP_ARG_RETURN;
35849
35850 /* 32-bit is OK since it'll go in r3/r4. */
35851 if (TARGET_32BIT && inner_bytes >= 4)
35852 return gen_rtx_REG (mode, regno);
35853 }
35854
35855 if (inner_bytes >= 8)
35856 return gen_rtx_REG (mode, regno);
35857
35858 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35859 const0_rtx);
35860 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35861 GEN_INT (inner_bytes));
35862 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35863 }
35864
35865 /* Return an rtx describing a return value of MODE as a PARALLEL
35866 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35867 stride REG_STRIDE. */
35868
35869 static rtx
35870 rs6000_parallel_return (machine_mode mode,
35871 int n_elts, machine_mode elt_mode,
35872 unsigned int regno, unsigned int reg_stride)
35873 {
35874 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35875
35876 int i;
35877 for (i = 0; i < n_elts; i++)
35878 {
35879 rtx r = gen_rtx_REG (elt_mode, regno);
35880 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35881 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35882 regno += reg_stride;
35883 }
35884
35885 return par;
35886 }
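/* E.g. rs6000_parallel_return (DImode, 2, SImode, GP_ARG_RETURN, 1)
   describes a 64-bit value split across r3 and r4: element 0 is
   (reg:SI 3) at byte offset 0 and element 1 is (reg:SI 4) at byte
   offset 4.  */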
35887
35888 /* Target hook for TARGET_FUNCTION_VALUE.
35889
35890 An integer value is in r3 and a floating-point value is in fp1,
35891 unless -msoft-float. */
35892
35893 static rtx
35894 rs6000_function_value (const_tree valtype,
35895 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35896 bool outgoing ATTRIBUTE_UNUSED)
35897 {
35898 machine_mode mode;
35899 unsigned int regno;
35900 machine_mode elt_mode;
35901 int n_elts;
35902
35903 /* Special handling for structs in darwin64. */
35904 if (TARGET_MACHO
35905 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35906 {
35907 CUMULATIVE_ARGS valcum;
35908 rtx valret;
35909
35910 valcum.words = 0;
35911 valcum.fregno = FP_ARG_MIN_REG;
35912 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35913 /* Do a trial code generation as if this were going to be passed as
35914 an argument; if any part goes in memory, we return NULL. */
35915 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35916 if (valret)
35917 return valret;
35918 /* Otherwise fall through to standard ABI rules. */
35919 }
35920
35921 mode = TYPE_MODE (valtype);
35922
35923 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35924 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35925 {
35926 int first_reg, n_regs;
35927
35928 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35929 {
35930 /* _Decimal128 must use even/odd register pairs. */
35931 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35932 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35933 }
35934 else
35935 {
35936 first_reg = ALTIVEC_ARG_RETURN;
35937 n_regs = 1;
35938 }
35939
35940 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35941 }
35942
35943 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35944 if (TARGET_32BIT && TARGET_POWERPC64)
35945 switch (mode)
35946 {
35947 default:
35948 break;
35949 case E_DImode:
35950 case E_SCmode:
35951 case E_DCmode:
35952 case E_TCmode:
35953 int count = GET_MODE_SIZE (mode) / 4;
35954 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35955 }
35956
35957 if ((INTEGRAL_TYPE_P (valtype)
35958 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35959 || POINTER_TYPE_P (valtype))
35960 mode = TARGET_32BIT ? SImode : DImode;
35961
35962 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35963 /* _Decimal128 must use an even/odd register pair. */
35964 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35965 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35966 && !FLOAT128_VECTOR_P (mode))
35967 regno = FP_ARG_RETURN;
35968 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35969 && targetm.calls.split_complex_arg)
35970 return rs6000_complex_function_value (mode);
35971 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35972 return register is used in both cases, and we won't see V2DImode/V2DFmode
35973 for pure altivec, combine the two cases. */
35974 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35975 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35976 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35977 regno = ALTIVEC_ARG_RETURN;
35978 else
35979 regno = GP_ARG_RETURN;
35980
35981 return gen_rtx_REG (mode, regno);
35982 }
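/* For instance, under the ELFv2 ABI a function returning a
   homogeneous aggregate of four floats gets a PARALLEL of four SFmode
   registers starting at FP_ARG_RETURN with stride 1 (f1..f4), built
   by rs6000_parallel_return above.  */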
35983
35984 /* Define how to find the value returned by a library function
35985 assuming the value has mode MODE. */
35986 rtx
35987 rs6000_libcall_value (machine_mode mode)
35988 {
35989 unsigned int regno;
35990
35991 /* Long long return values need to be split in the -mpowerpc64, 32-bit ABI. */
35992 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35993 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35994
35995 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35996 /* _Decimal128 must use an even/odd register pair. */
35997 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35998 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35999 regno = FP_ARG_RETURN;
36000 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36001 return register is used in both cases, and we won't see V2DImode/V2DFmode
36002 for pure altivec, combine the two cases. */
36003 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36004 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36005 regno = ALTIVEC_ARG_RETURN;
36006 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36007 return rs6000_complex_function_value (mode);
36008 else
36009 regno = GP_ARG_RETURN;
36010
36011 return gen_rtx_REG (mode, regno);
36012 }
36013
36014 /* Compute register pressure classes. We implement the target hook to avoid
36015 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36016 lead to incorrect estimates of the number of available registers and
36017 therefore increased register pressure/spill. */
36018 static int
36019 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36020 {
36021 int n;
36022
36023 n = 0;
36024 pressure_classes[n++] = GENERAL_REGS;
36025 if (TARGET_VSX)
36026 pressure_classes[n++] = VSX_REGS;
36027 else
36028 {
36029 if (TARGET_ALTIVEC)
36030 pressure_classes[n++] = ALTIVEC_REGS;
36031 if (TARGET_HARD_FLOAT)
36032 pressure_classes[n++] = FLOAT_REGS;
36033 }
36034 pressure_classes[n++] = CR_REGS;
36035 pressure_classes[n++] = SPECIAL_REGS;
36036
36037 return n;
36038 }
36039
36040 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36041 Frame pointer elimination is automatically handled.
36042
36043 For the RS/6000, if frame pointer elimination is being done, we would like
36044 to convert ap into fp, not sp.
36045
36046 We need r30 if -mminimal-toc was specified, and there are constant pool
36047 references. */
36048
36049 static bool
36050 rs6000_can_eliminate (const int from, const int to)
36051 {
36052 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36053 ? ! frame_pointer_needed
36054 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36055 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36056 || constant_pool_empty_p ()
36057 : true);
36058 }
36059
36060 /* Define the offset between two registers, FROM to be eliminated and its
36061 replacement TO, at the start of a routine. */
36062 HOST_WIDE_INT
36063 rs6000_initial_elimination_offset (int from, int to)
36064 {
36065 rs6000_stack_t *info = rs6000_stack_info ();
36066 HOST_WIDE_INT offset;
36067
36068 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36069 offset = info->push_p ? 0 : -info->total_size;
36070 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36071 {
36072 offset = info->push_p ? 0 : -info->total_size;
36073 if (FRAME_GROWS_DOWNWARD)
36074 offset += info->fixed_size + info->vars_size + info->parm_size;
36075 }
36076 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36077 offset = FRAME_GROWS_DOWNWARD
36078 ? info->fixed_size + info->vars_size + info->parm_size
36079 : 0;
36080 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36081 offset = info->total_size;
36082 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36083 offset = info->push_p ? info->total_size : 0;
36084 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36085 offset = 0;
36086 else
36087 gcc_unreachable ();
36088
36089 return offset;
36090 }
36091
36092 /* Fill in sizes of registers used by unwinder. */
36093
36094 static void
36095 rs6000_init_dwarf_reg_sizes_extra (tree address)
36096 {
36097 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36098 {
36099 int i;
36100 machine_mode mode = TYPE_MODE (char_type_node);
36101 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36102 rtx mem = gen_rtx_MEM (BLKmode, addr);
36103 rtx value = gen_int_mode (16, mode);
36104
36105 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36106 The unwinder still needs to know the size of Altivec registers. */
36107
36108 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36109 {
36110 int column = DWARF_REG_TO_UNWIND_COLUMN
36111 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36112 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36113
36114 emit_move_insn (adjust_address (mem, mode, offset), value);
36115 }
36116 }
36117 }
36118
36119 /* Map internal gcc register numbers to debug format register numbers.
36120 FORMAT specifies the type of debug register number to use:
36121 0 -- debug information, except for frame-related sections
36122 1 -- DWARF .debug_frame section
36123 2 -- DWARF .eh_frame section */
36124
36125 unsigned int
36126 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36127 {
36128 /* Except for the above, we use the internal number for non-DWARF
36129 debug information, and also for .eh_frame. */
36130 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36131 return regno;
36132
36133 /* On some platforms, we use the standard DWARF register
36134 numbering for .debug_info and .debug_frame. */
36135 #ifdef RS6000_USE_DWARF_NUMBERING
36136 if (regno <= 63)
36137 return regno;
36138 if (regno == LR_REGNO)
36139 return 108;
36140 if (regno == CTR_REGNO)
36141 return 109;
36142 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36143 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36144 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36145 to the DWARF reg for CR. */
36146 if (format == 1 && regno == CR2_REGNO)
36147 return 64;
36148 if (CR_REGNO_P (regno))
36149 return regno - CR0_REGNO + 86;
36150 if (regno == CA_REGNO)
36151 return 101; /* XER */
36152 if (ALTIVEC_REGNO_P (regno))
36153 return regno - FIRST_ALTIVEC_REGNO + 1124;
36154 if (regno == VRSAVE_REGNO)
36155 return 356;
36156 if (regno == VSCR_REGNO)
36157 return 67;
36158 #endif
36159 return regno;
36160 }
36161
36162 /* Target hook for eh_return_filter_mode. */
36163 static scalar_int_mode
36164 rs6000_eh_return_filter_mode (void)
36165 {
36166 return TARGET_32BIT ? SImode : word_mode;
36167 }
36168
36169 /* Target hook for translate_mode_attribute. */
36170 static machine_mode
36171 rs6000_translate_mode_attribute (machine_mode mode)
36172 {
36173 if ((FLOAT128_IEEE_P (mode)
36174 && ieee128_float_type_node == long_double_type_node)
36175 || (FLOAT128_IBM_P (mode)
36176 && ibm128_float_type_node == long_double_type_node))
36177 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36178 return mode;
36179 }
36180
36181 /* Target hook for scalar_mode_supported_p. */
36182 static bool
36183 rs6000_scalar_mode_supported_p (scalar_mode mode)
36184 {
36185 /* -m32 does not support TImode. This is the default, from
36186 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36187 same ABI as for -m32. But default_scalar_mode_supported_p allows
36188 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36189 for -mpowerpc64. */
36190 if (TARGET_32BIT && mode == TImode)
36191 return false;
36192
36193 if (DECIMAL_FLOAT_MODE_P (mode))
36194 return default_decimal_float_supported_p ();
36195 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36196 return true;
36197 else
36198 return default_scalar_mode_supported_p (mode);
36199 }
36200
36201 /* Target hook for vector_mode_supported_p. */
36202 static bool
36203 rs6000_vector_mode_supported_p (machine_mode mode)
36204 {
36205 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36206 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36207 double-double. */
36208 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36209 return true;
36210
36211 else
36212 return false;
36213 }
36214
36215 /* Target hook for floatn_mode. */
36216 static opt_scalar_float_mode
36217 rs6000_floatn_mode (int n, bool extended)
36218 {
36219 if (extended)
36220 {
36221 switch (n)
36222 {
36223 case 32:
36224 return DFmode;
36225
36226 case 64:
36227 if (TARGET_FLOAT128_TYPE)
36228 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36229 else
36230 return opt_scalar_float_mode ();
36231
36232 case 128:
36233 return opt_scalar_float_mode ();
36234
36235 default:
36236 /* Those are the only valid _FloatNx types. */
36237 gcc_unreachable ();
36238 }
36239 }
36240 else
36241 {
36242 switch (n)
36243 {
36244 case 32:
36245 return SFmode;
36246
36247 case 64:
36248 return DFmode;
36249
36250 case 128:
36251 if (TARGET_FLOAT128_TYPE)
36252 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36253 else
36254 return opt_scalar_float_mode ();
36255
36256 default:
36257 return opt_scalar_float_mode ();
36258 }
36259 }
36260
36261 }
36262
36263 /* Target hook for c_mode_for_suffix. */
36264 static machine_mode
36265 rs6000_c_mode_for_suffix (char suffix)
36266 {
36267 if (TARGET_FLOAT128_TYPE)
36268 {
36269 if (suffix == 'q' || suffix == 'Q')
36270 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36271
36272 /* At the moment, we are not defining a suffix for IBM extended double.
36273 If/when the default for -mabi=ieeelongdouble is changed, and we want
36274 to support __ibm128 constants in legacy library code, we may need to
36275 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36276 'q' as machine-dependent suffixes. The x86_64 port uses 'w' for
36277 __float80 constants. */
36278 }
36279
36280 return VOIDmode;
36281 }
36282
36283 /* Target hook for invalid_arg_for_unprototyped_fn. */
36284 static const char *
36285 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36286 {
36287 return (!rs6000_darwin64_abi
36288 && typelist == 0
36289 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36290 && (funcdecl == NULL_TREE
36291 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36292 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36293 ? N_("AltiVec argument passed to unprototyped function")
36294 : NULL;
36295 }
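/* A user-level sketch of what the check above diagnoses ('f' and 'v' are
   illustrative names):

       extern int f ();                  // unprototyped
       vector int v = { 1, 2, 3, 4 };
       f (v);   // "AltiVec argument passed to unprototyped function"  */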
36296
36297 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36298 setup by using __stack_chk_fail_local hidden function instead of
36299 calling __stack_chk_fail directly. Otherwise it is better to call
36300 __stack_chk_fail directly. */
36301
36302 static tree ATTRIBUTE_UNUSED
36303 rs6000_stack_protect_fail (void)
36304 {
36305 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36306 ? default_hidden_stack_protect_fail ()
36307 : default_external_stack_protect_fail ();
36308 }
36309
36310 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36311
36312 #if TARGET_ELF
36313 static unsigned HOST_WIDE_INT
36314 rs6000_asan_shadow_offset (void)
36315 {
36316 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36317 }
36318 #endif
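/* Sketch of how the offset above is consumed: AddressSanitizer computes a
   shadow address as (addr >> 3) + offset, so on 64-bit targets the
   instrumented check is conceptually

       char *shadow = (char *) ((addr >> 3) + (1UL << 41));
       if (*shadow) report (addr);   // 'report' is illustrative

   with 1 << 29 as the offset for 32-bit.  */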
36319 \f
36320 /* Mask options that we want to support inside of attribute((target)) and
36321 #pragma GCC target operations. Note, we do not include things like
36322 64/32-bit, endianness, hard/soft floating point, etc. that would have
36323 different calling sequences. */
36324
36325 struct rs6000_opt_mask {
36326 const char *name; /* option name */
36327 HOST_WIDE_INT mask; /* mask to set */
36328 bool invert; /* invert sense of mask */
36329 bool valid_target; /* option is a target option */
36330 };
36331
36332 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36333 {
36334 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36335 { "cmpb", OPTION_MASK_CMPB, false, true },
36336 { "crypto", OPTION_MASK_CRYPTO, false, true },
36337 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36338 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36339 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36340 false, true },
36341 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36342 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36343 { "fprnd", OPTION_MASK_FPRND, false, true },
36344 { "hard-dfp", OPTION_MASK_DFP, false, true },
36345 { "htm", OPTION_MASK_HTM, false, true },
36346 { "isel", OPTION_MASK_ISEL, false, true },
36347 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36348 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36349 { "modulo", OPTION_MASK_MODULO, false, true },
36350 { "mulhw", OPTION_MASK_MULHW, false, true },
36351 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36352 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36353 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36354 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36355 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36356 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36357 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36358 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36359 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36360 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36361 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36362 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36363 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36364 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36365 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36366 { "string", 0, false, true },
36367 { "update", OPTION_MASK_NO_UPDATE, true , true },
36368 { "vsx", OPTION_MASK_VSX, false, true },
36369 #ifdef OPTION_MASK_64BIT
36370 #if TARGET_AIX_OS
36371 { "aix64", OPTION_MASK_64BIT, false, false },
36372 { "aix32", OPTION_MASK_64BIT, true, false },
36373 #else
36374 { "64", OPTION_MASK_64BIT, false, false },
36375 { "32", OPTION_MASK_64BIT, true, false },
36376 #endif
36377 #endif
36378 #ifdef OPTION_MASK_EABI
36379 { "eabi", OPTION_MASK_EABI, false, false },
36380 #endif
36381 #ifdef OPTION_MASK_LITTLE_ENDIAN
36382 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36383 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36384 #endif
36385 #ifdef OPTION_MASK_RELOCATABLE
36386 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36387 #endif
36388 #ifdef OPTION_MASK_STRICT_ALIGN
36389 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36390 #endif
36391 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36392 { "string", 0, false, false },
36393 };
36394
36395 /* Builtin mask mapping for printing the flags. */
36396 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36397 {
36398 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36399 { "vsx", RS6000_BTM_VSX, false, false },
36400 { "fre", RS6000_BTM_FRE, false, false },
36401 { "fres", RS6000_BTM_FRES, false, false },
36402 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36403 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36404 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36405 { "cell", RS6000_BTM_CELL, false, false },
36406 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36407 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36408 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36409 { "crypto", RS6000_BTM_CRYPTO, false, false },
36410 { "htm", RS6000_BTM_HTM, false, false },
36411 { "hard-dfp", RS6000_BTM_DFP, false, false },
36412 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36413 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36414 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36415 { "float128", RS6000_BTM_FLOAT128, false, false },
36416 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36417 };
36418
36419 /* Option variables that we want to support inside attribute((target)) and
36420 #pragma GCC target operations. */
36421
36422 struct rs6000_opt_var {
36423 const char *name; /* option name */
36424 size_t global_offset; /* offset of the option in global_options. */
36425 size_t target_offset; /* offset of the option in target options. */
36426 };
36427
36428 static struct rs6000_opt_var const rs6000_opt_vars[] =
36429 {
36430 { "friz",
36431 offsetof (struct gcc_options, x_TARGET_FRIZ),
36432 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36433 { "avoid-indexed-addresses",
36434 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36435 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36436 { "longcall",
36437 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36438 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36439 { "optimize-swaps",
36440 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36441 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36442 { "allow-movmisalign",
36443 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36444 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36445 { "sched-groups",
36446 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36447 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36448 { "always-hint",
36449 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36450 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36451 { "align-branch-targets",
36452 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36453 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36454 { "tls-markers",
36455 offsetof (struct gcc_options, x_tls_markers),
36456 offsetof (struct cl_target_option, x_tls_markers), },
36457 { "sched-prolog",
36458 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36459 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36460 { "sched-epilog",
36461 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36462 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36463 { "speculate-indirect-jumps",
36464 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36465 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36466 };
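/* These entries let attribute strings toggle plain option variables via
   the stored offsets.  An illustrative use ('f' is an example name):

       __attribute__((target ("no-longcall"))) void f (void);

   This clears x_rs6000_default_long_calls for the function by writing
   through global_offset, as done in rs6000_inner_target_options below.  */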
36467
36468 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36469 parsing. Return true if there were no errors. */
36470
36471 static bool
36472 rs6000_inner_target_options (tree args, bool attr_p)
36473 {
36474 bool ret = true;
36475
36476 if (args == NULL_TREE)
36477 ;
36478
36479 else if (TREE_CODE (args) == STRING_CST)
36480 {
36481 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36482 char *q;
36483
36484 while ((q = strtok (p, ",")) != NULL)
36485 {
36486 bool error_p = false;
36487 bool not_valid_p = false;
36488 const char *cpu_opt = NULL;
36489
36490 p = NULL;
36491 if (strncmp (q, "cpu=", 4) == 0)
36492 {
36493 int cpu_index = rs6000_cpu_name_lookup (q+4);
36494 if (cpu_index >= 0)
36495 rs6000_cpu_index = cpu_index;
36496 else
36497 {
36498 error_p = true;
36499 cpu_opt = q+4;
36500 }
36501 }
36502 else if (strncmp (q, "tune=", 5) == 0)
36503 {
36504 int tune_index = rs6000_cpu_name_lookup (q+5);
36505 if (tune_index >= 0)
36506 rs6000_tune_index = tune_index;
36507 else
36508 {
36509 error_p = true;
36510 cpu_opt = q+5;
36511 }
36512 }
36513 else
36514 {
36515 size_t i;
36516 bool invert = false;
36517 char *r = q;
36518
36519 error_p = true;
36520 if (strncmp (r, "no-", 3) == 0)
36521 {
36522 invert = true;
36523 r += 3;
36524 }
36525
36526 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36527 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36528 {
36529 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36530
36531 if (!rs6000_opt_masks[i].valid_target)
36532 not_valid_p = true;
36533 else
36534 {
36535 error_p = false;
36536 rs6000_isa_flags_explicit |= mask;
36537
36538 /* VSX needs altivec, so -mvsx automagically sets
36539 altivec and disables -mavoid-indexed-addresses. */
36540 if (!invert)
36541 {
36542 if (mask == OPTION_MASK_VSX)
36543 {
36544 mask |= OPTION_MASK_ALTIVEC;
36545 TARGET_AVOID_XFORM = 0;
36546 }
36547 }
36548
36549 if (rs6000_opt_masks[i].invert)
36550 invert = !invert;
36551
36552 if (invert)
36553 rs6000_isa_flags &= ~mask;
36554 else
36555 rs6000_isa_flags |= mask;
36556 }
36557 break;
36558 }
36559
36560 if (error_p && !not_valid_p)
36561 {
36562 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36563 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36564 {
36565 size_t j = rs6000_opt_vars[i].global_offset;
36566 *((int *) ((char *)&global_options + j)) = !invert;
36567 error_p = false;
36568 not_valid_p = false;
36569 break;
36570 }
36571 }
36572 }
36573
36574 if (error_p)
36575 {
36576 const char *eprefix, *esuffix;
36577
36578 ret = false;
36579 if (attr_p)
36580 {
36581 eprefix = "__attribute__((__target__(";
36582 esuffix = ")))";
36583 }
36584 else
36585 {
36586 eprefix = "#pragma GCC target ";
36587 esuffix = "";
36588 }
36589
36590 if (cpu_opt)
36591 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36592 q, esuffix);
36593 else if (not_valid_p)
36594 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36595 else
36596 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36597 }
36598 }
36599 }
36600
36601 else if (TREE_CODE (args) == TREE_LIST)
36602 {
36603 do
36604 {
36605 tree value = TREE_VALUE (args);
36606 if (value)
36607 {
36608 bool ret2 = rs6000_inner_target_options (value, attr_p);
36609 if (!ret2)
36610 ret = false;
36611 }
36612 args = TREE_CHAIN (args);
36613 }
36614 while (args != NULL_TREE);
36615 }
36616
36617 else
36618 {
36619 error ("attribute %<target%> argument not a string");
36620 return false;
36621 }
36622
36623 return ret;
36624 }
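/* To sketch the parsing above: the string is split on commas, an optional
   "no-" prefix inverts the sense, and each token is matched against
   rs6000_opt_masks and then rs6000_opt_vars.  E.g. (illustrative):

       __attribute__((target ("cpu=power9,no-vsx"))) void f (void);

   selects the power9 cpu index and clears OPTION_MASK_VSX; the implicit
   Altivec enable only fires when VSX is being turned on, not off.  */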
36625
36626 /* Print out the target options as a list for -mdebug=target. */
36627
36628 static void
36629 rs6000_debug_target_options (tree args, const char *prefix)
36630 {
36631 if (args == NULL_TREE)
36632 fprintf (stderr, "%s<NULL>", prefix);
36633
36634 else if (TREE_CODE (args) == STRING_CST)
36635 {
36636 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36637 char *q;
36638
36639 while ((q = strtok (p, ",")) != NULL)
36640 {
36641 p = NULL;
36642 fprintf (stderr, "%s\"%s\"", prefix, q);
36643 prefix = ", ";
36644 }
36645 }
36646
36647 else if (TREE_CODE (args) == TREE_LIST)
36648 {
36649 do
36650 {
36651 tree value = TREE_VALUE (args);
36652 if (value)
36653 {
36654 rs6000_debug_target_options (value, prefix);
36655 prefix = ", ";
36656 }
36657 args = TREE_CHAIN (args);
36658 }
36659 while (args != NULL_TREE);
36660 }
36661
36662 else
36663 gcc_unreachable ();
36664
36665 return;
36666 }
36667
36668 \f
36669 /* Hook to validate attribute((target("..."))). */
36670
36671 static bool
36672 rs6000_valid_attribute_p (tree fndecl,
36673 tree ARG_UNUSED (name),
36674 tree args,
36675 int flags)
36676 {
36677 struct cl_target_option cur_target;
36678 bool ret;
36679 tree old_optimize;
36680 tree new_target, new_optimize;
36681 tree func_optimize;
36682
36683 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36684
36685 if (TARGET_DEBUG_TARGET)
36686 {
36687 tree tname = DECL_NAME (fndecl);
36688 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36689 if (tname)
36690 fprintf (stderr, "function: %.*s\n",
36691 (int) IDENTIFIER_LENGTH (tname),
36692 IDENTIFIER_POINTER (tname));
36693 else
36694 fprintf (stderr, "function: unknown\n");
36695
36696 fprintf (stderr, "args:");
36697 rs6000_debug_target_options (args, " ");
36698 fprintf (stderr, "\n");
36699
36700 if (flags)
36701 fprintf (stderr, "flags: 0x%x\n", flags);
36702
36703 fprintf (stderr, "--------------------\n");
36704 }
36705
36706 /* attribute((target("default"))) does nothing, beyond
36707 affecting multi-versioning. */
36708 if (TREE_VALUE (args)
36709 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36710 && TREE_CHAIN (args) == NULL_TREE
36711 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36712 return true;
36713
36714 old_optimize = build_optimization_node (&global_options);
36715 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36716
36717 /* If the function changed the optimization levels as well as setting target
36718 options, start with the optimizations specified. */
36719 if (func_optimize && func_optimize != old_optimize)
36720 cl_optimization_restore (&global_options,
36721 TREE_OPTIMIZATION (func_optimize));
36722
36723 /* The target attributes may also change some optimization flags, so update
36724 the optimization options if necessary. */
36725 cl_target_option_save (&cur_target, &global_options);
36726 rs6000_cpu_index = rs6000_tune_index = -1;
36727 ret = rs6000_inner_target_options (args, true);
36728
36729 /* Set up any additional state. */
36730 if (ret)
36731 {
36732 ret = rs6000_option_override_internal (false);
36733 new_target = build_target_option_node (&global_options);
36734 }
36735 else
36736 new_target = NULL;
36737
36738 new_optimize = build_optimization_node (&global_options);
36739
36740 if (!new_target)
36741 ret = false;
36742
36743 else if (fndecl)
36744 {
36745 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36746
36747 if (old_optimize != new_optimize)
36748 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36749 }
36750
36751 cl_target_option_restore (&global_options, &cur_target);
36752
36753 if (old_optimize != new_optimize)
36754 cl_optimization_restore (&global_options,
36755 TREE_OPTIMIZATION (old_optimize));
36756
36757 return ret;
36758 }
36759
36760 \f
36761 /* Hook to validate the current #pragma GCC target and set the state, and
36762 update the macros based on what was changed. If ARGS is NULL, then
36763 POP_TARGET is used to reset the options. */
36764
36765 bool
36766 rs6000_pragma_target_parse (tree args, tree pop_target)
36767 {
36768 tree prev_tree = build_target_option_node (&global_options);
36769 tree cur_tree;
36770 struct cl_target_option *prev_opt, *cur_opt;
36771 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36772 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36773
36774 if (TARGET_DEBUG_TARGET)
36775 {
36776 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36777 fprintf (stderr, "args:");
36778 rs6000_debug_target_options (args, " ");
36779 fprintf (stderr, "\n");
36780
36781 if (pop_target)
36782 {
36783 fprintf (stderr, "pop_target:\n");
36784 debug_tree (pop_target);
36785 }
36786 else
36787 fprintf (stderr, "pop_target: <NULL>\n");
36788
36789 fprintf (stderr, "--------------------\n");
36790 }
36791
36792 if (! args)
36793 {
36794 cur_tree = ((pop_target)
36795 ? pop_target
36796 : target_option_default_node);
36797 cl_target_option_restore (&global_options,
36798 TREE_TARGET_OPTION (cur_tree));
36799 }
36800 else
36801 {
36802 rs6000_cpu_index = rs6000_tune_index = -1;
36803 if (!rs6000_inner_target_options (args, false)
36804 || !rs6000_option_override_internal (false)
36805 || (cur_tree = build_target_option_node (&global_options))
36806 == NULL_TREE)
36807 {
36808 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36809 fprintf (stderr, "invalid pragma\n");
36810
36811 return false;
36812 }
36813 }
36814
36815 target_option_current_node = cur_tree;
36816 rs6000_activate_target_options (target_option_current_node);
36817
36818 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36819 change the macros that are defined. */
36820 if (rs6000_target_modify_macros_ptr)
36821 {
36822 prev_opt = TREE_TARGET_OPTION (prev_tree);
36823 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36824 prev_flags = prev_opt->x_rs6000_isa_flags;
36825
36826 cur_opt = TREE_TARGET_OPTION (cur_tree);
36827 cur_flags = cur_opt->x_rs6000_isa_flags;
36828 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36829
36830 diff_bumask = (prev_bumask ^ cur_bumask);
36831 diff_flags = (prev_flags ^ cur_flags);
36832
36833 if ((diff_flags != 0) || (diff_bumask != 0))
36834 {
36835 /* Delete old macros. */
36836 rs6000_target_modify_macros_ptr (false,
36837 prev_flags & diff_flags,
36838 prev_bumask & diff_bumask);
36839
36840 /* Define new macros. */
36841 rs6000_target_modify_macros_ptr (true,
36842 cur_flags & diff_flags,
36843 cur_bumask & diff_bumask);
36844 }
36845 }
36846
36847 return true;
36848 }
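/* Illustrative use of the pragma path above; the preprocessor callback
   adds and removes macros such as __VSX__ and __ALTIVEC__ to match:

       #pragma GCC push_options
       #pragma GCC target ("vsx")
       // __VSX__ is defined here ...
       #pragma GCC pop_options
       // ... and removed again here.  */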
36849
36850 \f
36851 /* Remember the last target of rs6000_set_current_function. */
36852 static GTY(()) tree rs6000_previous_fndecl;
36853
36854 /* Restore target's globals from NEW_TREE and invalidate the
36855 rs6000_previous_fndecl cache. */
36856
36857 void
36858 rs6000_activate_target_options (tree new_tree)
36859 {
36860 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36861 if (TREE_TARGET_GLOBALS (new_tree))
36862 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36863 else if (new_tree == target_option_default_node)
36864 restore_target_globals (&default_target_globals);
36865 else
36866 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36867 rs6000_previous_fndecl = NULL_TREE;
36868 }
36869
36870 /* Establish appropriate back-end context for processing the function
36871 FNDECL. The argument might be NULL to indicate processing at top
36872 level, outside of any function scope. */
36873 static void
36874 rs6000_set_current_function (tree fndecl)
36875 {
36876 if (TARGET_DEBUG_TARGET)
36877 {
36878 fprintf (stderr, "\n==================== rs6000_set_current_function");
36879
36880 if (fndecl)
36881 fprintf (stderr, ", fndecl %s (%p)",
36882 (DECL_NAME (fndecl)
36883 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36884 : "<unknown>"), (void *)fndecl);
36885
36886 if (rs6000_previous_fndecl)
36887 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36888
36889 fprintf (stderr, "\n");
36890 }
36891
36892 /* Only change the context if the function changes. This hook is called
36893 several times in the course of compiling a function, and we don't want to
36894 slow things down too much or call target_reinit when it isn't safe. */
36895 if (fndecl == rs6000_previous_fndecl)
36896 return;
36897
36898 tree old_tree;
36899 if (rs6000_previous_fndecl == NULL_TREE)
36900 old_tree = target_option_current_node;
36901 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36902 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36903 else
36904 old_tree = target_option_default_node;
36905
36906 tree new_tree;
36907 if (fndecl == NULL_TREE)
36908 {
36909 if (old_tree != target_option_current_node)
36910 new_tree = target_option_current_node;
36911 else
36912 new_tree = NULL_TREE;
36913 }
36914 else
36915 {
36916 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36917 if (new_tree == NULL_TREE)
36918 new_tree = target_option_default_node;
36919 }
36920
36921 if (TARGET_DEBUG_TARGET)
36922 {
36923 if (new_tree)
36924 {
36925 fprintf (stderr, "\nnew fndecl target specific options:\n");
36926 debug_tree (new_tree);
36927 }
36928
36929 if (old_tree)
36930 {
36931 fprintf (stderr, "\nold fndecl target specific options:\n");
36932 debug_tree (old_tree);
36933 }
36934
36935 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36936 fprintf (stderr, "--------------------\n");
36937 }
36938
36939 if (new_tree && old_tree != new_tree)
36940 rs6000_activate_target_options (new_tree);
36941
36942 if (fndecl)
36943 rs6000_previous_fndecl = fndecl;
36944 }
36945
36946 \f
36947 /* Save the current options */
36948
36949 static void
36950 rs6000_function_specific_save (struct cl_target_option *ptr,
36951 struct gcc_options *opts)
36952 {
36953 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36954 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36955 }
36956
36957 /* Restore the current options */
36958
36959 static void
36960 rs6000_function_specific_restore (struct gcc_options *opts,
36961 struct cl_target_option *ptr)
36962
36963 {
36964 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36965 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36966 (void) rs6000_option_override_internal (false);
36967 }
36968
36969 /* Print the current options */
36970
36971 static void
36972 rs6000_function_specific_print (FILE *file, int indent,
36973 struct cl_target_option *ptr)
36974 {
36975 rs6000_print_isa_options (file, indent, "Isa options set",
36976 ptr->x_rs6000_isa_flags);
36977
36978 rs6000_print_isa_options (file, indent, "Isa options explicit",
36979 ptr->x_rs6000_isa_flags_explicit);
36980 }
36981
36982 /* Helper function to print the current isa or misc options on a line. */
36983
36984 static void
36985 rs6000_print_options_internal (FILE *file,
36986 int indent,
36987 const char *string,
36988 HOST_WIDE_INT flags,
36989 const char *prefix,
36990 const struct rs6000_opt_mask *opts,
36991 size_t num_elements)
36992 {
36993 size_t i;
36994 size_t start_column = 0;
36995 size_t cur_column;
36996 size_t max_column = 120;
36997 size_t prefix_len = strlen (prefix);
36998 size_t comma_len = 0;
36999 const char *comma = "";
37000
37001 if (indent)
37002 start_column += fprintf (file, "%*s", indent, "");
37003
37004 if (!flags)
37005 {
37006 fprintf (file, DEBUG_FMT_S, string, "<none>");
37007 return;
37008 }
37009
37010 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37011
37012 /* Print the various mask options. */
37013 cur_column = start_column;
37014 for (i = 0; i < num_elements; i++)
37015 {
37016 bool invert = opts[i].invert;
37017 const char *name = opts[i].name;
37018 const char *no_str = "";
37019 HOST_WIDE_INT mask = opts[i].mask;
37020 size_t len = comma_len + prefix_len + strlen (name);
37021
37022 if (!invert)
37023 {
37024 if ((flags & mask) == 0)
37025 {
37026 no_str = "no-";
37027 len += sizeof ("no-") - 1;
37028 }
37029
37030 flags &= ~mask;
37031 }
37032
37033 else
37034 {
37035 if ((flags & mask) != 0)
37036 {
37037 no_str = "no-";
37038 len += sizeof ("no-") - 1;
37039 }
37040
37041 flags |= mask;
37042 }
37043
37044 cur_column += len;
37045 if (cur_column > max_column)
37046 {
37047 fprintf (file, ", \\\n%*s", (int)start_column, "");
37048 cur_column = start_column + len;
37049 comma = "";
37050 }
37051
37052 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37053 comma = ", ";
37054 comma_len = sizeof (", ") - 1;
37055 }
37056
37057 fputs ("\n", file);
37058 }
37059
37060 /* Helper function to print the current isa options on a line. */
37061
37062 static void
37063 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37064 HOST_WIDE_INT flags)
37065 {
37066 rs6000_print_options_internal (file, indent, string, flags, "-m",
37067 &rs6000_opt_masks[0],
37068 ARRAY_SIZE (rs6000_opt_masks));
37069 }
37070
37071 static void
37072 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37073 HOST_WIDE_INT flags)
37074 {
37075 rs6000_print_options_internal (file, indent, string, flags, "",
37076 &rs6000_builtin_mask_names[0],
37077 ARRAY_SIZE (rs6000_builtin_mask_names));
37078 }
37079
37080 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37081 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37082 -mupper-regs-df, etc.).
37083
37084 If the user used -mno-power8-vector, we need to turn off all of the implicit
37085 ISA 2.07 and 3.0 options that relate to the vector unit.
37086
37087 If the user used -mno-power9-vector, we need to turn off all of the implicit
37088 ISA 3.0 options that relate to the vector unit.
37089
37090 This function does not handle explicit options such as the user specifying
37091 -mdirect-move. These are handled in rs6000_option_override_internal, and
37092 the appropriate error is given if needed.
37093
37094 We return a mask of all of the implicit options that should not be enabled
37095 by default. */
37096
37097 static HOST_WIDE_INT
37098 rs6000_disable_incompatible_switches (void)
37099 {
37100 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37101 size_t i, j;
37102
37103 static const struct {
37104 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37105 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37106 const char *const name; /* name of the switch. */
37107 } flags[] = {
37108 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37109 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37110 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37111 };
37112
37113 for (i = 0; i < ARRAY_SIZE (flags); i++)
37114 {
37115 HOST_WIDE_INT no_flag = flags[i].no_flag;
37116
37117 if ((rs6000_isa_flags & no_flag) == 0
37118 && (rs6000_isa_flags_explicit & no_flag) != 0)
37119 {
37120 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37121 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37122 & rs6000_isa_flags
37123 & dep_flags);
37124
37125 if (set_flags)
37126 {
37127 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37128 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37129 {
37130 set_flags &= ~rs6000_opt_masks[j].mask;
37131 error ("%<-mno-%s%> turns off %<-m%s%>",
37132 flags[i].name,
37133 rs6000_opt_masks[j].name);
37134 }
37135
37136 gcc_assert (!set_flags);
37137 }
37138
37139 rs6000_isa_flags &= ~dep_flags;
37140 ignore_masks |= no_flag | dep_flags;
37141 }
37142 }
37143
37144 return ignore_masks;
37145 }
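/* For example, an explicit combination such as

       gcc -mno-vsx -mpower9-vector ...

   reaches the error above ("-mno-vsx turns off -mpower9-vector"), since
   power9-vector is one of the implicit options that depend on VSX.  */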
37146
37147 \f
37148 /* Helper function for printing the function name when debugging. */
37149
37150 static const char *
37151 get_decl_name (tree fn)
37152 {
37153 tree name;
37154
37155 if (!fn)
37156 return "<null>";
37157
37158 name = DECL_NAME (fn);
37159 if (!name)
37160 return "<no-name>";
37161
37162 return IDENTIFIER_POINTER (name);
37163 }
37164
37165 /* Return the clone id of the target we are compiling code for in a target
37166 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37167 the priority list for the target clones (ordered from lowest to
37168 highest). */
37169
37170 static int
37171 rs6000_clone_priority (tree fndecl)
37172 {
37173 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37174 HOST_WIDE_INT isa_masks;
37175 int ret = CLONE_DEFAULT;
37176 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37177 const char *attrs_str = NULL;
37178
37179 attrs = TREE_VALUE (TREE_VALUE (attrs));
37180 attrs_str = TREE_STRING_POINTER (attrs);
37181
37182 /* Return priority zero for default function. Return the ISA needed for the
37183 function if it is not the default. */
37184 if (strcmp (attrs_str, "default") != 0)
37185 {
37186 if (fn_opts == NULL_TREE)
37187 fn_opts = target_option_default_node;
37188
37189 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37190 isa_masks = rs6000_isa_flags;
37191 else
37192 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37193
37194 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37195 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37196 break;
37197 }
37198
37199 if (TARGET_DEBUG_TARGET)
37200 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37201 get_decl_name (fndecl), ret);
37202
37203 return ret;
37204 }
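/* A user-level sketch of the clone machinery this priority feeds ('mod3'
   is an illustrative name):

       __attribute__((target_clones ("cpu=power9", "default")))
       long mod3 (long a) { return a % 3; }

   The power9 clone carries ISA 3.0 flag bits, so it receives a higher
   priority than the default clone.  */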
37205
37206 /* This compares the priority of target features in function DECL1 and DECL2.
37207 It returns a positive value if DECL1 is higher priority, a negative value if
37208 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37209 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37210
37211 static int
37212 rs6000_compare_version_priority (tree decl1, tree decl2)
37213 {
37214 int priority1 = rs6000_clone_priority (decl1);
37215 int priority2 = rs6000_clone_priority (decl2);
37216 int ret = priority1 - priority2;
37217
37218 if (TARGET_DEBUG_TARGET)
37219 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37220 get_decl_name (decl1), get_decl_name (decl2), ret);
37221
37222 return ret;
37223 }
37224
37225 /* Make a dispatcher declaration for the multi-versioned function DECL.
37226 Calls to the DECL function will be replaced with calls to the dispatcher
37227 by the front-end. Returns the decl of the dispatcher function. */
37228
37229 static tree
37230 rs6000_get_function_versions_dispatcher (void *decl)
37231 {
37232 tree fn = (tree) decl;
37233 struct cgraph_node *node = NULL;
37234 struct cgraph_node *default_node = NULL;
37235 struct cgraph_function_version_info *node_v = NULL;
37236 struct cgraph_function_version_info *first_v = NULL;
37237
37238 tree dispatch_decl = NULL;
37239
37240 struct cgraph_function_version_info *default_version_info = NULL;
37241 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37242
37243 if (TARGET_DEBUG_TARGET)
37244 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37245 get_decl_name (fn));
37246
37247 node = cgraph_node::get (fn);
37248 gcc_assert (node != NULL);
37249
37250 node_v = node->function_version ();
37251 gcc_assert (node_v != NULL);
37252
37253 if (node_v->dispatcher_resolver != NULL)
37254 return node_v->dispatcher_resolver;
37255
37256 /* Find the default version and make it the first node. */
37257 first_v = node_v;
37258 /* Go to the beginning of the chain. */
37259 while (first_v->prev != NULL)
37260 first_v = first_v->prev;
37261
37262 default_version_info = first_v;
37263 while (default_version_info != NULL)
37264 {
37265 const tree decl2 = default_version_info->this_node->decl;
37266 if (is_function_default_version (decl2))
37267 break;
37268 default_version_info = default_version_info->next;
37269 }
37270
37271 /* If there is no default node, just return NULL. */
37272 if (default_version_info == NULL)
37273 return NULL;
37274
37275 /* Make default info the first node. */
37276 if (first_v != default_version_info)
37277 {
37278 default_version_info->prev->next = default_version_info->next;
37279 if (default_version_info->next)
37280 default_version_info->next->prev = default_version_info->prev;
37281 first_v->prev = default_version_info;
37282 default_version_info->next = first_v;
37283 default_version_info->prev = NULL;
37284 }
37285
37286 default_node = default_version_info->this_node;
37287
37288 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37289 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37290 "target_clones attribute needs GLIBC (2.23 and newer) that "
37291 "exports hardware capability bits");
37292 #else
37293
37294 if (targetm.has_ifunc_p ())
37295 {
37296 struct cgraph_function_version_info *it_v = NULL;
37297 struct cgraph_node *dispatcher_node = NULL;
37298 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37299
37300 /* Right now, the dispatching is done via ifunc. */
37301 dispatch_decl = make_dispatcher_decl (default_node->decl);
37302
37303 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37304 gcc_assert (dispatcher_node != NULL);
37305 dispatcher_node->dispatcher_function = 1;
37306 dispatcher_version_info
37307 = dispatcher_node->insert_new_function_version ();
37308 dispatcher_version_info->next = default_version_info;
37309 dispatcher_node->definition = 1;
37310
37311 /* Set the dispatcher for all the versions. */
37312 it_v = default_version_info;
37313 while (it_v != NULL)
37314 {
37315 it_v->dispatcher_resolver = dispatch_decl;
37316 it_v = it_v->next;
37317 }
37318 }
37319 else
37320 {
37321 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37322 "multiversioning needs ifunc which is not supported "
37323 "on this target");
37324 }
37325 #endif
37326
37327 return dispatch_decl;
37328 }
37329
37330 /* Make the resolver function decl to dispatch the versions of a multi-
37331 versioned function, DEFAULT_DECL. Create an empty basic block in the
37332 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37333 function. */
37334
37335 static tree
37336 make_resolver_func (const tree default_decl,
37337 const tree dispatch_decl,
37338 basic_block *empty_bb)
37339 {
37340 /* Make the resolver function static. The resolver function returns
37341 void *. */
37342 tree decl_name = clone_function_name (default_decl, "resolver");
37343 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37344 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37345 tree decl = build_fn_decl (resolver_name, type);
37346 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37347
37348 DECL_NAME (decl) = decl_name;
37349 TREE_USED (decl) = 1;
37350 DECL_ARTIFICIAL (decl) = 1;
37351 DECL_IGNORED_P (decl) = 0;
37352 TREE_PUBLIC (decl) = 0;
37353 DECL_UNINLINABLE (decl) = 1;
37354
37355 /* Resolver is not external, body is generated. */
37356 DECL_EXTERNAL (decl) = 0;
37357 DECL_EXTERNAL (dispatch_decl) = 0;
37358
37359 DECL_CONTEXT (decl) = NULL_TREE;
37360 DECL_INITIAL (decl) = make_node (BLOCK);
37361 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37362
37363 /* Build result decl and add to function_decl. */
37364 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37365 DECL_ARTIFICIAL (t) = 1;
37366 DECL_IGNORED_P (t) = 1;
37367 DECL_RESULT (decl) = t;
37368
37369 gimplify_function_tree (decl);
37370 push_cfun (DECL_STRUCT_FUNCTION (decl));
37371 *empty_bb = init_lowered_empty_function (decl, false,
37372 profile_count::uninitialized ());
37373
37374 cgraph_node::add_new_function (decl, true);
37375 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37376
37377 pop_cfun ();
37378
37379 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37380 DECL_ATTRIBUTES (dispatch_decl)
37381 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37382
37383 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37384
37385 return decl;
37386 }
37387
37388 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37389 return a pointer to VERSION_DECL if we are running on a machine that
37390 supports the index CLONE_ISA hardware architecture bits. This function will
37391 be called during version dispatch to decide which function version to
37392 execute. It returns the basic block at the end, to which more conditions
37393 can be added. */
37394
37395 static basic_block
37396 add_condition_to_bb (tree function_decl, tree version_decl,
37397 int clone_isa, basic_block new_bb)
37398 {
37399 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37400
37401 gcc_assert (new_bb != NULL);
37402 gimple_seq gseq = bb_seq (new_bb);
37403
37404
37405 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37406 build_fold_addr_expr (version_decl));
37407 tree result_var = create_tmp_var (ptr_type_node);
37408 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37409 gimple *return_stmt = gimple_build_return (result_var);
37410
37411 if (clone_isa == CLONE_DEFAULT)
37412 {
37413 gimple_seq_add_stmt (&gseq, convert_stmt);
37414 gimple_seq_add_stmt (&gseq, return_stmt);
37415 set_bb_seq (new_bb, gseq);
37416 gimple_set_bb (convert_stmt, new_bb);
37417 gimple_set_bb (return_stmt, new_bb);
37418 pop_cfun ();
37419 return new_bb;
37420 }
37421
37422 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37423 tree cond_var = create_tmp_var (bool_int_type_node);
37424 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37425 const char *arg_str = rs6000_clone_map[clone_isa].name;
37426 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37427 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37428 gimple_call_set_lhs (call_cond_stmt, cond_var);
37429
37430 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37431 gimple_set_bb (call_cond_stmt, new_bb);
37432 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37433
37434 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37435 NULL_TREE, NULL_TREE);
37436 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37437 gimple_set_bb (if_else_stmt, new_bb);
37438 gimple_seq_add_stmt (&gseq, if_else_stmt);
37439
37440 gimple_seq_add_stmt (&gseq, convert_stmt);
37441 gimple_seq_add_stmt (&gseq, return_stmt);
37442 set_bb_seq (new_bb, gseq);
37443
37444 basic_block bb1 = new_bb;
37445 edge e12 = split_block (bb1, if_else_stmt);
37446 basic_block bb2 = e12->dest;
37447 e12->flags &= ~EDGE_FALLTHRU;
37448 e12->flags |= EDGE_TRUE_VALUE;
37449
37450 edge e23 = split_block (bb2, return_stmt);
37451 gimple_set_bb (convert_stmt, bb2);
37452 gimple_set_bb (return_stmt, bb2);
37453
37454 basic_block bb3 = e23->dest;
37455 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37456
37457 remove_edge (e23);
37458 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37459
37460 pop_cfun ();
37461 return bb3;
37462 }
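/* In C terms, the resolver body assembled above behaves roughly like this
   (the function names are illustrative):

       void *resolver (void)
       {
         if (__builtin_cpu_supports ("arch_3_00")) return f_power9;
         if (__builtin_cpu_supports ("arch_2_07")) return f_power8;
         return f_default;
       }  */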
37463
37464 /* This function generates the dispatch function for multi-versioned functions.
37465 DISPATCH_DECL is the function which will contain the dispatch logic.
37466 FNDECLS are the function choices for dispatch, and is a tree chain.
37467 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37468 code is generated. */
37469
37470 static int
37471 dispatch_function_versions (tree dispatch_decl,
37472 void *fndecls_p,
37473 basic_block *empty_bb)
37474 {
37475 int ix;
37476 tree ele;
37477 vec<tree> *fndecls;
37478 tree clones[CLONE_MAX];
37479
37480 if (TARGET_DEBUG_TARGET)
37481 fputs ("dispatch_function_versions, top\n", stderr);
37482
37483 gcc_assert (dispatch_decl != NULL
37484 && fndecls_p != NULL
37485 && empty_bb != NULL);
37486
37487 /* fndecls_p is actually a vector. */
37488 fndecls = static_cast<vec<tree> *> (fndecls_p);
37489
37490 /* At least one more version other than the default. */
37491 gcc_assert (fndecls->length () >= 2);
37492
37493 /* The first version in the vector is the default decl. */
37494 memset ((void *) clones, '\0', sizeof (clones));
37495 clones[CLONE_DEFAULT] = (*fndecls)[0];
37496
37497 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37498 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37499 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37500 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37501 to insert the code here to do the call. */
37502
37503 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37504 {
37505 int priority = rs6000_clone_priority (ele);
37506 if (!clones[priority])
37507 clones[priority] = ele;
37508 }
37509
37510 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37511 if (clones[ix])
37512 {
37513 if (TARGET_DEBUG_TARGET)
37514 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37515 ix, get_decl_name (clones[ix]));
37516
37517 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37518 *empty_bb);
37519 }
37520
37521 return 0;
37522 }
37523
37524 /* Generate the dispatching code body to dispatch multi-versioned function
37525 DECL. The target hook is called to process the "target" attributes and
37526 provide the code to dispatch the right function at run-time. NODE points
37527 to the dispatcher decl whose body will be created. */
37528
37529 static tree
37530 rs6000_generate_version_dispatcher_body (void *node_p)
37531 {
37532 tree resolver;
37533 basic_block empty_bb;
37534 struct cgraph_node *node = (cgraph_node *) node_p;
37535 struct cgraph_function_version_info *ninfo = node->function_version ();
37536
37537 if (ninfo->dispatcher_resolver)
37538 return ninfo->dispatcher_resolver;
37539
37540 /* node is going to be an alias, so remove the finalized bit. */
37541 node->definition = false;
37542
37543 /* The first version in the chain corresponds to the default version. */
37544 ninfo->dispatcher_resolver = resolver
37545 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37546
37547 if (TARGET_DEBUG_TARGET)
37548 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37549 get_decl_name (resolver));
37550
37551 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37552 auto_vec<tree, 2> fn_ver_vec;
37553
37554 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37555 vinfo;
37556 vinfo = vinfo->next)
37557 {
37558 struct cgraph_node *version = vinfo->this_node;
37559 /* Check for virtual functions here again, as by this time it should
37560 have been determined if this function needs a vtable index or
37561 not. This happens for methods in derived classes that override
37562 virtual methods in base classes but are not explicitly marked as
37563 virtual. */
37564 if (DECL_VINDEX (version->decl))
37565 sorry ("Virtual function multiversioning not supported");
37566
37567 fn_ver_vec.safe_push (version->decl);
37568 }
37569
37570 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37571 cgraph_edge::rebuild_edges ();
37572 pop_cfun ();
37573 return resolver;
37574 }
37575
37576 \f
37577 /* Hook to determine if one function can safely inline another. */
37578
37579 static bool
37580 rs6000_can_inline_p (tree caller, tree callee)
37581 {
37582 bool ret = false;
37583 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37584 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37585
37586 /* If callee has no option attributes, then it is ok to inline. */
37587 if (!callee_tree)
37588 ret = true;
37589
37590 /* If caller has no option attributes, but callee does then it is not ok to
37591 inline. */
37592 else if (!caller_tree)
37593 ret = false;
37594
37595 else
37596 {
37597 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37598 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37599
37600 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37601 can inline an altivec function but a non-vsx function can't inline a
37602 vsx function. */
37603 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37604 == callee_opts->x_rs6000_isa_flags)
37605 ret = true;
37606 }
37607
37608 if (TARGET_DEBUG_TARGET)
37609 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37610 get_decl_name (caller), get_decl_name (callee),
37611 (ret ? "can" : "cannot"));
37612
37613 return ret;
37614 }
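/* Illustrative pair for the subset rule above ('callee' and 'caller' are
   example names):

       __attribute__((target ("altivec"))) int callee (void);
       __attribute__((target ("vsx")))     int caller (void);

   caller may inline callee, because target("vsx") also sets the Altivec
   flag and thus forms a superset; the reverse direction is rejected.  */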
37615 \f
37616 /* Allocate a stack temp and fixup the address so it meets the particular
37617 memory requirements (either offsettable or REG+REG addressing). */
37618
37619 rtx
37620 rs6000_allocate_stack_temp (machine_mode mode,
37621 bool offsettable_p,
37622 bool reg_reg_p)
37623 {
37624 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37625 rtx addr = XEXP (stack, 0);
37626 int strict_p = reload_completed;
37627
37628 if (!legitimate_indirect_address_p (addr, strict_p))
37629 {
37630 if (offsettable_p
37631 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37632 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37633
37634 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37635 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37636 }
37637
37638 return stack;
37639 }
37640
37641 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37642 convert to such a form to deal with memory reference instructions
37643 like STFIWX and LDBRX that only take reg+reg addressing. */
37644
37645 rtx
37646 rs6000_force_indexed_or_indirect_mem (rtx x)
37647 {
37648 machine_mode mode = GET_MODE (x);
37649
37650 gcc_assert (MEM_P (x));
37651 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37652 {
37653 rtx addr = XEXP (x, 0);
37654 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37655 {
37656 rtx reg = XEXP (addr, 0);
37657 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37658 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37659 gcc_assert (REG_P (reg));
37660 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37661 addr = reg;
37662 }
37663 else if (GET_CODE (addr) == PRE_MODIFY)
37664 {
37665 rtx reg = XEXP (addr, 0);
37666 rtx expr = XEXP (addr, 1);
37667 gcc_assert (REG_P (reg));
37668 gcc_assert (GET_CODE (expr) == PLUS);
37669 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37670 addr = reg;
37671 }
37672
37673 x = replace_equiv_address (x, force_reg (Pmode, addr));
37674 }
37675
37676 return x;
37677 }
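/* Sketch of the rewrite above in RTL terms: an auto-modify address such
   as

       (mem (pre_inc (reg r9)))          // r9 is an illustrative register

   becomes an explicit update followed by a plain register address,

       (set (reg r9) (plus (reg r9) (const_int size)))
       (mem (reg r9))

   which reg+reg/indirect-only instructions like STFIWX can accept.  */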
37678
37679 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37680
37681 On the RS/6000, all integer constants are acceptable, most won't be valid
37682 for particular insns, though. Only easy FP constants are acceptable. */
37683
37684 static bool
37685 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37686 {
37687 if (TARGET_ELF && tls_referenced_p (x))
37688 return false;
37689
37690 if (CONST_DOUBLE_P (x))
37691 return easy_fp_constant (x, mode);
37692
37693 if (GET_CODE (x) == CONST_VECTOR)
37694 return easy_vector_constant (x, mode);
37695
37696 return true;
37697 }
37698
37699 \f
37700 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37701
37702 static bool
37703 chain_already_loaded (rtx_insn *last)
37704 {
37705 for (; last != NULL; last = PREV_INSN (last))
37706 {
37707 if (NONJUMP_INSN_P (last))
37708 {
37709 rtx patt = PATTERN (last);
37710
37711 if (GET_CODE (patt) == SET)
37712 {
37713 rtx lhs = XEXP (patt, 0);
37714
37715 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37716 return true;
37717 }
37718 }
37719 }
37720 return false;
37721 }
37722
37723 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37724
37725 void
37726 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37727 {
37728 rtx func = func_desc;
37729 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37730 rtx toc_load = NULL_RTX;
37731 rtx toc_restore = NULL_RTX;
37732 rtx func_addr;
37733 rtx abi_reg = NULL_RTX;
37734 rtx call[4];
37735 int n_call;
37736 rtx insn;
37737
37738 if (global_tlsarg)
37739 tlsarg = global_tlsarg;
37740
37741 /* Handle longcall attributes. */
37742 if ((INTVAL (cookie) & CALL_LONG) != 0
37743 && GET_CODE (func_desc) == SYMBOL_REF)
37744 func = rs6000_longcall_ref (func_desc, tlsarg);
37745
37746 /* Handle indirect calls. */
37747 if (GET_CODE (func) != SYMBOL_REF
37748 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37749 {
37750 /* Save the TOC into its reserved slot before the call,
37751 and prepare to restore it after the call. */
37752 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37753 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37754 gen_rtvec (1, stack_toc_offset),
37755 UNSPEC_TOCSLOT);
37756 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37757
37758 /* Can we optimize saving the TOC in the prologue or
37759 do we need to do it at every call? */
37760 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37761 cfun->machine->save_toc_in_prologue = true;
37762 else
37763 {
37764 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37765 rtx stack_toc_mem = gen_frame_mem (Pmode,
37766 gen_rtx_PLUS (Pmode, stack_ptr,
37767 stack_toc_offset));
37768 MEM_VOLATILE_P (stack_toc_mem) = 1;
37769 if (HAVE_AS_PLTSEQ
37770 && TARGET_TLS_MARKERS
37771 && DEFAULT_ABI == ABI_ELFv2
37772 && GET_CODE (func_desc) == SYMBOL_REF)
37773 {
37774 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37775 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37776 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37777 }
37778 else
37779 emit_move_insn (stack_toc_mem, toc_reg);
37780 }
37781
37782 if (DEFAULT_ABI == ABI_ELFv2)
37783 {
37784 /* A function pointer in the ELFv2 ABI is just a plain address, but
37785 the ABI requires it to be loaded into r12 before the call. */
37786 func_addr = gen_rtx_REG (Pmode, 12);
37787 if (!rtx_equal_p (func_addr, func))
37788 emit_move_insn (func_addr, func);
37789 abi_reg = func_addr;
37790 /* Indirect calls via CTR are strongly preferred over indirect
37791 calls via LR, so move the address there. Needed to mark
37792 this insn for linker plt sequence editing too. */
37793 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37794 if (HAVE_AS_PLTSEQ
37795 && TARGET_TLS_MARKERS
37796 && GET_CODE (func_desc) == SYMBOL_REF)
37797 {
37798 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37799 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37800 emit_insn (gen_rtx_SET (func_addr, mark_func));
37801 v = gen_rtvec (2, func_addr, func_desc);
37802 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37803 }
37804 else
37805 emit_move_insn (func_addr, abi_reg);
37806 }
37807 else
37808 {
37809 /* A function pointer under AIX is a pointer to a data area whose
37810 first word contains the actual address of the function, whose
37811 second word contains a pointer to its TOC, and whose third word
37812 contains a value to place in the static chain register (r11).
37813 Note that if we load the static chain, our "trampoline" need
37814 not have any executable code. */
37815
37816 /* Load up address of the actual function. */
37817 func = force_reg (Pmode, func);
37818 func_addr = gen_reg_rtx (Pmode);
37819 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37820
37821 /* Indirect calls via CTR are strongly preferred over indirect
37822 calls via LR, so move the address there. */
37823 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37824 emit_move_insn (ctr_reg, func_addr);
37825 func_addr = ctr_reg;
37826
37827 /* Prepare to load the TOC of the called function. Note that the
37828 TOC load must happen immediately before the actual call so
37829 that unwinding the TOC registers works correctly. See the
37830 comment in frob_update_context. */
37831 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37832 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37833 gen_rtx_PLUS (Pmode, func,
37834 func_toc_offset));
37835 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37836
37837 /* If we have a static chain, load it up. But, if the call was
37838 originally direct, the 3rd word has not been written since no
37839 trampoline has been built, so we ought not to load it, lest we
37840 override a static chain value. */
37841 if (!(GET_CODE (func_desc) == SYMBOL_REF
37842 && SYMBOL_REF_FUNCTION_P (func_desc))
37843 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37844 && !chain_already_loaded (get_current_sequence ()->next->last))
37845 {
37846 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37847 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37848 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37849 gen_rtx_PLUS (Pmode, func,
37850 func_sc_offset));
37851 emit_move_insn (sc_reg, func_sc_mem);
37852 abi_reg = sc_reg;
37853 }
37854 }
37855 }
37856 else
37857 {
37858 /* Direct calls use the TOC: for local calls, the callee will
37859 assume the TOC register is set; for non-local calls, the
37860 PLT stub needs the TOC register. */
37861 abi_reg = toc_reg;
37862 func_addr = func;
37863 }
37864
37865 /* Create the call. */
37866 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37867 if (value != NULL_RTX)
37868 call[0] = gen_rtx_SET (value, call[0]);
37869 n_call = 1;
37870
37871 if (toc_load)
37872 call[n_call++] = toc_load;
37873 if (toc_restore)
37874 call[n_call++] = toc_restore;
37875
37876 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37877
37878 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37879 insn = emit_call_insn (insn);
37880
37881 /* Mention all registers defined by the ABI to hold information
37882 as uses in CALL_INSN_FUNCTION_USAGE. */
37883 if (abi_reg)
37884 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37885 }
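/* For reference, the AIX function descriptor read by the indirect path
   above can be pictured as (a conceptual layout, not a GCC type):

       struct fn_desc {
         void *entry;          // actual code address
         void *toc;            // callee's TOC pointer
         void *static_chain;   // loaded into r11 when needed
       };  */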
37886
37887 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37888
37889 void
37890 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37891 {
37892 rtx call[2];
37893 rtx insn;
37894
37895 gcc_assert (INTVAL (cookie) == 0);
37896
37897 if (global_tlsarg)
37898 tlsarg = global_tlsarg;
37899
37900 /* Create the call. */
37901 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37902 if (value != NULL_RTX)
37903 call[0] = gen_rtx_SET (value, call[0]);
37904
37905 call[1] = simple_return_rtx;
37906
37907 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37908 insn = emit_call_insn (insn);
37909
37910 /* Note use of the TOC register. */
37911 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37912 }
37913
37914 /* Expand code to perform a call under the SYSV4 ABI. */
37915
37916 void
37917 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37918 {
37919 rtx func = func_desc;
37920 rtx func_addr;
37921 rtx call[3];
37922 rtx insn;
37923 rtx abi_reg = NULL_RTX;
37924
37925 if (global_tlsarg)
37926 tlsarg = global_tlsarg;
37927
37928 /* Handle longcall attributes. */
37929 if ((INTVAL (cookie) & CALL_LONG) != 0
37930 && GET_CODE (func_desc) == SYMBOL_REF)
37931 {
37932 func = rs6000_longcall_ref (func_desc, tlsarg);
37933 /* If the longcall was implemented using PLT16 relocs, then r11
37934 needs to be valid at the call for lazy linking. */
37935 if (HAVE_AS_PLTSEQ
37936 && TARGET_TLS_MARKERS)
37937 abi_reg = func;
37938 }
37939
37940 /* Handle indirect calls. */
37941 if (GET_CODE (func) != SYMBOL_REF)
37942 {
37943 func = force_reg (Pmode, func);
37944
37945 /* Indirect calls via CTR are strongly preferred over indirect
37946 calls via LR, so move the address there. Needed to mark
37947 this insn for linker plt sequence editing too. */
37948 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37949 if (HAVE_AS_PLTSEQ
37950 && TARGET_TLS_MARKERS
37951 && GET_CODE (func_desc) == SYMBOL_REF)
37952 {
37953 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37954 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37955 emit_insn (gen_rtx_SET (func_addr, mark_func));
37956 v = gen_rtvec (2, func_addr, func_desc);
37957 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37958 }
37959 else
37960 emit_move_insn (func_addr, func);
37961 }
37962 else
37963 func_addr = func;
37964
37965 /* Create the call. */
37966 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37967 if (value != NULL_RTX)
37968 call[0] = gen_rtx_SET (value, call[0]);
37969
37970 call[1] = gen_rtx_USE (VOIDmode, cookie);
37971 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37972
37973 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
37974 insn = emit_call_insn (insn);
37975 if (abi_reg)
37976 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37977 }
37978
37979 /* Expand code to perform a sibling call under the SysV4 ABI. */
37980
37981 void
37982 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37983 {
37984 rtx func = func_desc;
37985 rtx func_addr;
37986 rtx call[3];
37987 rtx insn;
37988 rtx abi_reg = NULL_RTX;
37989
37990 if (global_tlsarg)
37991 tlsarg = global_tlsarg;
37992
37993 /* Handle longcall attributes. */
37994 if ((INTVAL (cookie) & CALL_LONG) != 0
37995 && GET_CODE (func_desc) == SYMBOL_REF)
37996 {
37997 func = rs6000_longcall_ref (func_desc, tlsarg);
37998 /* If the longcall was implemented using PLT16 relocs, then r11
37999 needs to be valid at the call for lazy linking. */
38000 if (HAVE_AS_PLTSEQ
38001 && TARGET_TLS_MARKERS)
38002 abi_reg = func;
38003 }
38004
38005 /* Handle indirect calls. */
38006 if (GET_CODE (func) != SYMBOL_REF)
38007 {
38008 func = force_reg (Pmode, func);
38009
38010 /* Indirect sibcalls must go via CTR. Needed to mark
38011 this insn for linker plt sequence editing too. */
38012 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38013 if (HAVE_AS_PLTSEQ
38014 && TARGET_TLS_MARKERS
38015 && GET_CODE (func_desc) == SYMBOL_REF)
38016 {
38017 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38018 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38019 emit_insn (gen_rtx_SET (func_addr, mark_func));
38020 v = gen_rtvec (2, func_addr, func_desc);
38021 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38022 }
38023 else
38024 emit_move_insn (func_addr, func);
38025 }
38026 else
38027 func_addr = func;
38028
38029 /* Create the call. */
38030 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38031 if (value != NULL_RTX)
38032 call[0] = gen_rtx_SET (value, call[0]);
38033
38034 call[1] = gen_rtx_USE (VOIDmode, cookie);
38035 call[2] = simple_return_rtx;
38036
38037 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38038 insn = emit_call_insn (insn);
38039 if (abi_reg)
38040 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38041 }
38042
38043 #if TARGET_MACHO
38044
38045 /* Expand code to perform a call under the Darwin ABI.
38046 Modulo handling of mlongcall, this is much the same as sysv.
38047 If/when the longcall optimisation is removed, we could drop this
38048 code and use the sysv case (taking care to avoid the tls stuff).
38049
38050 We can use this for sibcalls too, if needed. */
38051
38052 void
38053 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38054 rtx cookie, bool sibcall)
38055 {
38056 rtx func = func_desc;
38057 rtx func_addr;
38058 rtx call[3];
38059 rtx insn;
38060 int cookie_val = INTVAL (cookie);
38061 bool make_island = false;
38062
38063 /* Handle longcall attributes; there are two cases for Darwin:
38064 1) Newer linkers are capable of synthesising any branch islands needed.
38065 2) We need a helper branch island synthesised by the compiler.
38066 The second case has mostly been retired and we don't use it for m64.
38067 In fact, it is only an optimisation; we could just indirect as sysv
38068 does, but we keep it for backwards compatibility for now.
38069 If we're going to use this, then we need to keep the CALL_LONG bit set,
38070 so that we can pick up the special insn form later. */
38071 if ((cookie_val & CALL_LONG) != 0
38072 && GET_CODE (func_desc) == SYMBOL_REF)
38073 {
38074 if (darwin_emit_branch_islands && TARGET_32BIT)
38075 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38076 else
38077 {
38078 /* The linker is capable of doing this, but the user explicitly
38079 asked for -mlongcall, so we'll do the 'normal' version. */
38080 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38081 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38082 }
38083 }
38084
38085 /* Handle indirect calls. */
38086 if (GET_CODE (func) != SYMBOL_REF)
38087 {
38088 func = force_reg (Pmode, func);
38089
38090 /* Indirect calls via CTR are strongly preferred over indirect
38091 calls via LR, and are required for indirect sibcalls, so move
38092 the address there. */
38093 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38094 emit_move_insn (func_addr, func);
38095 }
38096 else
38097 func_addr = func;
38098
38099 /* Create the call. */
38100 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38101 if (value != NULL_RTX)
38102 call[0] = gen_rtx_SET (value, call[0]);
38103
38104 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38105
38106 if (sibcall)
38107 call[2] = simple_return_rtx;
38108 else
38109 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38110
38111 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38112 insn = emit_call_insn (insn);
38113 /* Now that we have the debug info in the insn, we can set up the branch
38114 island if we're using one. */
38115 if (make_island)
38116 {
38117 tree funname = get_identifier (XSTR (func_desc, 0));
38118
38119 if (no_previous_def (funname))
38120 {
38121 rtx label_rtx = gen_label_rtx ();
38122 char *label_buf, temp_buf[256];
38123 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38124 CODE_LABEL_NUMBER (label_rtx));
38125 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38126 tree labelname = get_identifier (label_buf);
38127 add_compiler_branch_island (labelname, funname,
38128 insn_line ((const rtx_insn*)insn));
38129 }
38130 }
38131 }
38132 #endif
38133
38134 void
38135 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38136 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38137 {
38138 #if TARGET_MACHO
38139 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38140 #else
38141 gcc_unreachable ();
38142 #endif
38143 }
38144
38145
38146 void
38147 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38148 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38149 {
38150 #if TARGET_MACHO
38151 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38152 #else
38153 gcc_unreachable ();
38154 #endif
38155 }
38156
38157
38158 /* Return whether we always need to update the saved TOC pointer when we
38159 update the stack pointer. */
38160
38161 static bool
38162 rs6000_save_toc_in_prologue_p (void)
38163 {
38164 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38165 }
38166
38167 #ifdef HAVE_GAS_HIDDEN
38168 # define USE_HIDDEN_LINKONCE 1
38169 #else
38170 # define USE_HIDDEN_LINKONCE 0
38171 #endif
38172
38173 /* Fills in the label name that should be used for a 476 link stack thunk. */
38174
38175 void
38176 get_ppc476_thunk_name (char name[32])
38177 {
38178 gcc_assert (TARGET_LINK_STACK);
38179
38180 if (USE_HIDDEN_LINKONCE)
38181 sprintf (name, "__ppc476.get_thunk");
38182 else
38183 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38184 }
38185
38186 /* This function emits the simple thunk routine that is used to preserve
38187 the link stack on the 476 cpu. */
38188
38189 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38190 static void
38191 rs6000_code_end (void)
38192 {
38193 char name[32];
38194 tree decl;
38195
38196 if (!TARGET_LINK_STACK)
38197 return;
38198
38199 get_ppc476_thunk_name (name);
38200
38201 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38202 build_function_type_list (void_type_node, NULL_TREE));
38203 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38204 NULL_TREE, void_type_node);
38205 TREE_PUBLIC (decl) = 1;
38206 TREE_STATIC (decl) = 1;
38207
38208 #if RS6000_WEAK
38209 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38210 {
38211 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38212 targetm.asm_out.unique_section (decl, 0);
38213 switch_to_section (get_named_section (decl, NULL, 0));
38214 DECL_WEAK (decl) = 1;
38215 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38216 targetm.asm_out.globalize_label (asm_out_file, name);
38217 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38218 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38219 }
38220 else
38221 #endif
38222 {
38223 switch_to_section (text_section);
38224 ASM_OUTPUT_LABEL (asm_out_file, name);
38225 }
38226
38227 DECL_INITIAL (decl) = make_node (BLOCK);
38228 current_function_decl = decl;
38229 allocate_struct_function (decl, false);
38230 init_function_start (decl);
38231 first_function_block_is_cold = false;
38232 /* Make sure unwind info is emitted for the thunk if needed. */
38233 final_start_function (emit_barrier (), asm_out_file, 1);
38234
38235 fputs ("\tblr\n", asm_out_file);
38236
38237 final_end_function ();
38238 init_insn_lengths ();
38239 free_after_compilation (cfun);
38240 set_cfun (NULL);
38241 current_function_decl = NULL;
38242 }
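
/* Editorial note: the thunk body is just "blr".  A caller can then read
   its own address with a balanced pair along the lines of

       bl __ppc476.get_thunk
       mflr 30

   the bl pushes an entry on the 476's link stack and the thunk's blr
   immediately pops it, so the link stack stays consistent.  */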
38243
38244 /* Add r30 to hard reg set if the prologue sets it up and it is not
38245 pic_offset_table_rtx. */
38246
38247 static void
38248 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38249 {
38250 if (!TARGET_SINGLE_PIC_BASE
38251 && TARGET_TOC
38252 && TARGET_MINIMAL_TOC
38253 && !constant_pool_empty_p ())
38254 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38255 if (cfun->machine->split_stack_argp_used)
38256 add_to_hard_reg_set (&set->set, Pmode, 12);
38257
38258 /* Make sure the hard reg set doesn't include r2, which was possibly added
38259 via PIC_OFFSET_TABLE_REGNUM. */
38260 if (TARGET_TOC)
38261 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38262 }
38263
38264 \f
38265 /* Helper function for rs6000_split_logical to emit a logical instruction after
38266 splitting the operation into individual GPR registers.
38267
38268 DEST is the destination register.
38269 OP1 and OP2 are the input source registers.
38270 CODE is the base operation (AND, IOR, XOR, NOT).
38271 MODE is the machine mode.
38272 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38273 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38274 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38275
38276 static void
38277 rs6000_split_logical_inner (rtx dest,
38278 rtx op1,
38279 rtx op2,
38280 enum rtx_code code,
38281 machine_mode mode,
38282 bool complement_final_p,
38283 bool complement_op1_p,
38284 bool complement_op2_p)
38285 {
38286 rtx bool_rtx;
38287
38288 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38289 if (op2 && GET_CODE (op2) == CONST_INT
38290 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38291 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38292 {
38293 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38294 HOST_WIDE_INT value = INTVAL (op2) & mask;
38295
38296 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38297 if (code == AND)
38298 {
38299 if (value == 0)
38300 {
38301 emit_insn (gen_rtx_SET (dest, const0_rtx));
38302 return;
38303 }
38304
38305 else if (value == mask)
38306 {
38307 if (!rtx_equal_p (dest, op1))
38308 emit_insn (gen_rtx_SET (dest, op1));
38309 return;
38310 }
38311 }
38312
38313 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38314 into separate ORI/ORIS or XORI/XORIS instructions. */
38315 else if (code == IOR || code == XOR)
38316 {
38317 if (value == 0)
38318 {
38319 if (!rtx_equal_p (dest, op1))
38320 emit_insn (gen_rtx_SET (dest, op1));
38321 return;
38322 }
38323 }
38324 }
38325
38326 if (code == AND && mode == SImode
38327 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38328 {
38329 emit_insn (gen_andsi3 (dest, op1, op2));
38330 return;
38331 }
38332
38333 if (complement_op1_p)
38334 op1 = gen_rtx_NOT (mode, op1);
38335
38336 if (complement_op2_p)
38337 op2 = gen_rtx_NOT (mode, op2);
38338
38339 /* For canonical RTL, if only one arm is inverted it is the first. */
38340 if (!complement_op1_p && complement_op2_p)
38341 std::swap (op1, op2);
38342
38343 bool_rtx = ((code == NOT)
38344 ? gen_rtx_NOT (mode, op1)
38345 : gen_rtx_fmt_ee (code, mode, op1, op2));
38346
38347 if (complement_final_p)
38348 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38349
38350 emit_insn (gen_rtx_SET (dest, bool_rtx));
38351 }
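
/* Editorial examples of the constant shortcuts above (SImode, no
   complements requested):

     x & 0x00000000  -->  dest = 0       (single set of zero)
     x & 0xffffffff  -->  dest = x       (plain move, or nothing when
                                          dest == x)
     x | 0, x ^ 0    -->  dest = x       (likewise a move)  */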
38352
38353 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38354 operations are split immediately during RTL generation to allow for more
38355 optimizations of the AND/IOR/XOR.
38356
38357 OPERANDS is an array containing the destination and two input operands.
38358 CODE is the base operation (AND, IOR, XOR, NOT).
38359 MODE is the machine mode.
38360 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38361 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38362 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
38363 CLOBBER_REG is either NULL or a scratch register of type CC to allow
38364 formation of the AND instructions. */
38365
38366 static void
38367 rs6000_split_logical_di (rtx operands[3],
38368 enum rtx_code code,
38369 bool complement_final_p,
38370 bool complement_op1_p,
38371 bool complement_op2_p)
38372 {
38373 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C (0xffffffff);
38374 const HOST_WIDE_INT upper_32bits = ~lower_32bits;
38375 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C (0x80000000);
38376 enum hi_lo { hi = 0, lo = 1 };
38377 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38378 size_t i;
38379
38380 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38381 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38382 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38383 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38384
38385 if (code == NOT)
38386 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38387 else
38388 {
38389 if (GET_CODE (operands[2]) != CONST_INT)
38390 {
38391 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38392 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38393 }
38394 else
38395 {
38396 HOST_WIDE_INT value = INTVAL (operands[2]);
38397 HOST_WIDE_INT value_hi_lo[2];
38398
38399 gcc_assert (!complement_final_p);
38400 gcc_assert (!complement_op1_p);
38401 gcc_assert (!complement_op2_p);
38402
38403 value_hi_lo[hi] = value >> 32;
38404 value_hi_lo[lo] = value & lower_32bits;
38405
38406 for (i = 0; i < 2; i++)
38407 {
38408 HOST_WIDE_INT sub_value = value_hi_lo[i];
38409
38410 if (sub_value & sign_bit)
38411 sub_value |= upper_32bits;
38412
38413 op2_hi_lo[i] = GEN_INT (sub_value);
38414
38415 /* If this is an AND instruction, check to see if we need to load
38416 the value in a register. */
38417 if (code == AND && sub_value != -1 && sub_value != 0
38418 && !and_operand (op2_hi_lo[i], SImode))
38419 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38420 }
38421 }
38422 }
38423
38424 for (i = 0; i < 2; i++)
38425 {
38426 /* Split large IOR/XOR operations. */
38427 if ((code == IOR || code == XOR)
38428 && GET_CODE (op2_hi_lo[i]) == CONST_INT
38429 && !complement_final_p
38430 && !complement_op1_p
38431 && !complement_op2_p
38432 && !logical_const_operand (op2_hi_lo[i], SImode))
38433 {
38434 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38435 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C (0xffff0000);
38436 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C (0x0000ffff);
38437 rtx tmp = gen_reg_rtx (SImode);
38438
38439 /* Make sure the constant is sign extended. */
38440 if ((hi_16bits & sign_bit) != 0)
38441 hi_16bits |= upper_32bits;
38442
38443 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38444 code, SImode, false, false, false);
38445
38446 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38447 code, SImode, false, false, false);
38448 }
38449 else
38450 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38451 code, SImode, complement_final_p,
38452 complement_op1_p, complement_op2_p);
38453 }
38454
38455 return;
38456 }
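
/* Editorial example of the IOR/XOR split above: on a 32-bit target,
   one SImode half with the constant 0x12345678 is not a valid single
   ori/xori operand, so it is emitted as two instructions, roughly

       oris tmp,src,0x1234      high 16 bits first
       ori  dest,tmp,0x5678     then the low 16 bits  */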
38457
38458 /* Split the insns that make up boolean operations operating on multiple GPR
38459 registers. The boolean MD patterns ensure that the inputs either are
38460 exactly the same as the output registers, or there is no overlap.
38461
38462 OPERANDS is an array containing the destination and two input operands.
38463 CODE is the base operation (AND, IOR, XOR, NOT).
38464 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38465 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38466 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38467
38468 void
38469 rs6000_split_logical (rtx operands[3],
38470 enum rtx_code code,
38471 bool complement_final_p,
38472 bool complement_op1_p,
38473 bool complement_op2_p)
38474 {
38475 machine_mode mode = GET_MODE (operands[0]);
38476 machine_mode sub_mode;
38477 rtx op0, op1, op2;
38478 int sub_size, regno0, regno1, nregs, i;
38479
38480 /* If this is DImode, use the specialized version that can run before
38481 register allocation. */
38482 if (mode == DImode && !TARGET_POWERPC64)
38483 {
38484 rs6000_split_logical_di (operands, code, complement_final_p,
38485 complement_op1_p, complement_op2_p);
38486 return;
38487 }
38488
38489 op0 = operands[0];
38490 op1 = operands[1];
38491 op2 = (code == NOT) ? NULL_RTX : operands[2];
38492 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38493 sub_size = GET_MODE_SIZE (sub_mode);
38494 regno0 = REGNO (op0);
38495 regno1 = REGNO (op1);
38496
38497 gcc_assert (reload_completed);
38498 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38499 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38500
38501 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38502 gcc_assert (nregs > 1);
38503
38504 if (op2 && REG_P (op2))
38505 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38506
38507 for (i = 0; i < nregs; i++)
38508 {
38509 int offset = i * sub_size;
38510 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38511 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38512 rtx sub_op2 = ((code == NOT)
38513 ? NULL_RTX
38514 : simplify_subreg (sub_mode, op2, mode, offset));
38515
38516 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38517 complement_final_p, complement_op1_p,
38518 complement_op2_p);
38519 }
38520
38521 return;
38522 }
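
/* Editorial sketch (assuming a 64-bit target where TImode spans two
   GPRs): after reload, the loop above decomposes

       t = a & b            (TImode)

   into two independent DImode ANDs, one on each constituent register,
   via simplify_subreg at offsets 0 and 8.  */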
38523
38524 \f
38525 /* Return true if the peephole2 pass can combine an addis instruction with a
38526 load that uses an offset, so that the pair can be fused together on
38527 a power8. */
38528
38529 bool
38530 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38531 rtx addis_value, /* addis value. */
38532 rtx target, /* target register that is loaded. */
38533 rtx mem) /* bottom part of the memory addr. */
38534 {
38535 rtx addr;
38536 rtx base_reg;
38537
38538 /* Validate arguments. */
38539 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38540 return false;
38541
38542 if (!base_reg_operand (target, GET_MODE (target)))
38543 return false;
38544
38545 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38546 return false;
38547
38548 /* Allow sign/zero extension. */
38549 if (GET_CODE (mem) == ZERO_EXTEND
38550 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38551 mem = XEXP (mem, 0);
38552
38553 if (!MEM_P (mem))
38554 return false;
38555
38556 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38557 return false;
38558
38559 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38560 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38561 return false;
38562
38563 /* Validate that the register used to load the high value is either the
38564 register being loaded, or we can safely replace its use.
38565
38566 This function is only called from the peephole2 pass and we assume that
38567 there are 2 instructions in the peephole (addis and load), so we want to
38568 check if the target register was not used in the memory address and the
38569 register to hold the addis result is dead after the peephole. */
38570 if (REGNO (addis_reg) != REGNO (target))
38571 {
38572 if (reg_mentioned_p (target, mem))
38573 return false;
38574
38575 if (!peep2_reg_dead_p (2, addis_reg))
38576 return false;
38577
38578 /* If the target register being loaded is the stack pointer, we must
38579 avoid loading any other value into it, even temporarily. */
38580 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38581 return false;
38582 }
38583
38584 base_reg = XEXP (addr, 0);
38585 return REGNO (addis_reg) == REGNO (base_reg);
38586 }
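
/* Editorial sketch of the two-insn peephole2 window this predicate
   validates (register numbers are illustrative):

       (set (reg 9) (high ...))                         addis 9,2,sym@toc@ha
       (set (reg 10) (mem (lo_sum (reg 9) ...)))        lwz 10,sym@toc@l(9)

   Either both sets target the same register, or reg 9 must match the
   base of the address and be dead after the load, with reg 10 not
   appearing anywhere in the memory address.  */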
38587
38588 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38589 sequence. We adjust the addis register to use the target register. If the
38590 load sign extends, we adjust the code to do a zero extending load followed
38591 by an explicit sign extension, since the fusion only covers zero extending
38592 loads.
38593
38594 The operands are:
38595 operands[0] register set with addis (to be replaced with target)
38596 operands[1] value set via addis
38597 operands[2] target register being loaded
38598 operands[3] D-form memory reference using operands[0]. */
38599
38600 void
38601 expand_fusion_gpr_load (rtx *operands)
38602 {
38603 rtx addis_value = operands[1];
38604 rtx target = operands[2];
38605 rtx orig_mem = operands[3];
38606 rtx new_addr, new_mem, orig_addr, offset;
38607 enum rtx_code plus_or_lo_sum;
38608 machine_mode target_mode = GET_MODE (target);
38609 machine_mode extend_mode = target_mode;
38610 machine_mode ptr_mode = Pmode;
38611 enum rtx_code extend = UNKNOWN;
38612
38613 if (GET_CODE (orig_mem) == ZERO_EXTEND
38614 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38615 {
38616 extend = GET_CODE (orig_mem);
38617 orig_mem = XEXP (orig_mem, 0);
38618 target_mode = GET_MODE (orig_mem);
38619 }
38620
38621 gcc_assert (MEM_P (orig_mem));
38622
38623 orig_addr = XEXP (orig_mem, 0);
38624 plus_or_lo_sum = GET_CODE (orig_addr);
38625 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38626
38627 offset = XEXP (orig_addr, 1);
38628 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38629 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38630
38631 if (extend != UNKNOWN)
38632 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38633
38634 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38635 UNSPEC_FUSION_GPR);
38636 emit_insn (gen_rtx_SET (target, new_mem));
38637
38638 if (extend == SIGN_EXTEND)
38639 {
38640 int sub_off = ((BYTES_BIG_ENDIAN)
38641 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38642 : 0);
38643 rtx sign_reg
38644 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38645
38646 emit_insn (gen_rtx_SET (target,
38647 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38648 }
38649
38650 return;
38651 }
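
/* Editorial sketch: for a sign-extending halfword load, the expansion
   above emits a fused zero-extending load and then an explicit sign
   extension of the loaded subword, roughly

       (set (reg:DI target)
            (unspec:DI [(zero_extend:DI (mem:HI ...))] UNSPEC_FUSION_GPR))
       (set (reg:DI target)
            (sign_extend:DI (subreg:HI (reg:DI target) ...)))  */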
38652
38653 /* Emit the addis instruction that will be part of a fused instruction
38654 sequence. */
38655
38656 void
38657 emit_fusion_addis (rtx target, rtx addis_value)
38658 {
38659 rtx fuse_ops[10];
38660 const char *addis_str = NULL;
38661
38662 /* Emit the addis instruction. */
38663 fuse_ops[0] = target;
38664 if (satisfies_constraint_L (addis_value))
38665 {
38666 fuse_ops[1] = addis_value;
38667 addis_str = "lis %0,%v1";
38668 }
38669
38670 else if (GET_CODE (addis_value) == PLUS)
38671 {
38672 rtx op0 = XEXP (addis_value, 0);
38673 rtx op1 = XEXP (addis_value, 1);
38674
38675 if (REG_P (op0) && CONST_INT_P (op1)
38676 && satisfies_constraint_L (op1))
38677 {
38678 fuse_ops[1] = op0;
38679 fuse_ops[2] = op1;
38680 addis_str = "addis %0,%1,%v2";
38681 }
38682 }
38683
38684 else if (GET_CODE (addis_value) == HIGH)
38685 {
38686 rtx value = XEXP (addis_value, 0);
38687 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38688 {
38689 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38690 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38691 if (TARGET_ELF)
38692 addis_str = "addis %0,%2,%1@toc@ha";
38693
38694 else if (TARGET_XCOFF)
38695 addis_str = "addis %0,%1@u(%2)";
38696
38697 else
38698 gcc_unreachable ();
38699 }
38700
38701 else if (GET_CODE (value) == PLUS)
38702 {
38703 rtx op0 = XEXP (value, 0);
38704 rtx op1 = XEXP (value, 1);
38705
38706 if (GET_CODE (op0) == UNSPEC
38707 && XINT (op0, 1) == UNSPEC_TOCREL
38708 && CONST_INT_P (op1))
38709 {
38710 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38711 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38712 fuse_ops[3] = op1;
38713 if (TARGET_ELF)
38714 addis_str = "addis %0,%2,%1+%3@toc@ha";
38715
38716 else if (TARGET_XCOFF)
38717 addis_str = "addis %0,%1+%3@u(%2)";
38718
38719 else
38720 gcc_unreachable ();
38721 }
38722 }
38723
38724 else if (satisfies_constraint_L (value))
38725 {
38726 fuse_ops[1] = value;
38727 addis_str = "lis %0,%v1";
38728 }
38729
38730 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38731 {
38732 fuse_ops[1] = value;
38733 addis_str = "lis %0,%1@ha";
38734 }
38735 }
38736
38737 if (!addis_str)
38738 fatal_insn ("Could not generate addis value for fusion", addis_value);
38739
38740 output_asm_insn (addis_str, fuse_ops);
38741 }
38742
38743 /* Emit a D-form load or store instruction that is the second instruction
38744 of a fusion sequence. */
38745
38746 static void
38747 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38748 {
38749 rtx fuse_ops[10];
38750 char insn_template[80];
38751
38752 fuse_ops[0] = load_reg;
38753 fuse_ops[1] = addis_reg;
38754
38755 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38756 {
38757 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38758 fuse_ops[2] = offset;
38759 output_asm_insn (insn_template, fuse_ops);
38760 }
38761
38762 else if (GET_CODE (offset) == UNSPEC
38763 && XINT (offset, 1) == UNSPEC_TOCREL)
38764 {
38765 if (TARGET_ELF)
38766 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38767
38768 else if (TARGET_XCOFF)
38769 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38770
38771 else
38772 gcc_unreachable ();
38773
38774 fuse_ops[2] = XVECEXP (offset, 0, 0);
38775 output_asm_insn (insn_template, fuse_ops);
38776 }
38777
38778 else if (GET_CODE (offset) == PLUS
38779 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38780 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38781 && CONST_INT_P (XEXP (offset, 1)))
38782 {
38783 rtx tocrel_unspec = XEXP (offset, 0);
38784 if (TARGET_ELF)
38785 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38786
38787 else if (TARGET_XCOFF)
38788 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38789
38790 else
38791 gcc_unreachable ();
38792
38793 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38794 fuse_ops[3] = XEXP (offset, 1);
38795 output_asm_insn (insn_template, fuse_ops);
38796 }
38797
38798 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38799 {
38800 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38801
38802 fuse_ops[2] = offset;
38803 output_asm_insn (insn_template, fuse_ops);
38804 }
38805
38806 else
38807 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38808
38809 return;
38810 }
38811
38812 /* Given an address, convert it into the addis and load offset parts. Addresses
38813 created during the peephole2 process look like:
38814 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38815 (unspec [(...)] UNSPEC_TOCREL)) */
38816
38817 static void
38818 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38819 {
38820 rtx hi, lo;
38821
38822 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38823 {
38824 hi = XEXP (addr, 0);
38825 lo = XEXP (addr, 1);
38826 }
38827 else
38828 gcc_unreachable ();
38829
38830 *p_hi = hi;
38831 *p_lo = lo;
38832 }
38833
38834 /* Return a string to fuse an addis instruction with a gpr load into the same
38835 register that the addis instruction set up. The address used is the
38836 logical address that was formed during peephole2:
38837 (lo_sum (high) (low-part))
38838
38839 The code is complicated, so we call output_asm_insn directly, and just
38840 return "". */
38841
38842 const char *
38843 emit_fusion_gpr_load (rtx target, rtx mem)
38844 {
38845 rtx addis_value;
38846 rtx addr;
38847 rtx load_offset;
38848 const char *load_str = NULL;
38849 machine_mode mode;
38850
38851 if (GET_CODE (mem) == ZERO_EXTEND)
38852 mem = XEXP (mem, 0);
38853
38854 gcc_assert (REG_P (target) && MEM_P (mem));
38855
38856 addr = XEXP (mem, 0);
38857 fusion_split_address (addr, &addis_value, &load_offset);
38858
38859 /* Now emit the load instruction to the same register. */
38860 mode = GET_MODE (mem);
38861 switch (mode)
38862 {
38863 case E_QImode:
38864 load_str = "lbz";
38865 break;
38866
38867 case E_HImode:
38868 load_str = "lhz";
38869 break;
38870
38871 case E_SImode:
38872 case E_SFmode:
38873 load_str = "lwz";
38874 break;
38875
38876 case E_DImode:
38877 case E_DFmode:
38878 gcc_assert (TARGET_POWERPC64);
38879 load_str = "ld";
38880 break;
38881
38882 default:
38883 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38884 }
38885
38886 /* Emit the addis instruction. */
38887 emit_fusion_addis (target, addis_value);
38888
38889 /* Emit the D-form load instruction. */
38890 emit_fusion_load (target, target, load_offset, load_str);
38891
38892 return "";
38893 }
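
/* Editorial example: for a TOC-relative SImode load on ELF, the two
   calls above produce a fusable pair along the lines of

       addis 9,2,var@toc@ha
       lwz   9,var@toc@l(9)

   (register number and symbol are illustrative).  */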
38894 \f
38895
38896 #ifdef RS6000_GLIBC_ATOMIC_FENV
38897 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38898 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38899 #endif
38900
38901 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38902
38903 static void
38904 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38905 {
38906 if (!TARGET_HARD_FLOAT)
38907 {
38908 #ifdef RS6000_GLIBC_ATOMIC_FENV
38909 if (atomic_hold_decl == NULL_TREE)
38910 {
38911 atomic_hold_decl
38912 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38913 get_identifier ("__atomic_feholdexcept"),
38914 build_function_type_list (void_type_node,
38915 double_ptr_type_node,
38916 NULL_TREE));
38917 TREE_PUBLIC (atomic_hold_decl) = 1;
38918 DECL_EXTERNAL (atomic_hold_decl) = 1;
38919 }
38920
38921 if (atomic_clear_decl == NULL_TREE)
38922 {
38923 atomic_clear_decl
38924 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38925 get_identifier ("__atomic_feclearexcept"),
38926 build_function_type_list (void_type_node,
38927 NULL_TREE));
38928 TREE_PUBLIC (atomic_clear_decl) = 1;
38929 DECL_EXTERNAL (atomic_clear_decl) = 1;
38930 }
38931
38932 tree const_double = build_qualified_type (double_type_node,
38933 TYPE_QUAL_CONST);
38934 tree const_double_ptr = build_pointer_type (const_double);
38935 if (atomic_update_decl == NULL_TREE)
38936 {
38937 atomic_update_decl
38938 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38939 get_identifier ("__atomic_feupdateenv"),
38940 build_function_type_list (void_type_node,
38941 const_double_ptr,
38942 NULL_TREE));
38943 TREE_PUBLIC (atomic_update_decl) = 1;
38944 DECL_EXTERNAL (atomic_update_decl) = 1;
38945 }
38946
38947 tree fenv_var = create_tmp_var_raw (double_type_node);
38948 TREE_ADDRESSABLE (fenv_var) = 1;
38949 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38950
38951 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38952 *clear = build_call_expr (atomic_clear_decl, 0);
38953 *update = build_call_expr (atomic_update_decl, 1,
38954 fold_convert (const_double_ptr, fenv_addr));
38955 #endif
38956 return;
38957 }
38958
38959 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38960 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38961 tree call_mffs = build_call_expr (mffs, 0);
38962
38963 /* Generates the equivalent of feholdexcept (&fenv_var)
38964
38965 fenv_var = __builtin_mffs ();
38966 double fenv_hold;
38967 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
38968 __builtin_mtfsf (0xff, fenv_hold); */
38969
38970 /* Mask to clear everything except for the rounding modes and non-IEEE
38971 arithmetic flag. */
38972 const unsigned HOST_WIDE_INT hold_exception_mask =
38973 HOST_WIDE_INT_C (0xffffffff00000007);
38974
38975 tree fenv_var = create_tmp_var_raw (double_type_node);
38976
38977 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38978
38979 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38980 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38981 build_int_cst (uint64_type_node,
38982 hold_exception_mask));
38983
38984 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38985 fenv_llu_and);
38986
38987 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38988 build_int_cst (unsigned_type_node, 0xff),
38989 fenv_hold_mtfsf);
38990
38991 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38992
38993 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38994
38995 double fenv_clear = __builtin_mffs ();
38996 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38997 __builtin_mtfsf (0xff, fenv_clear); */
38998
38999 /* Mask to clear everything in the lower 32 bits of the FPSCR, including
39000 the exception bits and the rounding modes. */
39001 const unsigned HOST_WIDE_INT clear_exception_mask =
39002 HOST_WIDE_INT_C (0xffffffff00000000);
39003
39004 tree fenv_clear = create_tmp_var_raw (double_type_node);
39005
39006 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39007
39008 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39009 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39010 fenv_clean_llu,
39011 build_int_cst (uint64_type_node,
39012 clear_exception_mask));
39013
39014 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39015 fenv_clear_llu_and);
39016
39017 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39018 build_int_cst (unsigned_type_node, 0xff),
39019 fenv_clear_mtfsf);
39020
39021 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39022
39023 /* Generates the equivalent of feupdateenv (&fenv_var)
39024
39025 double old_fenv = __builtin_mffs ();
39026 double fenv_update;
39027 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39028 (*(uint64_t*)&fenv_var & 0x1ff80fff);
39029 __builtin_mtfsf (0xff, fenv_update); */
39030
39031 const unsigned HOST_WIDE_INT update_exception_mask =
39032 HOST_WIDE_INT_C (0xffffffff1fffff00);
39033 const unsigned HOST_WIDE_INT new_exception_mask =
39034 HOST_WIDE_INT_C (0x1ff80fff);
39035
39036 tree old_fenv = create_tmp_var_raw (double_type_node);
39037 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39038
39039 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39040 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39041 build_int_cst (uint64_type_node,
39042 update_exception_mask));
39043
39044 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39045 build_int_cst (uint64_type_node,
39046 new_exception_mask));
39047
39048 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39049 old_llu_and, new_llu_and);
39050
39051 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39052 new_llu_mask);
39053
39054 tree update_mtfsf = build_call_expr (mtfsf, 2,
39055 build_int_cst (unsigned_type_node, 0xff),
39056 fenv_update_mtfsf);
39057
39058 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39059 }
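
/* Editorial note: the HOLD/CLEAR/UPDATE sequences built above are used
   when expanding C11 atomic compound assignments on floating-point
   types, e.g.

       _Atomic double d;
       d += 1.0;

   HOLD runs before the compare-and-exchange loop, CLEAR inside each
   iteration, and UPDATE once the exchange has succeeded.  */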
39060
39061 void
39062 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39063 {
39064 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39065
39066 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39067 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39068
39069 /* The destination layout of the vmrgew instruction is:
39070 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39071 Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39072 vmrgew instruction will be correct. */
39073 if (BYTES_BIG_ENDIAN)
39074 {
39075 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39076 GEN_INT (0)));
39077 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39078 GEN_INT (3)));
39079 }
39080 else
39081 {
39082 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39083 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39084 }
39085
39086 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39087 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39088
39089 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39090 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39091
39092 if (BYTES_BIG_ENDIAN)
39093 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39094 else
39095 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39096 }
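
/* Editorial sketch of the element flow above for the big-endian case:

       rtx_tmp0 = { src1[0], src2[0] }        xxpermdi ..., 0
       rtx_tmp1 = { src1[1], src2[1] }        xxpermdi ..., 3
       rtx_tmp2/3 = single-float conversions  xvcvdpsp
       dst = { src1[0], src2[0], src1[1], src2[1] }   vmrgew  */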
39097
39098 void
39099 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39100 {
39101 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39102
39103 rtx_tmp0 = gen_reg_rtx (V2DImode);
39104 rtx_tmp1 = gen_reg_rtx (V2DImode);
39105
39106 /* The destination layout of the vmrgew instruction is:
39107 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39108 Setup rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39109 vmrgew instruction will be correct. */
39110 if (BYTES_BIG_ENDIAN)
39111 {
39112 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39113 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39114 }
39115 else
39116 {
39117 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39118 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39119 }
39120
39121 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39122 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39123
39124 if (signed_convert)
39125 {
39126 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39127 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39128 }
39129 else
39130 {
39131 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39132 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39133 }
39134
39135 if (BYTES_BIG_ENDIAN)
39136 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39137 else
39138 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39139 }
39140
39141 void
39142 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39143 rtx src2)
39144 {
39145 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39146
39147 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39148 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39149
39150 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39151 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39152
39153 rtx_tmp2 = gen_reg_rtx (V4SImode);
39154 rtx_tmp3 = gen_reg_rtx (V4SImode);
39155
39156 if (signed_convert)
39157 {
39158 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39159 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39160 }
39161 else
39162 {
39163 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39164 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39165 }
39166
39167 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39168 }
39169
39170 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39171
39172 static bool
39173 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39174 optimization_type opt_type)
39175 {
39176 switch (op)
39177 {
39178 case rsqrt_optab:
39179 return (opt_type == OPTIMIZE_FOR_SPEED
39180 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39181
39182 default:
39183 return true;
39184 }
39185 }
39186
39187 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39188
39189 static HOST_WIDE_INT
39190 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39191 {
39192 if (TREE_CODE (exp) == STRING_CST
39193 && (STRICT_ALIGNMENT || !optimize_size))
39194 return MAX (align, BITS_PER_WORD);
39195 return align;
39196 }
39197
39198 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39199
39200 static HOST_WIDE_INT
39201 rs6000_starting_frame_offset (void)
39202 {
39203 if (FRAME_GROWS_DOWNWARD)
39204 return 0;
39205 return RS6000_STARTING_FRAME_OFFSET;
39206 }
39207 \f
39208
39209 /* Create an alias for a mangled name where we have changed the mangling (in
39210 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39211 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39212
39213 #if TARGET_ELF && RS6000_WEAK
39214 static void
39215 rs6000_globalize_decl_name (FILE * stream, tree decl)
39216 {
39217 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39218
39219 targetm.asm_out.globalize_label (stream, name);
39220
39221 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39222 {
39223 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39224 const char *old_name;
39225
39226 ieee128_mangling_gcc_8_1 = true;
39227 lang_hooks.set_decl_assembler_name (decl);
39228 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39229 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39230 ieee128_mangling_gcc_8_1 = false;
39231
39232 if (strcmp (name, old_name) != 0)
39233 {
39234 fprintf (stream, "\t.weak %s\n", old_name);
39235 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39236 }
39237 }
39238 }
39239 #endif
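
/* Editorial example (mangled names are illustrative): for a function
   foo taking a __float128 argument, the code above emits

       .weak _Z3fooU10__float128
       .set  _Z3fooU10__float128,_Z3foou9__ieee128

   so objects compiled against the GCC 8.1 mangling still link.  */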
39240
39241 \f
39242 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39243 function names from <foo>l to <foo>f128 if the default long double type is
39244 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39245 include file switches the names on systems that support long double as IEEE
39246 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39247 In the future, glibc will export names like __ieee128_sinf128 and we can
39248 switch to using those instead of using sinf128, which pollutes the user's
39249 namespace.
39250
39251 This will switch the names for Fortran math functions as well (which doesn't
39252 use math.h). However, Fortran needs other changes to the compiler and
39253 library before you can switch the real*16 type at compile time.
39254
39255 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39256 only do this if the default is that long double is IBM extended double, and
39257 the user asked for IEEE 128-bit. */
39258
39259 static tree
39260 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39261 {
39262 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39263 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39264 {
39265 size_t len = IDENTIFIER_LENGTH (id);
39266 const char *name = IDENTIFIER_POINTER (id);
39267
39268 if (name[len - 1] == 'l')
39269 {
39270 bool uses_ieee128_p = false;
39271 tree type = TREE_TYPE (decl);
39272 machine_mode ret_mode = TYPE_MODE (type);
39273
39274 /* See if the function returns an IEEE 128-bit floating point type or
39275 complex type. */
39276 if (ret_mode == TFmode || ret_mode == TCmode)
39277 uses_ieee128_p = true;
39278 else
39279 {
39280 function_args_iterator args_iter;
39281 tree arg;
39282
39283 /* See if the function passes an IEEE 128-bit floating point type
39284 or complex type. */
39285 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39286 {
39287 machine_mode arg_mode = TYPE_MODE (arg);
39288 if (arg_mode == TFmode || arg_mode == TCmode)
39289 {
39290 uses_ieee128_p = true;
39291 break;
39292 }
39293 }
39294 }
39295
39296 /* If we passed or returned an IEEE 128-bit floating point type,
39297 change the name. */
39298 if (uses_ieee128_p)
39299 {
39300 char *name2 = (char *) alloca (len + 4);
39301 memcpy (name2, name, len - 1);
39302 strcpy (name2 + len - 1, "f128");
39303 id = get_identifier (name2);
39304 }
39305 }
39306 }
39307
39308 return id;
39309 }
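
/* Editorial example: with -mabi=ieeelongdouble on a system whose
   default long double is IBM extended double, a call to __builtin_sinl
   is renamed by the hook above from "sinl" to "sinf128".  */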
39310
39311 \f
39312 struct gcc_target targetm = TARGET_INITIALIZER;
39313
39314 #include "gt-rs6000.h"